config.h
/* config.h.  Generated from config.in by configure.  */
/* config.in.  Generated from configure.ac by autoheader.  */

/* Check that config.h is #included before system headers
   (this works only for glibc, but that should be enough).  */
#if defined(__GLIBC__) && !defined(__FreeBSD_kernel__) && !defined(__CONFIG_H__)
# error config.h must be #included before system headers
#endif
#define __CONFIG_H__ 1

/* Define if building universal (internal helper macro) */
/* #undef AC_APPLE_UNIVERSAL_BUILD */

/* Define to 1 if translation of program messages to the user's native
   language is requested. */
/* #undef ENABLE_NLS */

/* Define to enable linker plugins */
#define ENABLE_PLUGINS 1

/* Define to do multi-threaded linking */
/* #undef ENABLE_THREADS */

/* Default big endian (true or false) */
#define GOLD_DEFAULT_BIG_ENDIAN false

/* Default machine code */
/* #define GOLD_DEFAULT_MACHINE EM_X86_64 */

/* Default OSABI code */
#define GOLD_DEFAULT_OSABI ELFOSABI_NONE

/* Default size (32 or 64) */
/* #define GOLD_DEFAULT_SIZE 64 */

/* Define to 1 if you have the <byteswap.h> header file. */
/* #undef HAVE_BYTESWAP_H */

/* Define to 1 if you have the `chsize' function. */
/* #undef HAVE_CHSIZE */

/* Define to 1 if you have the declaration of `asprintf', and to 0 if you
   don't. */
#define HAVE_DECL_ASPRINTF 1

/* Define to 1 if you have the declaration of `basename', and to 0 if you
   don't. */
#define HAVE_DECL_BASENAME 0

/* Define to 1 if you have the declaration of `ffs', and to 0 if you don't. */
#define HAVE_DECL_FFS 1

/* Define to 1 if you have the declaration of `memmem', and to 0 if you
   don't. */
#define HAVE_DECL_MEMMEM 1

/* Define to 1 if you have the declaration of `snprintf', and to 0 if you
   don't. */
#define HAVE_DECL_SNPRINTF 1

/* Define to 1 if you have the declaration of `strndup', and to 0 if you
   don't. */
#define HAVE_DECL_STRNDUP 1

/* Define to 1 if you have the declaration of `strverscmp', and to 0 if you
   don't. */
#define HAVE_DECL_STRVERSCMP 0

/* Define to 1 if you have the declaration of `vasprintf', and to 0 if you
   don't. */
#define HAVE_DECL_VASPRINTF 1

/* Define to 1 if you have the declaration of `vsnprintf', and to 0 if you
   don't. */
#define HAVE_DECL_VSNPRINTF 1

/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1

/* Define to 1 if you have the <ext/hash_map> header file. */
#define HAVE_EXT_HASH_MAP 1

/* Define to 1 if you have the <ext/hash_set> header file. */
#define HAVE_EXT_HASH_SET 1

/* Define to 1 if you have the `fallocate' function. */
/* #undef HAVE_FALLOCATE */

/* Define to 1 if you have the `ffsll' function. */
#define HAVE_FFSLL 1

/* Define to 1 if you have the `ftruncate' function. */
#define HAVE_FTRUNCATE 1

/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1

/* Define if your <locale.h> file defines LC_MESSAGES. */
#define HAVE_LC_MESSAGES 1

/* Define to 1 if you have the <locale.h> header file. */
#define HAVE_LOCALE_H 1

/* Define to 1 if you have the `mallinfo' function. */
/* #undef HAVE_MALLINFO */

/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1

/* Define to 1 if you have the `mmap' function. */
#define HAVE_MMAP 1

/* Define to 1 if you have the mremap function with MREMAP_MAYMOVE support */
/* #undef HAVE_MREMAP */

/* Define if compiler supports #pragma omp threadprivate */
#define HAVE_OMP_SUPPORT 1

/* Define to 1 if you have the `posix_fallocate' function. */
/* #undef HAVE_POSIX_FALLOCATE */

/* Define to 1 if you have the `pread' function. */
#define HAVE_PREAD 1

/* Define to 1 if you have the `readv' function. */
#define HAVE_READV 1

/* Define to 1 if you have the `setlocale' function. */
#define HAVE_SETLOCALE 1

/* Define if struct stat has a field st_mtim with timespec for mtime */
#define HAVE_STAT_ST_MTIM 1

/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1

/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1

/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1

/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1

/* Define to 1 if you have the `sysconf' function. */
#define HAVE_SYSCONF 1

/* Define to 1 if you have the <sys/mman.h> header file. */
#define HAVE_SYS_MMAN_H 1

/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1

/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1

/* Define to support 32-bit big-endian targets */
#define HAVE_TARGET_32_BIG 1

/* Define to support 32-bit little-endian targets */
#define HAVE_TARGET_32_LITTLE 1

/* Define to support 64-bit big-endian targets */
#define HAVE_TARGET_64_BIG 1

/* Define to support 64-bit little-endian targets */
#define HAVE_TARGET_64_LITTLE 1

/* Define if attributes work on C++ templates */
#define HAVE_TEMPLATE_ATTRIBUTES 1

/* Define to 1 if you have the `times' function. */
#define HAVE_TIMES 1

/* Define if std::tr1::hash<off_t> is usable */
#define HAVE_TR1_HASH_OFF_T 1

/* Define to 1 if you have the <tr1/unordered_map> header file. */
#define HAVE_TR1_UNORDERED_MAP 1

/* Define if ::std::tr1::unordered_map::rehash is usable */
#define HAVE_TR1_UNORDERED_MAP_REHASH 1

/* Define to 1 if you have the <tr1/unordered_set> header file. */
#define HAVE_TR1_UNORDERED_SET 1

/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1

/* Define to 1 if you have the <windows.h> header file. */
/* #undef HAVE_WINDOWS_H */

/* Define to 1 if you have the <zlib.h> header file. */
#define HAVE_ZLIB_H 1

/* Default library search path */
#define LIB_PATH "::DEFAULT::"

/* Whether configured as a native linker */
#define NATIVE_LINKER 1

/* Name of package */
#define PACKAGE "gold"

/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT ""

/* Define to the full name of this package. */
#define PACKAGE_NAME "gold"

/* Define to the full name and version of this package. */
#define PACKAGE_STRING "gold 0.1"

/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "gold"

/* Define to the home page for this package. */
#define PACKAGE_URL ""

/* Define to the version of this package. */
#define PACKAGE_VERSION "0.1"

/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1

/* System root for target files */
#define TARGET_SYSTEM_ROOT ""

/* Whether the system root can be relocated */
#define TARGET_SYSTEM_ROOT_RELOCATABLE 0

/* Enable extensions on AIX 3, Interix.  */
#ifndef _ALL_SOURCE
# define _ALL_SOURCE 1
#endif
/* Enable GNU extensions on systems that have them.  */
#ifndef _GNU_SOURCE
# define _GNU_SOURCE 1
#endif
/* Enable threading extensions on Solaris.  */
#ifndef _POSIX_PTHREAD_SEMANTICS
# define _POSIX_PTHREAD_SEMANTICS 1
#endif
/* Enable extensions on HP NonStop.  */
#ifndef _TANDEM_SOURCE
# define _TANDEM_SOURCE 1
#endif
/* Enable general extensions on Solaris.  */
#ifndef __EXTENSIONS__
# define __EXTENSIONS__ 1
#endif

/* Version number of package */
#define VERSION "0.1"

/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
   significant byte first (like Motorola and SPARC, unlike Intel). */
#if defined AC_APPLE_UNIVERSAL_BUILD
# if defined __BIG_ENDIAN__
#  define WORDS_BIGENDIAN 1
# endif
#else
# ifndef WORDS_BIGENDIAN
/* #  undef WORDS_BIGENDIAN */
# endif
#endif

/* Define to 1 if on MINIX. */
/* #undef _MINIX */

/* Define to 2 if the system does not provide POSIX.1 features except with
   this defined. */
/* #undef _POSIX_1_SOURCE */

/* Define to 1 if you need to in order for `stat' and other things to work. */
/* #undef _POSIX_SOURCE */
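
The guard at the top exists to catch wrong include order. A minimal sketch of a conforming translation unit (hypothetical, not part of the gold sources): config.h must come first, or the __GLIBC__ check above turns into a compile error once a system header has been seen.

/* example.c (hypothetical): include config.h before any system header. */
#include "config.h"

#include <stdio.h>

int main(void)
{
#ifdef HAVE_MMAP
  printf("mmap available; %s\n", PACKAGE_STRING);
#endif
  return 0;
}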
origin.h
#pragma once

#include <assert.h>
#include <iostream>
// #include <malloc.h>
#include <memory>
#include "omp.h"
#include <stdlib.h>
#include <time.h>
#include <vector>

using namespace std;

// uSx, uSy (strides) and LKF, LOF, LC (layout blocking factors) are expected
// to be defined at compile time, e.g. -DuSx=1 -DuSy=1 -DLKF=8 -DLOF=8 -DLC=8.

void bcxy_kcrs_conv(float *In, float *Ker, float *Out, int Nb, int Nt, int Nx,
                    int Ny, int Ns, int Nw, int Nh) {
  int StrideX = uSx;
  int StrideY = uSy;
  // Collapse only the b/t/x/y loops: the s/w/h loops all accumulate into the
  // same Out element, so including s in the collapse (the original
  // collapse(5)) would race on Out.
#pragma omp parallel for collapse(4)
  for (int b = 0; b < Nb; b++) {
    for (int t = 0; t < Nt; t++) {
      for (int x = 0; x < Nx; x++) {
        for (int y = 0; y < Ny; y++) {
          for (int s = 0; s < Ns; s++) {
            for (int w = 0; w < Nw; w++) {
              for (int h = 0; h < Nh; h++) {
                Out[b * Nt * Nx * Ny + t * Nx * Ny + x * Ny + y] +=
                    In[b * Ns * (StrideX * Nx + Nw - 1) * (StrideY * Ny + Nh - 1) +
                       s * (StrideX * Nx + Nw - 1) * (StrideY * Ny + Nh - 1) +
                       (StrideX * x + w) * (StrideY * Ny + Nh - 1) +
                       (StrideY * y + h)] *
                    Ker[t * Ns * Nw * Nh + s * Nw * Nh + w * Nh + h];
              }
            }
          }
        }
      }
    }
  }
}

void origin_conv(float *In, float *Ker, float *Out, int Nb, int Nt, int Nx,
                 int Ny, int Ns, int Nw, int Nh) {
  int StrideX = uSx;
  int StrideY = uSy;
  // Same reduction structure as above: only the non-reducing loops are
  // collapsed.
#pragma omp parallel for collapse(4)
  for (int b = 0; b < Nb; b++) {
    for (int t = 0; t < Nt; t++) {
      for (int x = 0; x < Nx; x++) {
        for (int y = 0; y < Ny; y++) {
          for (int w = 0; w < Nw; w++) {
            for (int h = 0; h < Nh; h++) {
              for (int s = 0; s < Ns; s++) {
                /* Naive (unblocked) form, kept for reference:
                   Out[b * Nt * Nx * Ny + t * Nx * Ny + x * Ny + y] +=
                       In[b * Ns * (Nx + Nw - 1) * (Ny + Nh - 1) +
                          s * (Nx + Nw - 1) * (Ny + Nh - 1) +
                          (x + w) * (Ny + Nh - 1) + (y + h)] *
                       Ker[t * Ns * Nw * Nh + s * Nw * Nh + w * Nh + h]; */
                int kt1 = t / LKF;
                int kt2 = t % LKF;
                int ot1 = t / LOF;
                int ot2 = t % LOF;
                int s1 = s / LC;
                int s2 = s % LC;
                int Ooffset = b * Nt * Nx * Ny + ot1 * Nx * Ny * LOF +
                              x * Ny * LOF + y * LOF + ot2;
                int Ioffset =
                    b * Ns * (StrideX * Nx + Nw - 1) * (StrideY * Ny + Nh - 1) +
                    s1 * (StrideX * Nx + Nw - 1) * (StrideY * Ny + Nh - 1) * LC +
                    (StrideX * x + w) * (StrideY * Ny + Nh - 1) * LC +
                    (StrideY * y + h) * LC + s2;
                int Koffset = kt1 * Ns * Nw * Nh * LKF + s * Nw * Nh * LKF +
                              w * Nh * LKF + h * LKF + kt2;
                Out[Ooffset] += In[Ioffset] * Ker[Koffset];
                // if (Ooffset == 896) {
                //   cout << "Inoff=" << Ioffset << ", Koff=" << Koffset << endl;
                // }
              }
            }
          }
        }
      }
    }
  }
}

int compare(float *C1, float *C2, int size) {
  cout << "comparing" << endl;
  cout << C1[0] << "," << C2[0] << endl;
  for (int i = 0; i < size; i++) {
    if (C2[i] == 0) {
      cout << "C2 is 0" << endl;
      return -1;
    }
    if (C1[i] != C2[i]) {
      cout << "data at " << i << " C1=" << C1[i] << ", C2=" << C2[i] << endl;
      return -1;
    }
  }
  cout << "fin compare\n";
  return 0;
}
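
A minimal driver sketch for the plain-layout kernel (hypothetical, not part of the header). It assumes the tuning macros are supplied on the command line, e.g. `g++ -fopenmp -DuSx=1 -DuSy=1 -DLKF=8 -DLOF=8 -DLC=8 driver.cpp`. Note that origin_conv expects In/Ker/Out in the LC/LKF/LOF-blocked layouts, so its output cannot be compared elementwise against bcxy_kcrs_conv without a layout transform.

#include "origin.h"

int main() {
  const int Nb = 1, Nt = 16, Nx = 8, Ny = 8, Ns = 16, Nw = 3, Nh = 3;
  const int Ix = uSx * Nx + Nw - 1;   // padded input width
  const int Iy = uSy * Ny + Nh - 1;   // padded input height
  vector<float> In(Nb * Ns * Ix * Iy), Ker(Nt * Ns * Nw * Nh);
  vector<float> Out(Nb * Nt * Nx * Ny, 0.0f);  // must start zeroed: the kernel accumulates
  srand(42);
  for (auto &v : In) v = (float)(rand() % 16);
  for (auto &v : Ker) v = (float)(rand() % 16);
  bcxy_kcrs_conv(In.data(), Ker.data(), Out.data(), Nb, Nt, Nx, Ny, Ns, Nw, Nh);
  double sum = 0.0;
  for (float v : Out) sum += v;
  cout << "checksum=" << sum << endl;
  return 0;
}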
GB_unop__cos_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__cos_fc32_fc32
// op(A') function:  GB_unop_tran__cos_fc32_fc32

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = ccosf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = ccosf (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)            \
{                                    \
    /* aij = Ax [pA] */              \
    GxB_FC32_t aij = Ax [pA] ;       \
    /* Cx [pC] = op (cast (aij)) */  \
    GxB_FC32_t z = aij ;             \
    Cx [pC] = ccosf (z) ;            \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_COS || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__cos_fc32_fc32
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        GxB_FC32_t z = aij ;
        Cx [p] = ccosf (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__cos_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
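
The apply kernel above is just an elementwise complex cosine over a flat array. A self-contained sketch of the same computation, with std::complex<float> standing in for GxB_FC32_t (an assumption; GraphBLAS uses the C99 float complex type):

#include <complex>
#include <cstdint>
#include <cstdio>

void apply_cos (std::complex<float> *Cx, const std::complex<float> *Ax,
                int64_t anz, int nthreads)
{
    // same shape as the generated kernel: one static OpenMP loop over entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        Cx [p] = std::cos (Ax [p]) ;    // cij = ccosf (aij)
    }
}

int main (void)
{
    std::complex<float> a [3] = { {0, 0}, {1, 0}, {0, 1} } ;
    std::complex<float> c [3] ;
    apply_cos (c, a, 3, 2) ;
    for (int i = 0 ; i < 3 ; i++)
    {
        std::printf ("(%g, %g)\n", (double) c [i].real (), (double) c [i].imag ()) ;
    }
    return (0) ;
}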
adam_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <math.h>  // for sqrt in CPU and CUDA

#include <Eigen/Dense>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/threadpool.h"
#include "paddle/fluid/operators/jit/kernels.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/platform/for_range.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/phi/kernels/funcs/algorithm.h"

namespace paddle {
namespace operators {

namespace scatter = paddle::operators::math::scatter;

static inline float GetAttrFromTensor(const framework::Tensor* tensor) {
  const float* tensor_data = tensor->data<float>();
  framework::Tensor cpu_tensor;
  if (platform::is_gpu_place(tensor->place())) {
    paddle::framework::TensorCopySync(*tensor, platform::CPUPlace(),
                                      &cpu_tensor);
    tensor_data = cpu_tensor.data<float>();
  }
  if (platform::is_xpu_place(tensor->place())) {
    paddle::framework::TensorCopySync(*tensor, platform::CPUPlace(),
                                      &cpu_tensor);
    tensor_data = cpu_tensor.data<float>();
  }
  return tensor_data[0];
}

class AdamOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override;
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;
  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const framework::Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override;
};

struct GPUAdam;
struct CPUAdam;

template <typename T, typename Flavour>
class AdamFunctor;

template <typename T>
class AdamFunctor<T, GPUAdam> {
 private:
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

 public:
  AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
              const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2,
              T* mom2_out, const T* lr, const T* grad, const T* param,
              T* param_out)
      : beta1_(beta1), beta2_(beta2), epsilon_(epsilon),
        beta1_pow_(beta1_pow), beta2_pow_(beta2_pow),
        moment1_(mom1), moment1_out_(mom1_out),
        moment2_(mom2), moment2_out_(mom2_out),
        lr_(lr), grad_(grad), param_(param), param_out_(param_out) {}

  inline HOSTDEVICE void operator()(size_t i) const {
    // Merge all memory access together.
    T g = grad_[i];
    T mom1 = moment1_[i];
    T mom2 = moment2_[i];
    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;
    T p = param_[i];

    // Calculation
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

    mom1 = beta1_ * mom1 + (1 - beta1_) * g;
    mom2 = beta2_ * mom2 + (1 - beta2_) * g * g;
    p -= lr * (mom1 / (sqrt(mom2) + epsilon_ * sqrt(1 - beta2_pow)));

    // Write back to global memory
    moment1_out_[i] = mom1;
    moment2_out_[i] = mom2;
    param_out_[i] = p;
  }
};

template <typename T>
class AdamFunctor<T, CPUAdam> {
 private:
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

 public:
  AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
              const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2,
              T* mom2_out, const T* lr, const T* grad, const T* param,
              T* param_out)
      : beta1_(beta1), beta2_(beta2), epsilon_(epsilon),
        beta1_pow_(beta1_pow), beta2_pow_(beta2_pow),
        moment1_(mom1), moment1_out_(mom1_out),
        moment2_(mom2), moment2_out_(mom2_out),
        lr_(lr), grad_(grad), param_(param), param_out_(param_out) {}

  void operator()(size_t numel) const {
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> g{
        grad_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> mom1{
        moment1_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> mom2{
        moment2_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> param{
        param_, static_cast<Eigen::Index>(numel)};

    Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> param_out{
        param_out_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> moment1_out{
        moment1_out_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> moment2_out{
        moment2_out_, static_cast<Eigen::Index>(numel)};

    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;

    // Calculation
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

    moment1_out = beta1_ * mom1 + (1 - beta1_) * g;
    moment2_out = beta2_ * mom2 + (1 - beta2_) * g * g;
    param_out = param - lr * (moment1_out /
                              (moment2_out.sqrt() +
                               epsilon_ * sqrt(1 - beta2_pow)));
  }
};

template <typename T, typename Flavour, typename MT = T>
class SparseAdamFunctor;

template <typename T, typename MT>
class SparseAdamFunctor<T, GPUAdam, MT> {
 private:
  MT beta1_;
  MT beta2_;
  MT epsilon_;

  const MT* beta1_pow_;
  const MT* beta2_pow_;
  const MT* moment1_;
  MT* moment1_out_;
  const MT* moment2_;
  MT* moment2_out_;
  const MT* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;
  const MT* master_param_;
  MT* master_param_out_;

  const int64_t* rows_;
  int64_t row_numel_;
  int64_t row_count_;
  bool lazy_mode_;

 public:
  SparseAdamFunctor(MT beta1, MT beta2, MT epsilon, const MT* beta1_pow,
                    const MT* beta2_pow, const MT* mom1, MT* mom1_out,
                    const MT* mom2, MT* mom2_out, const MT* lr, const T* grad,
                    const T* param, T* param_out, const MT* master_param,
                    MT* master_param_out, const int64_t* rows,
                    int64_t row_numel, int64_t row_count, bool lazy_mode)
      : beta1_(beta1), beta2_(beta2), epsilon_(epsilon),
        beta1_pow_(beta1_pow), beta2_pow_(beta2_pow),
        moment1_(mom1), moment1_out_(mom1_out),
        moment2_(mom2), moment2_out_(mom2_out),
        lr_(lr), grad_(grad), param_(param), param_out_(param_out),
        master_param_(master_param), master_param_out_(master_param_out),
        rows_(rows), row_numel_(row_numel), row_count_(row_count),
        lazy_mode_(lazy_mode) {}

  inline HOSTDEVICE void adam_update(size_t i, MT g) const {
    // The following code is the same as dense
    MT mom1 = moment1_[i];
    MT mom2 = moment2_[i];
    MT lr = *lr_;
    MT beta1_pow = *beta1_pow_;
    MT beta2_pow = *beta2_pow_;
    MT p = master_param_ ? master_param_[i] : static_cast<MT>(param_[i]);

    // Calculation
    lr *= sqrt(static_cast<MT>(1.0) - beta2_pow) /
          (static_cast<MT>(1.0) - beta1_pow);

    mom1 = beta1_ * mom1 + (static_cast<MT>(1.0) - beta1_) * g;
    mom2 = beta2_ * mom2 + (static_cast<MT>(1.0) - beta2_) * g * g;
    p -= lr * (mom1 / (sqrt(mom2) +
                       epsilon_ * sqrt(static_cast<MT>(1.0) - beta2_pow)));

    // Write back to global memory
    moment1_out_[i] = mom1;
    moment2_out_[i] = mom2;
    param_out_[i] = static_cast<T>(p);
    if (master_param_out_) {
      master_param_out_[i] = p;
    }
  }

  inline HOSTDEVICE void operator()(size_t i) const {
    auto row_idx =
        phi::funcs::BinarySearch<int64_t>(rows_, row_count_, i / row_numel_);
    if (lazy_mode_ && row_idx < 0) {
      return;
    } else {
      MT g = row_idx >= 0
                 ? static_cast<MT>(grad_[row_idx * row_numel_ + i % row_numel_])
                 : static_cast<MT>(0);
      adam_update(i, g);
    }
  }
};

template <typename T>
class SparseAdamFunctor<T, CPUAdam, T> {
 private:
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

  const int64_t* rows_;
  int64_t row_numel_;
  int64_t row_count_;

 public:
  SparseAdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
                    const T* beta2_pow, const T* mom1, T* mom1_out,
                    const T* mom2, T* mom2_out, const T* lr, const T* grad,
                    const T* param, T* param_out, const int64_t* rows,
                    int64_t row_numel, int64_t row_count, bool lazy_mode)
      : beta1_(beta1), beta2_(beta2), epsilon_(epsilon),
        beta1_pow_(beta1_pow), beta2_pow_(beta2_pow),
        moment1_(mom1), moment1_out_(mom1_out),
        moment2_(mom2), moment2_out_(mom2_out),
        lr_(lr), grad_(grad), param_(param), param_out_(param_out),
        rows_(rows), row_numel_(row_numel), row_count_(row_count) {}

  inline HOSTDEVICE void adam_update(size_t i, T g) const {
    // The following code is the same as dense
    T mom1 = moment1_[i];
    T mom2 = moment2_[i];
    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;
    T p = param_[i];

    // Calculation
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

    mom1 = beta1_ * mom1 + (1 - beta1_) * g;
    mom2 = beta2_ * mom2 + (1 - beta2_) * g * g;
    p -= lr * (mom1 / (sqrt(mom2) + epsilon_ * sqrt(1 - beta2_pow)));

    // Write back to global memory
    moment1_out_[i] = mom1;
    moment2_out_[i] = mom2;
    param_out_[i] = p;
  }

  inline void operator()(size_t numel) const {
    // lr could be reused
    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

    int64_t row_count = static_cast<int64_t>(numel / row_numel_);
    for (int64_t i = 0, j = 0; i != row_count; ++i) {
      if (i == *(rows_ + j)) {
        for (int64_t k = 0; k != row_numel_; ++k) {
          T g = grad_[j * row_numel_ + k];
          adam_update(i * row_numel_ + k, g);
        }
        ++j;
      } else {
        for (int64_t k = 0; k != row_numel_; ++k) {
          T mom1 = moment1_[i * row_numel_ + k];
          T mom2 = moment2_[i * row_numel_ + k];
          T p = param_[i * row_numel_ + k];

          mom1 = beta1_ * mom1;
          mom2 = beta2_ * mom2;
          p -= lr * (mom1 / (sqrt(mom2) + epsilon_));

          // Write back to global memory
          moment1_out_[i * row_numel_ + k] = mom1;
          moment2_out_[i * row_numel_ + k] = mom2;
          param_out_[i * row_numel_ + k] = p;
        }
      }
    }
  }
};

template <typename DeviceContext, typename T>
class AdamOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    const auto* param_var = ctx.InputVar("Param");
    PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
                      platform::errors::InvalidArgument(
                          "The Var(%s)'s type should be LoDTensor, "
                          "but the received is %s",
                          ctx.InputNames("Param").front(),
                          framework::ToTypeName(param_var->Type())));

    using paddle::framework::LoDTensor;

    int64_t min_row_size_to_use_multithread =
        ctx.Attr<int64_t>("min_row_size_to_use_multithread");
    bool lazy_mode = ctx.Attr<bool>("lazy_mode");
    bool use_global_beta_pow = ctx.Attr<bool>("use_global_beta_pow");
    VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow;

    auto* param = ctx.Input<LoDTensor>("Param");
    auto* grad_var = ctx.InputVar("Grad");
    auto* mom1 = ctx.Input<LoDTensor>("Moment1");
    auto* mom2 = ctx.Input<LoDTensor>("Moment2");
    auto* lr = ctx.Input<LoDTensor>("LearningRate");
    auto* beta1_pow = ctx.Input<LoDTensor>("Beta1Pow");
    auto* beta2_pow = ctx.Input<LoDTensor>("Beta2Pow");

    auto* param_out = ctx.Output<LoDTensor>("ParamOut");
    auto* mom1_out = ctx.Output<LoDTensor>("Moment1Out");
    auto* mom2_out = ctx.Output<LoDTensor>("Moment2Out");
    auto* beta1_pow_out = ctx.Output<LoDTensor>("Beta1PowOut");
    auto* beta2_pow_out = ctx.Output<LoDTensor>("Beta2PowOut");

    bool skip_update = false;
    if (ctx.HasInput("SkipUpdate")) {
      auto* skip_update_tensor = ctx.Input<framework::Tensor>("SkipUpdate");
      PADDLE_ENFORCE_EQ(skip_update_tensor->numel(), 1,
                        platform::errors::InvalidArgument(
                            "Input(SkipUpdate) size must be 1, but get %d",
                            skip_update_tensor->numel()));
      std::vector<bool> skip_update_vec;
      paddle::framework::TensorToVector(*skip_update_tensor,
                                        ctx.device_context(),
                                        &skip_update_vec);
      skip_update = skip_update_vec[0];
    }
    // skip_update=true, just copy input to output, and TensorCopy will call
    // mutable_data
    if (skip_update) {
      VLOG(4) << "Adam skip update";
      framework::TensorCopy(
          *param, ctx.GetPlace(),
          ctx.template device_context<platform::DeviceContext>(), param_out);
      framework::TensorCopy(
          *mom1, ctx.GetPlace(),
          ctx.template device_context<platform::DeviceContext>(), mom1_out);
      framework::TensorCopy(
          *mom2, ctx.GetPlace(),
          ctx.template device_context<platform::DeviceContext>(), mom2_out);
      framework::TensorCopy(
          *beta1_pow, ctx.GetPlace(),
          ctx.template device_context<platform::DeviceContext>(),
          beta1_pow_out);
      framework::TensorCopy(
          *beta2_pow, ctx.GetPlace(),
          ctx.template device_context<platform::DeviceContext>(),
          beta2_pow_out);
      return;
    }

    T beta1 = static_cast<T>(ctx.Attr<float>("beta1"));
    if (ctx.HasInput("Beta1Tensor")) {
      auto* beta1_tensor = ctx.Input<framework::Tensor>("Beta1Tensor");
      PADDLE_ENFORCE_EQ(beta1_tensor->numel(), 1,
                        platform::errors::InvalidArgument(
                            "Input(Beta1Tensor) size must be 1, but get %d",
                            beta1_tensor->numel()));
      beta1 = static_cast<T>(GetAttrFromTensor(beta1_tensor));
    }
    T beta2 = static_cast<T>(ctx.Attr<float>("beta2"));
    if (ctx.HasInput("Beta2Tensor")) {
      auto* beta2_tensor = ctx.Input<framework::Tensor>("Beta2Tensor");
      PADDLE_ENFORCE_EQ(beta2_tensor->numel(), 1,
                        platform::errors::InvalidArgument(
                            "Input(Beta2Tensor) size must be 1, but get %d",
                            beta2_tensor->numel()));
      beta2 = static_cast<T>(GetAttrFromTensor(beta2_tensor));
    }
    T epsilon = static_cast<T>(ctx.Attr<float>("epsilon"));
    if (ctx.HasInput("EpsilonTensor")) {
      auto* epsilon_tensor = ctx.Input<framework::Tensor>("EpsilonTensor");
      PADDLE_ENFORCE_EQ(epsilon_tensor->numel(), 1,
                        platform::errors::InvalidArgument(
                            "Input(EpsilonTensor) size must be 1, but get %d",
                            epsilon_tensor->numel()));
      epsilon = static_cast<T>(GetAttrFromTensor(epsilon_tensor));
    }

    VLOG(3) << "beta1_pow.numel() : " << beta1_pow->numel()
            << " beta2_pow.numel() : " << beta2_pow->numel();
    VLOG(3) << "param.numel(): " << param->numel();

    PADDLE_ENFORCE_EQ(beta1_pow_out->numel(), 1,
                      platform::errors::InvalidArgument(
                          "beta1 pow output size should be 1, but received "
                          "value is:%d.",
                          beta1_pow_out->numel()));
    PADDLE_ENFORCE_EQ(beta2_pow_out->numel(), 1,
                      platform::errors::InvalidArgument(
                          "beta2 pow output size should be 1, but received "
                          "value is:%d.",
                          beta2_pow_out->numel()));

    if (grad_var->IsType<framework::LoDTensor>()) {
      T beta1_p = beta1_pow->data<T>()[0];
      T beta2_p = beta2_pow->data<T>()[0];

      if (!use_global_beta_pow) {
        beta1_pow_out->mutable_data<T>(ctx.GetPlace())[0] =
            beta1 * beta1_pow->data<T>()[0];
        beta2_pow_out->mutable_data<T>(ctx.GetPlace())[0] =
            beta2 * beta2_pow->data<T>()[0];
      }

      auto* grad = ctx.Input<LoDTensor>("Grad");

      T* param_out_ptr = param_out->mutable_data<T>(ctx.GetPlace());
      T* mom1_out_ptr = mom1_out->mutable_data<T>(ctx.GetPlace());
      T* mom2_out_ptr = mom2_out->mutable_data<T>(ctx.GetPlace());

      T learning_rate = lr->data<T>()[0] * (sqrt(1 - beta2_p) / (1 - beta1_p));
      T eps = epsilon * sqrt(1 - beta2_p);

      jit::adam_attr_t attr(beta1, beta2);
      int64_t numel = param->numel();

      const T* param_ptr = param->data<T>();
      const T* mom1_ptr = mom1->data<T>();
      const T* mom2_ptr = mom2->data<T>();
      const T* grad_ptr = grad->data<T>();

      auto adam =
          jit::KernelFuncs<jit::AdamTuple<T>, platform::CPUPlace>::Cache().At(
              attr);

      static constexpr int64_t chunk_size = 512;

#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
      for (int64_t i = 0; i < numel / chunk_size; ++i) {
        const int64_t offset = i * chunk_size;
        adam(beta1, beta2, -learning_rate, eps, chunk_size, grad_ptr + offset,
             mom1_ptr + offset, mom2_ptr + offset, param_ptr + offset,
             mom1_out_ptr + offset, mom2_out_ptr + offset,
             param_out_ptr + offset);
      }

      if (numel % chunk_size != 0) {
        const int64_t offset = (numel / chunk_size) * chunk_size;
        const int64_t tail_numel = numel % chunk_size;
        adam(beta1, beta2, -learning_rate, eps, tail_numel, grad_ptr + offset,
             mom1_ptr + offset, mom2_ptr + offset, param_ptr + offset,
             mom1_out_ptr + offset, mom2_out_ptr + offset,
             param_out_ptr + offset);
      }
    } else if (grad_var->IsType<phi::SelectedRows>()) {
      auto* grad = ctx.Input<phi::SelectedRows>("Grad");
      if (grad->rows().size() == 0) {
        VLOG(3) << "grad row size is 0!!";
        return;
      }

      std::vector<int64_t> cpu_rows(grad->rows().begin(), grad->rows().end());
      bool is_strict_sorted = true;
      for (size_t i = 1; i < cpu_rows.size(); ++i) {
        if (cpu_rows[i - 1] >= cpu_rows[i]) {
          is_strict_sorted = false;
          break;
        }
      }

      phi::SelectedRows tmp_grad_merge;
      const phi::SelectedRows* grad_merge_ptr;
      if (is_strict_sorted) {
        grad_merge_ptr = grad;
      } else {
        // merge duplicated rows if any.
        // The rows of grad_merge have been sorted inside MergeAdd functor
        scatter::MergeAdd<DeviceContext, T> merge_func;
        merge_func(ctx.template device_context<DeviceContext>(), *grad,
                   &tmp_grad_merge, true);
        grad_merge_ptr = &tmp_grad_merge;
      }

      auto& grad_merge = *grad_merge_ptr;
      auto& grad_tensor = grad_merge.value();
      const T* grad_data = grad_tensor.template data<T>();
      const int64_t* rows = grad_merge.rows().Data(ctx.GetPlace());
      auto row_numel = grad_tensor.numel() / grad_merge.rows().size();

      SparseAdamFunctor<T, CPUAdam> functor(
          beta1, beta2, epsilon, beta1_pow->data<T>(), beta2_pow->data<T>(),
          mom1->data<T>(), mom1_out->mutable_data<T>(ctx.GetPlace()),
          mom2->data<T>(), mom2_out->mutable_data<T>(ctx.GetPlace()),
          lr->data<T>(), grad_data, param->data<T>(),
          param_out->mutable_data<T>(ctx.GetPlace()), rows, row_numel,
          grad_merge.rows().size(), lazy_mode);

      // update beta1 and beta2
      if (!use_global_beta_pow) {
        beta1_pow_out->mutable_data<T>(ctx.GetPlace())[0] =
            beta1 * beta1_pow->data<T>()[0];
        beta2_pow_out->mutable_data<T>(ctx.GetPlace())[0] =
            beta2 * beta2_pow->data<T>()[0];
      }

      if (lazy_mode) {
        VLOG(3) << "run cpu lazy mode";
        size_t row_count = grad_merge.rows().size();
        std::vector<int64_t> cpu_rows(grad_merge.rows());
        for (size_t row_index = 0; row_index < row_count; ++row_index) {
          for (size_t offset = 0; offset < row_numel; ++offset) {
            size_t i = cpu_rows[row_index] * row_numel + offset;
            functor.adam_update(i, grad_data[row_index * row_numel + offset]);
          }
        }
      }
#ifndef _WIN32
      else if (FLAGS_inner_op_parallelism > 1 &&  // NOLINT
               min_row_size_to_use_multithread > 0 &&
               param->dims()[0] > min_row_size_to_use_multithread) {
        VLOG(3) << "use multi thread, inner_op_parallelism="
                << FLAGS_inner_op_parallelism
                << " min_row_size_to_use_multithread="
                << min_row_size_to_use_multithread;
        if (FLAGS_inner_op_parallelism > 10) {
          VLOG(1) << "FLAGS_inner_op_parallelism " << FLAGS_inner_op_parallelism
                  << " is too large!";
        }
        auto& grad_rows = grad_merge.rows();
        std::unordered_map<size_t, int> row_id_to_grad_row_offset;
        size_t param_row_count = param->numel() / row_numel;
        if (param_row_count < 1000) {
          VLOG(1) << "param_row_count should be larger than 1000 to use "
                     "multi thread, currently "
                  << param_row_count;
        }
        for (size_t i = 0; i < grad_rows.size(); ++i) {
          row_id_to_grad_row_offset[grad_rows[i]] = i;
        }
        std::vector<std::future<void>> fs;
        int64_t line_in_each_thread =
            param_row_count / FLAGS_inner_op_parallelism + 1;
        for (int i = 0; i < FLAGS_inner_op_parallelism; ++i) {
          int64_t start = i * line_in_each_thread;
          int64_t end = (i + 1) * line_in_each_thread;
          if (start >= static_cast<int64_t>(param_row_count)) {
            break;
          }
          if (end > static_cast<int64_t>(param_row_count)) {
            end = static_cast<int64_t>(param_row_count);
          }
          fs.push_back(framework::Async([&functor, &row_id_to_grad_row_offset,
                                         &grad_data, row_numel, start, end]() {
            for (int64_t row_id = start; row_id < end; ++row_id) {
              auto iter = row_id_to_grad_row_offset.find(row_id);
              if (iter != row_id_to_grad_row_offset.end()) {
                for (size_t row_offset = 0U; row_offset < row_numel;
                     ++row_offset) {
                  functor.adam_update(
                      row_id * row_numel + row_offset,
                      grad_data[iter->second * row_numel + row_offset]);
                }
              } else {
                for (size_t row_offset = 0U; row_offset < row_numel;
                     ++row_offset) {
                  functor.adam_update(row_id * row_numel + row_offset, 0);
                }
              }
            }
          }));
        }
        for (size_t i = 0; i < fs.size(); ++i) fs[i].wait();
      }
#endif        // !_WIN32
      else {  // NOLINT
        functor(param->numel());
      }
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Variable type not supported by adam_op"));
    }
  }
};

}  // namespace operators
}  // namespace paddle
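
For reference, the update every functor above implements, in minimal scalar form (a sketch; the function name and flat-array signature are hypothetical, only the formulas come from the functors):

#include <cmath>
#include <cstddef>

void adam_step(float* p, float* m1, float* m2, const float* g, std::size_t n,
               float lr, float beta1, float beta2, float eps,
               float beta1_pow, float beta2_pow) {
  // Bias correction is folded into the learning rate, exactly as above:
  //   lr' = lr * sqrt(1 - beta2^t) / (1 - beta1^t)
  lr *= std::sqrt(1.0f - beta2_pow) / (1.0f - beta1_pow);
  for (std::size_t i = 0; i < n; ++i) {
    m1[i] = beta1 * m1[i] + (1.0f - beta1) * g[i];
    m2[i] = beta2 * m2[i] + (1.0f - beta2) * g[i] * g[i];
    // eps is scaled by sqrt(1 - beta2^t) so it pairs with the uncorrected m2.
    p[i] -= lr * (m1[i] / (std::sqrt(m2[i]) + eps * std::sqrt(1.0f - beta2_pow)));
  }
}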
3d7pt_var.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil with variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y, storing the result in RESULT.
 * Return 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait. tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Default to a zero-sized problem if sizes are not given on the command
     line, rather than reading uninitialized values. */
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
  if (argc > 3) {
    Nx = atoi(argv[1]) + 2;
    Ny = atoi(argv[2]) + 2;
    Nz = atoi(argv[3]) + 2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***) * 2);
  for (m = 0; m < 2; m++) {
    A[m] = (double ***) malloc(sizeof(double**) * Nz);
    for (i = 0; i < Nz; i++) {
      A[m][i] = (double**) malloc(sizeof(double*) * Ny);
      for (j = 0; j < Ny; j++) {
        A[m][i][j] = (double*) malloc(sizeof(double) * Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***) * 7);
  for (m = 0; m < 7; m++) {
    coef[m] = (double ***) malloc(sizeof(double**) * Nz);
    for (i = 0; i < Nz; i++) {
      coef[m][i] = (double**) malloc(sizeof(double*) * Ny);
      for (j = 0; j < Ny; j++) {
        coef[m][i][j] = (double*) malloc(sizeof(double) * Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int) * 5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 8;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m = 0; m < 7; m++) {
    for (i = 1; i < Nz; i++) {
      for (j = 1; j < Ny; j++) {
        for (k = 1; k < Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,8);t1++) {
    lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
    ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(16*t2-Nz-4,8)),t1);
           t3<=min(min(min(floord(Nt+Ny-4,8),floord(8*t1+Ny+13,8)),
                       floord(16*t2+Ny+12,8)),
                   floord(16*t1-16*t2+Nz+Ny+11,8));t3++) {
        for (t4=max(max(max(0,ceild(t1-127,128)),ceild(16*t2-Nz-1020,1024)),
                    ceild(8*t3-Ny-1020,1024));
             t4<=min(min(min(min(floord(Nt+Nx-4,1024),floord(8*t1+Nx+13,1024)),
                             floord(16*t2+Nx+12,1024)),
                         floord(8*t3+Nx+4,1024)),
                     floord(16*t1-16*t2+Nz+Nx+11,1024));t4++) {
          for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),
                          8*t3-Ny+2),1024*t4-Nx+2);
               t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),8*t3+6),
                           1024*t4+1022),16*t1-16*t2+Nz+13);t5++) {
            for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);
                 t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
                lbv=max(1024*t4,t5+1);
                ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[(t5 + 1) % 2][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] =
                      (coef[0][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] *
                       A[t5 % 2][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)])
                    + (coef[1][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] *
                       A[t5 % 2][(-t5 + t6) - 1][(-t5 + t7)][(-t5 + t8)])
                    + (coef[2][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] *
                       A[t5 % 2][(-t5 + t6)][(-t5 + t7) - 1][(-t5 + t8)])
                    + (coef[3][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] *
                       A[t5 % 2][(-t5 + t6)][(-t5 + t7)][(-t5 + t8) - 1])
                    + (coef[4][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] *
                       A[t5 % 2][(-t5 + t6) + 1][(-t5 + t7)][(-t5 + t8)])
                    + (coef[5][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] *
                       A[t5 % 2][(-t5 + t6)][(-t5 + t7) + 1][(-t5 + t8)])
                    + (coef[6][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] *
                       A[t5 % 2][(-t5 + t6)][(-t5 + t7)][(-t5 + t8) + 1]);
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for (m = 0; m < 7; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }

  return 0;
}
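
For reference, the same update in naive, untiled form (a sketch, not part of the benchmark); it is what the tiled CLooG nest above computes, and can serve as a validation oracle:

/* Naive order-1 3D 7-point variable-coefficient stencil over the interior
 * points, double-buffered on the leading A dimension. */
void stencil_3d7pt_ref(double ****A, double ****coef,
                       int Nt, int Nz, int Ny, int Nx)
{
  int t, i, j, k;
  for (t = 0; t < Nt - 1; t++)
    for (i = 1; i < Nz - 1; i++)
      for (j = 1; j < Ny - 1; j++)
        for (k = 1; k < Nx - 1; k++)
          A[(t + 1) % 2][i][j][k] =
              coef[0][i][j][k] * A[t % 2][i][j][k]
            + coef[1][i][j][k] * A[t % 2][i - 1][j][k]
            + coef[2][i][j][k] * A[t % 2][i][j - 1][k]
            + coef[3][i][j][k] * A[t % 2][i][j][k - 1]
            + coef[4][i][j][k] * A[t % 2][i + 1][j][k]
            + coef[5][i][j][k] * A[t % 2][i][j + 1][k]
            + coef[6][i][j][k] * A[t % 2][i][j][k + 1];
}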
convolution_1x1_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

static void conv1x1s1_sgemm_transform_kernel_pack4_sse(const Mat& kernel, Mat& kernel_pack4, int inch, int outch)
{
    // interleave
    // src = inch-outch
    // dst = 4b-4a-inch/4a-outch/4b
    kernel_pack4.create(1, inch / 4, outch / 4, (size_t)4u * 16, 16);

    int q = 0;
    for (; q + 3 < outch; q += 4)
    {
        const float* k0 = (const float*)kernel + (q + 0) * inch;
        const float* k1 = (const float*)kernel + (q + 1) * inch;
        const float* k2 = (const float*)kernel + (q + 2) * inch;
        const float* k3 = (const float*)kernel + (q + 3) * inch;

        float* g0 = kernel_pack4.channel(q / 4);

        for (int p = 0; p + 3 < inch; p += 4)
        {
            g0[0] = k0[0];
            g0[1] = k1[0];
            g0[2] = k2[0];
            g0[3] = k3[0];
            g0[4] = k0[1];
            g0[5] = k1[1];
            g0[6] = k2[1];
            g0[7] = k3[1];
            g0[8] = k0[2];
            g0[9] = k1[2];
            g0[10] = k2[2];
            g0[11] = k3[2];
            g0[12] = k0[3];
            g0[13] = k1[3];
            g0[14] = k2[3];
            g0[15] = k3[3];

            k0 += 4;
            k1 += 4;
            k2 += 4;
            k3 += 4;
            g0 += 16;
        }
    }
}

static void conv1x1s1_sgemm_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outch = top_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    const int size = w * h;

    const float* bias = _bias;

    // interleave
    Mat tmp(4, inch, size / 4 + (size % 4) / 2 + size % 2, elemsize, elempack, opt.workspace_allocator);
    {
        int nn_size;
        int remain_size_start;

        remain_size_start = 0;
        nn_size = (size - remain_size_start) >> 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 4;

            const float* img0 = bottom_blob.channel(0);
            img0 += i * 4;

            float* tmpptr = tmp.channel(i / 4);

            for (int q = 0; q < inch; q++)
            {
                __m128 _r0 = _mm_loadu_ps(img0);
                __m128 _r1 = _mm_loadu_ps(img0 + 4);
                __m128 _r2 = _mm_loadu_ps(img0 + 8);
                __m128 _r3 = _mm_loadu_ps(img0 + 12);
                _mm_storeu_ps(tmpptr, _r0);
                _mm_storeu_ps(tmpptr + 4, _r1);
                _mm_storeu_ps(tmpptr + 8, _r2);
                _mm_storeu_ps(tmpptr + 12, _r3);

                tmpptr += 16;
                img0 += bottom_blob.cstep * 4;
            }
        }

        remain_size_start += nn_size << 2;
        nn_size = (size - remain_size_start) >> 1;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 2;

            const float* img0 = bottom_blob.channel(0);
            img0 += i * 4;

            float* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);

            for (int q = 0; q < inch; q++)
            {
                __m128 _r0 = _mm_loadu_ps(img0);
                __m128 _r1 = _mm_loadu_ps(img0 + 4);
                _mm_storeu_ps(tmpptr, _r0);
                _mm_storeu_ps(tmpptr + 4, _r1);

                tmpptr += 8;
                img0 += bottom_blob.cstep * 4;
            }
        }

        remain_size_start += nn_size << 1;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            const float* img0 = bottom_blob.channel(0);
            img0 += i * 4;

            float* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);

            for (int q = 0; q < inch; q++)
            {
                __m128 _r0 = _mm_loadu_ps(img0);
                _mm_storeu_ps(tmpptr, _r0);

                tmpptr += 4;
                img0 += bottom_blob.cstep * 4;
            }
        }
    }

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr0 = top_blob.channel(p);

        const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
        const float* biasptr = bias ? bias + p * 4 : zeros;

        int i = 0;
        for (; i + 3 < size; i += 4)
        {
            float* tmpptr = tmp.channel(i / 4);
            const float* kptr0 = (const float*)kernel.channel(p);

            __m128 _sum0 = _mm_loadu_ps(biasptr);
            __m128 _sum1 = _mm_loadu_ps(biasptr);
            __m128 _sum2 = _mm_loadu_ps(biasptr);
            __m128 _sum3 = _mm_loadu_ps(biasptr);

            for (int q = 0; q < inch; q++)
            {
                __m128 _val00 = _mm_load1_ps(tmpptr);
                __m128 _val01 = _mm_load1_ps(tmpptr + 1);
                __m128 _val02 = _mm_load1_ps(tmpptr + 2);
                __m128 _val03 = _mm_load1_ps(tmpptr + 3);
                __m128 _val10 = _mm_load1_ps(tmpptr + 4);
                __m128 _val11 = _mm_load1_ps(tmpptr + 5);
                __m128 _val12 = _mm_load1_ps(tmpptr + 6);
                __m128 _val13 = _mm_load1_ps(tmpptr + 7);
                __m128 _val20 = _mm_load1_ps(tmpptr + 8);
                __m128 _val21 = _mm_load1_ps(tmpptr + 9);
                __m128 _val22 = _mm_load1_ps(tmpptr + 10);
                __m128 _val23 = _mm_load1_ps(tmpptr + 11);
                __m128 _val30 = _mm_load1_ps(tmpptr + 12);
                __m128 _val31 = _mm_load1_ps(tmpptr + 13);
                __m128 _val32 = _mm_load1_ps(tmpptr + 14);
                __m128 _val33 = _mm_load1_ps(tmpptr + 15);

                __m128 _w0 = _mm_load_ps(kptr0);
                __m128 _w1 = _mm_load_ps(kptr0 + 4);
                __m128 _w2 = _mm_load_ps(kptr0 + 8);
                __m128 _w3 = _mm_load_ps(kptr0 + 12);

                _sum0 = _mm_comp_fmadd_ps(_w0, _val00, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_w1, _val01, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_w2, _val02, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_w3, _val03, _sum0);
                _sum1 = _mm_comp_fmadd_ps(_w0, _val10, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_w1, _val11, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_w2, _val12, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_w3, _val13, _sum1);
                _sum2 = _mm_comp_fmadd_ps(_w0, _val20, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_w1, _val21, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_w2, _val22, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_w3, _val23, _sum2);
                _sum3 = _mm_comp_fmadd_ps(_w0, _val30, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_w1, _val31, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_w2, _val32, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_w3, _val33, _sum3);

                tmpptr += 16;
                kptr0 += 16;
            }

            _mm_store_ps(outptr0, _sum0);
            _mm_store_ps(outptr0 + 4, _sum1);
            _mm_store_ps(outptr0 + 8, _sum2);
            _mm_store_ps(outptr0 + 12, _sum3);
            outptr0 += 16;
        }
        for (; i + 1 < size; i += 2)
        {
            float* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
            const float* kptr0 = (const float*)kernel.channel(p);

            __m128 _sum0 = _mm_loadu_ps(biasptr);
            __m128 _sum1 = _mm_loadu_ps(biasptr);

            for (int q = 0; q < inch; q++)
            {
                __m128 _val00 = _mm_load1_ps(tmpptr);
                __m128 _val01 = _mm_load1_ps(tmpptr + 1);
                __m128 _val02 = _mm_load1_ps(tmpptr + 2);
                __m128 _val03 = _mm_load1_ps(tmpptr + 3);
                __m128 _val10 = _mm_load1_ps(tmpptr + 4);
                __m128 _val11 = _mm_load1_ps(tmpptr + 5);
                __m128 _val12 = _mm_load1_ps(tmpptr + 6);
                __m128 _val13 = _mm_load1_ps(tmpptr + 7);

                __m128 _w0 = _mm_load_ps(kptr0);
                __m128 _w1 = _mm_load_ps(kptr0 + 4);
                __m128 _w2 = _mm_load_ps(kptr0 + 8);
                __m128 _w3 = _mm_load_ps(kptr0 + 12);

                _sum0 = _mm_comp_fmadd_ps(_w0, _val00, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_w1, _val01, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_w2, _val02, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_w3, _val03, _sum0);
                _sum1 = _mm_comp_fmadd_ps(_w0, _val10, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_w1, _val11, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_w2, _val12, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_w3, _val13, _sum1);

                tmpptr += 8;
                kptr0 += 16;
            }

            _mm_store_ps(outptr0, _sum0);
            _mm_store_ps(outptr0 + 4, _sum1);
            outptr0 += 8;
        }
        for (; i < size; i++)
        {
            float* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
            const float* kptr0 = (const float*)kernel.channel(p);

            __m128 _sum = _mm_loadu_ps(biasptr);

            for (int q = 0; q < inch; q++)
            {
                __m128 _val0 = _mm_load1_ps(tmpptr);
                __m128 _val1 = _mm_load1_ps(tmpptr + 1);
                __m128 _val2 = _mm_load1_ps(tmpptr + 2);
                __m128 _val3 = _mm_load1_ps(tmpptr + 3);

                __m128 _w0 = _mm_load_ps(kptr0);
                __m128 _w1 = _mm_load_ps(kptr0 + 4);
                __m128 _w2 = _mm_load_ps(kptr0 + 8);
                __m128 _w3 = _mm_load_ps(kptr0 + 12);

                _sum = _mm_comp_fmadd_ps(_w0, _val0, _sum);
                _sum = _mm_comp_fmadd_ps(_w1, _val1, _sum);
                _sum = _mm_comp_fmadd_ps(_w2, _val2, _sum);
                _sum = _mm_comp_fmadd_ps(_w3, _val3, _sum);

                tmpptr += 4;
                kptr0 += 16;
            }

            _mm_store_ps(outptr0, _sum);
            outptr0 += 4;
        }
    }

    //     // NOTE sgemm
    //     for (; p<outch; p++)
    //     {
    //         Mat out0 = top_blob.channel(p);
    //
    //         const float bias0 = bias ? bias[p] : 0.f;
    //
    //         float* outptr0 = out0;
    //
    //         for (int i=0; i<size; i++)
    //         {
    //             float sum = bias0;
    //
    //             const float* kptr = _kernel.channel(p);
    //
    //             for (int q=0; q<inch; q++)
    //             {
    //                 const float* img0 = bottom_blob.channel(q);
    //
    //                 sum += img0[i] * kptr[0];
    //                 kptr ++;
    //             }
    //
    //             outptr0[i] = sum;
    //         }
    //     }
}

static void conv1x1s2_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int tailstep = (w - 2 * outw + w) * 4;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const float* r0 = bottom_blob.channel(p);
        float* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                __m128 _v = _mm_load_ps(r0);
                _mm_store_ps(outptr, _v);

                r0 += 8;
                outptr += 4;
            }

            r0 += tailstep;
        }
    }

    conv1x1s1_sgemm_pack4_sse(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
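
What the 4x4 kernel interleave produces, restated in plain scalar code (a sketch; it assumes the same [outch/4][inch/4][16] tile layout that the SSE inner loop reads with four broadcast-FMA steps per input group):

static void pack_kernel_4x4_ref(const float* kernel, float* packed, int inch, int outch)
{
    for (int q = 0; q + 3 < outch; q += 4)
    {
        for (int p = 0; p + 3 < inch; p += 4)
        {
            float* g = packed + (q / 4) * (inch / 4) * 16 + (p / 4) * 16;
            for (int a = 0; a < 4; a++)     // input channel within the tile
            {
                for (int b = 0; b < 4; b++) // output channel within the tile
                {
                    g[a * 4 + b] = kernel[(q + b) * inch + (p + a)];
                }
            }
        }
    }
}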
atomic_read_codegen.c
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s // RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s // expected-no-diagnostics // REQUIRES: x86-registered-target #ifndef HEADER #define HEADER _Bool bv, bx; char cv, cx; unsigned char ucv, ucx; short sv, sx; unsigned short usv, usx; int iv, ix; unsigned int uiv, uix; long lv, lx; unsigned long ulv, ulx; long long llv, llx; unsigned long long ullv, ullx; float fv, fx; double dv, dx; long double ldv, ldx; _Complex int civ, cix; _Complex float cfv, cfx; _Complex double cdv, cdx; typedef int int4 __attribute__((__vector_size__(16))); int4 int4x; struct BitFields { int : 32; int a : 31; } bfx; struct BitFields_packed { int : 32; int a : 31; } __attribute__ ((__packed__)) bfx_packed; struct BitFields2 { int : 31; int a : 1; } bfx2; struct BitFields2_packed { int : 31; int a : 1; } __attribute__ ((__packed__)) bfx2_packed; struct BitFields3 { int : 11; int a : 14; } bfx3; struct BitFields3_packed { int : 11; int a : 14; } __attribute__ ((__packed__)) bfx3_packed; struct BitFields4 { short : 16; int a: 1; long b : 7; } bfx4; struct BitFields4_packed { short : 16; int a: 1; long b : 7; } __attribute__ ((__packed__)) bfx4_packed; typedef float float2 __attribute__((ext_vector_type(2))); float2 float2x; register int rix __asm__("0"); int main() { // CHECK: load atomic i8, i8* // CHECK: store i8 #pragma omp atomic read bv = bx; // CHECK: load atomic i8, i8* // CHECK: store i8 #pragma omp atomic read cv = cx; // CHECK: load atomic i8, i8* // CHECK: store i8 #pragma omp atomic read ucv = ucx; // CHECK: load atomic i16, i16* // CHECK: store i16 #pragma omp atomic read sv = sx; // CHECK: load atomic i16, i16* // CHECK: store i16 #pragma omp atomic read usv = usx; // CHECK: load atomic i32, i32* // CHECK: store i32 #pragma omp atomic read iv = ix; // CHECK: load atomic i32, i32* // CHECK: store i32 #pragma omp atomic read uiv = uix; // CHECK: load atomic i64, i64* // CHECK: store i64 #pragma omp atomic read lv = lx; // CHECK: load atomic i64, i64* // CHECK: store i64 #pragma omp atomic read ulv = ulx; // CHECK: load atomic i64, i64* // CHECK: store i64 #pragma omp atomic read llv = llx; // CHECK: load atomic i64, i64* // CHECK: store i64 #pragma omp atomic read ullv = ullx; // CHECK: load atomic i32, i32* bitcast (float* // CHECK: bitcast i32 {{.*}} to float // CHECK: store float #pragma omp atomic read fv = fx; // CHECK: load atomic i64, i64* bitcast (double* // CHECK: bitcast i64 {{.*}} to double // CHECK: store double #pragma omp atomic read dv = dx; // CHECK: [[LD:%.+]] = load atomic i128, i128* bitcast (x86_fp80* // CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i128* // CHECK: store i128 [[LD]], i128* [[BITCAST]] // CHECK: [[LD:%.+]] = load x86_fp80, x86_fp80* [[LDTEMP]] // CHECK: store x86_fp80 [[LD]] #pragma omp atomic read ldv = ldx; // CHECK: call{{.*}} void @__atomic_load(i64 8, // CHECK: store i32 // CHECK: store i32 #pragma omp atomic read civ = cix; // CHECK: call{{.*}} void @__atomic_load(i64 8, // CHECK: store float // CHECK: store float #pragma omp atomic read cfv = cfx; // CHECK: call{{.*}} void @__atomic_load(i64 16, // CHECK: call{{.*}} @__kmpc_flush( // CHECK: store double // CHECK: store double #pragma omp atomic seq_cst read cdv = cdx; // CHECK: load atomic i64, i64* // CHECK: store i8 #pragma omp atomic read bv = 
ulx; // CHECK: load atomic i8, i8* // CHECK: store i8 #pragma omp atomic read cv = bx; // CHECK: load atomic i8, i8* // CHECK: call{{.*}} @__kmpc_flush( // CHECK: store i8 #pragma omp atomic read, seq_cst ucv = cx; // CHECK: load atomic i64, i64* // CHECK: store i16 #pragma omp atomic read sv = ulx; // CHECK: load atomic i64, i64* // CHECK: store i16 #pragma omp atomic read usv = lx; // CHECK: load atomic i32, i32* // CHECK: call{{.*}} @__kmpc_flush( // CHECK: store i32 #pragma omp atomic seq_cst, read iv = uix; // CHECK: load atomic i32, i32* // CHECK: store i32 #pragma omp atomic read uiv = ix; // CHECK: call{{.*}} void @__atomic_load(i64 8, // CHECK: store i64 #pragma omp atomic read lv = cix; // CHECK: load atomic i32, i32* // CHECK: store i64 #pragma omp atomic read ulv = fx; // CHECK: load atomic i64, i64* // CHECK: store i64 #pragma omp atomic read llv = dx; // CHECK: load atomic i128, i128* // CHECK: store i64 #pragma omp atomic read ullv = ldx; // CHECK: call{{.*}} void @__atomic_load(i64 8, // CHECK: store float #pragma omp atomic read fv = cix; // CHECK: load atomic i16, i16* // CHECK: store double #pragma omp atomic read dv = sx; // CHECK: load atomic i8, i8* // CHECK: store x86_fp80 #pragma omp atomic read ldv = bx; // CHECK: load atomic i8, i8* // CHECK: store i32 // CHECK: store i32 #pragma omp atomic read civ = bx; // CHECK: load atomic i16, i16* // CHECK: store float // CHECK: store float #pragma omp atomic read cfv = usx; // CHECK: load atomic i64, i64* // CHECK: store double // CHECK: store double #pragma omp atomic read cdv = llx; // CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* @{{.+}} to i128*) monotonic // CHECK: [[I128PTR:%.+]] = bitcast <4 x i32>* [[LDTEMP:%.+]] to i128* // CHECK: store i128 [[I128VAL]], i128* [[I128PTR]] // CHECK: [[LD:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]] // CHECK: extractelement <4 x i32> [[LD]] // CHECK: store i8 #pragma omp atomic read bv = int4x[0]; // CHECK: [[LD:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%{{.+}}* @{{.+}} to i8*), i64 4) to i32*) monotonic // CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]] // CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]] // CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1 // CHECK: ashr i32 [[SHL]], 1 // CHECK: store x86_fp80 #pragma omp atomic read ldv = bfx.a; // CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8* // CHECK: call void @__atomic_load(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @bfx_packed to i8*), i64 4), i8* [[LDTEMP_VOID_PTR]], i32 0) // CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]] // CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1 // CHECK: ashr i32 [[SHL]], 1 // CHECK: store x86_fp80 #pragma omp atomic read ldv = bfx_packed.a; // CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @bfx2, i32 0, i32 0) monotonic // CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]] // CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]] // CHECK: ashr i32 [[LD]], 31 // CHECK: store x86_fp80 #pragma omp atomic read ldv = bfx2.a; // CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @bfx2_packed to i8*), i64 3) monotonic // CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]] // CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]] // CHECK: ashr i8 [[LD]], 7 // CHECK: store x86_fp80 #pragma omp atomic read ldv = bfx2_packed.a; // CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @bfx3, i32 0, i32 0) 
monotonic // CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]] // CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]] // CHECK: [[SHL:%.+]] = shl i32 [[LD]], 7 // CHECK: ashr i32 [[SHL]], 18 // CHECK: store x86_fp80 #pragma omp atomic read ldv = bfx3.a; // CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i24* [[LDTEMP:%.+]] to i8* // CHECK: call void @__atomic_load(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @bfx3_packed to i8*), i64 1), i8* [[LDTEMP_VOID_PTR]], i32 0) // CHECK: [[LD:%.+]] = load i24, i24* [[LDTEMP]] // CHECK: [[SHL:%.+]] = shl i24 [[LD]], 7 // CHECK: [[ASHR:%.+]] = ashr i24 [[SHL]], 10 // CHECK: sext i24 [[ASHR]] to i32 // CHECK: store x86_fp80 #pragma omp atomic read ldv = bfx3_packed.a; // CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic // CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]] // CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]] // CHECK: [[SHL:%.+]] = shl i64 [[LD]], 47 // CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 63 // CHECK: trunc i64 [[ASHR]] to i32 // CHECK: store x86_fp80 #pragma omp atomic read ldv = bfx4.a; // CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) monotonic // CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]] // CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]] // CHECK: [[SHL:%.+]] = shl i8 [[LD]], 7 // CHECK: [[ASHR:%.+]] = ashr i8 [[SHL]], 7 // CHECK: sext i8 [[ASHR]] to i32 // CHECK: store x86_fp80 #pragma omp atomic read ldv = bfx4_packed.a; // CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic // CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]] // CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]] // CHECK: [[SHL:%.+]] = shl i64 [[LD]], 40 // CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 57 // CHECK: store x86_fp80 #pragma omp atomic read ldv = bfx4.b; // CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) monotonic // CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]] // CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]] // CHECK: [[ASHR:%.+]] = ashr i8 [[LD]], 1 // CHECK: sext i8 [[ASHR]] to i64 // CHECK: store x86_fp80 #pragma omp atomic read ldv = bfx4_packed.b; // CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (<2 x float>* @{{.+}} to i64*) monotonic // CHECK: [[BITCAST:%.+]] = bitcast <2 x float>* [[LDTEMP:%.+]] to i64* // CHECK: store i64 [[LD]], i64* [[BITCAST]] // CHECK: [[LD:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]] // CHECK: extractelement <2 x float> [[LD]] // CHECK: store i64 #pragma omp atomic read ulv = float2x.x; // CHECK: call{{.*}} i{{[0-9]+}} @llvm.read_register // CHECK: call{{.*}} @__kmpc_flush( // CHECK: store double #pragma omp atomic read seq_cst dv = rix; return 0; } #endif
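The test above pins down the IR Clang emits for #pragma omp atomic read over scalar, complex, vector, and bit-field l-values. A minimal sketch of the construct itself, compiled with -fopenmp; the variable names here are illustrative and not part of the test:

#include <cstdio>
#include <omp.h>

double shared_value = 0.0;  // read and written concurrently below

int main() {
  double snapshot = 0.0;  // written by one thread, read after the join
#pragma omp parallel num_threads(2)
  {
    if (omp_get_thread_num() == 0) {
      // Writer thread: atomic write, so no reader observes a torn value.
#pragma omp atomic write
      shared_value = 3.14;
    } else {
      // Reader thread: atomic read; this is the construct the CHECK
      // lines above lower to an atomic load (plus a bitcast for
      // floating-point types).
#pragma omp atomic read
      snapshot = shared_value;
    }
  }
  // Prints either 0 or 3.14 depending on interleaving; never a torn value.
  std::printf("snapshot = %g\n", snapshot);
  return 0;
}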
residual_displacement_and_other_dof_criteria.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_RESIDUAL_DISPLACEMENT_AND_OTHER_DOF_CRITERIA ) #define KRATOS_RESIDUAL_DISPLACEMENT_AND_OTHER_DOF_CRITERIA // System includes // External includes // Project includes #include "includes/model_part.h" #include "includes/define.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualDisplacementAndOtherDoFCriteria * @ingroup StructuralMechanicsApplication * @brief This is a convergence criterion that employs the residual as criterion; it divides the problem into two sets of dofs, displacement and another one * @details The reactions from the RHS are not computed in the residual. It can be used, for example, with rotations or pressure * @author Vicente Mataix Ferrandiz */ template<class TSparseSpace, class TDenseSpace > class ResidualDisplacementAndOtherDoFCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION( ResidualDisplacementAndOtherDoFCriteria ); typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef TSparseSpace SparseSpaceType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef std::size_t IndexType; typedef std::size_t SizeType; ///@} ///@name Life Cycle ///@{ /** Constructor. * @param RatioTolerance: Relative tolerance for error * @param AbsoluteTolerance: Absolute tolerance for error * @param OtherDoFName: The name of the other DoF */ ResidualDisplacementAndOtherDoFCriteria( TDataType RatioTolerance, TDataType AbsoluteTolerance, const std::string OtherDoFName = "ROTATION" ) : ConvergenceCriteria< TSparseSpace, TDenseSpace >(), mOtherDoFName(OtherDoFName), mRatioTolerance(RatioTolerance), mAbsoluteTolerance(AbsoluteTolerance) { this->mActualizeRHSIsNeeded = true; } //* Copy constructor. ResidualDisplacementAndOtherDoFCriteria( ResidualDisplacementAndOtherDoFCriteria const& rOther ) :BaseType(rOther) ,mOtherDoFName(rOther.mOtherDoFName) ,mRatioTolerance(rOther.mRatioTolerance) ,mAbsoluteTolerance(rOther.mAbsoluteTolerance) ,mInitialResidualDispNorm(rOther.mInitialResidualDispNorm) ,mCurrentResidualDispNorm(rOther.mCurrentResidualDispNorm) ,mInitialResidualOtherDoFNorm(rOther.mInitialResidualOtherDoFNorm) ,mCurrentResidualOtherDoFNorm(rOther.mCurrentResidualOtherDoFNorm) { this->mActualizeRHSIsNeeded = true; } //* Destructor. ~ResidualDisplacementAndOtherDoFCriteria() override {} ///@} ///@name Operators ///@{ /** * Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { if (TSparseSpace::Size(rb) != 0) { //if we are solving for something TDataType ratio_displacement = 0.0; TDataType ratio_other_dof = 0.0; SizeType disp_size; CalculateResidualNorm(rModelPart, mCurrentResidualDispNorm, mCurrentResidualOtherDoFNorm, disp_size, rDofSet, rb); if (mInitialResidualDispNorm == 0.0) { ratio_displacement = 0.0; } else { ratio_displacement = mCurrentResidualDispNorm/mInitialResidualDispNorm; } if (mInitialResidualOtherDoFNorm == 0.0) { ratio_other_dof = 0.0; } else { ratio_other_dof = mCurrentResidualOtherDoFNorm/mInitialResidualOtherDoFNorm; } const std::size_t system_size = TSparseSpace::Size(rb); const TDataType absolute_norm_disp = (mCurrentResidualDispNorm/static_cast<TDataType>(disp_size)); const TDataType absolute_norm_other_dof = (mCurrentResidualOtherDoFNorm/static_cast<TDataType>(system_size - disp_size)); KRATOS_INFO_IF("ResidualDisplacementAndOtherDoFCriteria", this->GetEchoLevel() > 0) << "RESIDUAL DISPLACEMENT CRITERION :: Ratio = "<< ratio_displacement << "; Norm = " << absolute_norm_disp << std::endl; KRATOS_INFO_IF("ResidualDisplacementAndOtherDoFCriteria", this->GetEchoLevel() > 0) << "RESIDUAL " << mOtherDoFName << " CRITERION :: Ratio = "<< ratio_other_dof << "; Norm = " << absolute_norm_other_dof << std::endl; rModelPart.GetProcessInfo()[CONVERGENCE_RATIO] = ratio_displacement; rModelPart.GetProcessInfo()[RESIDUAL_NORM] = absolute_norm_disp; if ((ratio_displacement <= mRatioTolerance || absolute_norm_disp < mAbsoluteTolerance) && (ratio_other_dof <= mRatioTolerance || absolute_norm_other_dof < mAbsoluteTolerance)) { KRATOS_INFO_IF("ResidualDisplacementAndOtherDoFCriteria", this->GetEchoLevel() > 0) << "Convergence is achieved" << std::endl; return true; } else { return false; } } else { return true; } } /** * This function initializes the convergence criteria * @param rModelPart Reference to the ModelPart containing the contact problem. (unused) */ void Initialize( ModelPart& rModelPart ) override { BaseType::mConvergenceCriteriaIsInitialized = true; } /** * This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { BaseType::InitializeSolutionStep(rModelPart, rDofSet, rA, rDx, rb); // Filling mActiveDofs when MPCs exist if (rModelPart.NumberOfMasterSlaveConstraints() > 0) { mActiveDofs.resize(rDofSet.size()); #pragma omp parallel for for(int i=0; i<static_cast<int>(mActiveDofs.size()); ++i) { mActiveDofs[i] = true; } #pragma omp parallel for for (int i=0; i<static_cast<int>(rDofSet.size()); ++i) { const auto it_dof = rDofSet.begin() + i; if (it_dof->IsFixed()) { mActiveDofs[it_dof->EquationId()] = false; } } for (const auto& r_mpc : rModelPart.MasterSlaveConstraints()) { for (const auto& r_dof : r_mpc.GetMasterDofsVector()) { mActiveDofs[r_dof->EquationId()] = false; } for (const auto& r_dof : r_mpc.GetSlaveDofsVector()) { mActiveDofs[r_dof->EquationId()] = false; } } } SizeType size_residual; CalculateResidualNorm(rModelPart, mInitialResidualDispNorm, mInitialResidualOtherDoFNorm, size_residual, rDofSet, rb); } ///@} ///@name Operations ///@{ ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ std::string mOtherDoFName; // The name of the other DoF TDataType mInitialResidualDispNorm; // The initial residual norm for displacements TDataType mCurrentResidualDispNorm; // The current residual norm for displacements TDataType mInitialResidualOtherDoFNorm; // The initial residual norm for the other DoF TDataType mCurrentResidualOtherDoFNorm; // The current residual norm for the other DoF TDataType mRatioTolerance; // The tolerance admitted in the ratio TDataType mAbsoluteTolerance; // The tolerance admitted in the absolute value std::vector<bool> mActiveDofs; /// This vector contains the dofs that are active ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ /** * @brief This method computes the norm of the residual * @details It checks if the dof is fixed * @param rModelPart Reference to the ModelPart containing the problem.
* @param rResidualSolutionNormDisp The norm of the displacement residual * @param rResidualSolutionNormOtherDof The norm of the residual for the other DoF * @param rDofNumDisp The number of displacement DoFs * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rb RHS vector (residual + reactions) */ virtual void CalculateResidualNorm( ModelPart& rModelPart, TDataType& rResidualSolutionNormDisp, TDataType& rResidualSolutionNormOtherDof, SizeType& rDofNumDisp, DofsArrayType& rDofSet, const TSystemVectorType& rb ) { // Initialize TDataType residual_solution_norm_disp = TDataType(); TDataType residual_solution_norm_other_dof = TDataType(); SizeType disp_dof_num = 0; // Auxiliary values TDataType residual_dof_value = 0.0; const auto it_dof_begin = rDofSet.begin(); const int number_of_dof = static_cast<int>(rDofSet.size()); // Loop over Dofs if (rModelPart.NumberOfMasterSlaveConstraints() > 0) { #pragma omp parallel for firstprivate(residual_dof_value) reduction(+:residual_solution_norm_disp, residual_solution_norm_other_dof, disp_dof_num) for (int i = 0; i < number_of_dof; i++) { auto it_dof = it_dof_begin + i; const IndexType dof_id = it_dof->EquationId(); residual_dof_value = TSparseSpace::GetValue(rb,dof_id); if (mActiveDofs[dof_id]) { if (it_dof->GetVariable() == DISPLACEMENT_X || it_dof->GetVariable() == DISPLACEMENT_Y || it_dof->GetVariable() == DISPLACEMENT_Z) { residual_solution_norm_disp += std::pow(residual_dof_value, 2); disp_dof_num++; } else { residual_solution_norm_other_dof += std::pow(residual_dof_value, 2); } } } } else { #pragma omp parallel for firstprivate(residual_dof_value) reduction(+:residual_solution_norm_disp, residual_solution_norm_other_dof, disp_dof_num) for (int i = 0; i < number_of_dof; i++) { auto it_dof = it_dof_begin + i; if (!it_dof->IsFixed()) { const IndexType dof_id = it_dof->EquationId(); residual_dof_value = TSparseSpace::GetValue(rb,dof_id); if (it_dof->GetVariable() == DISPLACEMENT_X || it_dof->GetVariable() == DISPLACEMENT_Y || it_dof->GetVariable() == DISPLACEMENT_Z) { residual_solution_norm_disp += std::pow(residual_dof_value, 2); disp_dof_num++; } else { residual_solution_norm_other_dof += std::pow(residual_dof_value, 2); } } } } rDofNumDisp = disp_dof_num; rResidualSolutionNormDisp = std::sqrt(residual_solution_norm_disp); rResidualSolutionNormOtherDof = std::sqrt(residual_solution_norm_other_dof); } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; // Class ResidualDisplacementAndOtherDoFCriteria ///@} ///@name Type Definitions ///@{ ///@} } // namespace Kratos. #endif // KRATOS_RESIDUAL_DISPLACEMENT_AND_OTHER_DOF_CRITERIA defined
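A minimal sketch of the acceptance rule PostCriteria applies above, detached from the Kratos types; the struct and function names here are hypothetical:

#include <cstddef>

// One residual block: the displacement DoFs or the "other" DoF set.
struct BlockNorms {
  double initial;     // ||r0||, stored by InitializeSolutionStep
  double current;     // ||r||, recomputed by CalculateResidualNorm
  std::size_t ndofs;  // number of DoFs in this block
};

// A block passes when either the relative norm or the size-averaged
// absolute norm is below its tolerance, mirroring the checks above.
bool block_converged(const BlockNorms& b, double ratio_tol, double abs_tol) {
  const double ratio = (b.initial == 0.0) ? 0.0 : b.current / b.initial;
  const double absolute = b.current / static_cast<double>(b.ndofs);
  return ratio <= ratio_tol || absolute < abs_tol;
}

// Overall convergence requires both blocks to pass simultaneously.
bool converged(const BlockNorms& disp, const BlockNorms& other,
               double ratio_tol, double abs_tol) {
  return block_converged(disp, ratio_tol, abs_tol) &&
         block_converged(other, ratio_tol, abs_tol);
}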
spmfeatures.h
// Licensed under the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. You may obtain // a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the // License for the specific language governing permissions and limitations // under the License. // spmfeatures.h // // Mark Johnson, 17th Feb 2010 // // spfeatures.h, but modified to use sp-mdata.h so it can read Slav's 4 parsers' output // // PLEASE DO NOT MODIFY THIS FILE. // // If you decide to change or define new features, make your changes // to a copy of this file, and modify features.h to include your copy // of this file instead of this file itself. Also, change the // FEATURESNICKNAME variable in the top-level Makefile to some new // name; that way the original model and your new model can co-exist // (and you can compare their performance). // // Each feature is an instance of a subclass of FeatureClass. To define // a new feature, subclass FeatureClass. FeatureClassPtrs{} is a class that holds one or more FeatureClass // objects. The standard way of interacting with FeatureClass objects // is via the methods of FeatureClassPtrs. // // This version uses sptree instead of tree (sptrees are annotated trees). #pragma once #include <algorithm> // #include <boost/lexical_cast.hpp> #include <cassert> #include <cstdio> #include <ext/hash_map> #include <iostream> #include <limits> #include <map> #include <set> #include <string> #include <utility> #include <vector> #include "lexical_cast.h" #include "sstring.h" #include "sp-mdata.h" #include "heads.h" #include "popen.h" #include "sptree.h" #include "sym.h" #include "tree.h" #include "utility.h" #define FloatTol 1e-7 // SPFEATURES_COMMON_DEFINITIONS contains all of the common definitions // that every feature function needs. // #define SPFEATURES_COMMON_DEFINITIONS \ \ typedef ext::hash_map<Feature,Id> Feature_Id; \ Feature_Id feature_id; \ \ virtual void extract_features(const sp_sentence_type& s) { \ extract_features_helper(*this, s); \ } \ \ virtual Id prune_and_renumber(const size_type mincount, Id nextid, \ std::ostream& os) { \ return prune_and_renumber_helper(*this, mincount, nextid, os); \ } \ \ virtual void feature_values(const sp_sentence_type& s, \ Id_Floats& p_i_v) \ { \ feature_values_helper(*this, s, p_i_v); \ } \ \ virtual std::ostream& print_feature_ids(std::ostream& os) const { \ return print_feature_ids_helper(*this, os); \ } \ \ virtual std::istream& read_feature(std::istream& is, Id id) { \ return read_feature_helper(*this, is, id); \ } extern int debug_level; extern bool absolute_counts; //!< produce absolute rather than relative counts extern bool collect_correct; //!< collect features from correct parse extern bool collect_incorrect; //!< collect features from incorrect parse extern bool lowercase_flag; //!< lowercase all terminals when reading tree typedef unsigned int size_type; typedef size_type Id; //!< type of feature Ids #define SCANF_ID_TYPE "%u" typedef std::map<Id,Float> Id_Float; typedef std::vector<Id_Float> Id_Floats; //////////////////////////////////////////////////////////////////////// // // // FeatureClass{} // // // //////////////////////////////////////////////////////////////////////// //! FeatureClass{} is an ABC for classes of features. A FeatureClass //!
identifies features in parse trees. To use the template functions //! in FeatureClass and its descendants, each subclass of //! FeatureClass{} should define the following embedded types as //! well as the virtual functions of FeatureClass{}. //! //! Feature -- the type of features belonging to FeatureClass //! (operator << should be defined on Feature) //! //! Feature_Id -- an efficient map type from features to Id //! //! Each FeatureClass object must also have members: //! //! Feature_Id feature_id; // class FeatureClass { public: //! destructor is virtual -- put it first so it isn't forgotten! // virtual ~FeatureClass() { }; // These next virtual functions must be implemented by any FeatureClass //! identifier() returns a unique identifying string for this //! FeatureClass. By convention, a FeatureClass::identifier() consists //! of a major class name followed by subclass specifications, all //! separated by ':'. E.g., the identifier "RFrag:2:2:5" identifies the //! Rule Fragment feature class with 2 ancestors and fragment lengths //! between 2 and 5. // virtual const char* identifier() const = 0; //! extract_features() extracts the relevant features from sentence s // virtual void extract_features(const sp_sentence_type& s) = 0; //! prune_and_renumber() prunes all features with a count of //! less than mincount and renumbers them from nextid. //! It returns the updated nextid. The pruned features are //! written to os. // virtual Id prune_and_renumber(const size_type mincount, Id nextid, std::ostream& os) = 0; //! feature_values() collects the feature values for the sentence s // virtual void feature_values(const sp_sentence_type& s, Id_Floats& piv) = 0; //! print_feature_ids() prints out the features and their ids. // virtual std::ostream& print_feature_ids(std::ostream& os) const = 0; //! read_feature() reads the feature definition from is, and //! sets its id to id. // virtual std::istream& read_feature(std::istream& is, Id id) = 0; //! define commonly used symbols // inline static symbol endmarker() { static symbol e("_"); return e; } inline static symbol childmarker() { static symbol c("*CHILD*"); return c; } inline static symbol adjunctmarker() { static symbol a("*ADJ*"); return a; } inline static symbol conjunctmarker() { static symbol c("*CONJ*"); return c; } inline static symbol headmarker() { static symbol h("*HEAD*"); return h; } inline static symbol lastadjunctmarker() { static symbol l("*LASTADJ*"); return l; } inline static symbol lastconjunctmarker() { static symbol l("*LASTCONJ*"); return l; } inline static symbol nonrootmarker() { static symbol r("*NONROOT*"); return r; } inline static symbol postheadmarker() { static symbol p("*POSTHEAD*"); return p; } inline static symbol preheadmarker() { static symbol p("*PREHEAD*"); return p; } inline static symbol DT() { static symbol dt("DT"); return dt; } inline static symbol NP() { static symbol np("NP"); return np; } inline static symbol ROOT() { static symbol root("ROOT"); return root; } inline static symbol S() { static symbol s("S"); return s; } inline static symbol SBAR() { static symbol sbar("SBAR"); return sbar; } inline static symbol SINV() { static symbol sinv("SINV"); return sinv; } inline static symbol VB() { static symbol vb("VB"); return vb; } inline static symbol VP() { static symbol vp("VP"); return vp; } inline static symbol ZERO() { static symbol zero("0"); return zero; } //! suffix() returns a new symbol consisting of the last nsuffixletters //!
of s // inline static symbol suffix(symbol s, size_type nsuffixletters) { if (nsuffixletters == 0 || s.string_reference().size() <= nsuffixletters) return s; else return symbol(s.string_reference().substr(s.string_reference().size()-nsuffixletters, nsuffixletters)); } // FeatureClass::suffix() //! quantize() is a utility function mapping positive ints to a //! small number of discrete values // inline static int quantize(int v) { assert(v >= 0); switch (v) { case 0: return 0; case 1: return 1; case 2: return 2; case 3: case 4: return 4; default: return 5; } return 5; } // FeatureClass::quantize() //! symbol_quantize() is a utility function mapping positive ints to a //! small number of discrete values // inline symbol symbol_quantize(int v) { static symbol zero("0"), one("1"), two("2"), four("4"), five("5"); assert(v >= 0); switch (v) { case 0: return zero; case 1: return one; case 2: return two; case 3: case 4: return four; default: return five; } return five; } // FeatureClass::symbol_quantize() //! is_bounding_node() is true of nodes labeled NP, ROOT, S or SBAR // inline static bool is_bounding_node(const sptree* node) { return node != NULL && ( node->label.cat == NP() || node->label.cat == ROOT() || node->label.cat == S() || node->label.cat == SBAR() ); } // FeatureClass::is_bounding_node() // The following templated static functions do most of the actual work; // the specialized FeatureClasses define a few data structures, and // the specialized virtual functions call the templated functions below. //! A FeatureParseVal object defines operator[] to accumulate feature counts //! for the parse with id parse // template <typename FeatClass> struct FeatureParseVal { typedef typename FeatClass::Feature F; typedef Float V; typedef std::map<size_type,V> C_V; typedef std::map<F,C_V> F_C_V; size_type parse; // parse which we are currently collecting stats from F_C_V f_p_v; // feature -> parse -> value V& operator[](const F& feat) { return f_p_v[feat][parse]; } }; // FeatureClass::FeatureParseVal{} //! An IdParseVal object is like a FeatureParseVal object except that //! it maps each feature to its Id first // template <typename FeatClass> struct IdParseVal { typedef typename FeatClass::Feature Feature; typedef Id F; typedef Float V; typedef std::map<size_type,V> C_V; typedef std::map<F,C_V> F_C_V; FeatClass& fc; size_type parse; F_C_V f_p_v; V ignored; IdParseVal(FeatClass& fc) : fc(fc), ignored(0) { } V& operator[](const Feature& f) { typedef typename FeatClass::Feature_Id::const_iterator It; It it = fc.feature_id.find(f); if (it != fc.feature_id.end()) return f_p_v[it->second][parse]; else return ignored; } // IdParseVal::operator[] }; // FeatureClass::IdParseVal{} //! sentence_parsefidvals() calls parse_featurecount() to get the //! feature count for each parse, then subtracts the most common //! count for each feature from each count. This means that feature //! values can be negative! Setting absolute_counts disables this. 
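quantize() and symbol_quantize() above collapse non-negative counts into the bins {0, 1, 2, 4, 5}, keeping count-valued features coarse. A standalone sketch of the same mapping:

#include <cassert>

// Same binning as FeatureClass::quantize(): 0, 1 and 2 map to
// themselves, 3 and 4 share the bin "4", larger values land in "5".
int quantize(int v) {
  assert(v >= 0);
  if (v <= 2) return v;
  if (v <= 4) return 4;
  return 5;
}

int main() {
  assert(quantize(0) == 0);
  assert(quantize(2) == 2);
  assert(quantize(3) == 4);
  assert(quantize(17) == 5);
  return 0;
}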
// template <typename FeatClass, typename Fid_Parse_Val, typename Parse_Fid_Val> static void sentence_parsefidvals(FeatClass& fc, const sp_sentence_type& s, Fid_Parse_Val& fid_parse_val, Parse_Fid_Val& parse_fid_val) { assert(parse_fid_val.size() == s.nparses()); fid_parse_val.f_p_v.clear(); for (size_type i = 0; i < s.nparses(); ++i) { fid_parse_val.parse = i; fc.parse_featurecount(fc, s.parses[i], fid_parse_val); } // copy into parse_fid_val, removing pseudo-constant features typedef typename Fid_Parse_Val::F F; typedef typename Fid_Parse_Val::V V; typedef typename Fid_Parse_Val::C_V C_V; typedef typename Fid_Parse_Val::F_C_V F_C_V; typedef std::map<V, size_type> V_C; cforeach (typename F_C_V, fit, fid_parse_val.f_p_v) { const F& feat = fit->first; const C_V& parse_val = fit->second; if (absolute_counts) { for (size_type i = 0; i < s.nparses(); ++i) { V val = dfind(parse_val, i); if (val != 0) #pragma omp critical (sentence_parsefidvals0) parse_fid_val[i][feat] = val; } } else { // relative counts V_C val_gain; // number of times each feature value occured for (size_type i = 0; i < s.nparses(); ++i) { const V val = dfind(parse_val, i); val_gain[val] += 2; val_gain[val-1] += 1; } const V& highest_gain_val = max_element(val_gain, second_lessthan())->first; for (size_type i = 0; i < s.nparses(); ++i) { const V val = dfind(parse_val, i) - highest_gain_val; if (val != 0) #pragma omp critical (sentence_parsefidvals1) parse_fid_val[i][feat] = val; } } } } // FeatureClass::sentence_parsefidvals() //! extract_features_helper() increments by one all of the non-pseudo-constant //! features that occur in one or more parses of this sentence. // template <typename FeatClass> static void extract_features_helper(FeatClass& fc, const sp_sentence_type& s) { if (s.nparses() <= 1) // ignore unambiguous sentences return; typedef FeatureParseVal<FeatClass> FPV; typedef typename FPV::F_C_V F_C_V; typedef typename FPV::C_V C_V; typedef typename FPV::V V; FPV fpv; for (size_type i = 0; i < s.nparses(); ++i) { fpv.parse = i; fc.parse_featurecount(fc, s.parses[i], fpv); } // trace if required if (debug_level > 1000) { cforeach (typename F_C_V, it, fpv.f_p_v) { const C_V& p_v = it->second; typename C_V::const_iterator it1 = p_v.find(0); if (it1 != p_v.end()) std::cerr << '\t' << fc.identifier() << '\t' << it->first << '\t' << it1->second << std::endl; } } // only insert non-pseudo-constant features cforeach (typename F_C_V, it, fpv.f_p_v) { const C_V& p_v = it->second; bool pseudoconstant = true; if (p_v.size() != s.nparses()) // does feature occur on pseudoconstant = false; // every parse? else { assert(!p_v.empty()); V v0 = p_v.begin()->second; cforeach (typename C_V, it1, p_v) if (it1->second != v0) { pseudoconstant = false; break; } } if (pseudoconstant == false) if ((collect_correct && p_v.find(0) != p_v.end()) || (collect_incorrect && (p_v.find(0) == p_v.end() || p_v.size() > 1))) ++fc.feature_id[it->first]; } } // FeatureClass::extract_features_helper() //! 
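sentence_parsefidvals() above recenters each feature by the value with the highest gain (every observed value scores 2 and the value one below it scores 1, biasing ties), so features that take the same value on every parse vanish. A toy sketch of that recentering, with hypothetical names:

#include <cstdio>
#include <map>
#include <vector>

// Subtract the highest-gain value from the per-parse counts of one
// feature, mimicking the relative-counts branch above.
std::vector<double> recenter(const std::vector<double>& counts) {
  std::map<double, int> val_gain;
  for (double v : counts) {
    val_gain[v] += 2;      // the value itself
    val_gain[v - 1] += 1;  // small credit to the value just below
  }
  double best = counts.front();
  int best_gain = -1;
  for (const auto& vg : val_gain)
    if (vg.second > best_gain) { best = vg.first; best_gain = vg.second; }
  std::vector<double> out;
  for (double v : counts) out.push_back(v - best);
  return out;
}

int main() {
  // A feature firing twice in most parses and three times in one
  // keeps only the distinguishing +1:
  for (double v : recenter({2, 2, 3, 2})) std::printf("%g ", v);  // 0 0 1 0
  std::printf("\n");
  return 0;
}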
print_feature_ids_helper() prints the feature_ids // template <typename FeatClass> static std::ostream& print_feature_ids_helper(const FeatClass& fc, std::ostream& os) { typedef const typename FeatClass::Feature* FeaturePtr; typedef std::pair<Id,FeaturePtr> IdFeaturePtr; typedef std::vector<IdFeaturePtr> IdFeaturePtrs; IdFeaturePtrs idfps;; idfps.reserve(fc.feature_id.size()); cforeach (typename FeatClass::Feature_Id, it, fc.feature_id) idfps.push_back(IdFeaturePtr(it->second, &it->first)); std::sort(idfps.begin(), idfps.end()); cforeach (typename IdFeaturePtrs, it, idfps) os << it->first << '\t' << fc.identifier() << ' ' << *it->second << '\n'; os << std::flush; return os; } // FeatureClass::print_feature_ids_helper() //! prune_and_renumber_helper() extracts all features with at //! least mincount count, and numbers the remaining features //! incrementally from nextid. // template <typename FeatClass> static Id prune_and_renumber_helper(FeatClass& fc, const size_type mincount, Id nextid, std::ostream& os) { typedef typename FeatClass::Feature F; typedef std::vector<F> Fs; Fs fs; cforeach (typename FeatClass::Feature_Id, it, fc.feature_id) if (it->second >= mincount) fs.push_back(it->first); fc.feature_id.clear(); cforeach (typename Fs, it, fs) fc.feature_id[*it] = nextid++; print_feature_ids_helper(fc, os); return nextid; } // FeatureClass::prune_and_renumber_helper() //! feature_values_helper() maps a sentence to its parse_id_count vector // template <typename FeatClass> static void feature_values_helper(FeatClass& fc, const sp_sentence_type& s, Id_Floats& p_i_v) { assert(p_i_v.size() == s.nparses()); IdParseVal<FeatClass> i_p_v(fc); sentence_parsefidvals(fc, s, i_p_v, p_i_v); } // FeatureClass::feature_values_helper() //! read_feature_helper() reads the next feature from is, and //! sets its id to id. This method reads the entire rest of the //! line and defines the feature accordingly. // template <typename FeatClass> static std::istream& read_feature_helper(FeatClass& fc, std::istream& is, Id id) { typedef typename FeatClass::Feature F; typedef typename FeatClass::Feature_Id::value_type FI; F f; is >> f; assert(is); bool inserted = fc.feature_id.insert(FI(f, id)).second; if (!inserted) { std::cerr << "## Error in spfeatures:read_feature_helper(): " << "duplicate feature, id = " << id << ", f = `" << f << "'" << std::endl; exit(EXIT_FAILURE); } return is; } // FeatureClass::read_feature_helper() }; // FeatureClass{} //////////////////////////////////////////////////////////////////////// // // // FeatureClassPtrs{} // // // //////////////////////////////////////////////////////////////////////// //! FeatureClassPtrs{} holds pointers to FeatureClass{} objects. //! These objects are responsible for identifying features in trees //! and mapping a set of parse trees for a sentence to a vector //! of feature-counts. //! //! In general, one or more FeatureClass pointers will be pushed //! onto the FeatureClassPtrs object, then extract_features() //! is called to count how many sentences each feature occurs //! in, then prune_and_renumber() is called to prune features //! and assign them id numbers, and finally write_features() //! is called to map parse trees to feature vectors. //! //! extract-spmfeatures.cc and best-spmparses.cc create a FeatureClassPtrs //! object that contains pointers to each of the FeatureClass objects, //! which are responsible for actually extracting a class of features. //! //! A FeatureClassPtrs object fcps implements the following: //! //! 
extract_features() counts how often each feature appears in //! the training data //! //! prune_and_renumber() prunes features (say by deleting all //! low frequency features) and assigns the remaining features //! an identifying number (id) //! //! write_features() writes out the features for each parse in the n-best //! parse corpus //! //! read_feature_ids() instantiates the features from a file, and assigns //! them appropriate identifying numbers (id) //! //! best_parse() finds the best parse for a sentence produced by an n-best //! parser // class FeatureClassPtrs : public std::vector<FeatureClass*> { private: struct extract_features_visitor { struct FeatureClassPtrs& fcps; extract_features_visitor(FeatureClassPtrs& fcps) : fcps(fcps) { } void operator() (const sp_sentence_type& s) { if (debug_level > 1000) std::cerr << '\n' << s.parses[0].parse << '\n' << std::endl; // foreach (FeatureClassPtrs, it, fcps) // (*it)->extract_features(s); #pragma omp parallel for schedule(dynamic) for (unsigned i = 0; i < fcps.size(); ++i) fcps[i]->extract_features(s); } // FeatureClassPtrs::extract_features_visitor::operator() }; // FeatureClassPtrs::extract_features_visitor{} public: //! The following load FeatureClassPtrs with various sets of features // inline FeatureClassPtrs(const char* fcname=NULL); inline void features_sumlogp(); inline void features_logps(); inline void features_avlogps(); inline void features_wavlogp(); inline void features_logpparses(); inline void features_rank(unsigned featurelevel=0); inline void features_rankplus(bool lxfeats, bool edgefeats); inline void features_spnn(bool avparses=false, bool nngram=false); inline void features_mfeatures(unsigned maxwidth=1, unsigned maxsumwidth=2, unsigned maxwords=1, const char* parser=NULL); //! extract_features() extracts features from the tree file infile. // void extract_features(const char* parseincmd, const char* goldincmd) { extract_features_visitor efv(*this); sp_corpus_type::map_sentences_cmd(parseincmd, goldincmd, efv, lowercase_flag); } // FeatureClassPtrs::extract_features() //! prune_and_renumber() prunes all features that occur in less than //! mincount sentences, and then assigns them a number starting at 1. // Id prune_and_renumber(size_type mincount=5, std::ostream& os=std::cout) { Id nextid = 0; cforeach (FeatureClassPtrs, it, *this) nextid = (*it)->prune_and_renumber(mincount, nextid, os); return nextid; } // FeatureClassPtrs::prune_and_renumber() //! write_features() maps a tree data file into a feature //! data file. This is used to prepare a feature counts //! file from a tree data file. // void write_features(const char* parseincmd, const char* goldincmd, const char* outfile) { const char* filesuffix = strrchr(outfile, '.'); std::string command(strcasecmp(filesuffix, ".bz2") ? (strcasecmp(filesuffix, ".gz") ? 
"cat > " : "gzip > ") : "bzip2 > "); command += outfile; FILE *out = popen(command.c_str(), "w"); if (out == NULL) { std::cerr << "## Error: can't popen command " << command << std::endl; exit(EXIT_FAILURE); } command.clear(); ipstream parsein(parseincmd); if (!parsein) { std::cerr << "## Error: can't popen parseincmd = " << parseincmd << std::endl; exit(EXIT_FAILURE); } ipstream goldin(goldincmd); if (!goldin) { std::cerr << "## Error: can't popen goldincmd = " << goldincmd << std::endl; exit(EXIT_FAILURE); } unsigned int nsentences; if (!(goldin >> nsentences)) { std::cerr << "## Failed to read nsentences from " << goldincmd << std::endl; exit(EXIT_FAILURE); } fprintf(out, "S=%u\n", nsentences); sp_sentence_type sentence; Id_Floats p_i_v; for (size_type i = 0; i < nsentences; ++i) { sentence.read(parsein, goldin, lowercase_flag); precrec_type::edges goldedges(sentence.gold); fprintf(out, "G=%u N=%u", goldedges.nedges(), unsigned(sentence.parses.size())); p_i_v.clear(); // Clear feature-counts p_i_v.resize(sentence.nparses()); // cforeach (FeatureClassPtrs, it, *this) // (*it)->feature_values(sentence, p_i_v); #pragma omp parallel for schedule(dynamic) for (unsigned k = 0; k < size(); ++k) (*this)[k]->feature_values(sentence, p_i_v); for (size_type j = 0; j < sentence.parses.size(); ++j) { const sp_parse_type& p = sentence.parses[j]; precrec_type pr(goldedges, p.parse); fprintf(out, " P=%u W=%u", pr.ntest, pr.ncommon); const Id_Float& i_v = p_i_v[j]; cforeach (Id_Float, it, i_v) if (it->second == 1) fprintf(out, " " SCANF_ID_TYPE, it->first); else fprintf(out, " " SCANF_ID_TYPE "=%g", it->first, it->second); fprintf(out, ","); } fprintf(out, "\n"); } pclose(out); } // FeatureClassPtrs::write_features() //! read_feature_ids() reads feature ids from is, and sets //! each feature class' feature_id hash accordingly. // Id read_feature_ids(std::istream& is) { typedef std::map<std::string, FeatureClass*> St_FCp; St_FCp fcident_fcp; for (iterator it = begin(); it != end(); ++it) fcident_fcp[(*it)->identifier()] = *it; Id id, maxid = 0; std::string fcident; while (is >> id >> fcident) { St_FCp::const_iterator it = fcident_fcp.find(fcident); if (it != fcident_fcp.end()) it->second->read_feature(is, id); else { std::cerr << "## Error: can't find feature identifier " << fcident << " in feature list.\n" << "## best-parses incompatible with feature definition data file." << std::endl; exit(EXIT_FAILURE); } is.ignore(std::numeric_limits<int>::max(), '\n'); if (id > maxid) maxid = id; } return maxid; } // FeatureClassPtrs::read_feature_ids() //! best_parse() returns the best parse tree from n-best parses for a sentence // template <typename Ws> const tree* best_parse(const sp_sentence_type& sentence, const Ws& ws) const { assert(sentence.nparses() > 0); Id_Floats p_i_v(sentence.nparses()); // cforeach (FeatureClassPtrs, it, *this) // (*it)->feature_values(sentence, p_i_v); #pragma omp parallel for schedule(dynamic) for (unsigned k = 0; k < size(); ++k) (*this)[k]->feature_values(sentence, p_i_v); Float max_weight = 0; size_type i_max = 0; for (size_type i = 0; i < sentence.nparses(); ++i) { const Id_Float& i_v = p_i_v[i]; Float w = 0; cforeach (Id_Float, ivit, i_v) { assert(ivit->first < ws.size()); w += ivit->second * ws[ivit->first]; } if (i == 0 || w > max_weight) { i_max = i; max_weight = w; } } return sentence.parses[i_max].parse0; } // FeatureClassPtrs::best_parse() //! write_ranked_trees() sorts all of the trees by their conditional //! probability and then writes them out in sorted order. 
// template <typename Ws> std::ostream& write_ranked_trees(const sp_sentence_type& sentence, const Ws& ws, std::ostream& os) const { assert(sentence.nparses() > 0); os << sentence.nparses() << ' ' << sentence.label << std::endl; Id_Floats p_i_v(sentence.nparses()); cforeach (FeatureClassPtrs, it, *this) (*it)->feature_values(sentence, p_i_v); typedef std::pair<Id,Float> IdFloat; typedef std::vector<IdFloat> IdFloats; IdFloats idweights(sentence.nparses()); for (size_type i = 0; i < sentence.nparses(); ++i) { idweights[i].first = i; const Id_Float& i_v = p_i_v[i]; Float w = 0; cforeach (Id_Float, ivit, i_v) { assert(ivit->first < ws.size()); w += ivit->second * ws[ivit->first]; } idweights[i].second = w; } std::sort(idweights.begin(), idweights.end(), second_greaterthan()); cforeach (IdFloats, it, idweights) { const sp_parse_type& parse = sentence.parses[it->first]; os << it->second << ' ' << parse.sum_logprob << '\n'; write_tree_noquote_root(os, parse.parse0); os << std::endl; } return os; } // FeatureClassPtrs::write_ranked_trees() //! write_features_debug() writes out the features associated with each parse. // template <typename Ws> std::ostream& write_features_debug(const sp_sentence_type& sentence, const Ws& ws, std::ostream& os) const { assert(sentence.nparses() > 0); Id_Floats p_i_v(sentence.nparses()); cforeach (FeatureClassPtrs, it, *this) (*it)->feature_values(sentence, p_i_v); for (size_type i = 0; i < sentence.nparses(); ++i) { const Id_Float& i_v = p_i_v[i]; cforeach (Id_Float, ivit, i_v) { if (ivit->first == 0) continue; // skip first feature if (ws[ivit->first] == 0) continue; // skip features with zero weight os << sentence.label << ' ' << i << ' ' << ivit->first << ' ' << ivit->second << std::endl; } } return os; } // FeatureClassPtrs::write_features_debug() }; // FeatureClassPtrs{} std::ostream& operator<< (std::ostream& os, const FeatureClassPtrs& fcps) { cforeach (FeatureClassPtrs, it, fcps) (*it)->print_feature_ids(os); return os; } // operator<<(FeatureClassPtrs&) //////////////////////////////////////////////////////////////////////// // // // FeatureClass{} specializations // // // //////////////////////////////////////////////////////////////////////// //! SumLogP is the sum of the log parse probabilities //! //! The identifier is SumLogP // class SumLogP : public FeatureClass { public: typedef unsigned Feature; // Always zero std::string identifier_string; SumLogP() : identifier_string("SumLogP") { } template <typename FeatClass, typename Feat_Count> void parse_featurecount(FeatClass& fc, const sp_parse_type& parse, Feat_Count& feat_count) { feat_count[0] = parse.sum_logprob; } // SumLogP::parse_featurecount(); // Here is the stuff that every feature needs virtual const char *identifier() const { return identifier_string.c_str(); } // SumLogP::identifier() // These virtual functions just pass control to the static template functions // SPFEATURES_COMMON_DEFINITIONS; }; // SumLogP{} //! LogP is the log parse probability given by each parser //! //! 
The identifier is LogP // class LogP : public FeatureClass { public: typedef symbol Feature; std::string identifier_string; LogP() : identifier_string("LogP") { } template <typename FeatClass, typename Feat_Count> void parse_featurecount(FeatClass& fc, const sp_parse_type& parse, Feat_Count& feat_count) { cforeach (S_F, it, parse.parser_logprob) feat_count[it->first] = it->second; } // LogP::parse_featurecount(); // Here is the stuff that every feature needs virtual const char *identifier() const { return identifier_string.c_str(); } // LogP::identifier() // These virtual functions just pass control to the static template functions // SPFEATURES_COMMON_DEFINITIONS; }; // LogP{} //! LogPx is the log parse probability given by a specific parser //! //! The identifier is LogPx // class LogPx : public FeatureClass { public: typedef unsigned Feature; // Always zero symbol parserid; std::string identifier_string; LogPx(symbol parserid) : parserid(parserid), identifier_string("LogPx:") { identifier_string += parserid.c_str(); } template <typename FeatClass, typename Feat_Count> void parse_featurecount(FeatClass& fc, const sp_parse_type& parse, Feat_Count& feat_count) { S_F::const_iterator it = parse.parser_logprob.find(parserid); if (it != parse.parser_logprob.end()) feat_count[0] = it->second; } // LogPx::parse_featurecount(); // Here is the stuff that every feature needs virtual const char *identifier() const { return identifier_string.c_str(); } // LogPx::identifier() // These virtual functions just pass control to the static template functions // SPFEATURES_COMMON_DEFINITIONS; }; // LogPx{} //! AvLogP is the average of the log parse probabilities //! //! The identifier is AvLogP // class AvLogP : public FeatureClass { public: typedef unsigned Feature; // Always zero std::string identifier_string; AvLogP() : identifier_string("AvLogP") { } template <typename FeatClass, typename Feat_Count> void parse_featurecount(FeatClass& fc, const sp_parse_type& parse, Feat_Count& feat_count) { feat_count[0] = parse.sum_logprob/parse.parser_logprob.size(); } // AvLogP::parse_featurecount(); // Here is the stuff that every feature needs virtual const char *identifier() const { return identifier_string.c_str(); } // AvLogP::identifier() // These virtual functions just pass control to the static template functions // SPFEATURES_COMMON_DEFINITIONS; }; // AvLogP{} //! WAvLogP is a weighted average of the log parse probabilities //! //! The identifier is WAvLogP // class WAvLogP : public FeatureClass { public: typedef unsigned Feature; // Always zero symbol parser0; Float parser0Weight; std::string identifier_string; WAvLogP(symbol parser0, Float parser0weight) : parser0(parser0), parser0Weight(parser0weight), identifier_string("WAvLogP:") { identifier_string += lexical_cast<std::string>(parser0); identifier_string += "="; identifier_string += lexical_cast<std::string>(parser0Weight); } template <typename FeatClass, typename Feat_Count> void parse_featurecount(FeatClass& fc, const sp_parse_type& parse, Feat_Count& feat_count) { Float sum_logprob = (parser0Weight - 1) * dfind(parse.parser_logprob, parser0); cforeach (S_F, it, parse.parser_logprob) sum_logprob += it->second; feat_count[0] = sum_logprob/parse.parser_logprob.size(); } // WAvLogP::parse_featurecount(); // Here is the stuff that every feature needs virtual const char *identifier() const { return identifier_string.c_str(); } // WAvLogP::identifier() // These virtual functions just pass control to the static template functions // SPFEATURES_COMMON_DEFINITIONS; }; // WAvLogP{} //!
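WAvLogP above is equivalent to counting the distinguished parser with weight w0 and every other parser with weight 1: adding (w0 - 1) times its log probability to the plain sum before dividing by the number of parsers. A small numeric sketch (parser names are made up):

#include <cassert>
#include <map>
#include <string>

// Weighted average as in WAvLogP::parse_featurecount(): parser0 gets
// weight w0, everyone else weight 1, divisor is the parser count.
double wav_logp(const std::map<std::string, double>& parser_logprob,
                const std::string& parser0, double w0) {
  double sum = 0.0;
  auto it = parser_logprob.find(parser0);
  if (it != parser_logprob.end()) sum += (w0 - 1) * it->second;
  for (const auto& pl : parser_logprob) sum += pl.second;
  return sum / parser_logprob.size();
}

int main() {
  std::map<std::string, double> lp = {{"parserA", -10}, {"parserB", -12}};
  // Weight 2 on parserA: (2*(-10) + (-12)) / 2 == -16.
  assert(wav_logp(lp, "parserA", 2.0) == -16.0);
  return 0;
}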
Parsed[i] is 1 when parser i returns a prob for this parse, and zero otherwise //! //! The identifier is Parsed // class Parsed : public FeatureClass { public: typedef symbol Feature; std::string identifier_string; Parsed() : identifier_string("Parsed") { } template <typename FeatClass, typename Feat_Count> void parse_featurecount(FeatClass& fc, const sp_parse_type& parse, Feat_Count& feat_count) { cforeach (S_F, it, parse.parser_logprob) feat_count[it->first] = 1; } // Parsed::parse_featurecount(); // Here is the stuff that every feature needs virtual const char *identifier() const { return identifier_string.c_str(); } // Parsed::identifier() // These virtual functions just pass control to the static template functions // SPFEATURES_COMMON_DEFINITIONS; }; // Parsed{} //! Parsed2[(i,j)] is 1 when both parsers i and j returns a prob for //! this parse, and zero otherwise //! //! The identifier is Parsed2 // class Parsed2 : public FeatureClass { public: typedef std::pair<symbol,symbol> SS; typedef SS Feature; std::string identifier_string; Parsed2() : identifier_string("Parsed2") { } template <typename FeatClass, typename Feat_Count> void parse_featurecount(FeatClass& fc, const sp_parse_type& parse, Feat_Count& feat_count) { cforeach (S_F, it0, parse.parser_logprob) { S_F::const_iterator it1 = it0; for (++it1; it1 != parse.parser_logprob.end(); ++it1) feat_count[SS(it0->first,it1->first)] = 1; } } // Parsed2::parse_featurecount(); // Here is the stuff that every feature needs virtual const char *identifier() const { return identifier_string.c_str(); } // Parsed2::identifier() // These virtual functions just pass control to the static template functions // SPFEATURES_COMMON_DEFINITIONS; }; // Parsed2{} //! Failed[i] is 1 when parser i failed to return a prob for this parse, and zero otherwise //! //! The identifier is Failed // class Failed : public FeatureClass { public: typedef symbol Feature; std::string identifier_string; Failed() : identifier_string("Failed") { } template <typename FeatClass, typename Feat_Count> void parse_featurecount(FeatClass& fc, const sp_parse_type& parse, Feat_Count& feat_count) { typedef sp_parse_type::Ss Ss; cforeach (Ss, it, parse.failedparsers) feat_count[*it] = 1; } // Failed::parse_featurecount(); // Here is the stuff that every feature needs virtual const char *identifier() const { return identifier_string.c_str(); } // Failed::identifier() // These virtual functions just pass control to the static template functions // SPFEATURES_COMMON_DEFINITIONS; }; // Failed{} //! Failed2[(i,j)] is 1 when neither parsers i and j returns a prob for //! this parse, and zero otherwise //! //! The identifier is Failed2 // class Failed2 : public FeatureClass { public: typedef std::pair<symbol,symbol> SS; typedef SS Feature; std::string identifier_string; Failed2() : identifier_string("Failed2") { } template <typename FeatClass, typename Feat_Count> void parse_featurecount(FeatClass& fc, const sp_parse_type& parse, Feat_Count& feat_count) { typedef sp_parse_type::Ss Ss; cforeach (Ss, it0, parse.failedparsers) { Ss::const_iterator it1 = it0; for (++it1; it1 != parse.failedparsers.end(); ++it1) feat_count[SS(*it0,*it1)] = 1; } } // Failed2::parse_featurecount(); // Here is the stuff that every feature needs virtual const char *identifier() const { return identifier_string.c_str(); } // Failed2::identifier() // These virtual functions just pass control to the static template functions // SPFEATURES_COMMON_DEFINITIONS; }; // Failed2{} //! 
ParsedFailed[(i,j)] is 1 when parser i returns a prob for //! this parse but parser j does not, and zero otherwise //! //! The identifier is ParsedFailed // class ParsedFailed : public FeatureClass { public: typedef std::pair<symbol,symbol> SS; typedef SS Feature; std::string identifier_string; ParsedFailed() : identifier_string("ParsedFailed") { } template <typename FeatClass, typename Feat_Count> void parse_featurecount(FeatClass& fc, const sp_parse_type& parse, Feat_Count& feat_count) { typedef sp_parse_type::Ss Ss; cforeach (S_F, it0, parse.parser_logprob) cforeach (Ss, it1, parse.failedparsers) feat_count[SS(it0->first,*it1)] = 1; } // ParsedFailed::parse_featurecount(); // Here is the stuff that every feature needs virtual const char *identifier() const { return identifier_string.c_str(); } // ParsedFailed::identifier() // These virtual functions just pass control to the static template functions // SPFEATURES_COMMON_DEFINITIONS; }; // ParsedFailed{} //! NParseds is an indicator for the number of parsers that returned a parse for this sentence //! //! The identifier is NParseds // class NParseds : public FeatureClass { public: typedef unsigned Feature; std::string identifier_string; NParseds() : identifier_string("NParseds") { } template <typename FeatClass, typename Feat_Count> void parse_featurecount(FeatClass& fc, const sp_parse_type& parse, Feat_Count& feat_count) { feat_count[parse.parser_logprob.size()] = 1; } // NParseds::parse_featurecount(); // Here is the stuff that every feature needs virtual const char *identifier() const { return identifier_string.c_str(); } // NParseds::identifier() // These virtual functions just pass control to the static template functions // SPFEATURES_COMMON_DEFINITIONS; }; // NParseds{} //! A LogBinnerFeatureClass is an ABC for classes of features which //! provides the logBin() function // class LogBinnerFeatureClass : public FeatureClass { public: int logBin(int rank, Float base) { if (rank == 0) return 0; else { assert(rank > 0); return 1+log(rank)/log(base); } } }; // LogBinnerFeatureClass{} //! BinnedParseRank{} is a feature that returns the binned rank of //! the prob assigned to this tree by a given parser //! //! The identifier is BinnedParseRank // class BinnedParseRank : public LogBinnerFeatureClass { public: typedef std::pair<symbol,int> Feature; std::string identifier_string; const Float rankbase; // The base of the log used to bin ranks BinnedParseRank(Float rankbase=2) : identifier_string("BinnedParseRank:"), rankbase(rankbase) { identifier_string += lexical_cast<std::string>(rankbase); } template <typename FeatClass, typename Feat_Count> void parse_featurecount(FeatClass& fc, const sp_parse_type& parse, Feat_Count& feat_count) { assert(parse.parser_logprob.size() == parse.parser_rank.size()); cforeach (S_U, it, parse.parser_rank) feat_count[Feature(it->first,logBin(it->second,rankbase))] = 1; } // BinnedParseRank::parse_featurecount(); // Here is the stuff that every feature needs virtual const char *identifier() const { return identifier_string.c_str(); } // BinnedParseRank::identifier() // These virtual functions just pass control to the static template functions // SPFEATURES_COMMON_DEFINITIONS; }; // BinnedParseRank{} //! BinnedParseRank2{} is a feature that returns the binned rank of //! the prob assigned to this tree by a given parser //! //! 
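logBin() above compresses ranks logarithmically: rank 0 keeps its own bin, and rank r >= 1 falls into bin 1 + floor(log_base(r)) through the implicit int truncation. A quick sketch printing the base-2 bins:

#include <cmath>
#include <cstdio>

// Same shape as LogBinnerFeatureClass::logBin(); the conversion to
// int truncates 1 + log(rank)/log(base).
int log_bin(int rank, double base) {
  if (rank == 0) return 0;
  return static_cast<int>(1 + std::log(rank) / std::log(base));
}

int main() {
  // Base 2: rank 1 -> bin 1, ranks 2-3 -> bin 2, ranks 4-7 -> bin 3, ...
  for (int rank = 0; rank <= 8; ++rank)
    std::printf("rank %d -> bin %d\n", rank, log_bin(rank, 2.0));
  return 0;
}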
The identifier is BinnedParseRank2 // class BinnedParseRank2 : public LogBinnerFeatureClass { public: typedef std::pair<symbol,int> SI; typedef std::pair<SI,SI> Feature; std::string identifier_string; const Float rankbase; // The base of the log used to bin ranks BinnedParseRank2(Float rankbase=2) : identifier_string("BinnedParseRank2:"), rankbase(rankbase) { identifier_string += lexical_cast<std::string>(rankbase); } template <typename FeatClass, typename Feat_Count> void parse_featurecount(FeatClass& fc, const sp_parse_type& parse, Feat_Count& feat_count) { cforeach (S_U, it0, parse.parser_rank) { int binnedrank0 = logBin(it0->second, rankbase); S_U::const_iterator it1 = it0; for (++it1; it1 != parse.parser_rank.end(); ++it1) { int binnedrank1 = logBin(it1->second, rankbase); feat_count[Feature(SI(it0->first,binnedrank0),SI(it1->first,-1))] = 1; feat_count[Feature(SI(it0->first,-1),SI(it1->first,binnedrank1))] = 1; feat_count[Feature(SI(it0->first,binnedrank0),SI(it1->first,binnedrank1))] = 1; } cforeach (sp_parse_type::Ss, it2, parse.failedparsers) feat_count[Feature(SI(it0->first,binnedrank0),SI(*it2,-2))] = 1; } } // BinnedParseRank2::parse_featurecount(); // Here is the stuff that every feature needs virtual const char *identifier() const { return identifier_string.c_str(); } // BinnedParseRank2::identifier() // These virtual functions just pass control to the static template functions // SPFEATURES_COMMON_DEFINITIONS; }; // BinnedParseRank2{} //! A TreeFeatureClass is an ABC for classes of features where the //! feature count for a parse is defined by the parse's tree (most //! features are like this). //! //! Every subclass to TreeFeatureClass must define a method: //! //! tree_featurecount(fc, tp, feat_count); // class TreeFeatureClass : public FeatureClass { public: //! parse_featurecount() passes the tree to tree_featurecount() //! to analyse. // template <typename FeatClass, typename Feat_Count> static void parse_featurecount(FeatClass& fc, const sp_parse_type& p, Feat_Count& feat_count) { assert(p.parse != NULL); fc.tree_featurecount(fc, p.parse, feat_count); } // TreeFeatureClass::parse_featurecount() }; // TreeFeatureClass{} //! A NodeFeatureClass is an ABC for classes of features where the //! feature count for a tree is the sum of the feature counts for //! each node. //! //! Every subclass to NodeFeatureClass must define a method: //! //! node_featurecount(fc, tp, feat_count); // class NodeFeatureClass : public TreeFeatureClass { public: //! tree_featurecount() sums the features on each node to get //! the feature count on each tree // template <typename FeatClass, typename Feat_Count> static void tree_featurecount(FeatClass& fc, const sptree* tp, Feat_Count& feat_count) { assert(tp != NULL); fc.node_featurecount(fc, tp, feat_count); if (tp->is_nonterminal()) tree_featurecount(fc, tp->child, feat_count); if (tp->next != NULL) tree_featurecount(fc, tp->next, feat_count); } // NodeFeatureClass::tree_featurecount() }; // NodeFeatureClass{} //! A RuleFeatureClass is an ABC for classes of features //! with rule-like features //! 
class RuleFeatureClass : public NodeFeatureClass { public: enum annotation_level { none, pos, lexical }; enum annotation_type { semantic, syntactic }; RuleFeatureClass(std::string identifier_stem, //!< Stem of identifier size_type nanccats, //!< Number of ancestor categories above trees bool label_root, //!< Annotate with "in root context" bool label_conjunct, //!< Annotate with "belongs to conjunction" annotation_level head, //!< Amount of head annotation annotation_level functional, //!< Amount of function word annotation annotation_level all, //!< Amount of lexical word annotation annotation_type type //!< Head type ) : nanccats(nanccats), label_root(label_root), label_conjunct(label_conjunct), head(head), functional(functional), all(all), type(type), identifier_string(identifier_stem), max_annotation_level(std::max(head,std::max(functional,all))) { identifier_string += ":"; identifier_string += lexical_cast<std::string>(nanccats) + ":"; identifier_string += lexical_cast<std::string>(label_root) + ":"; identifier_string += lexical_cast<std::string>(label_conjunct) + ":"; identifier_string += lexical_cast<std::string>(head) + ":"; identifier_string += lexical_cast<std::string>(functional) + ":"; identifier_string += lexical_cast<std::string>(all) + ":"; identifier_string += lexical_cast<std::string>(type); } // RuleFeatureClass::RuleFeatureClass() const size_type nanccats; const bool label_root; const bool label_conjunct; annotation_level head; //!< annotation on rule's head annotation_level functional; //!< annotation on (projections of) functional categories annotation_level all; //!< annotation on all words annotation_type type; //!< syntactic or semantic annotation std::string identifier_string; const annotation_level max_annotation_level; typedef std::vector<symbol> Feature; //! push_child_features() pushes the features for this child node // void push_child_features(const sptree* node, const sptree* parent, Feature& f, annotation_level& highest_level) { const sptree* parent_headchild = (type == semantic ? parent->label.semantic_headchild : parent->label.syntactic_headchild); bool is_headchild = (node == parent_headchild); const sptree_label& label = node->label; f.push_back(label.cat); const sptree* lexhead = (type == semantic ? label.semantic_lexhead : label.syntactic_lexhead); if (lexhead == NULL) return; if (all < pos && (!lexhead->is_functional() || functional < pos) && (!is_headchild || head < pos)) return; if (lexhead != node) { f.push_back(headmarker()); f.push_back(lexhead->label.cat); highest_level = std::max(highest_level, pos); } if (all < lexical && (!lexhead->is_functional() || functional < lexical) && (!is_headchild || head < lexical)) return; f.push_back(lexhead->child->label.cat); highest_level = std::max(highest_level, lexical); } // RuleFeatureClass::push_child_features() //! push_ancestor_features() pushes features for ancestor nodes. // void push_ancestor_features(const sptree* node, Feature& f) { f.push_back(endmarker()); const sptree* parent = node->label.parent; for (size_type i = 0; i <= nanccats && parent != NULL; ++i) { f.push_back(node->label.cat); if (label_conjunct && parent != NULL) { if (parent->is_coordination()) f.push_back(parent->is_last_nonpunctuation() ? lastconjunctmarker() : conjunctmarker()); else if (parent->is_adjunction()) f.push_back(parent->is_last_nonpunctuation() ? 
lastadjunctmarker() : adjunctmarker()); } node = parent; parent = node->label.parent; } if (label_root) for (node = parent; node != NULL; node = node->label.parent) if (is_bounding_node(node) && !is_bounding_node(node->label.parent)) { f.push_back(nonrootmarker()); break; } } // RuleFeatureClass::push_top_ancestor_features() virtual const char *identifier() const { return identifier_string.c_str(); } // RuleFeatureClass::identifier() }; // RuleFeatureClass{} //! Identifier is Rule:<nanctrees>:<nanccats>:<root>:<conj>:<head>:<functional>:<all>:<type> //! class Rule : public RuleFeatureClass { public: Rule(size_type nanctrees = 0, //!< Number of ancestor local trees size_type nanccats = 0, //!< Number of ancestor categories above trees bool label_root = false, //!< Annotate with "in root context" bool label_conjunct = false, //!< Annotate with "belongs to conjunction" annotation_level head = none, //!< Amount of head annotation annotation_level functional = none, //!< Amount of function word annotation annotation_level all = none, //!< Amount of lexical word annotation annotation_type type = syntactic ) : RuleFeatureClass(std::string("Rule:")+lexical_cast<std::string>(nanctrees), nanccats, label_root, label_conjunct, head, functional, all, type), nanctrees(nanctrees) { } // Rule::Rule() size_type nanctrees; template <typename FeatClass, typename Feat_Count> void node_featurecount(FeatClass& fc, const sptree* node, Feat_Count& feat_count) { if (!node->is_nonterminal()) return; Feature f; annotation_level highest_level = none; // push (possibly lexicalized) children for (const sptree* child = node->child; child != NULL; child = child->next) push_child_features(child, node, f, highest_level); // push (possibly lexicalized) ancestor rules for (size_type i = 0; i < nanctrees && node->label.parent != NULL; ++i) { f.push_back(endmarker()); for (const sptree* child = node->label.parent->child; child != NULL; child = child->next) if (child == node) { f.push_back(childmarker()); f.push_back(child->label.cat); } else push_child_features(child, node, f, highest_level); node = node->label.parent; } if (highest_level != max_annotation_level) return; push_ancestor_features(node, f); ++feat_count[f]; } // Rule::node_featurecount() // Here is the stuff that every feature needs SPFEATURES_COMMON_DEFINITIONS; }; // Rule{} //! NGram //! //! 
Identifier is NGram:<frag_len>:<nanccats>:<root>:<conj>:<head>:<functional>:<all>:<type> // class NGram : public RuleFeatureClass { public: NGram(size_type fraglen = 3, //!< Number of children in sequence size_type nanccats = 1, //!< Number of ancestor categories above trees bool label_root = false, //!< Annotate with "in root context" bool label_conjunct = false, //!< Annotate with "belongs to conjunction" annotation_level head = none, //!< Amount of head annotation annotation_level functional = none, //!< Amount of function word annotation annotation_level all = none, //!< Amount of lexical word annotation annotation_type type = syntactic ) : RuleFeatureClass(std::string("NGram:")+lexical_cast<std::string>(fraglen), nanccats, label_root, label_conjunct, head, functional, all, type), fraglen(fraglen) { } size_type fraglen; template <typename FeatClass, typename Feat_Count> void node_featurecount(FeatClass& fc, const sptree* node, Feat_Count& feat_count) { if (!node->is_nonterminal()) return; size_type nchildren = 0; for (const sptree* child = node->child; child != NULL; child = child->next) ++nchildren; if (nchildren < fraglen) return; const sptree* headchild = (type == semantic ? node->label.semantic_headchild : node->label.syntactic_headchild); typedef std::vector<const sptree*> Tptrs; Tptrs children; children.push_back(NULL); for (const sptree* child = node->child; child != NULL; child = child->next) children.push_back(child); children.push_back(NULL); symbol headposition = preheadmarker(); for (size_type start = 0; start+fraglen <= children.size(); ++start) { if (children[start] == headchild) headposition = postheadmarker(); Feature f; annotation_level highest_level = none; bool includes_headchild = false; for (size_type pos = start; pos < start+fraglen; ++pos) { const sptree* child = children[pos]; if (child != NULL) push_child_features(child, node, f, highest_level); else f.push_back(endmarker()); if (child == headchild) includes_headchild = true; } f.push_back(headposition); if (includes_headchild == false && head != none) push_child_features(headchild, node, f, highest_level); if (highest_level != max_annotation_level) return; push_ancestor_features(node, f); ++feat_count[f]; } } // NGram::node_featurecount() SPFEATURES_COMMON_DEFINITIONS; }; // NGram{} //! NNGram //! //! 
Identifier is NNGram:<fraglen>:<headdir>:<headdist>:<nanccats>:<root>:<conj>:<head>:<functional>:<all>:<type> // class NNGram : public RuleFeatureClass { public: NNGram(size_type fraglen = 3, //!< Number of children in sequence size_type nanccats = 1, //!< Number of ancestor categories above trees bool label_root = false, //!< Annotate with "in root context" bool label_conjunct = false, //!< Annotate with "belongs to conjunction" annotation_level head = none, //!< Amount of head annotation annotation_level functional = none, //!< Amount of function word annotation annotation_level all = none, //!< Amount of lexical word annotation annotation_type type = syntactic, //!< Type of head to use bool headdir = false, //!< Annotate direction to head bool headdist = false //!< Annotate distance from head ) : RuleFeatureClass(std::string("NNGram:")+lexical_cast<std::string>(fraglen) +":"+lexical_cast<std::string>(headdir)+":"+lexical_cast<std::string>(headdist), nanccats, label_root, label_conjunct, head, functional, all, type), fraglen(fraglen), headdir(headdir), headdist(headdist) { } size_type fraglen; bool headdir, headdist; template <typename FeatClass, typename Feat_Count> void node_featurecount(FeatClass& fc, const sptree* node, Feat_Count& feat_count) { if (!node->is_nonterminal()) return; const sptree* headchild = (type == semantic ? node->label.semantic_headchild : node->label.syntactic_headchild); size_type nchildren = 0; size_type headlocation = 0; //!< location of head in sequence of children for (const sptree* child = node->child; child != NULL; child = child->next) { if (child == headchild) headlocation = nchildren; ++nchildren; } if (nchildren+1 < fraglen) return; typedef std::vector<const sptree*> Tptrs; Tptrs children; children.push_back(NULL); for (const sptree* child = node->child; child != NULL; child = child->next) children.push_back(child); children.push_back(NULL); symbol headposition = preheadmarker(); for (size_type start1 = 0; start1+fraglen <= children.size(); ++start1) { if (children[start1] == headchild) headposition = postheadmarker(); Feature f; annotation_level highest_level = none; bool includes_headchild = false; for (size_type pos1 = start1; pos1 < start1+fraglen; ++pos1) { const sptree* child = children[pos1]; if (child != NULL) { push_child_features(child, node, f, highest_level); if (child == headchild) includes_headchild = true; } else f.push_back(endmarker()); } if (headdir) { if (includes_headchild) { assert(headlocation+1 >= start1); f.push_back(symbol_quantize(headlocation+1-start1)); } else f.push_back(headposition); } if (headdist) { if (headlocation+1 < start1) f.push_back(symbol_quantize(start1-headlocation-1)); else if (headlocation+1 >= start1+fraglen) { assert(headlocation + 2 > start1+fraglen); f.push_back(symbol_quantize(headlocation + 2 - (start1+fraglen))); } else f.push_back(symbol_quantize(0)); } if (head != none) { if (headchild != NULL) push_child_features(headchild, node, f, highest_level); else f.push_back(headmarker()); } if (highest_level != max_annotation_level) return; push_ancestor_features(node, f); ++feat_count[f]; } } // NNGram::node_featurecount() SPFEATURES_COMMON_DEFINITIONS; }; // NNGram{} //! Word{} collects information on words in their vertical context. //! //! Identifier is Word:<nanccats> //! 
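//! For example (a sketch): with nanccats == 2, a preterminal (NN dog)
//! under an NP under a VP contributes the feature [dog, NN, NP].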
class Word : public NodeFeatureClass {
public:
  Word(size_type nanccats = 1)    //!< Number of ancestor categories to include
    : nanccats(nanccats), identifier_string("Word:") {
    identifier_string += lexical_cast<std::string>(nanccats);
  } // Word::Word()

  const size_type nanccats;
  std::string identifier_string;

  typedef std::vector<symbol> Feature;

  template <typename FeatClass, typename Feat_Count>
  void node_featurecount(FeatClass& fc, const sptree* node, Feat_Count& feat_count) {
    if (!node->is_preterminal())
      return;
    Feature f;
    f.push_back(node->child->label.cat);
    for (size_type i = 0; i < nanccats; ++i) {
      if (node == NULL)
        return;
      f.push_back(node->label.cat);
      node = node->label.parent;
    }
    ++feat_count[f];
  } // Word::node_featurecount()

  // Here is the stuff that every feature needs
  virtual const char *identifier() const {
    return identifier_string.c_str();
  } // Word::identifier()

  SPFEATURES_COMMON_DEFINITIONS;
}; // Word{}

//! WProj{} collects information on words in their vertical context.
//! It projects each word up to its maximal projection.
//!
//! Identifier is WProj:<HeadType>:<IncludeNonMaximal>:<NAncs>
//!
class WProj : public NodeFeatureClass {
public:
  enum annotation_type { semantic, syntactic };

  WProj(annotation_type type = semantic,   //!< Head type
        bool include_nonmaximal = false,   //!< Include non-maximal projections as well
        size_type nancs = 1)               //!< Extra ancestors to include
    : type(type), include_nonmaximal(include_nonmaximal), nancs(nancs),
      identifier_string("WProj:") {
    (identifier_string += lexical_cast<std::string>(type)) += ':';
    (identifier_string += lexical_cast<std::string>(include_nonmaximal)) += ':';
    identifier_string += lexical_cast<std::string>(nancs);
  } // WProj::WProj()

  annotation_type type;
  bool include_nonmaximal;
  size_type nancs;
  std::string identifier_string;

  typedef std::vector<symbol> Feature;

  template <typename FeatClass, typename Feat_Count>
  void node_featurecount(FeatClass& fc, const sptree* node, Feat_Count& feat_count) {
    if (node->is_punctuation() || !node->is_preterminal())
      return;
    Feature f;
    f.push_back(node->child->label.cat);
    while (node->label.parent) {
      const sptree* parent = node->label.parent;
      const sptree* parent_headchild = (type == semantic
                                        ? parent->label.semantic_headchild
                                        : parent->label.syntactic_headchild);
      bool is_headchild = (node == parent_headchild && !parent->is_root());
      if (is_headchild) {
        if (include_nonmaximal)
          f.push_back(node->label.cat);
      }
      else
        break;
      node = parent;
    }
    for (size_type i = 0; node != NULL && i <= nancs; node = node->label.parent, ++i)
      f.push_back(node->label.cat);
    ++feat_count[f];
  } // WProj::node_featurecount()

  // Here is the stuff that every feature needs
  virtual const char *identifier() const {
    return identifier_string.c_str();
  } // WProj::identifier()

  SPFEATURES_COMMON_DEFINITIONS;
}; // WProj{}

//! The RightBranch defines two features: 1, which is true
//! of lexical nodes on the right-most branch and 0, which is true of
//! nodes that are not on the right-most branch.
//
class RightBranch : public TreeFeatureClass {
public:
  //! tree_featurecount() counts the number of nodes on a right branch
  //
  template <typename FeatClass, typename Feat_Count>
  static void tree_featurecount(FeatClass& fc, const sptree* tp, Feat_Count& feat_count) {
    rightbranch_count(tp, 1, feat_count);
  } // RightBranch::tree_featurecount()

  //! rightbranch_count() is called with rightmost==1 iff the parent was
  //! on the rightmost branch.
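//! Note that the recursive call on tp->next runs first, so a node keeps
//! rightmost == 1 only if every sibling to its right is punctuation
//! (punctuation passes the flag through; any other sibling returns 0).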
  //
  template <typename Feat_Count>
  static int rightbranch_count(const sptree* tp, int rightmost, Feat_Count& fc) {
    if (tp->next)
      rightmost = rightbranch_count(tp->next, rightmost, fc);
    if (tp->is_punctuation())
      return rightmost;
    ++fc[rightmost];
    if (tp->is_nonterminal())
      rightbranch_count(tp->child, rightmost, fc);
    return 0;
  } // RightBranch::rightbranch_count()

  // required types
  typedef int Feature;   // 0 = non-rightmost branch, 1 = rightmost branch

  virtual const char* identifier() const {
    static char p[] = "RightBranch";
    return p;
  } // RightBranch::identifier()

  SPFEATURES_COMMON_DEFINITIONS;
}; // RightBranch{}

//! LeftBranchLength is a feature whose value is the log of
//! the length of the left-branching chain terminating in
//! each preterminal.
//!
//! Identifier is LeftBranchLength
//
class LeftBranchLength : public TreeFeatureClass {
public:
  //! tree_featurecount() counts the length of a leftmost branching chain
  //
  template <typename FeatClass, typename Feat_Count>
  static void tree_featurecount(FeatClass& fc, const sptree* tp, Feat_Count& feat_count) {
    leftbranch_count(tp, 1, feat_count);
  } // LeftBranchLength::tree_featurecount()

  //! leftbranch_count() is called with the number of left branching
  //! nodes above this one
  //
  template <typename Feat_Count>
  static void leftbranch_count(const sptree* tp, int leftmost, Feat_Count& fc) {
    if (tp == NULL)
      return;
    else if (tp->is_punctuation())
      leftbranch_count(tp->next, leftmost, fc);
    else {
      if (tp->is_preterminal()) {
        assert(leftmost >= 1);
        int log2_leftmost = int(log2f(float(leftmost)));
        ++fc[log2_leftmost];
      }
      else
        leftbranch_count(tp->child, leftmost+1, fc);
      leftbranch_count(tp->next, 1, fc);
    }
  } // LeftBranchLength::leftbranch_count()

  // required types
  typedef int Feature;   // log2 length of left branch

  virtual const char* identifier() const {
    static char p[] = "LeftBranchLength";
    return p;
  } // LeftBranchLength::identifier()

  SPFEATURES_COMMON_DEFINITIONS;
}; // LeftBranchLength{}

//! RightBranchLength is a feature whose value is the log of
//! the length of the right-branching chain terminating in
//! each preterminal.
//!
//! Identifier is RightBranchLength
//
class RightBranchLength : public TreeFeatureClass {
public:
  //! tree_featurecount() counts the length of a rightmost branching chain
  //
  template <typename FeatClass, typename Feat_Count>
  static void tree_featurecount(FeatClass& fc, const sptree* tp, Feat_Count& feat_count) {
    rightbranch_count(tp, 1, feat_count);
  } // RightBranchLength::tree_featurecount()

  //! rightbranch_count() is called with the number of right branching
  //! nodes above this one
  //
  template <typename Feat_Count>
  static int rightbranch_count(const sptree* tp, int rightmost, Feat_Count& fc) {
    if (tp->next)
      rightmost = rightbranch_count(tp->next, rightmost, fc);
    if (tp->is_punctuation())
      return rightmost;
    if (tp->is_preterminal()) {
      assert(rightmost >= 1);
      int log2_rightmost = int(log2f(float(rightmost)));
      ++fc[log2_rightmost];
    }
    else // tp->is_nonterminal()
      rightbranch_count(tp->child, rightmost+1, fc);
    return 1;
  } // RightBranchLength::rightbranch_count()

  // required types
  typedef int Feature;   // log2 length of right branch

  virtual const char* identifier() const {
    static char p[] = "RightBranchLength";
    return p;
  } // RightBranchLength::identifier()

  SPFEATURES_COMMON_DEFINITIONS;
}; // RightBranchLength{}

//! RBContext
//!
//!
Identifier is RBContext:<conjunct>:<parent>:<governor>:<type> // class RBContext : public NodeFeatureClass { public: typedef std::vector<symbol> Feature; enum head_type_type { syntactic, semantic }; const bool label_coordination; const bool label_parent; const bool label_governor; const head_type_type head_type; std::string identifier_string; RBContext(bool label_coordination = false, //!< Annotate with "is coordination" bool label_parent = false, //!< Annotate with parent's category bool label_governor = false, //!< Annotate with governor's category head_type_type head_type = syntactic ) : label_coordination(label_coordination), label_parent(label_parent), label_governor(label_governor), head_type(head_type), identifier_string("RBContext:") { identifier_string += lexical_cast<std::string>(label_coordination) + ":"; identifier_string += lexical_cast<std::string>(label_parent) + ":"; identifier_string += lexical_cast<std::string>(label_governor) + ":"; identifier_string += lexical_cast<std::string>(head_type); } // RBContext::RBContext() const sptree* headchild(const sptree* node) const { return head_type == semantic ? node->label.semantic_headchild : node->label.syntactic_headchild; } // RBContext::headchild(); const sptree* lexhead(const sptree* node) const { return head_type == semantic ? node->label.semantic_lexhead : node->label.syntactic_lexhead; } // RBContext::lexhead(); template <typename FeatClass, typename Feat_Count> void node_featurecount(FeatClass& fc, const sptree* node, Feat_Count& feat_count) { if (!node->is_nonterminal()) return; const sptree* hchild = headchild(node); if (hchild == NULL) return; const sptree* lhchild = lexhead(hchild); if (lhchild == NULL) return; Feature f; if (label_coordination && node->is_coordination()) f.push_back(conjunctmarker()); if (label_parent) f.push_back(node->label.cat); if (label_governor) { f.push_back(hchild->label.cat); f.push_back(symbol_quantize(hchild->label.right - lhchild->label.right)); } for (const sptree* child = node->child; child != NULL; child = child->next) { if (child == hchild) { f.push_back(postheadmarker()); continue; } const sptree* lchild = lexhead(child); if (lchild == NULL) continue; f.push_back(child->label.cat); f.push_back(symbol_quantize(child->label.right - lchild->label.right)); ++feat_count[f]; f.pop_back(); f.pop_back(); } } // Here is the stuff that every feature needs //! The identifier string is RBContext:<conjunct>:<parent>:<governor>:<type> // virtual const char *identifier() const { return identifier_string.c_str(); } // RBContext::identifier() SPFEATURES_COMMON_DEFINITIONS; }; // RBContext{} //! Heads is a feature of n levels of head-to-head dependencies. //! Heads takes special care to follow head dependencies through //! conjunctions. //! //! The identifier string is Heads:nheads:governorlex:dependentlex:headtype. 
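//! For example (a sketch): with nheads == 2 and both lexicalization
//! flags set, a dependency from (NN dog) to its governing (VBZ barks)
//! yields the feature [NN, dog, VBZ, barks].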
// class Heads : public NodeFeatureClass { public: typedef std::vector<symbol> Feature; enum head_type_type { syntactic, semantic }; const size_type nheads; //!< number of levels of heads to use const bool governorlex; //!< use governor's word (in addition to its POS) const bool dependentlex; //!< use dependent's head word (in addition its POS) const head_type_type head_type; //!< type of head dependency to track std::string identifier_string; Heads(size_type nheads = 2, //!< number of levels of heads to use bool governorlex = true, //!< use governor's word (in addition to its POS) bool dependentlex = true, //!< use dependent's head word (in addition to its POS) head_type_type head_type = syntactic) : nheads(nheads), governorlex(governorlex), dependentlex(dependentlex), head_type(head_type), identifier_string("Heads:") { identifier_string += lexical_cast<std::string>(nheads) + ":"; identifier_string += lexical_cast<std::string>(governorlex) + ":"; identifier_string += lexical_cast<std::string>(dependentlex) + ":"; identifier_string += lexical_cast<std::string>(head_type); } // Heads::Heads() const sptree* headchild(const sptree* node) const { return head_type == semantic ? node->label.semantic_headchild : node->label.syntactic_headchild; } // Heads::headchild(); //! node_featurecount() uses headchild() to find all of the heads //! of this node and its ancestors. // template <typename FeatClass, typename Feat_Count> void node_featurecount(FeatClass& fc, const sptree* node, Feat_Count& feat_count) { if (!node->is_preterminal()) // only consider preterminal heads return; Feature f; f.push_back(node->label.cat); if (dependentlex) f.push_back(node->child->label.cat); visit_ancestors(feat_count, node, 1, f); assert(f.size() == (dependentlex ? 2 : 1)); } // Heads::node_featurecount() //! visit_ancestors() is written in continuation-passing style, in order //! to enumerate all possible governors. // template <typename Feat_Count> void visit_ancestors(Feat_Count& feat_count, const sptree* node, size_type nsofar, Feature& f) { if (nsofar == nheads) { // are we done? ++feat_count[f]; return; } const sptree* ancestor = node->label.parent; if (ancestor == NULL) return; // no more ancestors, so we can't find enough governors if (ancestor->is_coordination()) visit_ancestors(feat_count, ancestor, nsofar, f); // skip this level else { const sptree* hchild = headchild(ancestor); if (hchild != NULL && node != hchild) visit_descendants(feat_count, ancestor, nsofar, f, hchild); else visit_ancestors(feat_count, ancestor, nsofar, f); } } // Heads::visit_ancestors() //! 
visit_descendants() collects the head(s) of head and then visits ancestors
  //
  template <typename Feat_Count>
  void visit_descendants(Feat_Count& feat_count, const sptree* ancestor, size_type nsofar,
                         Feature& f, const sptree* head) {
    if (head->is_preterminal()) {
      f.push_back(head->label.cat);                          // push governor label
      if (governorlex)
        f.push_back(head->child->label.cat);
      visit_ancestors(feat_count, ancestor, nsofar+1, f);    // visit ancestors
      f.pop_back();                                          // pop governor
      if (governorlex)
        f.pop_back();
    }
    else {
      const sptree* hchild = headchild(head);
      if (head->is_coordination()) {   // all children count as heads
        for (const sptree* child = head->child; child != NULL; child = child->next)
          if (child->label.cat == head->label.cat
              || (hchild != NULL && child->label.cat == hchild->label.cat))
            visit_descendants(feat_count, ancestor, nsofar, f, child);
      }
      else {                           // visit head child
        if (hchild != NULL)
          visit_descendants(feat_count, ancestor, nsofar, f, hchild);
      }
    }
  } // Heads::visit_descendants()

  // Here is the stuff that every feature needs

  //! The identifier string is Heads:nheads:governorlex:dependentlex:headtype.
  //
  virtual const char *identifier() const {
    return identifier_string.c_str();
  } // Heads::identifier()

  SPFEATURES_COMMON_DEFINITIONS;
}; // Heads{}

//! WSHeads is a feature of n levels of head-to-head dependencies.
//! WSHeads takes special care to follow head dependencies through
//! conjunctions.
//!
//! The identifier string is WSHeads:nsuffixletters:distribute:nheads:governorinfo:dependentinfo:headtype.
//
class WSHeads : public NodeFeatureClass {
public:
  typedef std::vector<symbol> Feature;
  enum head_type_type { syntactic, semantic };
  enum info_type { pos, closedclass, lexical };

  const size_type nsuffixletters;   //!< keep nsuffix letters from words
  const bool distribute;            //!< distribute head dependencies over coordinate phrases
  const size_type nheads;           //!< number of levels of heads to use
  const info_type governorinfo;     //!< use governor's word (in addition to its POS)
  const info_type dependentinfo;    //!< use dependent's head word (in addition to its POS)
  const head_type_type head_type;   //!< type of head dependency to track
  std::string identifier_string;

  WSHeads(size_type nsuffixletters = 0,        //!< keep nsuffixletters from words
          bool distribute = false,             //!< distribute head dependencies over coordinate phrases
          size_type nheads = 2,                //!< number of levels of heads to use
          info_type governorinfo = lexical,    //!< use governor's word (in addition to its POS)
          info_type dependentinfo = lexical,   //!< use dependent's head word (in addition to its POS)
          head_type_type head_type = syntactic)
    : nsuffixletters(nsuffixletters), distribute(distribute), nheads(nheads),
      governorinfo(governorinfo), dependentinfo(dependentinfo), head_type(head_type),
      identifier_string("WSHeads:") {
    identifier_string += lexical_cast<std::string>(nsuffixletters) + ":";
    identifier_string += lexical_cast<std::string>(distribute) + ":";
    identifier_string += lexical_cast<std::string>(nheads) + ":";
    identifier_string += lexical_cast<std::string>(governorinfo) + ":";
    identifier_string += lexical_cast<std::string>(dependentinfo) + ":";
    identifier_string += lexical_cast<std::string>(head_type);
  } // WSHeads::WSHeads()

  const sptree* headchild(const sptree* node) const {
    return head_type == semantic
      ? node->label.semantic_headchild
      : node->label.syntactic_headchild;
  } // WSHeads::headchild();

  //! node_featurecount() uses headchild() to find all of the heads
  //! of this node and its ancestors.
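  //! Depending on dependentinfo (and likewise governorinfo), a word
  //! contributes nothing beyond its POS tag (pos), the full word
  //! (closedclass), or only its last nsuffixletters characters
  //! (lexical, via suffix()).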
  //
  template <typename FeatClass, typename Feat_Count>
  void node_featurecount(FeatClass& fc, const sptree* node, Feat_Count& feat_count) {
    if (!node->is_preterminal())   // only consider preterminal heads
      return;
    Feature f;
    f.push_back(node->label.cat);
    if (dependentinfo == closedclass)
      f.push_back(node->child->label.cat);
    else if (dependentinfo == lexical)
      f.push_back(suffix(node->child->label.cat, nsuffixletters));
    visit_ancestors(feat_count, node, 1, f);
  } // WSHeads::node_featurecount()

  //! visit_ancestors() is written in continuation-passing style, in order
  //! to enumerate all possible governors.
  //
  template <typename Feat_Count>
  void visit_ancestors(Feat_Count& feat_count, const sptree* node, size_type nsofar, Feature& f) {
    if (nsofar == nheads) {   // are we done?
      ++feat_count[f];
      return;
    }
    const sptree* ancestor = node->label.parent;
    if (ancestor == NULL)
      return;   // no more ancestors, so we can't find enough governors
    if (ancestor->is_coordination()) {        // skip this level
      if (distribute || node->next == NULL)   // if !distribute, don't go up if we aren't on right branch
        visit_ancestors(feat_count, ancestor, nsofar, f);
    }
    else {
      const sptree* hchild = headchild(ancestor);
      if (hchild != NULL && node != hchild)
        visit_descendants(feat_count, ancestor, nsofar, f, hchild);
      else
        visit_ancestors(feat_count, ancestor, nsofar, f);
    }
  } // WSHeads::visit_ancestors()

  //! visit_descendants() collects the head(s) of head and then visits ancestors
  //
  template <typename Feat_Count>
  void visit_descendants(Feat_Count& feat_count, const sptree* ancestor, size_type nsofar,
                         Feature& f, const sptree* head) {
    if (head->is_preterminal()) {
      unsigned oldfsize = f.size();
      f.push_back(head->label.cat);   // push governor label
      if (governorinfo == closedclass)
        f.push_back(head->child->label.cat);
      else if (governorinfo == lexical)
        f.push_back(suffix(head->child->label.cat, nsuffixletters));
      visit_ancestors(feat_count, ancestor, nsofar+1, f);   // visit ancestors
      f.resize(oldfsize);   // pop annotations we just pushed
    }
    else {
      if (head->is_coordination() && distribute) {   // all children count as heads
        for (const sptree* child = head->child; child != NULL; child = child->next)
          if (child->label.cat == head->label.cat)
            visit_descendants(feat_count, ancestor, nsofar, f, child);
      }
      else {   // visit head child
        const sptree* child = headchild(head);
        if (child != NULL)
          visit_descendants(feat_count, ancestor, nsofar, f, child);
      }
    }
  } // WSHeads::visit_descendants()

  // Here is the stuff that every feature needs

  //! The identifier string is WSHeads:nsuffixletters:distribute:nheads:governorinfo:dependentinfo:headtype.
  //
  virtual const char *identifier() const {
    return identifier_string.c_str();
  } // WSHeads::identifier()

  SPFEATURES_COMMON_DEFINITIONS;
}; // WSHeads{}

//! The PTsFeatureClass is an ABC for feature classes where the feature
//! count for a tree is the sum of feature counts for each node, and the
//! feature counts for each node depend on the local tree and its ancestors,
//! and the node's left and right string positions.
//!
//! Every subclass of PTsFeatureClass must define a method:
//!
//!
node_featurecount(fc, preterminals, tp, feat_count) // class PTsFeatureClass : public TreeFeatureClass { public: typedef std::vector<const sptree*> SptreePtrs; template <typename FeatClass, typename Feat_Count> static void tree_featurecount(FeatClass& fc, const sptree* tp, Feat_Count& feat_count) { assert(tp != NULL); SptreePtrs preterms; tp->preterminal_nodes(preterms, true); if (preterms.size() != tp->label.right) { std::cerr << "## preterms = " << preterms << "\n## tp = " << tp << std::endl; return; } assert(preterms.size() == tp->label.right); tree_featurecount(fc, preterms, tp, feat_count); } // PTsFeatureClass:tree_featurecount() //! tree_featurecount() actually visits the nodes // template <typename FeatClass, typename Feat_Count> static void tree_featurecount(FeatClass& fc, const SptreePtrs& preterms, const sptree* tp, Feat_Count& feat_count) { fc.node_featurecount(fc, preterms, tp, feat_count); if (tp->is_nonterminal()) tree_featurecount(fc, preterms, tp->child, feat_count); if (tp->next) tree_featurecount(fc, preterms, tp->next, feat_count); } // PTsFeatureClass:tree_featurecount() }; // PTsFeatureClass{} //! The Neighbours{} includes the node's category, its binned length //! and the left and right POS tags next to each node. This version //! has a bug in it -- use Edges() to accomplish the same thing w/out //! the bug! //! //! Its identifier is Neighbours:<nleft>:<nright> // class Neighbours : public PTsFeatureClass { public: Neighbours(size_type nleft = 0, //!< include nleft left POS size_type nright = 0) //!< include nright right POS : nleft(nleft), nright(nright), identifier_string("Neighbours:") { (identifier_string += lexical_cast<std::string>(nleft)) += ":"; identifier_string += lexical_cast<std::string>(nright); } // Neighbours::Neighbours() // required types typedef std::vector<symbol> Ss; typedef std::pair<int,Ss> ISs; typedef ISs Feature; size_type nleft, nright; // number of POS tags to left and right std::string identifier_string; // will hold its identifier template <typename FeatClass, typename Feat_Count> void node_featurecount(FeatClass& fc, const SptreePtrs& preterms, const sptree* node, Feat_Count& feat_count) { if (!node->is_nonterminal()) return; size_type left = node->label.left; size_type right = node->label.right; Feature f; // number of preterminals f.first = quantize(right-left); f.second.push_back(node->label.cat); // category label // XXXX The following line is buggy, but is left in here because this // XXXX is what was used in the ACL'05 and CoNLL'05 talks // for (size_type i = 0; i < nleft; ++i) // left preterminals f.second.push_back(i <= left ? preterms[left-i]->label.cat : endmarker()); for (size_type i = 0; i < nright; ++i) // right preterminals f.second.push_back(right+i < preterms.size() ? preterms[right+i]->label.cat : endmarker()); ++feat_count[f]; } // Neighbours::node_featurecount() virtual const char *identifier() const { return identifier_string.c_str(); } // Neighbours::identifier() SPFEATURES_COMMON_DEFINITIONS; }; // Neighbours{} //! The WEdges{} includes the node's category, its binned length //! and the left and right POS and words preceding and following the constituent edges //! //! 
Its identifier is WEdges:<binnedlength>:<nleftprec>:<nleftprecw>:<nleftsucc>:<nleftsuccw>:<nrightprec>:<nrightprecw>:<nrightsucc>:<nrightsuccw> // class WEdges : public PTsFeatureClass { public: WEdges(bool binned_length = false, //!< include binned length size_type nleftprec = 0, //!< include nleft left preceding POS size_type nleftprecw = 0, //!< include nleft left preceding words size_type nleftsucc = 0, //!< include nleft left following POS size_type nleftsuccw = 0, //!< include nleft left following words size_type nrightprec = 0, //!< include nright right preceding POS size_type nrightprecw = 0, //!< include nright right preceding words size_type nrightsucc = 0, //!< include nright right following POS size_type nrightsuccw = 0) //!< include nright right following words : binned_length(binned_length), nleftprec(nleftprec), nleftsucc(nleftsucc), nrightprec(nrightprec), nrightsucc(nrightsucc), nleftprecw(nleftprecw), nleftsuccw(nleftsuccw), nrightprecw(nrightprecw), nrightsuccw(nrightsuccw), identifier_string("WEdges:") { (identifier_string += lexical_cast<std::string>(binned_length)) += ":"; (identifier_string += lexical_cast<std::string>(nleftprec)) += ":"; (identifier_string += lexical_cast<std::string>(nleftprecw)) += ":"; (identifier_string += lexical_cast<std::string>(nleftsucc)) += ":"; (identifier_string += lexical_cast<std::string>(nleftsuccw)) += ":"; (identifier_string += lexical_cast<std::string>(nrightprec)) += ":"; (identifier_string += lexical_cast<std::string>(nrightprecw)) += ":"; (identifier_string += lexical_cast<std::string>(nrightsucc)) += ":"; identifier_string += lexical_cast<std::string>(nrightsuccw); } // WEdges:WEdges() // required types typedef std::vector<symbol> Ss; typedef Ss Feature; bool binned_length; // collect binned length size_type nleftprec, nleftsucc, nrightprec, nrightsucc; // number of words surrounding edges size_type nleftprecw, nleftsuccw, nrightprecw, nrightsuccw; // number of words surrounding edges std::string identifier_string; // will hold its identifier template <typename FeatClass, typename Feat_Count> void node_featurecount(FeatClass& fc, const SptreePtrs& preterms, const sptree* node, Feat_Count& feat_count) { if (!node->is_nonterminal()) return; size_type left = node->label.left; size_type right = node->label.right; size_type nwords = preterms.size(); Feature f; if (binned_length) f.push_back(symbol_quantize(right-left)); // number of preterminals f.push_back(node->label.cat); // category label for (size_type i = 1; i <= nleftprec; ++i) f.push_back(i <= left ? preterms[left-i]->label.cat : endmarker()); for (size_type i = 1; i <= nleftprecw; ++i) f.push_back(i <= left ? preterms[left-i]->child->label.cat : endmarker()); for (size_type i = 0; i < nleftsucc; ++i) f.push_back(left+i < nwords ? preterms[left+i]->label.cat : endmarker()); for (size_type i = 0; i < nleftsuccw; ++i) f.push_back(left+i < nwords ? preterms[left+i]->child->label.cat : endmarker()); for (size_type i = 1; i <= nrightprec; ++i) f.push_back(i <= right ? preterms[right-i]->label.cat : endmarker()); for (size_type i = 1; i <= nrightprecw; ++i) f.push_back(i <= right ? preterms[right-i]->child->label.cat : endmarker()); for (size_type i = 0; i < nrightsucc; ++i) // right preterminals f.push_back(right+i < nwords ? preterms[right+i]->label.cat : endmarker()); for (size_type i = 0; i < nrightsuccw; ++i) // right words f.push_back(right+i < nwords ? 
preterms[right+i]->child->label.cat : endmarker());
    ++feat_count[f];
  } // WEdges::node_featurecount()

  virtual const char *identifier() const {
    return identifier_string.c_str();
  } // WEdges::identifier()

  SPFEATURES_COMMON_DEFINITIONS;
}; // WEdges{}

//! The Edges{} includes the node's category, its binned length
//! and the left and right POS preceding and following the constituent edges
//!
//! Its identifier is Edges:<binnedlength>:<nleftprec>:<nleftsucc>:<nrightprec>:<nrightsucc>
//
class Edges : public PTsFeatureClass {
public:
  Edges(bool binned_length = false,   //!< include binned length
        size_type nleftprec = 0,      //!< include nleft left preceding POS
        size_type nleftsucc = 0,      //!< include nleft left following POS
        size_type nrightprec = 0,     //!< include nright right preceding POS
        size_type nrightsucc = 0)     //!< include nright right following POS
    : binned_length(binned_length),
      nleftprec(nleftprec), nleftsucc(nleftsucc),
      nrightprec(nrightprec), nrightsucc(nrightsucc),
      identifier_string("Edges:") {
    (identifier_string += lexical_cast<std::string>(binned_length)) += ":";
    (identifier_string += lexical_cast<std::string>(nleftprec)) += ":";
    (identifier_string += lexical_cast<std::string>(nleftsucc)) += ":";
    (identifier_string += lexical_cast<std::string>(nrightprec)) += ":";
    identifier_string += lexical_cast<std::string>(nrightsucc);
  } // Edges::Edges()

  // required types
  typedef std::vector<symbol> Ss;
  typedef Ss Feature;

  bool binned_length;   // collect binned length
  size_type nleftprec, nleftsucc, nrightprec, nrightsucc;   // number of POS tags surrounding edges
  std::string identifier_string;   // will hold its identifier

  template <typename FeatClass, typename Feat_Count>
  void node_featurecount(FeatClass& fc, const SptreePtrs& preterms, const sptree* node,
                         Feat_Count& feat_count) {
    if (!node->is_nonterminal())
      return;
    size_type left = node->label.left;
    size_type right = node->label.right;
    size_type nwords = preterms.size();
    Feature f;
    if (binned_length)
      f.push_back(symbol_quantize(right-left));   // number of preterminals
    f.push_back(node->label.cat);                 // category label
    for (size_type i = 1; i <= nleftprec; ++i)
      f.push_back(i <= left ? preterms[left-i]->label.cat : endmarker());
    for (size_type i = 0; i < nleftsucc; ++i)
      f.push_back(left+i < nwords ? preterms[left+i]->label.cat : endmarker());
    for (size_type i = 1; i <= nrightprec; ++i)
      f.push_back(i <= right ? preterms[right-i]->label.cat : endmarker());
    for (size_type i = 0; i < nrightsucc; ++i)    // right preterminals
      f.push_back(right+i < nwords ? preterms[right+i]->label.cat : endmarker());
    ++feat_count[f];
  } // Edges::node_featurecount()

  virtual const char *identifier() const {
    return identifier_string.c_str();
  } // Edges::identifier()

  SPFEATURES_COMMON_DEFINITIONS;
}; // Edges{}

//! The WordNeighbours{} includes the node's category, its binned length
//! and the left and right words next to each node.
//!
//!
Its identifier is WordNeighbours:<binnedlengthflag>:<nleft>:<nright> // class WordNeighbours : public PTsFeatureClass { public: WordNeighbours(bool binned_length = false, //!< include binned length size_type nleft = 0, //!< include nleft left words size_type nright = 0) //!< include nright right words : binned_length(binned_length), nleft(nleft), nright(nright), identifier_string("WordNeighbours:") { (identifier_string += lexical_cast<std::string>(binned_length)) += ":"; (identifier_string += lexical_cast<std::string>(nleft)) += ":"; identifier_string += lexical_cast<std::string>(nright); } // WordNeighbours::WordNeighbours() // required types typedef std::vector<symbol> Ss; typedef Ss Feature; bool binned_length; // collect binned length size_type nleft, nright; // number of words to left and right std::string identifier_string; // will hold its identifier template <typename FeatClass, typename Feat_Count> void node_featurecount(FeatClass& fc, const SptreePtrs& preterms, const sptree* node, Feat_Count& feat_count) { if (!node->is_nonterminal()) return; size_type left = node->label.left; size_type right = node->label.right; Feature f; if (binned_length) f.push_back(symbol_quantize(right-left)); // number of preterminals f.push_back(node->label.cat); // category label // XXXXX BUG XXXXX -- should be // for (size_type i = 1; i <= nleft; ++i) for (size_type i = 0; i < nleft; ++i) // left preterminals f.push_back(i <= left ? preterms[left-i]->child->label.cat : endmarker()); for (size_type i = 0; i < nright; ++i) // right preterminals f.push_back(right+i < preterms.size() ? preterms[right+i]->child->label.cat : endmarker()); ++feat_count[f]; } // WordNeighbours::node_featurecount() virtual const char *identifier() const { return identifier_string.c_str(); } // WordNeighbours::identifier() SPFEATURES_COMMON_DEFINITIONS; }; // WordNeighbours{} //! The WordEdges{} includes the node's category, its binned length //! and the left and right words preceding and following the constituent edges //! //! 
Its identifier is WordEdges:<binnedlength>:<nleftprec>:<nleftsucc>:<nrightprec>:<nrightsucc> // class WordEdges : public PTsFeatureClass { public: WordEdges(bool binned_length = false, //!< include binned length size_type nleftprec = 0, //!< include nleft left preceding words size_type nleftsucc = 0, //!< include nleft left following words size_type nrightprec = 0, //!< include nright right preceding words size_type nrightsucc = 0) //!< include nright right following words : binned_length(binned_length), nleftprec(nleftprec), nleftsucc(nleftsucc), nrightprec(nrightprec), nrightsucc(nrightsucc), identifier_string("WordEdges:") { (identifier_string += lexical_cast<std::string>(binned_length)) += ":"; (identifier_string += lexical_cast<std::string>(nleftprec)) += ":"; (identifier_string += lexical_cast<std::string>(nleftsucc)) += ":"; (identifier_string += lexical_cast<std::string>(nrightprec)) += ":"; identifier_string += lexical_cast<std::string>(nrightsucc); } // WordEdges::WordEdges() // required types typedef std::vector<symbol> Ss; typedef Ss Feature; bool binned_length; // collect binned length size_type nleftprec, nleftsucc, nrightprec, nrightsucc; // number of words surrounding edges std::string identifier_string; // will hold its identifier template <typename FeatClass, typename Feat_Count> void node_featurecount(FeatClass& fc, const SptreePtrs& preterms, const sptree* node, Feat_Count& feat_count) { if (!node->is_nonterminal()) return; size_type left = node->label.left; size_type right = node->label.right; size_type nwords = preterms.size(); Feature f; if (binned_length) f.push_back(symbol_quantize(right-left)); // number of preterminals f.push_back(node->label.cat); // category label for (size_type i = 1; i <= nleftprec; ++i) f.push_back(i <= left ? preterms[left-i]->child->label.cat : endmarker()); for (size_type i = 0; i < nleftsucc; ++i) f.push_back(left+i < nwords ? preterms[left+i]->child->label.cat : endmarker()); for (size_type i = 1; i <= nrightprec; ++i) f.push_back(i <= right ? preterms[right-i]->child->label.cat : endmarker()); for (size_type i = 0; i < nrightsucc; ++i) // right preterminals f.push_back(right+i < nwords ? preterms[right+i]->child->label.cat : endmarker()); ++feat_count[f]; } // WordEdges::node_featurecount() virtual const char *identifier() const { return identifier_string.c_str(); } // WordEdges::identifier() SPFEATURES_COMMON_DEFINITIONS; }; // WordEdges{} //! The Heavy{} classifies nodes by their size and //! how close to the end of the sentence they occur, as well as whether //! they are followed by punctuation or coordination. 
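//! For example (a sketch): a 3-word NP ending two words before the end
//! of the sentence and followed by a comma yields the feature
//! ([quantize(3), quantize(2)], [NP, endmarker, ","]).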
// class Heavy : public PTsFeatureClass { public: typedef std::vector<symbol> Ss; typedef std::vector<int> Is; typedef std::pair<Is,Ss> Feature; template <typename FeatClass, typename Feat_Count> void node_featurecount(FeatClass& fc, const SptreePtrs& preterms, const sptree* node, Feat_Count& feat_count) { if (!node->is_nonterminal()) return; symbol final_punct = endmarker(); symbol following_punct = endmarker(); assert(node->label.right > 0); assert(node->label.right <= preterms.size()); if (preterms[node->label.right-1]->is_punctuation()) final_punct = preterms[node->label.right-1]->child->label.cat; if (node->label.right < preterms.size() && preterms[node->label.right]->is_punctuation()) following_punct = preterms[node->label.right]->child->label.cat; Feature f; // number of preterminals f.first.push_back(quantize(node->label.right-node->label.left)); f.first.push_back(quantize(preterms.size()-node->label.right)); // how far from end f.second.push_back(node->label.cat); // category label f.second.push_back(final_punct); f.second.push_back(following_punct); ++feat_count[f]; } // Heavy::node_featurecount() virtual const char *identifier() const { return "Heavy"; } // Heavy::identifier() SPFEATURES_COMMON_DEFINITIONS; }; // Heavy{} //! NGramTree identifies n-gram tree fragments. The identifier is //! //! NGramTree:ngram:lexicalize:collapse:nancs // class NGramTree : public TreeFeatureClass { public: // required types typedef sstring Feature; enum lexicalize_type { none, closed_class, functional, all }; int ngram; //!< # of words in context lexicalize_type lexicalize; //!< lexicalize preterminals bool collapse; //!< collapse nodes not dominating ngram int nancs; //!< extra ancestors to go up std::string identifier_string; //!< returned by identifier() NGramTree(int ngram = 2, lexicalize_type lexicalize = none, bool collapse = false, int nancs = 0) : ngram(ngram), lexicalize(lexicalize), collapse(collapse), nancs(nancs), identifier_string("NGramTree:") { identifier_string += lexical_cast<std::string>(ngram) + ":"; identifier_string += lexical_cast<std::string>(lexicalize) + ":"; identifier_string += lexical_cast<std::string>(collapse) + ":"; identifier_string += lexical_cast<std::string>(nancs); } // NGramTree::NGramTree() tree* selective_copy(const sptree* sp, size_type left, size_type right, bool copy_next = false) { const sptree_label& label = sp->label; if (collapse) { if (label.right <= left) return (sp->next && copy_next) ? 
selective_copy(sp->next, left, right, copy_next) : NULL; else if (label.left >= right) return NULL; } tree* t = new tree(label); if (sp->child && label.left < right && label.right > left && (sp->is_nonterminal() || lexicalize == all || (lexicalize == functional && sp->is_functional()) || (lexicalize == closed_class && sp->is_closed_class()))) t->child = selective_copy(sp->child, left, right, true); if (copy_next && sp->next) t->next = selective_copy(sp->next, left, right, copy_next); return t; } // NGramTree::selective_copy() template <typename FeatClass, typename Feat_Count> void tree_featurecount(FeatClass& fc, const sptree* root, Feat_Count& feat_count) { if (debug_level >= 10000) std::cerr << "# root = " << root << std::endl; std::vector<const sptree*> preterms; root->preterminal_nodes(preterms); for (size_type i = 0; i + ngram < preterms.size(); ++i) { const sptree* t0; for (t0 = preterms[i]; t0 != NULL && t0->label.right < i + ngram; t0 = t0->label.parent) ; assert(t0 != NULL); for (int ianc = 0; ianc < nancs && t0; ++ianc) t0 = t0->label.parent; // go up extra ancestors if (t0 == NULL) return; tree* frag = selective_copy(t0, i, i + ngram); Feature feat(frag); if (debug_level >= 20000) std::cerr << "# " << preterms[i]->child->label.cat << ": " << feat << std::endl; ++feat_count[feat]; delete frag; } } // NGramTree::tree_featurecount() virtual const char *identifier() const { return identifier_string.c_str(); } // NGramTree::identifier() SPFEATURES_COMMON_DEFINITIONS; }; // NGramTree{} //! HeadTree //! //! Identifier is HeadTree:collapse:lexicalize:nancs:headtype // class HeadTree : public NodeFeatureClass { public: // required types typedef sstring Feature; enum head_type { syntactic, semantic }; bool collapse; //!< collapse nodes not dominating ngram bool lexicalize; //!< include lexical item int nancs; //!< extra ancestors to go up head_type htype; //!< type of heads to project std::string identifier_string; //!< returned by identifier() HeadTree(bool collapse = true, bool lexicalize = false, int nancs = 0, head_type htype = semantic) : collapse(collapse), lexicalize(lexicalize), nancs(nancs), htype(htype), identifier_string("HeadTree:") { identifier_string += lexical_cast<std::string>(collapse) + ":"; identifier_string += lexical_cast<std::string>(lexicalize) + ":"; identifier_string += lexical_cast<std::string>(nancs) + ":"; identifier_string += lexical_cast<std::string>(htype); } // HeadTree::HeadTree() tree* selective_copy(const sptree* sp, unsigned int headleft) { if (!sp) return NULL; const sptree_label& label = sp->label; if (collapse) { unsigned int left = label.previous ? label.previous->label.left : label.left; unsigned int right = sp->next ? sp->next->label.right : label.right; if (right <= headleft) return selective_copy(sp->next, headleft); else if (left > headleft) return NULL; } return new tree(label, (sp->is_nonterminal() || (lexicalize && label.left == headleft)) ? selective_copy(sp->child, headleft) : NULL, selective_copy(sp->next, headleft)); } // HeadTree::selective_copy() template <typename FeatClass, typename Feat_Count> void tree_featurecount(FeatClass& fc, const sptree* root, Feat_Count& feat_count) { if (debug_level >= 20000) std::cerr << "# root = " << root << std::endl; std::vector<const sptree*> preterms; root->preterminal_nodes(preterms); for (size_type i = 0; i < preterms.size(); ++i) { const sptree* t0 = preterms[i]; while (true) { const sptree* parent = t0->label.parent; if (!parent) break; const sptree* hchild = (htype == syntactic) ? 
parent->label.syntactic_headchild : parent->label.semantic_headchild; if (hchild != t0) break; t0 = parent; } assert(t0 != NULL); for (int ianc = 0; ianc < nancs && t0; ++ianc) t0 = t0->label.parent; // go up extra ancestors if (t0 == NULL) return; tree* frag = selective_copy(t0, i); Feature feat(frag); if (debug_level >= 20000) std::cerr << "# " << preterms[i]->child->label.cat << ": " << feat << std::endl; ++feat_count[feat]; delete frag; } } // HeadTree::tree_featurecount() virtual const char *identifier() const { return identifier_string.c_str(); } // HeadTree::identifier() SPFEATURES_COMMON_DEFINITIONS; }; // HeadTree{} class SubjVerbAgr : public NodeFeatureClass { public: typedef std::vector<symbol> Feature; template <typename FeatClass, typename Feat_Count> void node_featurecount(FeatClass& fc, const sptree* node, Feat_Count& feat_count) { if ((node->label.cat != S() && node->label.cat != SINV()) || node->label.syntactic_lexhead == NULL) return; const sptree* subject = NULL; // subject is last NP before VP for (const sptree* child = node->child; child != NULL; child = child->next) if (child->label.cat == NP()) subject = child; else if (child->label.cat == VP()) break; if (subject == NULL || subject->label.semantic_lexhead == NULL) return; Feature f; f.push_back(subject->label.semantic_lexhead->label.cat); f.push_back(node->label.syntactic_lexhead->label.cat); ++feat_count[f]; } // SubjVerbAgr::node_featurecount() virtual const char *identifier() const { return "SubjVerbAgr"; } // SubjVerbAgr::identifier() SPFEATURES_COMMON_DEFINITIONS; }; // SubjVerbAgr{} class SynSemHeads : public NodeFeatureClass { public: typedef std::vector<symbol> Feature; enum annotation_type { none, lex_syn, lex_all }; annotation_type ann; std::string identifier_string; //!< identifier string, returned by identifier() SynSemHeads(annotation_type ann = none) : ann(ann), identifier_string("SynSemHeads:") { identifier_string += lexical_cast<std::string>(ann); } // SynSemHeads::SynSemHeads() template <typename FeatClass, typename Feat_Count> void node_featurecount(FeatClass& fc, const sptree* node, Feat_Count& feat_count) { const sptree_label& label = node->label; if (label.syntactic_lexhead == label.semantic_lexhead) return; Feature f; f.push_back(label.syntactic_lexhead ? label.syntactic_lexhead->label.cat : endmarker()); if (ann != none) { if (label.syntactic_lexhead == NULL) return; else f.push_back(label.syntactic_lexhead->child->label.cat); } f.push_back(label.semantic_lexhead ? label.semantic_lexhead->label.cat : endmarker()); if (ann == lex_all) { if (label.semantic_lexhead == NULL) return; else f.push_back(label.semantic_lexhead->child->label.cat); } ++feat_count[f]; } // SynSemHeads::node_featurecount() virtual const char *identifier() const { return identifier_string.c_str(); } // SynSemHeads::identifier() SPFEATURES_COMMON_DEFINITIONS; }; // SynSemHeads{} ////////////////////////////////////////////////////////////////////////// // // // Coordination features // // // ////////////////////////////////////////////////////////////////////////// //! CoPar{} counts the number of parallel and non-parallel coordinations //! at various levels //! //! 
Identifier is CoPar:IgnorePreterms
//
class CoPar : public NodeFeatureClass {
  bool ignore_preterms;
  std::string identifier_string;
public:
  CoPar(bool ignore_preterms = false)
    : ignore_preterms(ignore_preterms), identifier_string("CoPar:") {
    identifier_string += lexical_cast<std::string>(ignore_preterms);
  } // CoPar::CoPar()

  template <typename FeatClass, typename Feat_Count>
  void node_featurecount(FeatClass& fc, const sptree* node, Feat_Count& feat_count) {
    if (!node->is_coordination())
      return;
    for (int depth = 1; depth <= 5; ++depth) {
      const sptree* last_child = NULL;
      for (const sptree* child = node->child; child; child = child->next) {
        if (child->is_punctuation() || child->is_conjunction())
          continue;
        if (last_child != NULL) {
          int m = match(depth, last_child, child);
          if (m != -1)
            ++feat_count[Feature(depth,m)];
        }
        last_child = child;
      }
    }
  } // CoPar::node_featurecount()

  //! match() returns 1 if node1 and node2 match to depth, 0 if they mismatch
  //! and -1 if they match but do not have any subnodes at depth.
  //
  int match(int depth, const sptree* node1, const sptree* node2) const {
    assert(node1 != NULL);
    assert(node2 != NULL);
    if (node1->label.cat != node2->label.cat)
      return 0;
    if (depth == 1)
      return 1;
    if (node1->is_preterminal()) {
      assert(node2->is_preterminal());
      return -1;
    }
    return matches(depth-1, node1->child, node2->child);
  }

  //! matches() is responsible for matching node1 and node2 and their right
  //! siblings
  //
  int matches(int depth, const sptree* node1, const sptree* node2) const {
    assert(depth >= 1);
    if (ignore_preterms) {
      while (node1 != NULL && node1->is_preterminal())
        node1 = node1->next;
      while (node2 != NULL && node2->is_preterminal())
        node2 = node2->next;
    }
    if (node1 == NULL)
      return (node2 == NULL) ? -1 : 0;
    if (node2 == NULL)
      return 0;
    int m1 = match(depth, node1, node2);
    int m2 = matches(depth, node1->next, node2->next);
    if (m1 == 0 || m2 == 0)
      return 0;
    else if (m1 == 1 || m2 == 1)
      return 1;
    else
      return -1;
  }

  virtual const char* identifier() const {
    return identifier_string.c_str();
  } // CoPar::identifier()

  typedef std::pair<int,int> Feature;

  SPFEATURES_COMMON_DEFINITIONS;
};

//! CoLenPar{} counts the number of adjacent conjuncts that have
//! the same length, are shorter, and are longer.
//
class CoLenPar : public NodeFeatureClass {
public:
  template <typename FeatClass, typename Feat_Count>
  void node_featurecount(FeatClass& fc, const sptree* node, Feat_Count& feat_count) {
    if (!node->is_coordination())
      return;
    const sptree* last_child = NULL;
    int last_size = 0;
    for (const sptree* child = node->child; child; child = child->next) {
      if (child->is_punctuation() || child->is_conjunction())
        continue;
      int size = child->label.right - child->label.left;
      if (last_child != NULL) {
        int dsize = size - last_size;
        if (dsize > 4)
          dsize = 5;
        else if (dsize < -4)
          dsize = -5;
        ++feat_count[Feature(dsize,child->next==NULL)];
      }
      last_child = child;
      last_size = size;
    }
  } // CoLenPar::node_featurecount()

  virtual const char* identifier() const {
    return "CoLenPar";
  } // CoLenPar::identifier()

  typedef std::pair<int,int> Feature;

  SPFEATURES_COMMON_DEFINITIONS;
};

//! The WSEdges{} feature includes the preterminals, annotations and words surrounding
//! nonterminal categories.
//!
//!
Its identifier is WSEdges:<binnedlength>:ll<E>:lr<E>:rl<E>:rr<E>,
//! where each edge specification <E> is <punct>:<pos>:<closed>:<word>:<nsuffix>.
//
class WSEdges : public PTsFeatureClass {
public:
  // required types
  typedef std::vector<symbol> Ss;
  typedef Ss Feature;

  struct E {
    int punct;     //!< number of punct to collect
    int pos;       //!< number of pos to collect
    int closed;    //!< number of closed class words to collect
    int word;      //!< number of words to collect
    int nsuffix;   //!< number of characters to keep from each word

    E(int punct = 0, int pos = 0, int closed = 0, int word = 0, int nsuffix = 0)
      : punct(punct), pos(pos), closed(closed), word(word), nsuffix(nsuffix) { }

    std::string identifier() const {
      return lexical_cast<std::string>(punct) + ":"
        + lexical_cast<std::string>(pos) + ":"
        + lexical_cast<std::string>(closed) + ":"
        + lexical_cast<std::string>(word) + ":"
        + lexical_cast<std::string>(nsuffix);
    }

    int width() const {
      return std::max(std::max(punct, pos), word);
    }

    void push_features(const SptreePtrs& preterms, int position, int direction, Feature& f) const {
      int n = preterms.size();
      for (int i = 0; i < punct; ++i) {
        int j = position+i*direction;
        f.push_back((j < 0 || j >= n)
                    ? endmarker()
                    : (preterms[j]->is_punctuation() ? preterms[j]->label.cat : ZERO()));
      }
      for (int i = 0; i < pos; ++i) {
        int j = position+i*direction;
        f.push_back((j < 0 || j >= n) ? endmarker() : preterms[j]->label.cat);
      }
      for (int i = 0; i < closed; ++i) {
        int j = position+i*direction;
        f.push_back((j < 0 || j >= n)
                    ? endmarker()
                    : ((preterms[j]->is_closed_class() || preterms[j]->is_punctuation())
                       ? preterms[j]->child->label.cat
                       : preterms[j]->label.cat));
      }
      for (int i = 0; i < word; ++i) {
        int j = position+i*direction;
        f.push_back((j < 0 || j >= n)
                    ? endmarker()
                    : suffix(preterms[j]->child->label.cat, nsuffix));
      }
    } // WSEdges::E::push_features()
  }; // WSEdges::E{}

  WSEdges(const E& leftleft,     //!< left side of constituent's left edge
          const E& leftright,    //!< right side of constituent's left edge
          const E& rightleft,    //!< left side of constituent's right edge
          const E& rightright,   //!< right side of constituent's right edge
          bool binned_length = false)   //!< include binned length
    : leftleft(leftleft), leftright(leftright),
      rightleft(rightleft), rightright(rightright),
      binned_length(binned_length), identifier_string("WSEdges:") {
    (identifier_string += lexical_cast<std::string>(binned_length)) += ":";
    ((identifier_string += "ll") += leftleft.identifier()) += ":";
    ((identifier_string += "lr") += leftright.identifier()) += ":";
    ((identifier_string += "rl") += rightleft.identifier()) += ":";
    ((identifier_string += "rr") += rightright.identifier());
  } // WSEdges::WSEdges()

  const E leftleft, leftright, rightleft, rightright;
  bool binned_length;              // collect binned length
  std::string identifier_string;   // will hold its identifier

  template <typename FeatClass, typename Feat_Count>
  void node_featurecount(FeatClass& fc, const SptreePtrs& preterms, const sptree* node,
                         Feat_Count& feat_count) {
    if (!node->is_nonterminal())
      return;
    int left = node->label.left;
    int right = node->label.right;
    int nwords = preterms.size();
    // don't permit feature to overlap both edges
    // if (left + leftright.width() > right || left + rightleft.width() > right) return;
    if (left + 1 < leftleft.width())
      return;
    if (right + rightright.width() > nwords)
      return;
    Feature f;
    f.push_back(node->label.cat);   // category label
    if (binned_length)
      f.push_back(symbol_quantize(right-left));   // number of preterminals
    leftleft.push_features(preterms, left-1, -1, f);
    leftright.push_features(preterms, left, 1, f);
    rightleft.push_features(preterms, right-1, -1, f);
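    // each E{} scan starts at the given position and steps by direction:
    // -1 walks leftward through the preterminals, +1 walks rightward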
rightright.push_features(preterms, right, 1, f); ++feat_count[f]; } // WSEdges::node_featurecount() virtual const char *identifier() const { return identifier_string.c_str(); } // WSEdges::identifier() SPFEATURES_COMMON_DEFINITIONS; }; // WSEdges{} //////////////////////////////////////////////////////////////////////// // // // Feature Classes used in reranker // // // //////////////////////////////////////////////////////////////////////// inline void FeatureClassPtrs::features_sumlogp() { push_back(new SumLogP()); } // FeatureClassPtrs::features_sumlogp() inline void FeatureClassPtrs::features_logps() { push_back(new SumLogP()); push_back(new LogP()); } // FeatureClassPtrs::features_logps() inline void FeatureClassPtrs::features_avlogps() { push_back(new AvLogP()); push_back(new LogP()); } // FeatureClassPtrs::features_avlogps() inline void FeatureClassPtrs::features_wavlogp() { push_back(new WAvLogP("p0-ll", 3)); } // FeatureClassPtrs::features_wavlogp() inline void FeatureClassPtrs::features_logpparses() { push_back(new SumLogP()); push_back(new AvLogP()); push_back(new LogP()); push_back(new Parsed()); push_back(new NParseds()); } // FeatureClassPtrs::features_logpparses() inline void FeatureClassPtrs::features_rank(unsigned featurelevel) { push_back(new AvLogP()); if (featurelevel <= 0) return; push_back(new BinnedParseRank(2.0)); push_back(new BinnedParseRank(1.5)); push_back(new Parsed()); if (featurelevel <= 1) return; push_back(new LogP()); if (featurelevel <= 2) return; push_back(new Parsed2()); if (featurelevel <= 3) return; push_back(new BinnedParseRank2(2.0)); if (featurelevel <= 4) return; push_back(new SumLogP()); push_back(new NParseds()); push_back(new Failed()); push_back(new Failed2()); push_back(new ParsedFailed()); push_back(new BinnedParseRank(4.0)); push_back(new BinnedParseRank2(1.5)); push_back(new BinnedParseRank2(4.0)); } inline void FeatureClassPtrs::features_rankplus(bool lxfeats, bool edgefeats) { push_back(new AvLogP()); push_back(new BinnedParseRank(2.0)); push_back(new BinnedParseRank(1.5)); push_back(new LogP()); push_back(new BinnedParseRank2(2.0)); push_back(new SumLogP()); push_back(new BinnedParseRank(4.0)); push_back(new BinnedParseRank2(1.5)); push_back(new BinnedParseRank2(4.0)); if (lxfeats) { push_back(new RightBranch()); push_back(new Heavy()); push_back(new CoPar(false)); push_back(new CoLenPar()); } if (edgefeats) { size_type maxwidth = 2, maxsumwidth = 2; for (size_type binflag = 0; binflag < 2; ++binflag) for (size_type nleftprec = 0; nleftprec <= maxwidth; ++nleftprec) for (size_type nleftsucc = 0; nleftsucc <= maxwidth; ++nleftsucc) for (size_type nrightprec = 0; nrightprec <= maxwidth; ++nrightprec) for (size_type nrightsucc = 0; nrightsucc <= maxwidth; ++nrightsucc) if (nleftprec + nleftsucc + nrightprec + nrightsucc <= maxsumwidth) push_back(new Edges(binflag, nleftprec, nleftsucc, nrightprec, nrightsucc)); for (size_type binflag = 0; binflag < 2; ++binflag) for (size_type nleftprec = 0; nleftprec <= maxwidth; ++nleftprec) for (size_type nleftsucc = 0; nleftsucc <= maxwidth; ++nleftsucc) for (size_type nrightprec = 0; nrightprec <= maxwidth; ++nrightprec) for (size_type nrightsucc = 0; nrightsucc <= maxwidth; ++nrightsucc) if (nleftprec + nleftsucc + nrightprec + nrightsucc <= maxsumwidth) push_back(new WordEdges(binflag, nleftprec, nleftsucc, nrightprec, nrightsucc)); } } inline void FeatureClassPtrs::features_spnn(bool avparses, bool nngram) { push_back(new SumLogP()); push_back(new LogP()); if (avparses) { push_back(new AvLogP()); 
push_back(new Parsed()); push_back(new NParseds()); } push_back(new RightBranch()); push_back(new Heavy()); push_back(new CoPar(false)); push_back(new CoLenPar()); push_back(new Word(1)); push_back(new Word(2)); push_back(new WProj()); push_back(new Rule()); push_back(new Rule(0, 1)); push_back(new Rule(0, 0, true)); push_back(new Rule(0, 0, false, true)); push_back(new Rule(0, 0, false, false, Rule::lexical)); push_back(new Rule(0, 0, false, false, Rule::none, Rule::lexical)); push_back(new Rule(0, 0, false, false, Rule::lexical, Rule::lexical)); push_back(new NGram(1, 1, false, true)); push_back(new NGram(2, 1, true, true)); push_back(new NGram(3, 1, true, true)); push_back(new NGram(2, 1, false, false, NGram::lexical)); push_back(new NGram(2, 1, false, false, NGram::none, NGram::lexical)); if (nngram) { push_back(new NNGram(1, 1, false, true, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, true, true)); push_back(new NNGram(2, 1, true, true, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, true, true)); push_back(new NNGram(3, 1, true, true, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, true, true)); push_back(new NNGram(2, 1, false, false, NNGram::lexical, NNGram::none, NNGram::none, NNGram::syntactic, true, true)); push_back(new NNGram(2, 1, false, false, NNGram::lexical, NNGram::lexical, NNGram::none, NNGram::syntactic, true, true)); } push_back(new NGramTree(2, NGramTree::none, true)); push_back(new NGramTree(2, NGramTree::all, true)); push_back(new NGramTree(3, NGramTree::functional, true)); push_back(new HeadTree(true, false, 0, HeadTree::syntactic)); push_back(new HeadTree(true, false, 0, HeadTree::semantic)); push_back(new HeadTree(true, true, 0, HeadTree::semantic)); push_back(new Heads(2, false, false, Heads::syntactic)); push_back(new Heads(2, true, true, Heads::syntactic)); push_back(new Heads(2, true, true, Heads::semantic)); push_back(new Heads(3, false, false)); size_type maxwidth = 2, maxsumwidth = 2; for (size_type binflag = 0; binflag < 2; ++binflag) for (size_type nleftprec = 0; nleftprec <= maxwidth; ++nleftprec) for (size_type nleftsucc = 0; nleftsucc <= maxwidth; ++nleftsucc) for (size_type nrightprec = 0; nrightprec <= maxwidth; ++nrightprec) for (size_type nrightsucc = 0; nrightsucc <= maxwidth; ++nrightsucc) if (nleftprec + nleftsucc + nrightprec + nrightsucc <= maxsumwidth) push_back(new Edges(binflag, nleftprec, nleftsucc, nrightprec, nrightsucc)); for (size_type binflag = 0; binflag < 2; ++binflag) for (size_type nleftprec = 0; nleftprec <= maxwidth; ++nleftprec) for (size_type nleftsucc = 0; nleftsucc <= maxwidth; ++nleftsucc) for (size_type nrightprec = 0; nrightprec <= maxwidth; ++nrightprec) for (size_type nrightsucc = 0; nrightsucc <= maxwidth; ++nrightsucc) if (nleftprec + nleftsucc + nrightprec + nrightsucc <= maxsumwidth) push_back(new WordEdges(binflag, nleftprec, nleftsucc, nrightprec, nrightsucc)); } // FeatureClassPtrs::features_spnn() inline void FeatureClassPtrs::features_mfeatures(unsigned maxwidth, unsigned maxsumwidth, unsigned maxwords, const char* parser) { push_back(new SumLogP()); if (parser == NULL) push_back(new LogP()); else push_back(new LogPx(parser)); push_back(new RightBranch()); push_back(new Heavy()); push_back(new LeftBranchLength()); push_back(new RightBranchLength()); push_back(new Rule(0, 0, false, false, Rule::none, Rule::none, Rule::none, Rule::syntactic)); push_back(new Rule(0, 0, true, false, Rule::none, Rule::none, Rule::none, Rule::syntactic)); push_back(new Rule(0, 0, false, 
true, Rule::none, Rule::none, Rule::none, Rule::syntactic)); // push_back(new Rule(1, 0, false, false, Rule::none, Rule::none, Rule::none, Rule::syntactic)); // push_back(new Rule(1, 0, true, false, Rule::none, Rule::none, Rule::none, Rule::syntactic)); // push_back(new Rule(1, 0, false, true, Rule::none, Rule::none, Rule::none, Rule::syntactic)); push_back(new Rule(0, 1, false, false, Rule::none, Rule::none, Rule::none, Rule::syntactic)); push_back(new Rule(0, 1, true, false, Rule::none, Rule::none, Rule::none, Rule::syntactic)); push_back(new Rule(0, 1, false, true, Rule::none, Rule::none, Rule::none, Rule::syntactic)); push_back(new Rule(0, 0, false, false, Rule::pos, Rule::none, Rule::none, Rule::syntactic)); push_back(new Rule(0, 0, false, false, Rule::none, Rule::pos, Rule::none, Rule::syntactic)); push_back(new Rule(0, 0, false, false, Rule::none, Rule::none, Rule::pos, Rule::syntactic)); push_back(new Rule(0, 0, false, false, Rule::pos, Rule::none, Rule::none, Rule::semantic)); push_back(new Rule(0, 0, false, false, Rule::none, Rule::pos, Rule::none, Rule::semantic)); push_back(new Rule(0, 0, false, false, Rule::none, Rule::none, Rule::pos, Rule::semantic)); push_back(new Rule(0, 0, false, false, Rule::lexical, Rule::none, Rule::none, Rule::syntactic)); push_back(new Rule(0, 0, false, false, Rule::none, Rule::lexical, Rule::none, Rule::syntactic)); push_back(new Rule(0, 0, false, false, Rule::none, Rule::none, Rule::lexical, Rule::syntactic)); push_back(new Rule(0, 0, false, false, Rule::lexical, Rule::none, Rule::none, Rule::semantic)); push_back(new Rule(0, 0, false, false, Rule::none, Rule::lexical, Rule::none, Rule::semantic)); push_back(new Rule(0, 0, false, false, Rule::none, Rule::none, Rule::lexical, Rule::semantic)); push_back(new NNGram(1, 1, false, false, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, false, false)); push_back(new NNGram(1, 1, false, false, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, true, false)); push_back(new NNGram(1, 1, false, false, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, true, true)); push_back(new NNGram(1, 2, false, false, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, false, false)); push_back(new NNGram(1, 1, true, false, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, false, false)); push_back(new NNGram(1, 1, false, true, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, false, false)); push_back(new NNGram(1, 1, false, false, NNGram::pos, NNGram::none, NNGram::none, NNGram::syntactic, false, false)); push_back(new NNGram(1, 1, false, false, NNGram::pos, NNGram::pos, NNGram::none, NNGram::syntactic, false, false)); push_back(new NNGram(1, 1, false, false, NNGram::pos, NNGram::pos, NNGram::pos, NNGram::syntactic, false, false)); // push_back(new NNGram(1, 1, false, false, NNGram::lexical, NNGram::pos, NNGram::pos, NNGram::syntactic, false, false)); push_back(new NNGram(1, 1, false, false, NNGram::pos, NNGram::none, NNGram::none, NNGram::semantic, false, false)); push_back(new NNGram(1, 1, false, false, NNGram::pos, NNGram::pos, NNGram::none, NNGram::semantic, false, false)); push_back(new NNGram(1, 1, false, false, NNGram::pos, NNGram::pos, NNGram::pos, NNGram::semantic, false, false)); // push_back(new NNGram(1, 1, false, false, NNGram::lexical, NNGram::pos, NNGram::pos, NNGram::semantic, false, false)); push_back(new NNGram(2, 1, false, false, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, false, false)); push_back(new NNGram(2, 1, false, 
false, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, true, false)); push_back(new NNGram(2, 1, false, false, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, true, true)); push_back(new NNGram(2, 2, false, false, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, false, false)); push_back(new NNGram(2, 1, true, false, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, false, false)); push_back(new NNGram(2, 1, false, true, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, false, false)); push_back(new NNGram(2, 1, false, false, NNGram::pos, NNGram::none, NNGram::none, NNGram::syntactic, false, false)); push_back(new NNGram(2, 1, false, false, NNGram::pos, NNGram::pos, NNGram::none, NNGram::syntactic, false, false)); push_back(new NNGram(2, 1, false, false, NNGram::pos, NNGram::pos, NNGram::pos, NNGram::syntactic, false, false)); // push_back(new NNGram(2, 1, false, false, NNGram::lexical, NNGram::pos, NNGram::pos, NNGram::syntactic, false, false)); push_back(new NNGram(2, 1, false, false, NNGram::pos, NNGram::none, NNGram::none, NNGram::semantic, false, false)); push_back(new NNGram(2, 1, false, false, NNGram::pos, NNGram::pos, NNGram::none, NNGram::semantic, false, false)); push_back(new NNGram(2, 1, false, false, NNGram::pos, NNGram::pos, NNGram::pos, NNGram::semantic, false, false)); // push_back(new NNGram(2, 1, false, false, NNGram::lexical, NNGram::pos, NNGram::pos, NNGram::semantic, false, false)); push_back(new NNGram(3, 1, false, false, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, false, false)); push_back(new NNGram(3, 1, false, false, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, true, false)); push_back(new NNGram(3, 1, false, false, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, true, true)); push_back(new NNGram(3, 2, false, false, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, false, false)); push_back(new NNGram(3, 1, true, false, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, false, false)); push_back(new NNGram(3, 1, false, true, NNGram::none, NNGram::none, NNGram::none, NNGram::syntactic, false, false)); push_back(new NNGram(3, 1, false, false, NNGram::pos, NNGram::none, NNGram::none, NNGram::syntactic, false, false)); push_back(new NNGram(3, 1, false, false, NNGram::pos, NNGram::pos, NNGram::none, NNGram::syntactic, false, false)); push_back(new NNGram(3, 1, false, false, NNGram::pos, NNGram::pos, NNGram::pos, NNGram::syntactic, false, false)); // push_back(new NNGram(3, 1, false, false, NNGram::lexical, NNGram::pos, NNGram::pos, NNGram::syntactic, false, false)); push_back(new NNGram(3, 1, false, false, NNGram::pos, NNGram::none, NNGram::none, NNGram::semantic, false, false)); push_back(new NNGram(3, 1, false, false, NNGram::pos, NNGram::pos, NNGram::none, NNGram::semantic, false, false)); push_back(new NNGram(3, 1, false, false, NNGram::pos, NNGram::pos, NNGram::pos, NNGram::semantic, false, false)); // push_back(new NNGram(3, 1, false, false, NNGram::lexical, NNGram::pos, NNGram::pos, NNGram::semantic, false, false)); push_back(new Heads(2, false, false, Heads::syntactic)); push_back(new Heads(2, false, false, Heads::semantic)); push_back(new Heads(2, true, false, Heads::syntactic)); push_back(new Heads(2, true, false, Heads::semantic)); push_back(new Heads(2, false, true, Heads::syntactic)); push_back(new Heads(2, false, true, Heads::semantic)); push_back(new Heads(2, true, true, Heads::syntactic)); push_back(new Heads(2, true, true, 
Heads::semantic)); push_back(new Heads(3, false, false, Heads::syntactic)); push_back(new Heads(3, false, false, Heads::semantic)); push_back(new SynSemHeads(SynSemHeads::none)); push_back(new SynSemHeads(SynSemHeads::lex_syn)); // push_back(new SynSemHeads(SynSemHeads::lex_all)); push_back(new RBContext(false, false, false, RBContext::syntactic)); push_back(new RBContext(false, false, false, RBContext::semantic)); push_back(new RBContext(true, false, false, RBContext::syntactic)); push_back(new RBContext(true, false, false, RBContext::semantic)); push_back(new RBContext(false, true, false, RBContext::syntactic)); push_back(new RBContext(false, true, false, RBContext::semantic)); push_back(new RBContext(false, false, true, RBContext::syntactic)); push_back(new RBContext(false, false, true, RBContext::semantic)); push_back(new SubjVerbAgr()); push_back(new CoPar(false)); push_back(new CoPar(true)); push_back(new CoLenPar()); push_back(new Word(1)); push_back(new Word(2)); push_back(new WProj()); for (size_type binflag = 0; binflag < 2; ++binflag) for (size_type nleftprec = 0; nleftprec <= maxwidth; ++nleftprec) for (size_type nleftsucc = 0; nleftsucc <= maxwidth; ++nleftsucc) for (size_type nrightprec = 0; nrightprec <= maxwidth; ++nrightprec) for (size_type nrightsucc = 0; nrightsucc <= maxwidth; ++nrightsucc) if (nleftprec + nleftsucc + nrightprec + nrightsucc <= maxsumwidth) for (size_type nleftprecw = 0; nleftprecw <= nleftprec; ++nleftprecw) for (size_type nleftsuccw = 0; nleftsuccw <= nleftsucc; ++nleftsuccw) for (size_type nrightprecw = 0; nrightprecw <= nrightprec; ++nrightprecw) for (size_type nrightsuccw = 0; nrightsuccw <= nrightsucc; ++nrightsuccw) if (nleftprecw + nleftsuccw + nrightprecw + nrightsuccw <= maxwords) push_back(new WEdges(binflag, nleftprec, nleftprecw, nleftsucc, nleftsuccw, nrightprec, nrightprecw, nrightsucc, nrightsuccw)); push_back(new NGramTree(2, NGramTree::none, false)); push_back(new NGramTree(2, NGramTree::functional, false)); push_back(new NGramTree(2, NGramTree::all, false)); push_back(new NGramTree(2, NGramTree::none, true)); push_back(new NGramTree(2, NGramTree::functional, true)); push_back(new NGramTree(2, NGramTree::all, true)); // push_back(new NGramTree(3, NGramTree::none, true)); // push_back(new NGramTree(3, NGramTree::functional, true)); push_back(new HeadTree(false, false, 0, HeadTree::syntactic)); push_back(new HeadTree(false, false, 0, HeadTree::semantic)); push_back(new HeadTree(true, false, 0, HeadTree::syntactic)); push_back(new HeadTree(true, false, 0, HeadTree::semantic)); push_back(new HeadTree(true, true, 0, HeadTree::syntactic)); push_back(new HeadTree(true, true, 0, HeadTree::semantic)); } // features_mfeatures() //! FeatureClassPtrs::FeatureClassPtrs() preloads a //! set of features. 
// inline FeatureClassPtrs::FeatureClassPtrs(const char* fcname) { // features_connll(); if (fcname == NULL) features_spnn(false, false); else if (strcmp(fcname, "sp") == 0) features_spnn(false, false); else if (strcmp(fcname, "spnn") == 0) features_spnn(false, true); else if (strcmp(fcname, "spp") == 0) features_spnn(true, false); else if (strcmp(fcname, "sppnn") == 0) features_spnn(true, true); else if (strcmp(fcname, "mfeatures") == 0) features_mfeatures(); else if (strcmp(fcname, "mfeatures_p0_ll") == 0) features_mfeatures(1, 2, 1, "p0-ll"); else if (strcmp(fcname, "sumlogp") == 0) features_sumlogp(); else if (strcmp(fcname, "logps") == 0) features_logps(); else if (strcmp(fcname, "avlogps") == 0) features_avlogps(); else if (strcmp(fcname, "wavlogp") == 0) features_wavlogp(); else if (strcmp(fcname, "logpparses") == 0) features_logpparses(); else if (strcmp(fcname, "rank0") == 0) features_rank(0); else if (strcmp(fcname, "rank1") == 0) features_rank(1); else if (strcmp(fcname, "rank2") == 0) features_rank(2); else if (strcmp(fcname, "rank3") == 0) features_rank(3); else if (strcmp(fcname, "rank4") == 0) features_rank(4); else if (strcmp(fcname, "rank5") == 0) features_rank(5); else if (strcmp(fcname, "rankplus") == 0) features_rankplus(false, false); else if (strcmp(fcname, "rankpluslx") == 0) features_rankplus(true, false); else if (strcmp(fcname, "rankplusedge") == 0) features_rankplus(false, true); else if (strcmp(fcname, "rankpluslxedge") == 0) features_rankplus(true, true); else { std::cerr << "## Error in spmfeatures.h: FeatureClassPtrs::FeatureClassPtrs(), unknown fcname = " << fcname << std::endl; exit(EXIT_FAILURE); } } // FeatureClassPtrs::FeatureClassPtrs() #undef FloatTol
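// ----------------------------------------------------------------------
// Editor's usage sketch (not part of the original header): one plausible
// way a reranker driver could preload a named feature set via the
// FeatureClassPtrs constructor above.  The "spnn" preset and the mapping
// to features_spnn(false, true) come from the constructor itself; the
// main() scaffolding, the header name (taken from the constructor's
// error message), and the assumption that FeatureClassPtrs exposes
// vector-style size()/operator[] are illustrative only.
//
//   #include "spmfeatures.h"   // assumed header name for this file
//   #include <iostream>
//
//   int main() {
//     FeatureClassPtrs fcps("spnn");   // runs features_spnn(false, true)
//     std::cout << "loaded " << fcps.size() << " feature classes\n";
//     for (FeatureClassPtrs::size_type i = 0; i != fcps.size(); ++i)
//       std::cout << fcps[i]->identifier() << '\n';  // e.g. "WSEdges:..."
//   }
// ----------------------------------------------------------------------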
ocp_nlp_common.c
/* * Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren, * Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor, * Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan, * Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl * * This file is part of acados. * * The 2-Clause BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE.; */ #include "acados/ocp_nlp/ocp_nlp_common.h" #include <assert.h> #include <stdlib.h> #include <string.h> #include <math.h> // blasfeo #include "blasfeo/include/blasfeo_common.h" #include "blasfeo/include/blasfeo_d_blas.h" // hpipm #include "hpipm/include/hpipm_d_ocp_qp_dim.h" // acados #include "acados/utils/mem.h" #include "acados/utils/print.h" // openmp #if defined(ACADOS_WITH_OPENMP) #include <omp.h> #endif /************************************************ * config ************************************************/ acados_size_t ocp_nlp_config_calculate_size(int N) { acados_size_t size = 0; // self size += sizeof(ocp_nlp_config); // qp solver size += 1 * ocp_qp_xcond_solver_config_calculate_size(); // regularization size += ocp_nlp_reg_config_calculate_size(); // dynamics size += N * sizeof(ocp_nlp_dynamics_config *); for (int i = 0; i < N; i++) size += ocp_nlp_dynamics_config_calculate_size(); // cost size += (N + 1) * sizeof(ocp_nlp_cost_config *); for (int i = 0; i <= N; i++) size += ocp_nlp_cost_config_calculate_size(); // constraints size += (N + 1) * sizeof(ocp_nlp_constraints_config *); for (int i = 0; i <= N; i++) size += ocp_nlp_constraints_config_calculate_size(); return size; } ocp_nlp_config *ocp_nlp_config_assign(int N, void *raw_memory) { char *c_ptr = (char *) raw_memory; ocp_nlp_config *config = (ocp_nlp_config *) c_ptr; c_ptr += sizeof(ocp_nlp_config); config->N = N; // qp solver config->qp_solver = ocp_qp_xcond_solver_config_assign(c_ptr); c_ptr += ocp_qp_xcond_solver_config_calculate_size(); // regularization config->regularize = ocp_nlp_reg_config_assign(c_ptr); c_ptr += ocp_nlp_reg_config_calculate_size(); // dynamics config->dynamics = (ocp_nlp_dynamics_config **) c_ptr; c_ptr += N * sizeof(ocp_nlp_dynamics_config *); for (int i = 0; i < N; i++) { config->dynamics[i] = ocp_nlp_dynamics_config_assign(c_ptr); c_ptr += ocp_nlp_dynamics_config_calculate_size(); } // 
cost config->cost = (ocp_nlp_cost_config **) c_ptr; c_ptr += (N + 1) * sizeof(ocp_nlp_cost_config *); for (int i = 0; i <= N; i++) { config->cost[i] = ocp_nlp_cost_config_assign(c_ptr); c_ptr += ocp_nlp_cost_config_calculate_size(); } // constraints config->constraints = (ocp_nlp_constraints_config **) c_ptr; c_ptr += (N + 1) * sizeof(ocp_nlp_constraints_config *); for (int i = 0; i <= N; i++) { config->constraints[i] = ocp_nlp_constraints_config_assign(c_ptr); c_ptr += ocp_nlp_constraints_config_calculate_size(); } return config; } /************************************************ * dims ************************************************/ static acados_size_t ocp_nlp_dims_calculate_size_self(int N) { acados_size_t size = 0; size += sizeof(ocp_nlp_dims); // nlp sizes size += 6 * (N + 1) * sizeof(int); // nv, nx, nu, ni, nz, ns // dynamics size += N * sizeof(void *); // cost size += (N + 1) * sizeof(void *); // constraints size += (N + 1) * sizeof(void *); // regularization size += ocp_nlp_reg_dims_calculate_size(N); size += sizeof(ocp_nlp_reg_dims); size += 8; // initial align size += 8; // intermediate align make_int_multiple_of(8, &size); return size; } acados_size_t ocp_nlp_dims_calculate_size(void *config_) { ocp_nlp_config *config = config_; int N = config->N; acados_size_t size = 0; // self size += ocp_nlp_dims_calculate_size_self(N); // dynamics for (int i = 0; i < N; i++) size += config->dynamics[i]->dims_calculate_size(config->dynamics[i]); // cost for (int i = 0; i <= N; i++) size += config->cost[i]->dims_calculate_size(config->cost[i]); // constraints for (int i = 0; i <= N; i++) size += config->constraints[i]->dims_calculate_size(config->constraints[i]); // qp solver size += config->qp_solver->dims_calculate_size(config->qp_solver, N); return size; } static ocp_nlp_dims *ocp_nlp_dims_assign_self(int N, void *raw_memory) { char *c_ptr = (char *) raw_memory; // initial align align_char_to(8, &c_ptr); // struct ocp_nlp_dims *dims = (ocp_nlp_dims *) c_ptr; c_ptr += sizeof(ocp_nlp_dims); // dynamics dims->dynamics = (void **) c_ptr; c_ptr += N * sizeof(void *); // cost dims->cost = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); // constraints dims->constraints = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); // nv assign_and_advance_int(N + 1, &dims->nv, &c_ptr); // nx assign_and_advance_int(N + 1, &dims->nx, &c_ptr); // nu assign_and_advance_int(N + 1, &dims->nu, &c_ptr); // ni assign_and_advance_int(N + 1, &dims->ni, &c_ptr); // nz assign_and_advance_int(N + 1, &dims->nz, &c_ptr); // ns assign_and_advance_int(N + 1, &dims->ns, &c_ptr); // intermediate align align_char_to(8, &c_ptr); // regularization dims->regularize = ocp_nlp_reg_dims_assign(N, c_ptr); c_ptr += ocp_nlp_reg_dims_calculate_size(N); /* initialize qp_solver dimensions */ // dims->qp_solver->N = N; // for (int i = 0; i <= N; i++) // { // TODO(dimitris): values below are needed for reformulation of QP when soft constraints // are not supported. Make this a bit more transparent as it clashes with nbx/nbu above.
// dims->qp_solver->nsbx[i] = 0; // dims->qp_solver->nsbu[i] = 0; // dims->qp_solver->nsg[i] = 0; // } // N dims->N = N; // initialize dimensions to zero by default // nv for(int i=0; i<=N; i++) dims->nv[i] = 0; // nx for(int i=0; i<=N; i++) dims->nx[i] = 0; // nu for(int i=0; i<=N; i++) dims->nu[i] = 0; // ni for(int i=0; i<=N; i++) dims->ni[i] = 0; // nz for(int i=0; i<=N; i++) dims->nz[i] = 0; // ns for(int i=0; i<=N; i++) dims->ns[i] = 0; // TODO initialize dims to zero by default also in modules !!!!!!! // assert assert((char *) raw_memory + ocp_nlp_dims_calculate_size_self(N) >= c_ptr); return dims; } ocp_nlp_dims *ocp_nlp_dims_assign(void *config_, void *raw_memory) { ocp_nlp_config *config = config_; int N = config->N; char *c_ptr = (char *) raw_memory; // self ocp_nlp_dims *dims = ocp_nlp_dims_assign_self(N, c_ptr); c_ptr += ocp_nlp_dims_calculate_size_self(N); // dynamics for (int i = 0; i < N; i++) { dims->dynamics[i] = config->dynamics[i]->dims_assign(config->dynamics[i], c_ptr); c_ptr += config->dynamics[i]->dims_calculate_size(config->dynamics[i]); } // cost for (int i = 0; i <= N; i++) { dims->cost[i] = config->cost[i]->dims_assign(config->cost[i], c_ptr); c_ptr += config->cost[i]->dims_calculate_size(config->cost[i]); } // constraints for (int i = 0; i <= N; i++) { dims->constraints[i] = config->constraints[i]->dims_assign(config->constraints[i], c_ptr); c_ptr += config->constraints[i]->dims_calculate_size(config->constraints[i]); } // qp solver dims->qp_solver = config->qp_solver->dims_assign(config->qp_solver, N, c_ptr); c_ptr += config->qp_solver->dims_calculate_size(config->qp_solver, N); // assert assert((char *) raw_memory + ocp_nlp_dims_calculate_size(config_) >= c_ptr); return dims; } void ocp_nlp_dims_set_opt_vars(void *config_, void *dims_, const char *field, const void* value_array) { // to set dimension nx, nu, nz, ns (number of slacks = number of soft constraints) ocp_nlp_config *config = config_; ocp_nlp_dims *dims = dims_; int N = config->N; int *int_array = (int *) value_array; /* set ocp_nlp dimension */ if (!strcmp(field, "nx")) { // opt var for (int i = 0; i <= N; i++) { // set nx dims->nx[i] = int_array[i]; // update nv dims->nv[i] = dims->nu[i] + dims->nx[i] + 2 * dims->ns[i]; } // cost for (int i = 0; i <= N; i++) { config->cost[i]->dims_set(config->cost[i], dims->cost[i], "nx", &int_array[i]); } // dynamics for (int i = 0; i < N; i++) { config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nx", &int_array[i]); } for (int i = 0; i < N; i++) { config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nx1", &int_array[i+1]); } // constraints for (int i = 0; i <= N; i++) { config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], "nx", &int_array[i]); } // qp solver for (int i = 0; i <= N; i++) { config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nx", &int_array[i]); } // regularization for (int i = 0; i <= N; i++) { config->regularize->dims_set(config->regularize, dims->regularize, i, "nx", &int_array[i]); } } else if (!strcmp(field, "nu")) { // nlp opt var for (int i = 0; i <= N; i++) { // set nu dims->nu[i] = int_array[i]; // update nv dims->nv[i] = dims->nu[i] + dims->nx[i] + 2 * dims->ns[i]; } // cost for (int i = 0; i <= N; i++) { config->cost[i]->dims_set(config->cost[i], dims->cost[i], "nu", &int_array[i]); } // dynamics for (int i = 0; i < N; i++) { config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nu", &int_array[i]); } for (int i = 0; i < N; i++) { 
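/* Editor's note: as in the "nx" branch above, the dynamics module at stage i
   also needs the input dimension of the *following* stage, which it exposes
   under the field name "nu1"; hence the shifted index i+1 below. */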
config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nu1", &int_array[i+1]); } // constraints for (int i = 0; i <= N; i++) { config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], "nu", &int_array[i]); } // qp solver for (int i = 0; i <= N; i++) { config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nu", &int_array[i]); } // regularization for (int i = 0; i <= N; i++) { config->regularize->dims_set(config->regularize, dims->regularize, i, "nu", &int_array[i]); } } else if (!strcmp(field, "nz")) { // nlp opt var for (int i = 0; i <= N; i++) { // set nz dims->nz[i] = int_array[i]; } // cost for (int i = 0; i <= N; i++) { config->cost[i]->dims_set(config->cost[i], dims->cost[i], "nz", &int_array[i]); } // dynamics for (int i = 0; i < N; i++) { config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nz", &int_array[i]); } // constraints for (int i = 0; i <= N; i++) { config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], "nz", &int_array[i]); } } else if (!strcmp(field, "ns")) { // nlp opt var for (int i = 0; i <= N; i++) { // set ns dims->ns[i] = int_array[i]; // update nv dims->nv[i] = dims->nu[i] + dims->nx[i] + 2 * dims->ns[i]; } // cost for (int i = 0; i <= N; i++) { config->cost[i]->dims_set(config->cost[i], dims->cost[i], "ns", &int_array[i]); } // qp solver for (int i = 0; i <= N; i++) { config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "ns", &int_array[i]); } } else { printf("error: dims type not available in module ocp_nlp: %s", field); exit(1); } #if 0 /* set ocp_nlp submodule dimensions */ if (strcmp(field, "ns")) // dynamics do not contain slack/soft constraints { for (int i = 0; i < N; i++) { config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], field, &int_array[i]); } } if (!strcmp(field, "nu")) { for (int i = 0; i < N; i++) { config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nu1", &int_array[i+1]); } } if (!strcmp(field, "nx")) { for (int i = 0; i < N; i++) { config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i], "nx1", &int_array[i+1]); } } for (int i = 0; i <= N; i++) // cost { config->cost[i]->dims_set(config->cost[i], dims->cost[i], field, &int_array[i]); } for (int i = 0; i <= N; i++) // constraints { config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], field, &int_array[i]); } if (strcmp(field, "nz")) // qp_solver does not contain nz { for (int i = 0; i <= N; i++) // qp_solver { config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, &int_array[i]); } } #endif } void ocp_nlp_dims_set_constraints(void *config_, void *dims_, int stage, const char *field, const void* value_) { // to set dimension nbx, nbu, ng, nh, nq (quadratic over nonlinear) ocp_nlp_config *config = config_; ocp_nlp_dims *dims = dims_; int *int_value = (int *) value_; int i = stage; // set in constraint module config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i], field, int_value); // update ni in ocp_nlp dimensions config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "ni", &dims->ni[i]); // update qp_solver dims if ( (!strcmp(field, "nbx")) || (!strcmp(field, "nbu")) ) { // qp solver config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value); // regularization config->regularize->dims_set(config->regularize, dims->regularize, i, (char *) field, int_value); } else if ( (!strcmp(field, "nsbx")) || (!strcmp(field, "nsbu")) ) { 
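/* Editor's note: the slack (soft-bound) dimensions nsbx/nsbu were already
   stored in the constraints module above; here they only need to be mirrored
   into the QP solver dims. Unlike nbx/nbu, they are not forwarded to the
   regularization module, which does not track slack dimensions. */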
// qp solver config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value); } else if ( (!strcmp(field, "ng")) || (!strcmp(field, "nh")) || (!strcmp(field, "nphi"))) { // update ng_qp_solver in qp_solver int ng_qp_solver; config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "ng_qp_solver", &ng_qp_solver); // qp solver config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "ng", &ng_qp_solver); // regularization config->regularize->dims_set(config->regularize, dims->regularize, i, "ng", &ng_qp_solver); } else if ( (!strcmp(field, "nsg")) || (!strcmp(field, "nsh")) || (!strcmp(field, "nsphi"))) { // update ng_qp_solver in qp_solver int nsg_qp_solver; config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "nsg_qp_solver", &nsg_qp_solver); // qp solver config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nsg", &nsg_qp_solver); } else if ( (!strcmp(field, "nbxe")) || (!strcmp(field, "nbue")) ) { // qp solver config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value); } else if ( (!strcmp(field, "nge")) || (!strcmp(field, "nhe")) || (!strcmp(field, "nphie"))) { // update ng_qp_solver in qp_solver int ng_qp_solver; config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "nge_qp_solver", &ng_qp_solver); // qp solver config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nge", &ng_qp_solver); } } void ocp_nlp_dims_set_cost(void *config_, void *dims_, int stage, const char *field, const void* value_) { // to set dimension ny (output) ocp_nlp_config *config = config_; ocp_nlp_dims *dims = dims_; int *int_value = (int *) value_; config->cost[stage]->dims_set(config->cost[stage], dims->cost[stage], field, int_value); } void ocp_nlp_dims_set_dynamics(void *config_, void *dims_, int stage, const char *field, const void* value) { // mainly for gnsf dimensions ocp_nlp_config *config = config_; ocp_nlp_dims *dims = dims_; int *int_value = (int *) value; config->dynamics[stage]->dims_set(config->dynamics[stage], dims->dynamics[stage], field, int_value); } /************************************************ * in ************************************************/ acados_size_t ocp_nlp_in_calculate_size_self(int N) { acados_size_t size = sizeof(ocp_nlp_in); size += N * sizeof(double); // Ts size += N * sizeof(void *); // dynamics size += (N + 1) * sizeof(void *); // cost size += (N + 1) * sizeof(void *); // constraints return size; } acados_size_t ocp_nlp_in_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims) { int N = dims->N; acados_size_t size = ocp_nlp_in_calculate_size_self(N); // dynamics for (int i = 0; i < N; i++) { size += config->dynamics[i]->model_calculate_size(config->dynamics[i], dims->dynamics[i]); } // cost for (int i = 0; i <= N; i++) { size += config->cost[i]->model_calculate_size(config->cost[i], dims->cost[i]); } // constraints for (int i = 0; i <= N; i++) { size += config->constraints[i]->model_calculate_size(config->constraints[i], dims->constraints[i]); } size += 8; // initial align size += 8; // final align // make_int_multiple_of(64, &size); return size; } ocp_nlp_in *ocp_nlp_in_assign_self(int N, void *raw_memory) { char *c_ptr = (char *) raw_memory; // initial align align_char_to(8, &c_ptr); // struct ocp_nlp_in *in = (ocp_nlp_in *) c_ptr; c_ptr += sizeof(ocp_nlp_in); // Ts assign_and_advance_double(N, &in->Ts, &c_ptr); // dynamics in->dynamics = (void **) c_ptr; c_ptr += N * sizeof(void *); // cost in->cost = (void **) 
c_ptr; c_ptr += (N + 1) * sizeof(void *); // constraints in->constraints = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); align_char_to(8, &c_ptr); return in; } ocp_nlp_in *ocp_nlp_in_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, void *raw_memory) { int N = dims->N; char *c_ptr = (char *) raw_memory; // struct ocp_nlp_in *in = ocp_nlp_in_assign_self(N, c_ptr); c_ptr += ocp_nlp_in_calculate_size_self(N); // dynamics for (int i = 0; i < N; i++) { in->dynamics[i] = config->dynamics[i]->model_assign(config->dynamics[i], dims->dynamics[i], c_ptr); c_ptr += config->dynamics[i]->model_calculate_size(config->dynamics[i], dims->dynamics[i]); } // cost for (int i = 0; i <= N; i++) { in->cost[i] = config->cost[i]->model_assign(config->cost[i], dims->cost[i], c_ptr); c_ptr += config->cost[i]->model_calculate_size(config->cost[i], dims->cost[i]); } // constraints for (int i = 0; i <= N; i++) { in->constraints[i] = config->constraints[i]->model_assign(config->constraints[i], dims->constraints[i], c_ptr); c_ptr += config->constraints[i]->model_calculate_size(config->constraints[i], dims->constraints[i]); } assert((char *) raw_memory + ocp_nlp_in_calculate_size(config, dims) >= c_ptr); return in; } /************************************************ * out ************************************************/ acados_size_t ocp_nlp_out_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims) { // extract dims int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; // int *nu = dims->nu; int *ni = dims->ni; int *nz = dims->nz; acados_size_t size = sizeof(ocp_nlp_out); size += 4 * (N + 1) * sizeof(struct blasfeo_dvec); // ux, lam, t, z size += 1 * N * sizeof(struct blasfeo_dvec); // pi for (int i = 0; i < N; i++) { size += 1 * blasfeo_memsize_dvec(nv[i]); // ux size += 1 * blasfeo_memsize_dvec(nz[i]); // z size += 2 * blasfeo_memsize_dvec(2 * ni[i]); // lam, t size += 1 * blasfeo_memsize_dvec(nx[i + 1]); // pi } size += 1 * blasfeo_memsize_dvec(nv[N]); // ux size += 1 * blasfeo_memsize_dvec(nz[N]); // z size += 2 * blasfeo_memsize_dvec(2 * ni[N]); // lam, t size += 8; // initial align size += 8; // blasfeo_struct align size += 64; // blasfeo_mem align make_int_multiple_of(8, &size); return size; } ocp_nlp_out *ocp_nlp_out_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, void *raw_memory) { // extract sizes int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; // int *nu = dims->nu; int *ni = dims->ni; int *nz = dims->nz; char *c_ptr = (char *) raw_memory; // initial align align_char_to(8, &c_ptr); ocp_nlp_out *out = (ocp_nlp_out *) c_ptr; c_ptr += sizeof(ocp_nlp_out); // blasfeo_struct align align_char_to(8, &c_ptr); // blasfeo_dvec_struct // ux assign_and_advance_blasfeo_dvec_structs(N + 1, &out->ux, &c_ptr); // z assign_and_advance_blasfeo_dvec_structs(N + 1, &out->z, &c_ptr); // pi assign_and_advance_blasfeo_dvec_structs(N, &out->pi, &c_ptr); // lam assign_and_advance_blasfeo_dvec_structs(N + 1, &out->lam, &c_ptr); // t assign_and_advance_blasfeo_dvec_structs(N + 1, &out->t, &c_ptr); // blasfeo_mem align align_char_to(64, &c_ptr); // blasfeo_dvec // ux for (int i = 0; i <= N; ++i) { assign_and_advance_blasfeo_dvec_mem(nv[i], out->ux + i, &c_ptr); } // z for (int i = 0; i <= N; ++i) { assign_and_advance_blasfeo_dvec_mem(nz[i], out->z + i, &c_ptr); } // pi for (int i = 0; i < N; ++i) { assign_and_advance_blasfeo_dvec_mem(nx[i + 1], out->pi + i, &c_ptr); } // lam for (int i = 0; i <= N; ++i) { assign_and_advance_blasfeo_dvec_mem(2 * ni[i], out->lam + i, &c_ptr); } // t for (int i = 0; i <= N; ++i) 
{ assign_and_advance_blasfeo_dvec_mem(2 * ni[i], out->t + i, &c_ptr); } // zero solution for(int i=0; i<N; i++) { blasfeo_dvecse(nv[i], 0.0, out->ux+i, 0); blasfeo_dvecse(nz[i], 0.0, out->z+i, 0); blasfeo_dvecse(nx[i+1], 0.0, out->pi+i, 0); blasfeo_dvecse(2*ni[i], 0.0, out->lam+i, 0); blasfeo_dvecse(2*ni[i], 0.0, out->t+i, 0); } blasfeo_dvecse(nv[N], 0.0, out->ux+N, 0); blasfeo_dvecse(nz[N], 0.0, out->z+N, 0); blasfeo_dvecse(2*ni[N], 0.0, out->lam+N, 0); blasfeo_dvecse(2*ni[N], 0.0, out->t+N, 0); assert((char *) raw_memory + ocp_nlp_out_calculate_size(config, dims) >= c_ptr); return out; } /************************************************ * options ************************************************/ acados_size_t ocp_nlp_opts_calculate_size(void *config_, void *dims_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; int N = dims->N; acados_size_t size = 0; size += sizeof(ocp_nlp_opts); size += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver); size += config->regularize->opts_calculate_size(); // dynamics size += N * sizeof(void *); for (int i = 0; i < N; i++) { size += dynamics[i]->opts_calculate_size(dynamics[i], dims->dynamics[i]); } // cost size += (N + 1) * sizeof(void *); for (int i = 0; i <= N; i++) { size += cost[i]->opts_calculate_size(cost[i], dims->cost[i]); } // constraints size += (N + 1) * sizeof(void *); for (int i = 0; i <= N; i++) { size += constraints[i]->opts_calculate_size(constraints[i], dims->constraints[i]); } size += 2*8; // 2 aligns return size; } void *ocp_nlp_opts_assign(void *config_, void *dims_, void *raw_memory) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; int N = dims->N; char *c_ptr = (char *) raw_memory; align_char_to(8, &c_ptr); ocp_nlp_opts *opts = (ocp_nlp_opts *) c_ptr; c_ptr += sizeof(ocp_nlp_opts); /* pointers to substructures */ opts->dynamics = (void **) c_ptr; c_ptr += N * sizeof(void *); opts->cost = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); opts->constraints = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); align_char_to(8, &c_ptr); /* substructures */ opts->qp_solver_opts = qp_solver->opts_assign(qp_solver, dims->qp_solver, c_ptr); c_ptr += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver); opts->regularize = config->regularize->opts_assign(c_ptr); c_ptr += config->regularize->opts_calculate_size(); // dynamics for (int i = 0; i < N; i++) { opts->dynamics[i] = dynamics[i]->opts_assign(dynamics[i], dims->dynamics[i], c_ptr); c_ptr += dynamics[i]->opts_calculate_size(dynamics[i], dims->dynamics[i]); } // cost for (int i = 0; i <= N; i++) { opts->cost[i] = cost[i]->opts_assign(cost[i], dims->cost[i], c_ptr); c_ptr += cost[i]->opts_calculate_size(cost[i], dims->cost[i]); } // constraints for (int i = 0; i <= N; i++) { opts->constraints[i] = constraints[i]->opts_assign(constraints[i], dims->constraints[i], c_ptr); c_ptr += constraints[i]->opts_calculate_size(constraints[i], dims->constraints[i]); } assert((char *) raw_memory + ocp_nlp_opts_calculate_size(config, dims) >= c_ptr); return opts; } void ocp_nlp_opts_initialize_default(void *config_, void 
*dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_opts *opts = opts_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; ocp_nlp_reg_config *regularize = config->regularize; int N = dims->N; opts->reuse_workspace = 1; #if defined(ACADOS_WITH_OPENMP) #if defined(ACADOS_NUM_THREADS) opts->num_threads = ACADOS_NUM_THREADS; // printf("\nocp_nlp: openmp threads from macro = %d\n", opts->num_threads); #else opts->num_threads = omp_get_max_threads(); // printf("\nocp_nlp: omp_get_max_threads %d", omp_get_max_threads()); #endif #endif // printf("\nocp_nlp: openmp threads = %d\n", opts->num_threads); opts->globalization = FIXED_STEP; opts->print_level = 0; opts->step_length = 1.0; opts->levenberg_marquardt = 0.0; /* submodules opts */ // qp solver qp_solver->opts_initialize_default(qp_solver, dims->qp_solver, opts->qp_solver_opts); // regularization regularize->opts_initialize_default(regularize, dims->regularize, opts->regularize); // dynamics for (int i = 0; i < N; i++) { dynamics[i]->opts_initialize_default(dynamics[i], dims->dynamics[i], opts->dynamics[i]); } // cost for (int i = 0; i <= N; i++) { cost[i]->opts_initialize_default(cost[i], dims->cost[i], opts->cost[i]); } // constraints for (int i = 0; i <= N; i++) { constraints[i]->opts_initialize_default(constraints[i], dims->constraints[i], opts->constraints[i]); } // globalization opts->alpha_min = 0.05; opts->alpha_reduction = 0.7; opts->full_step_dual = 0; opts->line_search_use_sufficient_descent = 0; opts->globalization_use_SOC = 0; opts->eps_sufficient_descent = 1e-4; // Leineweber1999: MUSCOD-I eps_T = 1e-4 (p.89); Note: eps_T = 0.1 originally proposed by Powell 1978 (Leineweber 1999, p. 53) return; } void ocp_nlp_opts_update(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_opts *opts = opts_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; int N = dims->N; qp_solver->opts_update(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (int i = 0; i < N; i++) { dynamics[i]->opts_update(dynamics[i], dims->dynamics[i], opts->dynamics[i]); } // cost for (int i = 0; i <= N; i++) { cost[i]->opts_update(cost[i], dims->cost[i], opts->cost[i]); } // constraints for (int i = 0; i <= N; i++) { constraints[i]->opts_update(constraints[i], dims->constraints[i], opts->constraints[i]); } return; } void ocp_nlp_opts_set(void *config_, void *opts_, const char *field, void* value) { ocp_nlp_opts *opts = (ocp_nlp_opts *) opts_; ocp_nlp_config *config = config_; char module[MAX_STR_LEN]; char *ptr_module = NULL; int module_length = 0; // extract module name, i.e. 
substring in field before '_' char *char_ = strchr(field, '_'); if (char_!=NULL) { module_length = char_-field; for (int i=0; i<module_length; i++) module[i] = field[i]; module[module_length] = '\0'; // add end of string ptr_module = module; } // pass options to QP module if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) ) { config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, field+module_length+1, value); } else // nlp opts { if (!strcmp(field, "reuse_workspace")) { int* reuse_workspace = (int *) value; opts->reuse_workspace = *reuse_workspace; } else if (!strcmp(field, "num_threads")) { int* num_threads = (int *) value; opts->num_threads = *num_threads; } else if (!strcmp(field, "step_length")) { double* step_length = (double *) value; opts->step_length = *step_length; } else if (!strcmp(field, "alpha_reduction")) { double* alpha_reduction = (double *) value; opts->alpha_reduction = *alpha_reduction; } else if (!strcmp(field, "alpha_min")) { double* alpha_min = (double *) value; opts->alpha_min = *alpha_min; } else if (!strcmp(field, "eps_sufficient_descent")) { double* eps_sufficient_descent = (double *) value; opts->eps_sufficient_descent = *eps_sufficient_descent; } else if (!strcmp(field, "full_step_dual")) { int* full_step_dual = (int *) value; opts->full_step_dual = *full_step_dual; } else if (!strcmp(field, "line_search_use_sufficient_descent")) { int* line_search_use_sufficient_descent = (int *) value; opts->line_search_use_sufficient_descent = *line_search_use_sufficient_descent; } else if (!strcmp(field, "globalization_use_SOC")) { int* globalization_use_SOC = (int *) value; opts->globalization_use_SOC = *globalization_use_SOC; } else if (!strcmp(field, "globalization")) { char* globalization = (char *) value; if (!strcmp(globalization, "fixed_step")) { opts->globalization = FIXED_STEP; } else if (!strcmp(globalization, "merit_backtracking")) { opts->globalization = MERIT_BACKTRACKING; } else { printf("\nerror: ocp_nlp_opts_set: not supported value for globalization, got: %s\n", globalization); exit(1); } } else if (!strcmp(field, "levenberg_marquardt")) { double* levenberg_marquardt = (double *) value; opts->levenberg_marquardt = *levenberg_marquardt; } else if (!strcmp(field, "exact_hess")) { int N = config->N; // cost for (int i=0; i<=N; i++) config->cost[i]->opts_set(config->cost[i], opts->cost[i], "exact_hess", value); // dynamics for (int i=0; i<N; i++) config->dynamics[i]->opts_set(config->dynamics[i], opts->dynamics[i], "compute_hess", value); // constraints for (int i=0; i<=N; i++) config->constraints[i]->opts_set(config->constraints[i], opts->constraints[i], "compute_hess", value); } // selectively turn on exact hessian contributions else if (!strcmp(field, "exact_hess_cost")) { int N = config->N; for (int i=0; i<=N; i++) config->cost[i]->opts_set(config->cost[i], opts->cost[i], "exact_hess", value); } else if (!strcmp(field, "exact_hess_dyn")) { int N = config->N; for (int i=0; i<N; i++) config->dynamics[i]->opts_set(config->dynamics[i], opts->dynamics[i], "compute_hess", value); } else if (!strcmp(field, "exact_hess_constr")) { int N = config->N; for (int i=0; i<=N; i++) config->constraints[i]->opts_set(config->constraints[i], opts->constraints[i], "compute_hess", value); } else if (!strcmp(field, "print_level")) { int* print_level = (int *) value; if (*print_level < 0) { printf("\nerror: ocp_nlp_opts_set: invalid value for print_level field, need int >=0, got %d.\n", *print_level); exit(1); } opts->print_level = *print_level; } else { 
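/* Editor's note: a field that matches neither a "qp_"-prefixed QP-solver
   option nor one of the NLP options handled above is rejected here, so
   misspelled option names fail fast instead of being silently ignored. */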
printf("\nerror: ocp_nlp_opts_set: wrong field: %s\n", field); exit(1); } } return; } void ocp_nlp_opts_set_at_stage(void *config_, void *opts_, int stage, const char *field, void* value) { ocp_nlp_opts *opts = (ocp_nlp_opts *) opts_; ocp_nlp_config *config = config_; char module[MAX_STR_LEN]; char *ptr_module = NULL; int module_length = 0; // extract module name char *char_ = strchr(field, '_'); if (char_!=NULL) { module_length = char_-field; for (int i=0; i<module_length; i++) module[i] = field[i]; module[module_length] = '\0'; // add end of string ptr_module = module; } // pass options to dynamics module if ( ptr_module!=NULL && (!strcmp(ptr_module, "dynamics")) ) { config->dynamics[stage]->opts_set( config->dynamics[stage], opts->dynamics[stage], field+module_length+1, value ); } // pass options to cost module else if ( ptr_module!=NULL && (!strcmp(ptr_module, "cost")) ) { config->cost[stage]->opts_set( config->cost[stage], opts->cost[stage], field+module_length+1, value); } // pass options to constraint module else if ( ptr_module!=NULL && (!strcmp(ptr_module, "constraints")) ) { config->constraints[stage]->opts_set( config->constraints[stage], opts->constraints[stage], (char *) field+module_length+1, value); } else { printf("\nerror: ocp_nlp_opts_set_at_stage: wrong field: %s\n", field); exit(1); } return; } /************************************************ * memory ************************************************/ acados_size_t ocp_nlp_memory_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts) { ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; // extract dims int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; int *nz = dims->nz; int *nu = dims->nu; int *ni = dims->ni; acados_size_t size = sizeof(ocp_nlp_memory); // qp in size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims); // qp out size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims); // qp solver size += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // regularization size += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize); // dynamics size += N * sizeof(void *); for (int i = 0; i < N; i++) { size += dynamics[i]->memory_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]); } // cost size += (N + 1) * sizeof(void *); for (int i = 0; i <= N; i++) { size += cost[i]->memory_calculate_size(cost[i], dims->cost[i], opts->cost[i]); } // constraints size += (N + 1) * sizeof(void *); for (int i = 0; i <= N; i++) { size += constraints[i]->memory_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]); } // nlp res size += ocp_nlp_res_calculate_size(dims); size += (N+1)*sizeof(bool); // set_sim_guess size += (N+1)*sizeof(struct blasfeo_dmat); // dzduxt size += 6*(N+1)*sizeof(struct blasfeo_dvec); // cost_grad ineq_fun ineq_adj dyn_adj sim_guess z_alg size += 1*N*sizeof(struct blasfeo_dvec); // dyn_fun for (int i = 0; i < N; i++) { size += 1*blasfeo_memsize_dmat(nu[i]+nx[i], nz[i]); // dzduxt size += 1*blasfeo_memsize_dvec(nz[i]); // z_alg size += 2*blasfeo_memsize_dvec(nv[i]); // cost_grad ineq_adj size += 1*blasfeo_memsize_dvec(nu[i] + nx[i]); // dyn_adj size += 1*blasfeo_memsize_dvec(nx[i + 1]); // dyn_fun size += 1*blasfeo_memsize_dvec(2 * ni[i]); // ineq_fun size += 1*blasfeo_memsize_dvec(nx[i] + 
nz[i]); // sim_guess } size += 1*blasfeo_memsize_dmat(nu[N]+nx[N], nz[N]); // dzduxt size += 1*blasfeo_memsize_dvec(nz[N]); // z_alg size += 2*blasfeo_memsize_dvec(nv[N]); // cost_grad ineq_adj size += 1*blasfeo_memsize_dvec(nu[N] + nx[N]); // dyn_adj size += 1*blasfeo_memsize_dvec(2 * ni[N]); // ineq_fun size += 1*blasfeo_memsize_dvec(nx[N] + nz[N]); // sim_guess size += 8; // initial align size += 8; // middle align size += 8; // blasfeo_struct align size += 64; // blasfeo_mem align make_int_multiple_of(8, &size); return size; } ocp_nlp_memory *ocp_nlp_memory_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts, void *raw_memory) { ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; // extract sizes int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; int *nz = dims->nz; int *nu = dims->nu; int *ni = dims->ni; char *c_ptr = (char *) raw_memory; // initial align align_char_to(8, &c_ptr); // struct ocp_nlp_memory *mem = (ocp_nlp_memory *) c_ptr; c_ptr += sizeof(ocp_nlp_memory); /* pointers to substructures */ // dynamics mem->dynamics = (void **) c_ptr; c_ptr += N*sizeof(void *); // cost mem->cost = (void **) c_ptr; c_ptr += (N+1)*sizeof(void *); // constraints mem->constraints = (void **) c_ptr; c_ptr += (N+1)*sizeof(void *); // middle align align_char_to(8, &c_ptr); /* substructures */ // qp in mem->qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr); c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims); // qp out mem->qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr); c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims); // QP solver mem->qp_solver_mem = qp_solver->memory_assign(qp_solver, dims->qp_solver, opts->qp_solver_opts, c_ptr); c_ptr += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // regularization mem->regularize_mem = config->regularize->memory_assign(config->regularize, dims->regularize, opts->regularize, c_ptr); c_ptr += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize); // dynamics for (int i = 0; i < N; i++) { mem->dynamics[i] = dynamics[i]->memory_assign(dynamics[i], dims->dynamics[i], opts->dynamics[i], c_ptr); c_ptr += dynamics[i]->memory_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]); } // cost for (int i = 0; i <= N; i++) { mem->cost[i] = cost[i]->memory_assign(cost[i], dims->cost[i], opts->cost[i], c_ptr); c_ptr += cost[i]->memory_calculate_size(cost[i], dims->cost[i], opts->cost[i]); } // constraints for (int i = 0; i <= N; i++) { mem->constraints[i] = constraints[i]->memory_assign(constraints[i], dims->constraints[i], opts->constraints[i], c_ptr); c_ptr += constraints[i]->memory_calculate_size( constraints[i], dims->constraints[i], opts->constraints[i]); } // nlp res mem->nlp_res = ocp_nlp_res_assign(dims, c_ptr); c_ptr += mem->nlp_res->memsize; // blasfeo_struct align align_char_to(8, &c_ptr); // dzduxt assign_and_advance_blasfeo_dmat_structs(N + 1, &mem->dzduxt, &c_ptr); // z_alg assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->z_alg, &c_ptr); // cost_grad assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->cost_grad, &c_ptr); // ineq_fun assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->ineq_fun, &c_ptr); // ineq_adj assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->ineq_adj, &c_ptr); // dyn_fun 
assign_and_advance_blasfeo_dvec_structs(N, &mem->dyn_fun, &c_ptr); // dyn_adj assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->dyn_adj, &c_ptr); // sim_guess assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->sim_guess, &c_ptr); // set_sim_guess assign_and_advance_bool(N+1, &mem->set_sim_guess, &c_ptr); for (int i = 0; i <= N; ++i) { mem->set_sim_guess[i] = false; } // blasfeo_mem align align_char_to(64, &c_ptr); // dzduxt for (int i=0; i<=N; i++) { assign_and_advance_blasfeo_dmat_mem(nu[i]+nx[i], nz[i], mem->dzduxt+i, &c_ptr); } // z_alg for (int i=0; i<=N; i++) { blasfeo_create_dvec(nz[i], mem->z_alg+i, c_ptr); c_ptr += blasfeo_memsize_dvec(nz[i]); } // cost_grad for (int i = 0; i <= N; i++) { assign_and_advance_blasfeo_dvec_mem(nv[i], mem->cost_grad + i, &c_ptr); } // ineq_fun for (int i = 0; i <= N; i++) { assign_and_advance_blasfeo_dvec_mem(2 * ni[i], mem->ineq_fun + i, &c_ptr); } // ineq_adj for (int i = 0; i <= N; i++) { assign_and_advance_blasfeo_dvec_mem(nv[i], mem->ineq_adj + i, &c_ptr); } // dyn_fun for (int i = 0; i < N; i++) { assign_and_advance_blasfeo_dvec_mem(nx[i + 1], mem->dyn_fun + i, &c_ptr); } // dyn_adj for (int i = 0; i <= N; i++) { assign_and_advance_blasfeo_dvec_mem(nu[i] + nx[i], mem->dyn_adj + i, &c_ptr); } // sim_guess for (int i = 0; i <= N; i++) { assign_and_advance_blasfeo_dvec_mem(nx[i] + nz[i], mem->sim_guess + i, &c_ptr); // set to 0; blasfeo_dvecse(nx[i] + nz[i], 0.0, mem->sim_guess+i, 0); // printf("sim_guess i %d: %p\n", i, mem->sim_guess+i); } // printf("created memory %p\n", mem); return mem; } /************************************************ * workspace ************************************************/ acados_size_t ocp_nlp_workspace_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts) { ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; int N = dims->N; int *nx = dims->nx; int *nu = dims->nu; int *ni = dims->ni; // int *nz = dims->nz; acados_size_t size = 0; // nlp size += sizeof(ocp_nlp_workspace); // tmp_nlp_out size += ocp_nlp_out_calculate_size(config, dims); // weight_merit_fun size += ocp_nlp_out_calculate_size(config, dims); // blasfeo_dvec int nxu_max = 0; int nx_max = 0; int ni_max = 0; for (int i = 0; i <= N; i++) { nx_max = nx_max > nx[i] ? nx_max : nx[i]; nxu_max = nxu_max > (nx[i]+nu[i]) ? nxu_max : (nx[i]+nu[i]); ni_max = ni_max > ni[i] ? 
ni_max : ni[i]; } size += 1 * blasfeo_memsize_dvec(nx_max); size += 1 * blasfeo_memsize_dvec(nxu_max); size += 1 * blasfeo_memsize_dvec(ni_max); // array of pointers // cost size += (N+1)*sizeof(void *); // dynamics size += N*sizeof(void *); // constraints size += (N+1)*sizeof(void *); // module workspace if (opts->reuse_workspace) { #if defined(ACADOS_WITH_OPENMP) // qp solver size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (int i = 0; i < N; i++) { size += dynamics[i]->workspace_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]); } // cost for (int i = 0; i <= N; i++) { size += cost[i]->workspace_calculate_size(cost[i], dims->cost[i], opts->cost[i]); } // constraints for (int i = 0; i <= N; i++) { size += constraints[i]->workspace_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]); } #else acados_size_t size_tmp = 0; int tmp; // qp solver tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); size_tmp = tmp > size_tmp ? tmp : size_tmp; // dynamics for (int i = 0; i < N; i++) { tmp = dynamics[i]->workspace_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } // cost for (int i = 0; i <= N; i++) { tmp = cost[i]->workspace_calculate_size(cost[i], dims->cost[i], opts->cost[i]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } // constraints for (int i = 0; i <= N; i++) { tmp = constraints[i]->workspace_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } size += size_tmp; #endif } else { // qp solver size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (int i = 0; i < N; i++) { size += dynamics[i]->workspace_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]); } // cost for (int i = 0; i <= N; i++) { size += cost[i]->workspace_calculate_size(cost[i], dims->cost[i], opts->cost[i]); } // constraints for (int i = 0; i <= N; i++) { size += constraints[i]->workspace_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]); } } size += 8; // struct align return size; } ocp_nlp_workspace *ocp_nlp_workspace_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts, ocp_nlp_memory *mem, void *raw_memory) { ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; int N = dims->N; int *nx = dims->nx; // int *nv = dims->nv; int *nu = dims->nu; int *ni = dims->ni; // int *nz = dims->nz; char *c_ptr = (char *) raw_memory; ocp_nlp_workspace *work = (ocp_nlp_workspace *) c_ptr; c_ptr += sizeof(ocp_nlp_workspace); /* pointers to substructures */ // work->dynamics = (void **) c_ptr; c_ptr += N*sizeof(void *); // work->cost = (void **) c_ptr; c_ptr += (N+1)*sizeof(void *); // work->constraints = (void **) c_ptr; c_ptr += (N+1)*sizeof(void *); align_char_to(8, &c_ptr); /* substructures */ // tmp_nlp_out work->tmp_nlp_out = ocp_nlp_out_assign(config, dims, c_ptr); c_ptr += ocp_nlp_out_calculate_size(config, dims); // weight_merit_fun work->weight_merit_fun = ocp_nlp_out_assign(config, dims, c_ptr); c_ptr += ocp_nlp_out_calculate_size(config, dims); // blasfeo_dvec int nxu_max = 0; int nx_max = 0; int ni_max = 0; for (int i = 0; i <= N; i++) { nx_max = nx_max > nx[i] ? 
nx_max : nx[i]; nxu_max = nxu_max > (nx[i]+nu[i]) ? nxu_max : (nx[i]+nu[i]); ni_max = ni_max > ni[i] ? ni_max : ni[i]; } assign_and_advance_blasfeo_dvec_mem(nxu_max, &work->tmp_nxu, &c_ptr); assign_and_advance_blasfeo_dvec_mem(ni_max, &work->tmp_ni, &c_ptr); assign_and_advance_blasfeo_dvec_mem(nx_max, &work->dxnext_dy, &c_ptr); if (opts->reuse_workspace) { #if defined(ACADOS_WITH_OPENMP) // qp solver work->qp_work = (void *) c_ptr; c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (int i = 0; i < N; i++) { work->dynamics[i] = c_ptr; c_ptr += dynamics[i]->workspace_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]); } // cost for (int i = 0; i <= N; i++) { work->cost[i] = c_ptr; c_ptr += cost[i]->workspace_calculate_size(cost[i], dims->cost[i], opts->cost[i]); } // constraints for (int i = 0; i <= N; i++) { work->constraints[i] = c_ptr; c_ptr += constraints[i]->workspace_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]); } #else acados_size_t size_tmp = 0; int tmp; // qp solver work->qp_work = (void *) c_ptr; tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); size_tmp = tmp > size_tmp ? tmp : size_tmp; // dynamics for (int i = 0; i < N; i++) { work->dynamics[i] = c_ptr; tmp = dynamics[i]->workspace_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } // cost for (int i = 0; i <= N; i++) { work->cost[i] = c_ptr; tmp = cost[i]->workspace_calculate_size(cost[i], dims->cost[i], opts->cost[i]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } // constraints for (int i = 0; i <= N; i++) { work->constraints[i] = c_ptr; tmp = constraints[i]->workspace_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]); size_tmp = tmp > size_tmp ? 
tmp : size_tmp;
        }
        c_ptr += size_tmp;
#endif
    }
    else
    {
        // qp solver
        work->qp_work = (void *) c_ptr;
        c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
        // dynamics
        for (int i = 0; i < N; i++)
        {
            work->dynamics[i] = c_ptr;
            c_ptr += dynamics[i]->workspace_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]);
        }
        // cost
        for (int i = 0; i <= N; i++)
        {
            work->cost[i] = c_ptr;
            c_ptr += cost[i]->workspace_calculate_size(cost[i], dims->cost[i], opts->cost[i]);
        }
        // constraints
        for (int i = 0; i <= N; i++)
        {
            work->constraints[i] = c_ptr;
            c_ptr += constraints[i]->workspace_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]);
        }
    }

    assert((char *) work + ocp_nlp_workspace_calculate_size(config, dims, opts) >= c_ptr);

    return work;
}



/************************************************
 * functions
 ************************************************/

void ocp_nlp_alias_memory_to_submodules(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in, ocp_nlp_out *nlp_out, ocp_nlp_opts *opts, ocp_nlp_memory *nlp_mem, ocp_nlp_workspace *nlp_work)
{
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel
    { // beginning of parallel region
#endif

    int N = dims->N;

    // alias to dynamics_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for nowait
#endif
    for (int i = 0; i < N; i++)
    {
        config->dynamics[i]->memory_set_ux_ptr(nlp_out->ux+i, nlp_mem->dynamics[i]);
        config->dynamics[i]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+i, nlp_mem->dynamics[i]);
        config->dynamics[i]->memory_set_ux1_ptr(nlp_out->ux+i+1, nlp_mem->dynamics[i]);
        config->dynamics[i]->memory_set_tmp_ux1_ptr(nlp_work->tmp_nlp_out->ux+i+1, nlp_mem->dynamics[i]);
        config->dynamics[i]->memory_set_pi_ptr(nlp_out->pi+i, nlp_mem->dynamics[i]);
        config->dynamics[i]->memory_set_tmp_pi_ptr(nlp_work->tmp_nlp_out->pi+i, nlp_mem->dynamics[i]);
        config->dynamics[i]->memory_set_BAbt_ptr(nlp_mem->qp_in->BAbt+i, nlp_mem->dynamics[i]);
        config->dynamics[i]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+i, nlp_mem->dynamics[i]);
        config->dynamics[i]->memory_set_dzduxt_ptr(nlp_mem->dzduxt+i, nlp_mem->dynamics[i]);
        config->dynamics[i]->memory_set_sim_guess_ptr(nlp_mem->sim_guess+i, nlp_mem->set_sim_guess+i, nlp_mem->dynamics[i]);
        config->dynamics[i]->memory_set_z_alg_ptr(nlp_mem->z_alg+i, nlp_mem->dynamics[i]);
    }

    // alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for nowait
#endif
    for (int i = 0; i <= N; i++)
    {
        config->cost[i]->memory_set_ux_ptr(nlp_out->ux+i, nlp_mem->cost[i]);
        config->cost[i]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+i, nlp_mem->cost[i]);
        config->cost[i]->memory_set_z_alg_ptr(nlp_mem->z_alg+i, nlp_mem->cost[i]);
        config->cost[i]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+i, nlp_mem->cost[i]);
        config->cost[i]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+i, nlp_mem->cost[i]);
        config->cost[i]->memory_set_Z_ptr(nlp_mem->qp_in->Z+i, nlp_mem->cost[i]);
    }

    // alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for nowait
#endif
    for (int i = 0; i <= N; i++)
    {
        config->constraints[i]->memory_set_ux_ptr(nlp_out->ux+i, nlp_mem->constraints[i]);
        config->constraints[i]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+i, nlp_mem->constraints[i]);
        config->constraints[i]->memory_set_lam_ptr(nlp_out->lam+i, nlp_mem->constraints[i]);
        config->constraints[i]->memory_set_tmp_lam_ptr(nlp_work->tmp_nlp_out->lam+i, nlp_mem->constraints[i]);
        config->constraints[i]->memory_set_z_alg_ptr(nlp_mem->z_alg+i,
nlp_mem->constraints[i]); config->constraints[i]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+i, nlp_mem->constraints[i]); config->constraints[i]->memory_set_DCt_ptr(nlp_mem->qp_in->DCt+i, nlp_mem->constraints[i]); config->constraints[i]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+i, nlp_mem->constraints[i]); config->constraints[i]->memory_set_idxb_ptr(nlp_mem->qp_in->idxb[i], nlp_mem->constraints[i]); config->constraints[i]->memory_set_idxs_rev_ptr(nlp_mem->qp_in->idxs_rev[i], nlp_mem->constraints[i]); config->constraints[i]->memory_set_idxe_ptr(nlp_mem->qp_in->idxe[i], nlp_mem->constraints[i]); } // alias to regularize memory config->regularize->memory_set_RSQrq_ptr(dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem); config->regularize->memory_set_rq_ptr(dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem); config->regularize->memory_set_BAbt_ptr(dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem); config->regularize->memory_set_b_ptr(dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem); config->regularize->memory_set_idxb_ptr(dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem); config->regularize->memory_set_DCt_ptr(dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem); config->regularize->memory_set_ux_ptr(dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem); config->regularize->memory_set_pi_ptr(dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem); config->regularize->memory_set_lam_ptr(dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem); // copy sampling times into dynamics model #if defined(ACADOS_WITH_OPENMP) #pragma omp for nowait #endif // NOTE(oj): this will lead in an error for irk_gnsf, T must be set in precompute; // -> remove here and make sure precompute is called everywhere (e.g. Python interface). 
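    // nlp_in->Ts[i] holds the sampling time (shooting-interval length) of
    // stage i; it is passed to the i-th dynamics module as the integration
    // time "T" of its sim solver.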
for (int i = 0; i < N; i++) { config->dynamics[i]->model_set(config->dynamics[i], dims->dynamics[i], nlp_in->dynamics[i], "T", nlp_in->Ts+i); } #if defined(ACADOS_WITH_OPENMP) } // end of parallel region #endif return; } void ocp_nlp_initialize_qp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work) { int N = dims->N; #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (int i = 0; i <= N; i++) { // cost config->cost[i]->initialize(config->cost[i], dims->cost[i], in->cost[i], opts->cost[i], mem->cost[i], work->cost[i]); // dynamics if (i < N) config->dynamics[i]->initialize(config->dynamics[i], dims->dynamics[i], in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]); // constraints config->constraints[i]->initialize(config->constraints[i], dims->constraints[i], in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]); } return; } void ocp_nlp_initialize_t_slacks(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work) { struct blasfeo_dvec *ineq_fun; int N = dims->N; int *ni = dims->ni; int *ns = dims->ns; int *nx = dims->nx; int *nu = dims->nu; #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (int i = 0; i <= N; i++) { // copy out->ux to tmp_nlp_out->ux, since this is used in compute_fun blasfeo_dveccp(nx[i]+nu[i]+2*ns[i], out->ux+i, 0, work->tmp_nlp_out->ux+i, 0); // evaluate inequalities config->constraints[i]->compute_fun(config->constraints[i], dims->constraints[i], in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]); ineq_fun = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]); // t = -ineq_fun blasfeo_dveccpsc(2 * ni[i], -1.0, ineq_fun, 0, out->t + i, 0); } return; } void ocp_nlp_approximate_qp_matrices(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work) { int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; int *nu = dims->nu; int *ni = dims->ni; /* stage-wise multiple shooting lagrangian evaluation */ #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (int i = 0; i <= N; i++) { // init Hessian to 0 blasfeo_dgese(nu[i] + nx[i], nu[i] + nx[i], 0.0, mem->qp_in->RSQrq+i, 0, 0); if (i < N) { // Levenberg Marquardt term: Ts[i] * levenberg_marquardt * eye() if (opts->levenberg_marquardt > 0.0) blasfeo_ddiare(nu[i] + nx[i], in->Ts[i] * opts->levenberg_marquardt, mem->qp_in->RSQrq+i, 0, 0); // dynamics config->dynamics[i]->update_qp_matrices(config->dynamics[i], dims->dynamics[i], in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]); } else { // Levenberg Marquardt term: 1.0 * levenberg_marquardt * eye() if (opts->levenberg_marquardt > 0.0) blasfeo_ddiare(nu[i] + nx[i], opts->levenberg_marquardt, mem->qp_in->RSQrq+i, 0, 0); } // cost config->cost[i]->update_qp_matrices(config->cost[i], dims->cost[i], in->cost[i], opts->cost[i], mem->cost[i], work->cost[i]); // constraints config->constraints[i]->update_qp_matrices(config->constraints[i], dims->constraints[i], in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]); } /* collect stage-wise evaluations */ #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (int i=0; i <= N; i++) { // nlp mem: cost_grad struct blasfeo_dvec *cost_grad = 
config->cost[i]->memory_get_grad_ptr(mem->cost[i]); blasfeo_dveccp(nv[i], cost_grad, 0, mem->cost_grad + i, 0); // nlp mem: dyn_fun if (i < N) { struct blasfeo_dvec *dyn_fun = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]); blasfeo_dveccp(nx[i + 1], dyn_fun, 0, mem->dyn_fun + i, 0); } // nlp mem: dyn_adj if (i < N) { struct blasfeo_dvec *dyn_adj = config->dynamics[i]->memory_get_adj_ptr(mem->dynamics[i]); blasfeo_dveccp(nu[i] + nx[i], dyn_adj, 0, mem->dyn_adj + i, 0); } else { blasfeo_dvecse(nu[N] + nx[N], 0.0, mem->dyn_adj + N, 0); } if (i > 0) { struct blasfeo_dvec *dyn_adj = config->dynamics[i-1]->memory_get_adj_ptr(mem->dynamics[i-1]); blasfeo_daxpy(nx[i], 1.0, dyn_adj, nu[i-1]+nx[i-1], mem->dyn_adj+i, nu[i], mem->dyn_adj+i, nu[i]); } // nlp mem: ineq_fun struct blasfeo_dvec *ineq_fun = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]); blasfeo_dveccp(2 * ni[i], ineq_fun, 0, mem->ineq_fun + i, 0); // nlp mem: ineq_adj struct blasfeo_dvec *ineq_adj = config->constraints[i]->memory_get_adj_ptr(mem->constraints[i]); blasfeo_dveccp(nv[i], ineq_adj, 0, mem->ineq_adj + i, 0); } for (int i = 0; i <= N; i++) { // TODO(rien) where should the update happen??? move to qp update ??? // TODO(all): fix and move where appropriate // if (i<N) // { // ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i]; // sim_opts *opts = dynamics_opts->sim_solver; // if (opts->scheme != NULL && opts->scheme->type != exact) // { // for (int_t j = 0; j < nx; j++) // BLASFEO_DVECEL(nlp_mem->cost_grad+i, nu+j) += work->sim_out[i]->grad[j]; // for (int_t j = 0; j < nu; j++) // BLASFEO_DVECEL(nlp_mem->cost_grad+i, j) += work->sim_out[i]->grad[nx+j]; // } // } } } // update QP rhs for SQP (step prim var, abs dual var) // TODO(all): move in dynamics, cost, constraints modules ??? 
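// The QP right-hand sides assembled below are, per stage i:
//   rqz (g) <- mem->cost_grad  (objective gradient),
//   b       <- mem->dyn_fun    (shooting gap x_next(x_i, u_i) - x_{i+1}),
//   d       <- mem->ineq_fun   (inequality residuals, lower and upper).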
void ocp_nlp_approximate_qp_vectors_sqp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work) { int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; // int *nu = dims->nu; int *ni = dims->ni; #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (int i = 0; i <= N; i++) { // g blasfeo_dveccp(nv[i], mem->cost_grad + i, 0, mem->qp_in->rqz + i, 0); // b if (i < N) blasfeo_dveccp(nx[i + 1], mem->dyn_fun + i, 0, mem->qp_in->b + i, 0); // d blasfeo_dveccp(2 * ni[i], mem->ineq_fun + i, 0, mem->qp_in->d + i, 0); } } void ocp_nlp_embed_initial_value(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work) { int *ni = dims->ni; // constraints config->constraints[0]->bounds_update(config->constraints[0], dims->constraints[0], in->constraints[0], opts->constraints[0], mem->constraints[0], work->constraints[0]); // nlp mem: ineq_fun struct blasfeo_dvec *ineq_fun = config->constraints[0]->memory_get_fun_ptr(mem->constraints[0]); blasfeo_dveccp(2 * ni[0], ineq_fun, 0, mem->ineq_fun, 0); // d blasfeo_dveccp(2 * ni[0], mem->ineq_fun, 0, mem->qp_in->d, 0); } double ocp_nlp_compute_merit_gradient(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work) { /* computes merit function gradient at iterate: out -- using already evaluated gradients of submodules with weights: work->weight_merit_fun */ int i, j; int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; int *nu = dims->nu; int *ni = dims->ni; double merit_grad = 0.0; double weight; // NOTE: step is in: mem->qp_out->ux struct blasfeo_dvec *tmp_vec; // size nv struct blasfeo_dvec tmp_vec_nxu = work->tmp_nxu; // size nxu struct blasfeo_dvec dxnext_dy = work->dxnext_dy; // size nx // cost for (i=0; i<=N; i++) { tmp_vec = config->cost[i]->memory_get_grad_ptr(mem->cost[i]); merit_grad += blasfeo_ddot(nv[i], tmp_vec, 0, mem->qp_out->ux + i, 0); } double merit_grad_cost = merit_grad; /* dynamics */ double merit_grad_dyn = 0.0; for (i=0; i<N; i++) { // get shooting node gap x_next(x_n, u_n) - x_{n+1}; tmp_vec = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]); /* compute directional derivative of xnext with direction y -> dxnext_dy */ blasfeo_dgemv_t(nx[i]+nu[i], nx[i+1], 1.0, mem->qp_in->BAbt+i, 0, 0, mem->qp_out->ux+i, 0, 0.0, &dxnext_dy, 0, &dxnext_dy, 0); /* add merit gradient contributions depending on sign of shooting gap */ for (j = 0; j < nx[i+1]; j++) { weight = BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j); double deqj_dy = BLASFEO_DVECEL(&dxnext_dy, j) - BLASFEO_DVECEL(mem->qp_out->ux+(i+1), nu[i+1]+j); { if (BLASFEO_DVECEL(tmp_vec, j) > 0) { merit_grad_dyn += weight * deqj_dy; // printf("\ndyn_contribution +%e, weight %e, deqj_dy %e, i %d, j %d", weight * deqj_dy, weight, deqj_dy, i, j); } else { merit_grad_dyn -= weight * deqj_dy; // printf("\ndyn_contribution %e, weight %e, deqj_dy %e, i %d, j %d", -weight * deqj_dy, weight, deqj_dy, i, j); } } } } /* inequality contributions */ // NOTE: slack bound inequalities are not considered here. // They should never be infeasible. Only if explicitly initialized infeasible from outside. 
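    // The merit function (cf. ocp_nlp_evaluate_merit_fun) is the L1-type penalty
    //   phi(y) = cost(y) + sum_ij |w_pi[i]_j| * |dyn_fun[i]_j|
    //                    + sum_ij |w_lam[i]_j| * max(0, ineq_fun[i]_j).
    // Below, every active inequality (positive residual) contributes
    // weight * d(residual)/dy along the QP step: bounds via the step in the
    // corresponding ux entry, general linear constraints via the matching
    // column of DCt, and softened constraints via the step in their slack.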
int constr_index, slack_index_in_ux, slack_index; ocp_qp_dims* qp_dims = mem->qp_in->dim; int *nb = qp_dims->nb; int *ng = qp_dims->ng; int *ns = qp_dims->ns; double merit_grad_ineq = 0.0; double slack_step; for (i=0; i<=N; i++) { tmp_vec = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]); int *idxb = mem->qp_in->idxb[i]; if (ni[i] > 0) { // NOTE: loop could be simplified handling lower and upper constraints together. for (j = 0; j < 2 * (nb[i] + ng[i]); j++) // 2 * ni { double constraint_val = BLASFEO_DVECEL(tmp_vec, j); if (constraint_val > 0) { weight = BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j); // find corresponding slack value constr_index = j < nb[i]+ng[i] ? j : j-(nb[i]+ng[i]); slack_index = mem->qp_in->idxs_rev[i][constr_index]; // if softened: add slack contribution if (slack_index >= 0) { slack_index_in_ux = j < (nb[i]+ng[i]) ? nx[i] + nu[i] + slack_index : nx[i] + nu[i] + slack_index + ns[i]; slack_step = BLASFEO_DVECEL(mem->qp_out->ux+i, slack_index_in_ux); merit_grad_ineq -= weight * slack_step; // printf("at node %d, ineq %d, idxs_rev[%d] = %d\n", i, j, constr_index, slack_index); // printf("slack contribution: uxs[%d] = %e\n", slack_index_in_ux, slack_step); } // NOTE: the inequalities are internally organized in the following order: // [ lbu lbx lg lh lphi ubu ubx ug uh uphi; // lsbu lsbx lsg lsh lsphi usbu usbx usg ush usphi] // printf("constraint %d %d is active with value %e", i, j, constraint_val); if (j < nb[i]) { // printf("lower idxb[%d] = %d dir %f, constraint_val %f, nb = %d\n", j, idxb[j], BLASFEO_DVECEL(mem->qp_out->ux, idxb[j]), constraint_val, nb[i]); merit_grad_ineq += weight * BLASFEO_DVECEL(mem->qp_out->ux+i, idxb[j]); } else if (j < nb[i] + ng[i]) { // merit_grad_ineq += weight * mem->qp_in->DCt_j * dux blasfeo_dcolex(nx[i] + nu[i], mem->qp_in->DCt+i, j - nb[i], 0, &tmp_vec_nxu, 0); merit_grad_ineq += weight * blasfeo_ddot(nx[i] + nu[i], &tmp_vec_nxu, 0, mem->qp_out->ux+i, 0); // printf("general linear constraint lower contribution = %e, val = %e\n", blasfeo_ddot(nx[i] + nu[i], &tmp_vec_nxu, 0, mem->qp_out->ux+i, 0), constraint_val); } else if (j < 2*nb[i] + ng[i]) { // printf("upper idxb[%d] = %d dir %f, constraint_val %f, nb = %d\n", j-nb[i]-ng[i], idxb[j-nb[i]-ng[i]], BLASFEO_DVECEL(mem->qp_out->ux, idxb[j-nb[i]-ng[i]]), constraint_val, nb[i]); merit_grad_ineq += weight * BLASFEO_DVECEL(mem->qp_out->ux+i, idxb[j-nb[i]-ng[i]]); } else if (j < 2*nb[i] + 2*ng[i]) { blasfeo_dcolex(nx[i] + nu[i], mem->qp_in->DCt+i, j - 2*nb[i] - ng[i], 0, &tmp_vec_nxu, 0); merit_grad_ineq += weight * blasfeo_ddot(nx[i] + nu[i], &tmp_vec_nxu, 0, mem->qp_out->ux+i, 0); // printf("general linear constraint upper contribution = %e, val = %e\n", blasfeo_ddot(nx[i] + nu[i], &tmp_vec_nxu, 0, mem->qp_out->ux+i, 0), constraint_val); } } } } } // print_ocp_qp_dims(qp_dims); // print_ocp_qp_in(mem->qp_in); merit_grad = merit_grad_cost + merit_grad_dyn + merit_grad_ineq; if (opts->print_level > 1) printf("computed merit_grad = %e, merit_grad_cost = %e, merit_grad_dyn = %e, merit_grad_ineq = %e\n", merit_grad, merit_grad_cost, merit_grad_dyn, merit_grad_ineq); return merit_grad; } static double ocp_nlp_get_violation(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work) { // computes constraint violation infinity norm // assumes constraint functions are evaluated before, e.g. 
done in ocp_nlp_evaluate_merit_fun int i, j; int N = dims->N; int *nx = dims->nx; int *ni = dims->ni; struct blasfeo_dvec *tmp_fun_vec; double violation = 0.0; double tmp; for (i=0; i<N; i++) { tmp_fun_vec = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]); for (j=0; j<nx[i+1]; j++) { tmp = fabs(BLASFEO_DVECEL(tmp_fun_vec, j)); violation = tmp > violation ? tmp : violation; } } for (i=0; i<=N; i++) { tmp_fun_vec = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]); for (j=0; j<2*ni[i]; j++) { // Note constraint violation corresponds to > 0 tmp = BLASFEO_DVECEL(tmp_fun_vec, j); violation = tmp > violation ? tmp : violation; } } return violation; } double ocp_nlp_evaluate_merit_fun(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work) { /* computes merit function value at iterate: tmp_nlp_out, with weights: work->weight_merit_fun */ //int j; int N = dims->N; int *nx = dims->nx; int *ni = dims->ni; double merit_fun = 0.0; // compute fun value #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (int i=0; i<=N; i++) { // cost config->cost[i]->compute_fun(config->cost[i], dims->cost[i], in->cost[i], opts->cost[i], mem->cost[i], work->cost[i]); } #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (int i=0; i<N; i++) { // dynamics config->dynamics[i]->compute_fun(config->dynamics[i], dims->dynamics[i], in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]); } #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (int i=0; i<=N; i++) { // constr config->constraints[i]->compute_fun(config->constraints[i], dims->constraints[i], in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]); } double *tmp_fun; double tmp; struct blasfeo_dvec *tmp_fun_vec; double cost_fun = 0.0; for(int i=0; i<=N; i++) { tmp_fun = config->cost[i]->memory_get_fun_ptr(mem->cost[i]); cost_fun += *tmp_fun; } double dyn_fun = 0.0; for(int i=0; i<N; i++) { tmp_fun_vec = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]); // printf("\nMerit: dyn will multiply tmp_fun, weights %d\n", i); // blasfeo_print_exp_tran_dvec(nx[i+1], tmp_fun_vec, 0); // blasfeo_print_exp_tran_dvec(nx[i+1], work->weight_merit_fun->pi+i, 0); for(int j=0; j<nx[i+1]; j++) { // printf("\n%e %e\n", fabs(BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j)), fabs(BLASFEO_DVECEL(tmp_fun_vec, j))); dyn_fun += fabs(BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j)) * fabs(BLASFEO_DVECEL(tmp_fun_vec, j)); } } double constr_fun = 0.0; for(int i=0; i<=N; i++) { // printf("\ni %d\n", i); tmp_fun_vec = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]); // blasfeo_print_exp_tran_dvec(2*ni[i], tmp_fun_vec, 0); // blasfeo_print_exp_tran_dvec(2*ni[i], work->weight_merit_fun->lam+i, 0); for (int j=0; j<2*ni[i]; j++) { tmp = BLASFEO_DVECEL(tmp_fun_vec, j); if (tmp > 0.0) { // tmp = constraint violation // printf("IN merit fun: ineq i %d, j %d tmp_fun %e, multiplier %e\n", i, j, tmp, BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j)); constr_fun += fabs(BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j)) * tmp; } } } merit_fun = cost_fun + dyn_fun + constr_fun; // printf("Merit fun: %e cost: %e dyn: %e constr: %e\n", merit_fun, cost_fun, dyn_fun, constr_fun); return merit_fun; } double ocp_nlp_line_search(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work, int 
check_early_termination)
{
    int i, j;

    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *ni = dims->ni;

    double alpha = opts->step_length;
    double tmp0, tmp1, merit_fun1;

    ocp_qp_out *qp_out = mem->qp_out;

    // Line search version Jonathan
    // Following Leineweber1999, Section "3.5.1 Line Search Globalization"
    // TODO: check out more advanced step search Leineweber1995

    if (opts->globalization == MERIT_BACKTRACKING)
    {
        // copy out (current iterate) to work->tmp_nlp_out
        for (i = 0; i <= N; i++)
            blasfeo_dveccp(nv[i], out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);
        // for (i = 0; i < N; i++)
        //     blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->tmp_nlp_out->pi+i, 0);
        // for (i = 0; i <= N; i++)
        //     blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->tmp_nlp_out->lam+i, 0);

        // linear update of algebraic variables using state and input sensitivity
        // if (i < N)
        // {
        //     blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0);
        // }

        /* modify/initialize merit function weights (Leineweber1999 M5.1, p.89) */
        if (mem->sqp_iter[0]==0)
        {
            // initialize weights
            // equality merit weights = abs( eq multipliers of qp_sol )
            for (i = 0; i < N; i++)
            {
                for (j=0; j<nx[i+1]; j++)
                {
                    // tmp0 = fabs(BLASFEO_DVECEL(out->pi+i, j));
                    tmp0 = fabs(BLASFEO_DVECEL(qp_out->pi+i, j));
                    BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j) = tmp0;
                }
            }
            for (i = 0; i <= N; i++)
            {
                blasfeo_dveccp(2*ni[i], qp_out->lam+i, 0, work->weight_merit_fun->lam+i, 0);
            }
        }
        else
        {
            // update weights
            // printf("merit fun: update weights, sqp_iter = %d\n", mem->sqp_iter[0]);
            for (i = 0; i < N; i++)
            {
                for (j=0; j<nx[i+1]; j++)
                {
                    // abs(lambda) (LW)
                    tmp0 = fabs(BLASFEO_DVECEL(qp_out->pi+i, j));
                    // .5 * (abs(lambda) + sigma)
                    tmp1 = 0.5 * (tmp0 + BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j));
                    BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j) = tmp0 > tmp1 ? tmp0 : tmp1;
                }
            }
            for (i = 0; i <= N; i++)
            {
                for (j=0; j<2*ni[i]; j++)
                {
                    // mu (LW)
                    tmp0 = BLASFEO_DVECEL(qp_out->lam+i, j);
                    // .5 * (mu + tau)
                    tmp1 = 0.5 * (tmp0 + BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j));
                    BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j) = tmp0 > tmp1 ? tmp0 : tmp1;
                }
            }
        }

        if (1) // (mem->sqp_iter[0]!=0) // TODO: why does Leineweber do full step in first SQP iter?
        {
            double merit_fun0 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);
            double reduction_factor = opts->alpha_reduction;
            double max_next_merit_fun_val = merit_fun0;
            double eps_sufficient_descent = opts->eps_sufficient_descent;
            double dmerit_dy = 0.0;
            alpha = 1.0;

            // to avoid armijo evaluation and loop when checking if SOC should be done
            if (check_early_termination)
            {
                // TMP:
                // printf("tmp: merit_grad eval in early termination\n");
                // dmerit_dy = ocp_nlp_compute_merit_gradient(config, dims, in, out, opts, mem, work);

                // TODO(oj): should the merit weight update be undone in case of early termination?
                double violation_current = ocp_nlp_get_violation(config, dims, in, out, opts, mem, work);

                // tmp_nlp_out = out + alpha * qp_out
                for (i = 0; i <= N; i++)
                    blasfeo_daxpy(nv[i], alpha, qp_out->ux+i, 0, out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);

                merit_fun1 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);
                double violation_step = ocp_nlp_get_violation(config, dims, in, out, opts, mem, work);
                if (opts->print_level > 0)
                {
                    printf("\npreliminary line_search: merit0 %e, merit1 %e; viol_current %e, viol_step %e\n", merit_fun0, merit_fun1, violation_current, violation_step);
                }

                if (merit_fun1 < merit_fun0 && violation_step < violation_current)
                {
                    // TODO: check armijo in this case?
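                    // the full step reduces both the merit function and the
                    // constraint violation: accept alpha = 1.0 right away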
return alpha; } else { return reduction_factor * reduction_factor; } } /* actual Line Search*/ if (opts->line_search_use_sufficient_descent) { // check Armijo-type sufficient descent condition Leinweber1999 (2.35); dmerit_dy = ocp_nlp_compute_merit_gradient(config, dims, in, out, opts, mem, work); if (dmerit_dy > 0.0) { if (dmerit_dy > 1e-6 && opts->print_level > 0) { printf("\nacados line search: found dmerit_dy = %e > 0. Setting it to 0.0 instead\n", dmerit_dy); } dmerit_dy = 0.0; } } // From Leineweber1999: eq (3.64) -> only relevant for adaptive integrators looking at Remark 3.2. // "It is noteworthy that our practical implementation takes into account the potential nonsmoothness introduced by the fact that certain components of the penalty function - namely the continuity condition residuals - are evaluated only within integration tolerance." // double sum_pi = 0.0; // for (i = 0; i < N; i++) // { // for (j = 0; j < dims->nx[i+1]; j++) // sum_pi += BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j); // } // double relaxed_val = 2.0 * 1e-6 * sum_pi; // if (abs(merit_fun0 - merit_fun1) < relaxed_val) // { // printf("\nexiting because of relaxed_val."); // break; // } for (j=0; alpha*reduction_factor > opts->alpha_min; j++) { // tmp_nlp_out = out + alpha * qp_out for (i = 0; i <= N; i++) blasfeo_daxpy(nv[i], alpha, qp_out->ux+i, 0, out->ux+i, 0, work->tmp_nlp_out->ux+i, 0); merit_fun1 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work); if (opts->print_level > 1) { printf("backtracking %d, merit_fun1 = %e, merit_fun0 %e\n", j, merit_fun1, merit_fun0); } // if (merit_fun1 < merit_fun0 && merit_fun1 > max_next_merit_fun_val) // { // printf("\nalpha %f would be accepted without sufficient descent condition", alpha); // } max_next_merit_fun_val = merit_fun0 + eps_sufficient_descent * dmerit_dy * alpha; if (merit_fun1 < max_next_merit_fun_val) { break; } else { alpha *= reduction_factor; } } } } return alpha; } void ocp_nlp_update_variables_sqp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work, double alpha) { int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; int *nu = dims->nu; int *ni = dims->ni; int *nz = dims->nz; #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (int i = 0; i <= N; i++) { // step in primal variables blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux + i, 0, out->ux + i, 0, out->ux + i, 0); // update dual variables if (opts->full_step_dual) { blasfeo_dveccp(2*ni[i], mem->qp_out->lam+i, 0, out->lam+i, 0); if (i < N) { blasfeo_dveccp(nx[i+1], mem->qp_out->pi+i, 0, out->pi+i, 0); } } else { // update duals with alpha step blasfeo_dvecsc(2*ni[i], 1.0-alpha, out->lam+i, 0); blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->lam+i, 0, out->lam+i, 0, out->lam+i, 0); if (i < N) { blasfeo_dvecsc(nx[i+1], 1.0-alpha, out->pi+i, 0); blasfeo_daxpy(nx[i+1], alpha, mem->qp_out->pi+i, 0, out->pi+i, 0, out->pi+i, 0); } } // update slack values blasfeo_dvecsc(2*ni[i], 1.0-alpha, out->t+i, 0); blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->t+i, 0, out->t+i, 0, out->t+i, 0); // linear update of algebraic variables using state and input sensitivity if (i < N) { blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0); } } } /************************************************ * residuals ************************************************/ acados_size_t ocp_nlp_res_calculate_size(ocp_nlp_dims *dims) { // extract dims int N = 
dims->N; int *nv = dims->nv; int *nx = dims->nx; // int *nu = dims->nu; int *ni = dims->ni; acados_size_t size = sizeof(ocp_nlp_res); size += 3 * (N + 1) * sizeof(struct blasfeo_dvec); // res_stat res_ineq res_comp size += 1 * N * sizeof(struct blasfeo_dvec); // res_eq for (int i = 0; i < N; i++) { size += 1 * blasfeo_memsize_dvec(nv[i]); // res_stat size += 1 * blasfeo_memsize_dvec(nx[i + 1]); // res_eq size += 2 * blasfeo_memsize_dvec(2 * ni[i]); // res_ineq res_comp } size += 1 * blasfeo_memsize_dvec(nv[N]); // res_stat size += 2 * blasfeo_memsize_dvec(2 * ni[N]); // res_ineq res_comp size += 8; // initial align size += 8; // blasfeo_struct align size += 64; // blasfeo_mem align make_int_multiple_of(8, &size); return size; } ocp_nlp_res *ocp_nlp_res_assign(ocp_nlp_dims *dims, void *raw_memory) { char *c_ptr = (char *) raw_memory; // extract sizes int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; // int *nu = dims->nu; int *ni = dims->ni; // initial align align_char_to(8, &c_ptr); // struct ocp_nlp_res *res = (ocp_nlp_res *) c_ptr; c_ptr += sizeof(ocp_nlp_res); // blasfeo_struct align align_char_to(8, &c_ptr); // res_stat assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_stat, &c_ptr); // res_eq assign_and_advance_blasfeo_dvec_structs(N, &res->res_eq, &c_ptr); // res_ineq assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_ineq, &c_ptr); // res_comp assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_comp, &c_ptr); // blasfeo_mem align align_char_to(64, &c_ptr); // res_stat for (int i = 0; i <= N; i++) { assign_and_advance_blasfeo_dvec_mem(nv[i], res->res_stat + i, &c_ptr); } // res_eq for (int i = 0; i < N; i++) { assign_and_advance_blasfeo_dvec_mem(nx[i + 1], res->res_eq + i, &c_ptr); } // res_ineq for (int i = 0; i <= N; i++) { assign_and_advance_blasfeo_dvec_mem(2 * ni[i], res->res_ineq + i, &c_ptr); } // res_comp for (int i = 0; i <= N; i++) { assign_and_advance_blasfeo_dvec_mem(2 * ni[i], res->res_comp + i, &c_ptr); } res->memsize = ocp_nlp_res_calculate_size(dims); return res; } void ocp_nlp_res_compute(ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_res *res, ocp_nlp_memory *mem) { // extract dims int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; int *nu = dims->nu; int *ni = dims->ni; double tmp_res; // res_stat res->inf_norm_res_stat = 0.0; for (int i = 0; i <= N; i++) { blasfeo_daxpy(nv[i], -1.0, mem->ineq_adj + i, 0, mem->cost_grad + i, 0, res->res_stat + i, 0); blasfeo_daxpy(nu[i] + nx[i], -1.0, mem->dyn_adj + i, 0, res->res_stat + i, 0, res->res_stat + i, 0); blasfeo_dvecnrm_inf(nv[i], res->res_stat + i, 0, &tmp_res); res->inf_norm_res_stat = tmp_res > res->inf_norm_res_stat ? tmp_res : res->inf_norm_res_stat; } // res_eq res->inf_norm_res_eq = 0.0; for (int i = 0; i < N; i++) { blasfeo_dveccp(nx[i + 1], mem->dyn_fun + i, 0, res->res_eq + i, 0); blasfeo_dvecnrm_inf(nx[i + 1], res->res_eq + i, 0, &tmp_res); res->inf_norm_res_eq = tmp_res > res->inf_norm_res_eq ? tmp_res : res->inf_norm_res_eq; } // res_ineq res->inf_norm_res_ineq = 0.0; for (int i = 0; i <= N; i++) { blasfeo_daxpy(2 * ni[i], 1.0, out->t + i, 0, mem->ineq_fun + i, 0, res->res_ineq + i, 0); blasfeo_dvecnrm_inf(2 * ni[i], res->res_ineq + i, 0, &tmp_res); res->inf_norm_res_ineq = tmp_res > res->inf_norm_res_ineq ? 
tmp_res : res->inf_norm_res_ineq; } // res_comp res->inf_norm_res_comp = 0.0; for (int i = 0; i <= N; i++) { blasfeo_dvecmul(2 * ni[i], out->lam + i, 0, out->t + i, 0, res->res_comp + i, 0); blasfeo_dvecnrm_inf(2 * ni[i], res->res_comp + i, 0, &tmp_res); res->inf_norm_res_comp = tmp_res > res->inf_norm_res_comp ? tmp_res : res->inf_norm_res_comp; } // printf("computed residuals stat: %e, eq: %e, ineq: %e, comp: %e\n", res->inf_norm_res_stat, res->inf_norm_res_eq, // res->inf_norm_res_ineq, res->inf_norm_res_comp); } void ocp_nlp_res_get_inf_norm(ocp_nlp_res *res, double *out) { double norm = res->inf_norm_res_stat; norm = (res->inf_norm_res_eq > norm) ? res->inf_norm_res_eq : norm; norm = (res->inf_norm_res_ineq > norm) ? res->inf_norm_res_ineq : norm; norm = (res->inf_norm_res_comp > norm) ? res->inf_norm_res_comp : norm; *out = norm; return; } void ocp_nlp_cost_compute(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work) { // extract dims int N = dims->N; double* tmp_cost = NULL; double total_cost = 0.0; for (int i = 0; i <= N; i++) { // set pointers // NOTE(oj): the cost compute function takes the tmp_ux_ptr as input, // since it is also used for globalization, // especially with primal variables that are NOT current SQP iterates. config->cost[i]->memory_set_tmp_ux_ptr(out->ux+i, mem->cost[i]); config->cost[i]->compute_fun(config->cost[i], dims->cost[i], in->cost[i], opts->cost[i], mem->cost[i], work->cost[i]); tmp_cost = config->cost[i]->memory_get_fun_ptr(mem->cost[i]); // printf("cost at stage %d = %e, total = %e\n", i, *tmp_cost, total_cost); total_cost += *tmp_cost; } mem->cost_value = total_cost; // printf("\ncomputed total cost: %e\n", total_cost); }
b4c7387_gcc_so12.c
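/*
 * Time-tiled 3D wave-propagation kernel with sparse source injection.
 * The naming conventions (struct dataobj, struct profiler, bf0, the
 * section0/section1 timers) suggest code emitted by a Devito-style stencil
 * compiler: the outer loops implement temporal blocking over time-skewed
 * x/y tiles (note the "x - time" index shifts), and the innermost loops
 * apply a high-order finite-difference update of u followed by injection
 * of save_src_u at the locations selected by the sparse source masks.
 */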
#define _POSIX_C_SOURCE 200809L #define START_TIMER(S) \ struct timeval start_##S, end_##S; \ gettimeofday(&start_##S, NULL); #define STOP_TIMER(S, T) \ gettimeofday(&end_##S, NULL); \ T->S += (double)(end_##S.tv_sec - start_##S.tv_sec) + (double)(end_##S.tv_usec - start_##S.tv_usec) / 1000000; #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "xmmintrin.h" #include "pmmintrin.h" #include <stdio.h> #include "omp.h" #define min(a, b) (((a) < (b)) ? (a) : (b)) #define max(a, b) (((a) > (b)) ? (a) : (b)) struct dataobj { void *restrict data; int *size; int *npsize; int *dsize; int *hsize; int *hofs; int *oofs; }; struct profiler { double section0; double section1; }; void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw); int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine, struct profiler *timers) { int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data; int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data; int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data; /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); int xb_size = block_sizes[0]; int y0_blk0_size = block_sizes[3]; int x0_blk0_size = block_sizes[2]; int yb_size = block_sizes[1]; printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size); //for (int time = time_m, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time 
+ 1)%(3); time <= time_M; time += 1, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3)) //{ int sf = 6; int t_blk_size = 2 * sf * (time_M - time_m); START_TIMER(section0) for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block { for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1) { for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1) { for (int time = t_blk, t1 = (time + 2) % (3), t0 = (time) % (3), t2 = (time + 1) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t1 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3)) { int tw = ((time / sf) % (time_M - time_m + 1)); bf0(damp_vec, dt, u_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, source_id_vec, source_mask_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, time, tw); //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads); //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, nthreads); //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads); } } } /* End section0 */ } STOP_TIMER(section0, timers) for (int time = time_m, t2 = (time + 1) % (3); time <= time_M; time += 1, t2 = (time + 1) % (3)) { START_TIMER(section1) /* Begin section1 */ /* End section1 */ STOP_TIMER(section1, timers) } return 0; } void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw) { float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data; float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data; float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data; int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data; int(*restrict 
source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; if (x0_blk0_size == 0 || y0_blk0_size == 0) { return; } #pragma omp parallel num_threads(nthreads) { #pragma omp for collapse(2) schedule(dynamic, 1) for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size) { for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size) { for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++) { for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++) { #pragma omp simd aligned(damp, u, vp : 32) for (int z = z_m; z <= z_M; z += 1) { float r8 = 1.0/dt; float r7 = 1.0/(dt*dt); float r6 = 1.0/(vp[x - time + 12][y - time + 12][z + 12]*vp[x - time + 12][y - time + 12][z + 12]); u[t2][x - time + 12][y - time + 12][z + 12] = (r6*(-r7*(-2.0F*u[t0][x - time + 12][y - time + 12][z + 12] + u[t1][x - time + 12][y - time + 12][z + 12])) + r8*(damp[x - time + 1][y - time + 1][z + 1]*u[t0][x - time + 12][y - time + 12][z + 12]) - 2.67222496e-7F*(u[t0][x - time + 6][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 6][z + 12] + u[t0][x - time + 12][y - time + 12][z + 6] + u[t0][x - time + 12][y - time + 12][z + 18] + u[t0][x - time + 12][y - time + 18][z + 12] + u[t0][x - time + 18][y - time + 12][z + 12]) + 4.61760473e-6F*(u[t0][x - time + 7][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 7][z + 12] + u[t0][x - time + 12][y - time + 12][z + 7] + u[t0][x - time + 12][y - time + 12][z + 17] + u[t0][x - time + 12][y - time + 17][z + 12] + u[t0][x - time + 17][y - time + 12][z + 12]) - 3.96825406e-5F*(u[t0][x - time + 8][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 8][z + 12] + u[t0][x - time + 12][y - time + 12][z + 8] + u[t0][x - time + 12][y - time + 12][z + 16] + u[t0][x - time + 12][y - time + 16][z + 12] + u[t0][x - time + 16][y - time + 12][z + 12]) + 2.35155796e-4F*(u[t0][x - time + 9][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 9][z + 12] + u[t0][x - time + 12][y - time + 12][z + 9] + u[t0][x - time + 12][y - time + 12][z + 15] + u[t0][x - time + 12][y - time + 15][z + 12] + u[t0][x - time + 15][y - time + 12][z + 12]) - 1.19047622e-3F*(u[t0][x - time + 10][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 10][z + 12] + u[t0][x - time + 12][y - time + 12][z + 10] + u[t0][x - time + 12][y - time + 12][z + 14] + u[t0][x - time + 12][y - time + 14][z + 12] + u[t0][x - time + 14][y - time + 12][z + 12]) + 7.6190478e-3F*(u[t0][x - time + 11][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 11][z + 12] + u[t0][x - time + 12][y - time + 12][z + 11] + u[t0][x - time + 12][y - time + 12][z + 13] + u[t0][x - time + 12][y - time + 13][z + 12] + u[t0][x - time + 13][y - time + 12][z + 12]) - 3.97703713e-2F*u[t0][x - time + 12][y - time + 12][z + 12])/(r6*r7 + r8*damp[x - time + 1][y - time + 1][z + 1]); } #pragma omp simd aligned(damp, u, vp : 32) for (int sp_zi = 
sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1) { int zind = sp_source_mask[x - time][y - time][sp_zi]; float r0 = save_src_u[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; u[t2][x - time + 12][y - time + 12][zind + 12] += r0; } } } } } } }
GB_unop__isfinite_bool_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__isfinite_bool_fc32 // op(A') function: GB_unop_tran__isfinite_bool_fc32 // C type: bool // A type: GxB_FC32_t // cast: GxB_FC32_t cij = (aij) // unaryop: cij = GB_cisfinitef (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_cisfinitef (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = (aij) ; \ Cx [pC] = GB_cisfinitef (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__isfinite_bool_fc32 ( bool *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = GB_cisfinitef (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__isfinite_bool_fc32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
mp.c
// // mp.c - An OpenMP Mandelbrot // // compile: gcc -Wall -o mp -std=c99 -O3 -msse2 -march=native -fopenmp palette.c dictionary.c iniparser.c getopt.c cnames.c cspace.c colors.c elapsed.c util.c cJSON.c mp.c -ljpeg -lgd // gcc -ggdb -Wall -o mp -std=c99 -msse2 -march=native -fopenmp palette.c dictionary.c iniparser.c getopt.c cnames.c cspace.c colors.c elapsed.c util.c cJSON.c mp.c -ljpeg -lgd // debug: gdb -args … // #define __USE_MINGW_ANSI_STDIO 1 #include <stdio.h> #include <stdlib.h> #include "palette.h" #include "colors.h" #include "getopt.h" #include "util.h" #include "mp.h" #include "elapsed.h" char *Version = "0.025"; char *Date = "Sat Jul 29 10:20:53 2017"; typedef struct { __float128 xmin; __float128 xmax; __float128 ymin; __float128 ymax; __float128 ctxfactor; __float128 ctyfactor; } AxesFLT128; typedef struct { long double xmin; long double xmax; long double ymin; long double ymax; long double ctxfactor; long double ctyfactor; } AxesLDBL; typedef struct { double xmin; double xmax; double ymin; double ymax; double ctxfactor; double ctyfactor; } AxesDBL; typedef struct { float xmin; float xmax; float ymin; float ymax; float ctxfactor; float ctyfactor; } AxesFLT; AxesFLT ct_gain_axesFLT( float real, float imag, float diameter, int height, int width ); AxesDBL ct_gain_axesDBL( double real, double imag, double diameter, int height, int width ); AxesLDBL ct_gain_axesLDBL( long double real, long double imag, long double diameter, int height, int width ); AxesFLT128 ct_gain_axesFLT128( __float128 real, __float128 imag, __float128 diameter, int height, int width ); int main( int argc, char *argv[] ) { Parameters g = getParameters( argc, argv, Version, Date ); struct timeval tvBegin, tvEnd, tvDiff; if ( g.palname ) { if ( strncmp( g.palname, "SET", 3 ) == 0 ) { colors = setColors( g ); } else { colors = getColors( g.palname ); } } gettimeofday( &tvBegin, NULL ); int guess = bestGuess( g.diameter, g.width ); printf( "%s vr%s\n", argv[0], Version ); printf( "%s-diameter:%.17Lg, dims: %dx%d %s color=%d, tweak=%d aa=%s\n", guessStr( guess ), ( long double ) g.diameter, g.width, g.height, g.filename, g.color, g.tweak, g.aa ); switch ( guess ) { case FLT: g = msetFLT( g ); break; case DBL: g = msetDBL( g ); break; case LDBL: g = msetLDBL( g ); break; case FLT128: g = msetFLT128( g ); break; default: printf( "Need higher precision\n" ); return 1; break; } gettimeofday( &tvEnd, NULL ); timeval_subtract( &tvDiff, &tvEnd, &tvBegin ); elapsed( tvDiff.tv_sec, tvDiff.tv_usec ); timeval_print( &tvEnd ); FILE *fp = fopen( g.filename, "wb" ); gdImageJpeg( im, fp, 95 ); fclose( fp ); if ( g.aa ) { printf( "Convolving image to %s...\n", g.aa ); gdImageConvolution( im, hpf_filter_1, 0.75, 0.5 ); FILE *cfp = fopen( g.aa, "wb" ); gdImageJpeg( im, cfp, 95 ); fclose( cfp ); } gdImageDestroy( im ); if ( g.palname ) freeArray( &colors ); printf( "maxiter = '%d', nMax = '%d'\n", g.maxiter, g.nMax ); printf( "——————————————————————————————\n" ); return 0; } Parameters msetFLT( Parameters P ) { Parameters g = P; AxesFLT ctaxes = ct_gain_axesFLT( g.centerX, g.centerY, g.diameter, g.height, g.width ); im = gdImageCreateTrueColor( g.width, g.height ); #pragma omp parallel shared(im) { ColorFLT cp; #pragma omp for schedule(dynamic) for ( int Y = 0; Y < g.height; Y++ ) { cp.Cy = ctaxes.ymax - Y * ctaxes.ctyfactor; for ( int X = 0; X < g.width; X++ ) { cp.Cx = ctaxes.xmin + X * ctaxes.ctxfactor; cp.Zx = 0.0; cp.Zy = 0.0; cp.colorPoly = 0; cp.Exps = 0; float Zx2; float Zy2; for ( cp.n = 0; cp.n < g.maxiter; cp.n++ ) { 
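                // escape-time iteration z <- z^2 + c, split into real and
                // imaginary parts; Zx2/Zy2 cache the squares for the
                // |z|^2 > escape test, and Exps accumulates exp(|z|^2),
                // presumably for smoothed coloring in getfColorFLT().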
Zx2 = cp.Zx * cp.Zx; Zy2 = cp.Zy * cp.Zy; if ( ( Zx2 + Zy2 ) > g.escape ) break; cp.Zy = 2 * cp.Zx * cp.Zy + cp.Cy; cp.Zx = Zx2 - Zy2 + cp.Cx; cp.Exps += exp( Zx2 + Zy2 ); } if ( cp.n < g.maxiter ) { g.nMax = max( cp.n, g.nMax ); } gdImageSetPixel( im, X, Y, getfColorFLT( g, cp ) ); } } } return g; } Parameters msetDBL( Parameters P ) { Parameters g = P; AxesDBL ctaxes = ct_gain_axesDBL( g.centerX, g.centerY, g.diameter, g.height, g.width ); im = gdImageCreateTrueColor( g.width, g.height ); #pragma omp parallel shared(im) { ColorDBL cp; #pragma omp for schedule(dynamic) for ( int Y = 0; Y < g.height; Y++ ) { cp.Cy = ctaxes.ymax - Y * ctaxes.ctyfactor; for ( int X = 0; X < g.width; X++ ) { cp.Cx = ctaxes.xmin + X * ctaxes.ctxfactor; cp.Zx = 0.0; cp.Zy = 0.0; cp.colorPoly = 0; cp.Exps = 0; double Zx2; double Zy2; for ( cp.n = 0; cp.n < g.maxiter; cp.n++ ) { Zx2 = cp.Zx * cp.Zx; Zy2 = cp.Zy * cp.Zy; if ( ( Zx2 + Zy2 ) > g.escape ) break; cp.Zy = 2 * cp.Zx * cp.Zy + cp.Cy; cp.Zx = Zx2 - Zy2 + cp.Cx; cp.Exps += exp( Zx2 + Zy2 ); } if ( cp.n < g.maxiter ) { g.nMax = max( cp.n, g.nMax ); } gdImageSetPixel( im, X, Y, getfColorDBL( g, cp ) ); } } } return g; } Parameters msetLDBL( Parameters P ) { Parameters g = P; AxesLDBL ctaxes = ct_gain_axesLDBL( g.centerX, g.centerY, g.diameter, g.height, g.width ); im = gdImageCreateTrueColor( g.width, g.height ); #pragma omp parallel shared(im) { ColorLDBL cp; #pragma omp for schedule(dynamic) for ( int Y = 0; Y < g.height; Y++ ) { cp.Cy = ctaxes.ymax - Y * ctaxes.ctyfactor; for ( int X = 0; X < g.width; X++ ) { cp.Cx = ctaxes.xmin + X * ctaxes.ctxfactor; cp.Zx = 0.0; cp.Zy = 0.0; cp.colorPoly = 0; cp.Exps = 0; long double Zx2; long double Zy2; for ( cp.n = 0; cp.n < g.maxiter; cp.n++ ) { Zx2 = cp.Zx * cp.Zx; Zy2 = cp.Zy * cp.Zy; if ( ( Zx2 + Zy2 ) > g.escape ) break; cp.Zy = 2 * cp.Zx * cp.Zy + cp.Cy; cp.Zx = Zx2 - Zy2 + cp.Cx; cp.Exps += exp( Zx2 + Zy2 ); } if ( cp.n < g.maxiter ) { g.nMax = max( cp.n, g.nMax ); } gdImageSetPixel( im, X, Y, getfColorLDBL( g, cp ) ); } } } return g; } Parameters msetFLT128( Parameters P ) { Parameters g = P; AxesFLT128 ctaxes = ct_gain_axesFLT128( g.centerX, g.centerY, g.diameter, g.height, g.width ); im = gdImageCreateTrueColor( g.width, g.height ); #pragma omp parallel shared(im) { Color128 cp; #pragma omp for schedule(dynamic) for ( int Y = 0; Y < g.height; Y++ ) { cp.Cy = ctaxes.ymax - Y * ctaxes.ctyfactor; for ( int X = 0; X < g.width; X++ ) { cp.Cx = ctaxes.xmin + X * ctaxes.ctxfactor; cp.Zx = 0.0; cp.Zy = 0.0; cp.colorPoly = 0; cp.Exps = 0; __float128 Zx2; __float128 Zy2; for ( cp.n = 0; cp.n < g.maxiter; cp.n++ ) { Zx2 = cp.Zx * cp.Zx; Zy2 = cp.Zy * cp.Zy; if ( ( Zx2 + Zy2 ) > g.escape ) break; cp.Zy = 2 * cp.Zx * cp.Zy + cp.Cy; cp.Zx = Zx2 - Zy2 + cp.Cx; cp.Exps += exp( Zx2 + Zy2 ); } if ( cp.n < g.maxiter ) { g.nMax = max( cp.n, g.nMax ); } gdImageSetPixel( im, X, Y, getfColor128( g, cp ) ); } } } return g; } AxesFLT ct_gain_axesFLT( float real, float imag, float diameter, int height, int width ) { float radius = diameter / 2.0; AxesFLT ctaxes = { real - diameter, real + radius, imag - radius, imag + radius, 0.0, 0.0, }; float ctwidth = ctaxes.xmax - ctaxes.xmin; float ctheight = ctaxes.ymax - ctaxes.ymin; float ctdaspect = fabsf( ( float ) height / ( float ) width ); float ctwaspect = fabsf( ctheight / ctwidth ); if ( ctdaspect > ctwaspect ) { float excess = ctheight * ( ctdaspect / ctwaspect - 1 ); ctaxes.ymax += excess / 2; ctaxes.ymin -= excess / 2; } else if ( ctdaspect < ctwaspect ) { float excess = ctwidth 
* ( ctwaspect / ctdaspect - 1 ); ctaxes.xmax += excess / 2; ctaxes.xmin -= excess / 2; } ctwidth = ctaxes.xmax - ctaxes.xmin; ctheight = ctaxes.ymax - ctaxes.ymin; ctaxes.ctxfactor = ctwidth / ( ( width > 1 ) ? ( width - 1 ) : width ); ctaxes.ctyfactor = ctheight / ( ( height > 1 ) ? ( height - 1 ) : height ); return ctaxes; } AxesDBL ct_gain_axesDBL( double real, double imag, double diameter, int height, int width ) { double radius = diameter / 2.0; AxesDBL ctaxes = { real - diameter, real + radius, imag - radius, imag + radius, 0.0, 0.0, }; double ctwidth = ctaxes.xmax - ctaxes.xmin; double ctheight = ctaxes.ymax - ctaxes.ymin; double ctdaspect = fabs( ( double ) height / ( double ) width ); double ctwaspect = fabs( ctheight / ctwidth ); if ( ctdaspect > ctwaspect ) { double excess = ctheight * ( ctdaspect / ctwaspect - 1 ); ctaxes.ymax += excess / 2; ctaxes.ymin -= excess / 2; } else if ( ctdaspect < ctwaspect ) { double excess = ctwidth * ( ctwaspect / ctdaspect - 1 ); ctaxes.xmax += excess / 2; ctaxes.xmin -= excess / 2; } ctwidth = ctaxes.xmax - ctaxes.xmin; ctheight = ctaxes.ymax - ctaxes.ymin; ctaxes.ctxfactor = ctwidth / ( ( width > 1 ) ? ( width - 1 ) : width ); ctaxes.ctyfactor = ctheight / ( ( height > 1 ) ? ( height - 1 ) : height ); return ctaxes; } AxesLDBL ct_gain_axesLDBL( long double real, long double imag, long double diameter, int height, int width ) { long double radius = diameter / 2.0; AxesLDBL ctaxes = { real - diameter, real + radius, imag - radius, imag + radius, 0.0, 0.0, }; long double ctwidth = ctaxes.xmax - ctaxes.xmin; long double ctheight = ctaxes.ymax - ctaxes.ymin; long double ctdaspect = fabsl( ( long double ) height / ( long double ) width ); long double ctwaspect = fabsl( ctheight / ctwidth ); if ( ctdaspect > ctwaspect ) { long double excess = ctheight * ( ctdaspect / ctwaspect - 1 ); ctaxes.ymax += excess / 2; ctaxes.ymin -= excess / 2; } else if ( ctdaspect < ctwaspect ) { long double excess = ctwidth * ( ctwaspect / ctdaspect - 1 ); ctaxes.xmax += excess / 2; ctaxes.xmin -= excess / 2; } ctwidth = ctaxes.xmax - ctaxes.xmin; ctheight = ctaxes.ymax - ctaxes.ymin; ctaxes.ctxfactor = ctwidth / ( ( width > 1 ) ? ( width - 1 ) : width ); ctaxes.ctyfactor = ctheight / ( ( height > 1 ) ? ( height - 1 ) : height ); return ctaxes; } AxesFLT128 ct_gain_axesFLT128( __float128 real, __float128 imag, __float128 diameter, int height, int width ) { __float128 radius = diameter / 2.0; AxesFLT128 ctaxes = { real - diameter, real + radius, imag - radius, imag + radius, 0.0, 0.0, }; __float128 ctwidth = ctaxes.xmax - ctaxes.xmin; __float128 ctheight = ctaxes.ymax - ctaxes.ymin; __float128 ctdaspect = fabsq( ( __float128 ) height / ( __float128 ) width ); __float128 ctwaspect = fabsq( ctheight / ctwidth ); if ( ctdaspect > ctwaspect ) { __float128 excess = ctheight * ( ctdaspect / ctwaspect - 1 ); ctaxes.ymax += excess / 2; ctaxes.ymin -= excess / 2; } else if ( ctdaspect < ctwaspect ) { __float128 excess = ctwidth * ( ctwaspect / ctdaspect - 1 ); ctaxes.xmax += excess / 2; ctaxes.xmin -= excess / 2; } ctwidth = ctaxes.xmax - ctaxes.xmin; ctheight = ctaxes.ymax - ctaxes.ymin; ctaxes.ctxfactor = ctwidth / ( ( width > 1 ) ? ( width - 1 ) : width ); ctaxes.ctyfactor = ctheight / ( ( height > 1 ) ? ( height - 1 ) : height ); return ctaxes; }
outer_profile.h
#include "CSC.h" #include "CSR.h" #include "Triple.h" #include "radix_sort/radix_sort.hpp" // #include "vergesort/vergesort.h" // #include "pdqsort/pdqsort.h" // #include "cpp-TimSort/include/gfx/timsort.hpp" #include "utility.h" #include <algorithm> #include <iostream> #include <fstream> #include <omp.h> #include <unistd.h> #include <cstring> #include<set> using namespace std; static uint32_t ncols_of_A; // static int *rows_to_blockers; // static int *flops_by_row_blockers; static int nrows_per_blocker; #define SIZE 16 #define GFX_TIMSORT_USE_STD_MOVE 1 template <typename IT> IT fast_mod(const IT input, const int ceil) { return input >= ceil ? input % ceil : input; } template <typename IT, typename NT> uint64_t getFlop(const CSC<IT, NT>& A, const CSR<IT, NT>& B) { uint64_t flop = 0; #pragma omp parallel for reduction(+ : flop) for (IT i = 0; i < A.cols; ++i) { IT colnnz = A.colptr[i + 1] - A.colptr[i]; IT rownnz = B.rowptr[i + 1] - B.rowptr[i]; flop += (colnnz * rownnz); } return flop; } template <typename IT> inline IT get_blocker_id(IT rowid, IT* rows_to_blockers, IT num_blockers) { return rows_to_blockers[rowid]; IT cur_blocker_id = 0; for (IT i=0; i<num_blockers; ++i) if (rows_to_blockers[i] <= rowid) cur_blocker_id = i; return cur_blocker_id; } template <typename IT, typename NT> void do_static_symbolic(const CSC<IT, NT>& A, const CSR<IT, NT>& B, IT startIdx, IT endIdx, IT nrows_per_blocker, IT num_blockers, IT* flops_by_row_blockers, IT& total_flops) { #pragma omp parallel for reduction(+ : flops_by_row_blockers[:num_blockers]) for (IT i = startIdx; i < endIdx; ++i) { IT rownnz = B.rowptr[i + 1] - B.rowptr[i]; for (IT j = A.colptr[i]; j < A.colptr[i + 1]; ++j) { uint16_t row_blocker_id = A.rowids[j] / nrows_per_blocker; flops_by_row_blockers[row_blocker_id] += rownnz; } } for (IT i = 0; i < num_blockers; ++i) { total_flops += flops_by_row_blockers[i]; } } template <typename IT, typename NT> void do_symbolic(const CSC<IT, NT>& A, const CSR<IT, NT>& B, IT startIdx, IT endIdx, IT num_blockers, IT* flops_by_rows, IT* rows_to_blockers, IT* flops_by_row_blockers, IT& total_flops) { double avg_volumn = 0.0; double cur_volumn = 0.0; IT cur_blocker_id = 0; // #pragma omp parallel { // #pragma omp for reduction(+ : flops_by_rows[:A.rows]) for (IT i = startIdx; i < endIdx; ++i) { IT rownnz = B.rowptr[i + 1] - B.rowptr[i]; for (IT j = A.colptr[i]; j < A.colptr[i + 1]; ++j) { flops_by_rows[A.rowids[j]] += rownnz; } } #pragma omp parallel for reduction(+ : total_flops) for (IT i = 0; i<A.rows; ++i) total_flops += flops_by_rows[i]; } avg_volumn = total_flops / num_blockers; rows_to_blockers[0] = 0; for (IT i=0; i<A.rows; ++i) { cur_volumn += flops_by_rows[i]; flops_by_row_blockers[cur_blocker_id] = cur_volumn; rows_to_blockers[i] = cur_blocker_id; if (cur_volumn > avg_volumn) { cur_blocker_id ++; cur_volumn = 0; } } // for (IT i = 0 ; i< num_blockers; ++i) // cout << "BlockerId = " << i << " RowId = " << rows_to_blockers[i] << endl; } template <typename IT, typename NT> int64_t getReqMemory(const CSC<IT, NT>& A, const CSR<IT, NT>& B) { uint64_t flop = getFlop(A, B); return flop * sizeof(int64_t); } struct ExtractKey { inline int64_t operator()(tuple<int32_t, int32_t, double> tup) { int64_t res = std::get<0>(tup); res = (res << 32); res = res | (int64_t)(uint32_t) std::get<1>(tup); return res; } }; struct ExtractKey2 { inline uint32_t operator()(tuple<int32_t, int32_t, double> tup) { return ((std::get<0>(tup) % nrows_per_blocker) << 20 | (uint32_t) std::get<1>(tup)); // return (std::get<0>(tup) << 
16) | ((uint32_t) std::get<1>(tup)); // return ((std::get<0>(tup) % flops_by_row_blockers[rows_to_blockers[std::get<0>(tup)]] << 16) | ((uint32_t) std::get<1>(tup))); } }; // struct CompareKey2 // { // inline bool operator()(tuple<int32_t, int32_t, double> tup1, tuple<int32_t, int32_t, double> tup2) // { // return ((std::get<0>(tup1) % flops_by_row_blockers[rows_to_blockers[std::get<0>(tup1)]] << 16) | ((uint32_t) std::get<1>(tup1))) < ((std::get<0>(tup2) % flops_by_row_blockers[rows_to_blockers[std::get<0>(tup2)]] << 16) | ((uint32_t) std::get<1>(tup2))); // } // }; struct CompareTuple { inline bool operator()(tuple<int32_t, int32_t, double> t1, tuple<int32_t, int32_t, double> t2) { return (std::get<0>(t1) << 20 | (uint32_t) std::get<1>(t1)) < (std::get<0>(t2) << 20 | (uint32_t) std::get<1>(t2)); // if (std::get<0>(t1) < std::get<0>(t2)) // return true; // else if (std::get<0>(t1) == std::get<0>(t2) && std::get<1>(t1) < std::get<1>(t2)) // return true; // return false; } // if (std::get<1>(t1) != std::get<1>(t2)) // return false; // if (std::get<0>(t1) != std::get<0>(t2)) // return false; // return true; }; template <typename IT, typename NT> inline bool isTupleEqual (tuple<IT, IT, NT> t1, tuple<IT, IT, NT> t2) { return op(t1) == op(t2); // if (std::get<1>(t1) != std::get<1>(t2)) // return false; // if (std::get<0>(t1) != std::get<0>(t2)) // return false; // return true; } template <typename IT, typename NT> void doRadixSort(tuple<IT, IT, NT>* begin, tuple<IT, IT, NT>* end, tuple<IT, IT, NT>* buffer) { radix_sort(begin, end, buffer, ExtractKey2()); // vergesort::vergesort(begin, end, CompareTuple()); // pdqsort(begin, end, CompareTuple()); // gfx::timsort(begin, end, CompareTuple()); // sort(begin, end, compareTuple<IT, NT>); } template <typename IT, typename NT> IT doMerge(tuple<IT, IT, NT>* vec, IT length) { if (length == 0) return 0; auto op = ExtractKey(); IT i = 0; IT j = 1; while (i < length && j < length) { if (j < length && op(vec[i]) == op(vec[j])) std::get<2>(vec[i]) += std::get<2>(vec[j]); else { ++i; std::get<0>(vec[i]) = std::get<0>(vec[j]); std::get<1>(vec[i]) = std::get<1>(vec[j]); std::get<2>(vec[i]) = std::get<2>(vec[j]); // vec[++i] = std::move(vec[j]); } ++j; } return i + 1; } template <typename IT> void initializeBlockerBoundary(IT* nums_per_col_blocker, IT num_blockers, IT* blocker_begin_ptr, IT* blocker_end_ptr) { blocker_begin_ptr[0] = 0; blocker_end_ptr[0] = 0; for (IT blocker_index = 1; blocker_index < num_blockers; ++blocker_index) { blocker_begin_ptr[blocker_index] = blocker_begin_ptr[blocker_index - 1] + nums_per_col_blocker[blocker_index - 1]; blocker_end_ptr[blocker_index] = blocker_begin_ptr[blocker_index]; } } template <typename IT, typename NT> void OuterSpGEMM_stage(const CSC<IT, NT>& A, const CSR<IT, NT>& B, IT startIdx, IT endIdx, CSR<IT, NT>& C, \ double &t_symbolic, double &t_pb1, double &t_sort, double &t_merge, double &t_convert, double &t_alloc, double &t_free, \ int nblockers, int nblockchars, double &ttflops) { double t1, t2, t3; t1 = omp_get_wtime(); typedef tuple<IT, IT, NT> TripleNode; const IT nthreads = omp_get_max_threads(); const IT num_blockers = nblockers; const IT block_width = nblockchars; ncols_of_A = A.cols; nrows_per_blocker = A.rows <= num_blockers * 64 ? 
64 : (A.rows + num_blockers - 1) / num_blockers; IT total_flop = 0; IT *flops_by_row_blockers = my_malloc<IT>(num_blockers); IT *flops_by_rows = my_malloc<IT>(A.rows); IT *nnz_by_row = my_malloc<IT>(A.rows); // rows_to_blockers = static_cast<IT*>(::operator new(sizeof(IT) * A.rows)); t_alloc += (omp_get_wtime() - t1); t1 = omp_get_wtime(); // do_symbolic(A, B, 0, A.rows, num_blockers, flops_by_rows, rows_to_blockers, flops_by_row_blockers, total_flop); do_static_symbolic(A, B, 0, A.cols, nrows_per_blocker, num_blockers, flops_by_row_blockers, total_flop); t_symbolic += (omp_get_wtime() - t1); ttflops = total_flop; // std::ofstream fd; // fd.open("flop_by_row_blockers.csv"); // for (int i=0; i<num_blockers; ++ i) // fd << i << " " << flops_by_row_blockers[i] << endl; t1 = omp_get_wtime(); IT *global_blocker_counters = my_malloc<IT>(num_blockers); TripleNode **global_blockers = my_malloc<TripleNode*>(num_blockers); IT **local_blocker_counters = my_malloc<IT*>(nthreads); TripleNode **local_blockers = my_malloc<TripleNode*>(nthreads); TripleNode **sorting_buffer = my_malloc<TripleNode*>(nthreads); IT *nnz_per_row_blocker = my_malloc<IT>(num_blockers); IT max_flops_in_row_blockers = *std::max_element(flops_by_row_blockers, flops_by_row_blockers + num_blockers); #pragma omp parallel for for (IT blocker_id=0; blocker_id<num_blockers; ++blocker_id) // global_blockers[blocker_id] = my_malloc<TripleNode>(flops_by_row_blockers[blocker_id]); global_blockers[blocker_id] = static_cast<TripleNode*>(::operator new(SIZE * flops_by_row_blockers[blocker_id])); uint64_t avg_flops_in_row_blockers = 0; for (IT i = 0; i < num_blockers; ++i) { avg_flops_in_row_blockers += flops_by_row_blockers[i]; } // cout << "avg_flops_in_row_blockers = " << avg_flops_in_row_blockers / num_blockers << " max_flops_in_row_blockers = " << max_flops_in_row_blockers << endl; #pragma omp parallel { IT thread_id = omp_get_thread_num(); // local_blockers[thread_id] = my_malloc<TripleNode>(num_blockers * block_width); local_blockers[thread_id] = static_cast<TripleNode*>(::operator new(SIZE * num_blockers * block_width)); local_blocker_counters[thread_id] = my_malloc<IT>(num_blockers); sorting_buffer[thread_id] = static_cast<TripleNode*>(::operator new(SIZE * max_flops_in_row_blockers)); } t_alloc += (omp_get_wtime() - t1); t1 = omp_get_wtime(); #pragma omp parallel { IT thread_id = omp_get_thread_num(); IT row_blocker_id; TripleNode *begin_local_blockers, *cur_local_blockers, *cur_global_blockers; // computing phase #pragma omp for nowait for (IT idx = startIdx; idx < endIdx; ++idx) { for (IT j = A.colptr[idx]; j < A.colptr[idx + 1]; ++j) // ncols(A) * 4 { row_blocker_id = A.rowids[j] / nrows_per_blocker; // row_blocker_id = get_blocker_id(A.rowids[j], rows_to_blockers, num_blockers); // cout << "RowId = " << A.rowids[j] << " BlockerId = " << row_blocker_id << endl; begin_local_blockers = local_blockers[thread_id] + row_blocker_id * block_width; cur_local_blockers = begin_local_blockers + local_blocker_counters[thread_id][row_blocker_id]; for (IT k = B.rowptr[idx]; k < B.rowptr[idx + 1]; ++k) // nrows(B) * 4 { // *cur_local_blockers = TripleNode(A.rowids[j], B.colids[k], A.values[j] * B.values[k]); std::get<0>(*cur_local_blockers) = A.rowids[j]; std::get<1>(*cur_local_blockers) = B.colids[k]; std::get<2>(*cur_local_blockers) = A.values[j] * B.values[k]; cur_local_blockers++; if (cur_local_blockers == begin_local_blockers + block_width) // flop * 16 { // cur_global_blockers = global_blockers[row_blocker_id] + 
fetch_and_add(global_blocker_counters[row_blocker_id], block_width); // std::memcpy( // cur_global_blockers, // begin_local_blockers, // block_width * SIZE // ); // for (IT offset=0; offset<block_width; ++offset) // { // std::get<0>(cur_global_blockers[offset]) = std::get<0>(begin_local_blockers[offset]); // std::get<1>(cur_global_blockers[offset]) = std::get<1>(begin_local_blockers[offset]); // std::get<2>(cur_global_blockers[offset]) = std::get<2>(begin_local_blockers[offset]); // // cur_global_blockers[offset] = begin_local_blockers[offset]; // } std::memcpy( global_blockers[row_blocker_id] + __sync_fetch_and_add(&global_blocker_counters[row_blocker_id], block_width), begin_local_blockers, block_width * SIZE ); cur_local_blockers = begin_local_blockers; } } local_blocker_counters[thread_id][row_blocker_id] = cur_local_blockers - begin_local_blockers; } } for (IT row_blocker_id = 0; row_blocker_id < num_blockers; row_blocker_id++) { // cur_global_blockers = global_blockers[row_blocker_id] + fetch_and_add(global_blocker_counters[row_blocker_id], local_blocker_counters[thread_id][row_blocker_id]); // for (IT offset=0; offset<local_blocker_counters[thread_id][row_blocker_id]; ++offset) // { // std::get<0>(cur_global_blockers[offset]) = std::get<0>(local_blockers[thread_id][row_blocker_id * block_width + offset]); // std::get<1>(cur_global_blockers[offset]) = std::get<1>(local_blockers[thread_id][row_blocker_id * block_width + offset]); // std::get<2>(cur_global_blockers[offset]) = std::get<2>(local_blockers[thread_id][row_blocker_id * block_width + offset]); // // cur_global_blockers[offset] = local_blockers[thread_id][row_blocker_id * block_width + offset]; // } std::memcpy( global_blockers[row_blocker_id] + __sync_fetch_and_add(&global_blocker_counters[row_blocker_id], local_blocker_counters[thread_id][row_blocker_id]), local_blockers[thread_id] + row_blocker_id * block_width, local_blocker_counters[thread_id][row_blocker_id] * SIZE ); local_blocker_counters[thread_id][row_blocker_id] = 0; } } t_pb1 += (omp_get_wtime() - t1); t1 = omp_get_wtime(); #pragma omp parallel { IT thread_id = omp_get_thread_num(); #pragma omp for for (IT row_blocker_id = 0; row_blocker_id < num_blockers; ++row_blocker_id) { doRadixSort(global_blockers[row_blocker_id], global_blockers[row_blocker_id] + global_blocker_counters[row_blocker_id], sorting_buffer[thread_id]); } } t_sort += (omp_get_wtime() - t1); t1 = omp_get_wtime(); #pragma omp parallel for for (IT row_blocker_id = 0; row_blocker_id < num_blockers; ++row_blocker_id) nnz_per_row_blocker[row_blocker_id] += doMerge(global_blockers[row_blocker_id], global_blocker_counters[row_blocker_id]); t_merge += (omp_get_wtime() - t1); IT *cumulative_row_indices = my_malloc<IT>(num_blockers + 1); scan(nnz_per_row_blocker, cumulative_row_indices, (IT)(num_blockers) + 1); IT total_nnz = cumulative_row_indices[num_blockers]; if (C.isEmpty()) { C.make_empty(); } C.rows = A.rows; C.cols = B.cols; C.nnz = total_nnz; C.colids = static_cast<IT*>(::operator new(sizeof(IT[total_nnz]))); C.rowptr = static_cast<IT*>(::operator new(sizeof(IT[C.rows+1]))); C.values = static_cast<NT*>(::operator new(sizeof(NT[total_nnz]))); C.rowptr[0] = 0; t1 = omp_get_wtime(); #pragma omp parallel for for (IT row_blocker_id = 0; row_blocker_id < num_blockers; ++row_blocker_id) { IT base = cumulative_row_indices[row_blocker_id]; // TripleNode* this_blocker = global_blockers[row_blocker_id]; // auto space_addr = global_blockers[row_blocker_id]; for (IT i = 0; i < 
nnz_per_row_blocker[row_blocker_id]; ++i) { ++nnz_by_row[std::get<0>(global_blockers[row_blocker_id][i])]; C.colids[base + i] = std::get<1>(global_blockers[row_blocker_id][i]); C.values[base + i] = std::get<2>(global_blockers[row_blocker_id][i]); // ++nnz_by_row[std::get<0>(this_blocker[i])]; // C.colids[base+i] = std::get<1>(this_blocker[i]); // C.values[base+i] = std::get<2>(this_blocker[i]); } } t_convert += (omp_get_wtime() - t1); scan(nnz_by_row, C.rowptr, C.rows + 1); t1 = omp_get_wtime(); my_free<IT>(flops_by_row_blockers); my_free<IT>(nnz_by_row); my_free<IT>(nnz_per_row_blocker); my_free<IT>(cumulative_row_indices); for (IT row_blocker_id = 0; row_blocker_id < num_blockers; ++row_blocker_id) { my_free<TripleNode>(global_blockers[row_blocker_id]); } my_free<TripleNode*>(global_blockers); for (IT thread_id=0; thread_id<nthreads; ++thread_id) { my_free<TripleNode>(local_blockers[thread_id]); my_free<IT>(local_blocker_counters[thread_id]); } my_free<TripleNode*>(local_blockers); my_free<IT*>(local_blocker_counters); t_free += (omp_get_wtime() - t1); } template <typename IT, typename NT> void OuterSpGEMM(const CSC<IT, NT>& A, const CSR<IT, NT>& B, CSR<IT, NT>& C, int nblockers, int nblockchars) { // cout << "nblockers = " << nblockers << " nblockchars = " << nblockchars << endl; double t_symbolic = 0; double t_pb1 = 0; double t_pb2 = 0; double t_sort = 0; double t_merge = 0; double t_convert = 0; double t_alloc = 0; double t_free = 0; double ttflops = 0; int niter = 5; for (int i = 0; i < niter; ++i) { OuterSpGEMM_stage(A, B, 0, A.cols, C, t_symbolic, t_pb1, t_sort, t_merge, t_convert, t_alloc, t_free, nblockers, nblockchars, ttflops); // cout << "Iter " << i << " Completed!" << endl; } cout << "symbolic took " << t_symbolic / niter * 1000 << endl; cout << "PB1 took " << t_pb1 / niter * 1000 << endl; cout << "Sort took " << t_sort / niter * 1000 << endl; cout << "Merge took " << t_merge / niter * 1000 << endl; cout << "Convert took " << t_convert / niter * 1000 << endl; cout << "Alloc took " << t_alloc / niter * 1000 << endl; cout << "Free took " << t_free / niter << endl << endl; double FLOPS_pb1 = ttflops / (1000000000 * (t_pb1 / niter)); double bytes_pb1 = (A.nnz + B.nnz) * (sizeof(IT) + sizeof(NT)) + (A.cols + B.rows) * sizeof(IT) + 2 * ttflops * (2 * sizeof(IT) + sizeof(NT)); double BW_pb1 = bytes_pb1 / (1000000000 * (t_pb1 / niter)); double BW_sort = (9.0 * 16 * ttflops) / (1000000000 * (t_sort / niter)); cout << "PB Bandwidth = " << BW_pb1 << " GB/s" << " GFLOPS = " << FLOPS_pb1 << " Data = " << bytes_pb1 / 1000000000 << " Total flops = " << ttflops << endl; cout << "Sort Bandwidth = " << BW_sort << " GB/s" << endl; }
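OuterSpGEMM_stage above expands A*B as outer products into per-row-blocker buffers, radix-sorts each buffer on a packed key ((row % nrows_per_blocker) << 20 | col, which presumes column indices fit in 20 bits), and then doMerge folds adjacent duplicates by summing their values. A minimal sketch of that pack-sort-merge core, with qsort standing in for the radix sort; the names here are illustrative, not the corpus functions:

/* Pack each (row, col, val) partial product into one integer key so
   duplicates become adjacent after sorting, then fold equal keys in a
   single pass. Hypothetical helpers, not the file's code path. */
#include <stdint.h>
#include <stdlib.h>

typedef struct { int32_t row, col; double val; } Triple;

static uint64_t pack_key(const Triple *t)
{
    /* 64-bit key: row in the high 32 bits, column in the low 32 bits. */
    return ((uint64_t)(uint32_t)t->row << 32) | (uint64_t)(uint32_t)t->col;
}

static int cmp_triple(const void *a, const void *b)
{
    uint64_t ka = pack_key((const Triple *)a);
    uint64_t kb = pack_key((const Triple *)b);
    return (ka > kb) - (ka < kb);
}

/* Sort by packed key, then fold duplicates in place; returns new length. */
static size_t sort_and_merge(Triple *v, size_t n)
{
    if (n == 0) return 0;
    qsort(v, n, sizeof *v, cmp_triple);
    size_t i = 0;
    for (size_t j = 1; j < n; j++) {
        if (pack_key(&v[i]) == pack_key(&v[j]))
            v[i].val += v[j].val;          /* duplicate: accumulate */
        else
            v[++i] = v[j];                 /* new key: keep it */
    }
    return i + 1;
}

One bug worth noting in the header: isTupleEqual calls an op that is not defined in its scope, so it would fail to compile if instantiated; the live merge path (doMerge) constructs its own ExtractKey instead and never uses it.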
bml_add_ellsort_typed.c
#include "../../macros.h" #include "../../typed.h" #include "../bml_add.h" #include "../bml_allocate.h" #include "../bml_parallel.h" #include "../bml_types.h" #include "bml_add_ellsort.h" #include "bml_allocate_ellsort.h" #include "bml_types_ellsort.h" #include <complex.h> #include <math.h> #include <stdlib.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif /** Matrix addition. * * \f$ A = \alpha A + \beta B \f$ * * \ingroup add_group * * \param A Matrix A * \param B Matrix B * \param alpha Scalar factor multiplied by A * \param beta Scalar factor multiplied by B * \param threshold Threshold for matrix addition */ void TYPED_FUNC( bml_add_ellsort) ( bml_matrix_ellsort_t * A, bml_matrix_ellsort_t * B, double alpha, double beta, double threshold) { int N = A->N; int A_M = A->M; int B_M = B->M; int *A_nnz = A->nnz; int *A_index = A->index; int *A_localRowMin = A->domain->localRowMin; int *A_localRowMax = A->domain->localRowMax; int *B_nnz = B->nnz; int *B_index = B->index; REAL_T *A_value = (REAL_T *) A->value; REAL_T *B_value = (REAL_T *) B->value; int myRank = bml_getMyRank(); #if !(defined(__IBMC__) || defined(__ibmxl__)) int ix[N], jx[N]; REAL_T x[N]; memset(ix, 0, N * sizeof(int)); memset(jx, 0, N * sizeof(int)); memset(x, 0.0, N * sizeof(REAL_T)); #endif #if defined(__IBMC__) || defined(__ibmxl__) #pragma omp parallel for \ shared(N, A_M, B_M, myRank) \ shared(A_index, A_value, A_nnz) \ shared(A_localRowMin, A_localRowMax) \ shared(B_index, B_value, B_nnz) #else #pragma omp parallel for \ shared(N, A_M, B_M, myRank) \ shared(A_index, A_value, A_nnz) \ shared(A_localRowMin, A_localRowMax) \ shared(B_index, B_value, B_nnz) \ firstprivate(ix, jx, x) #endif //for (int i = 0; i < N; i++) for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++) { #if defined(__IBMC__) || defined(__ibmxl__) int ix[N], jx[N]; REAL_T x[N]; memset(ix, 0, N * sizeof(int)); #endif int l = 0; if (alpha > (double) 0.0 || alpha < (double) 0.0) for (int jp = 0; jp < A_nnz[i]; jp++) { int k = A_index[ROWMAJOR(i, jp, N, A_M)]; if (ix[k] == 0) { x[k] = 0.0; ix[k] = i + 1; //A_index[ROWMAJOR(i, l, N, A_M)] = k; jx[l] = k; l++; } x[k] = x[k] + alpha * A_value[ROWMAJOR(i, jp, N, A_M)]; } if (beta > (double) 0.0 || beta < (double) 0.0) for (int jp = 0; jp < B_nnz[i]; jp++) { int k = B_index[ROWMAJOR(i, jp, N, B_M)]; if (ix[k] == 0) { x[k] = 0.0; ix[k] = i + 1; //A_index[ROWMAJOR(i, l, N, A_M)] = k; jx[l] = k; l++; } x[k] = x[k] + beta * B_value[ROWMAJOR(i, jp, N, B_M)]; } A_nnz[i] = l; int ll = 0; for (int jp = 0; jp < l; jp++) { int jind = jx[jp]; //REAL_T xTmp = x[A_index[ROWMAJOR(i, jp, N, A_M)]]; REAL_T xTmp = x[jind]; if (is_above_threshold(xTmp, threshold)) { A_value[ROWMAJOR(i, ll, N, A_M)] = xTmp; A_index[ROWMAJOR(i, ll, N, A_M)] = jind; ll++; } x[jind] = 0.0; ix[jind] = 0; } A_nnz[i] = ll; } } /** Matrix addition. 
* * \f$ A = \alpha A + \beta B \f$ * * \ingroup add_group * * \param A Matrix A * \param B Matrix B * \param alpha Scalar factor multiplied by A * \param beta Scalar factor multiplied by B * \param threshold Threshold for matrix addition */ double TYPED_FUNC( bml_add_norm_ellsort) ( bml_matrix_ellsort_t * A, bml_matrix_ellsort_t * B, double alpha, double beta, double threshold) { int N = A->N; int A_M = A->M; int B_M = B->M; int *A_nnz = A->nnz; int *A_index = A->index; int *A_localRowMin = A->domain->localRowMin; int *A_localRowMax = A->domain->localRowMax; int *B_nnz = B->nnz; int *B_index = B->index; REAL_T *A_value = (REAL_T *) A->value; REAL_T *B_value = (REAL_T *) B->value; double trnorm = 0.0; int myRank = bml_getMyRank(); #if !(defined(__IBMC__) || defined(__ibmxl__)) int ix[N], jx[N]; REAL_T x[N]; REAL_T y[N]; memset(ix, 0, N * sizeof(int)); memset(jx, 0, N * sizeof(int)); memset(x, 0.0, N * sizeof(REAL_T)); memset(y, 0.0, N * sizeof(REAL_T)); #endif #if defined(__IBMC__) || defined(__ibmxl__) #pragma omp parallel for \ shared(N, A_M, B_M, myRank) \ shared(A_index, A_value, A_nnz) \ shared(A_localRowMin, A_localRowMax) \ shared(B_index, B_value, B_nnz) \ reduction(+:trnorm) #else #pragma omp parallel for \ shared(N, A_M, B_M, myRank) \ shared(A_index, A_value, A_nnz) \ shared(A_localRowMin, A_localRowMax) \ shared(B_index, B_value, B_nnz) \ firstprivate(ix, jx, x, y) \ reduction(+:trnorm) #endif //for (int i = 0; i < N; i++) for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++) { #if defined(__IBMC__) || defined(__ibmxl__) int ix[N], jx[N]; REAL_T x[N]; REAL_T y[N]; memset(ix, 0, N * sizeof(int)); #endif int l = 0; for (int jp = 0; jp < A_nnz[i]; jp++) { int ind = ROWMAJOR(i, jp, N, A_M); int k = A_index[ind]; if (ix[k] == 0) { x[k] = 0.0; ix[k] = i + 1; y[k] = 0.0; //A_index[ROWMAJOR(i, l, N, A_M)] = k; jx[l] = k; l++; } x[k] = x[k] + alpha * A_value[ind]; y[k] = y[k] + A_value[ind]; } for (int jp = 0; jp < B_nnz[i]; jp++) { int ind = ROWMAJOR(i, jp, N, B_M); int k = B_index[ind]; if (ix[k] == 0) { x[k] = 0.0; ix[k] = i + 1; y[k] = 0.0; //A_index[ROWMAJOR(i, l, N, A_M)] = k; jx[l] = k; l++; } x[k] = x[k] + beta * B_value[ind]; y[k] = y[k] - B_value[ind]; } A_nnz[i] = l; int ll = 0; for (int jp = 0; jp < l; jp++) { int jind = jx[jp]; REAL_T xTmp = x[jind]; trnorm += y[jind] * y[jind]; if (is_above_threshold(xTmp, threshold)) { A_value[ROWMAJOR(i, ll, N, A_M)] = xTmp; A_index[ROWMAJOR(i, ll, N, A_M)] = jind; ll++; } x[jind] = 0.0; ix[jind] = 0; y[jind] = 0.0; } A_nnz[i] = ll; } return trnorm; } /** Matrix addition. * * A = A + beta * I * * \ingroup add_group * * \param A Matrix A * \param beta Scalar factor multiplied by I * \param threshold Threshold for matrix addition */ void TYPED_FUNC( bml_add_identity_ellsort) ( bml_matrix_ellsort_t * A, double beta, double threshold) { REAL_T alpha = (REAL_T) 1.0; bml_matrix_ellsort_t *Id = TYPED_FUNC(bml_identity_matrix_ellsort) (A->N, A->M, A->distribution_mode); TYPED_FUNC(bml_add_ellsort) (A, Id, alpha, beta, threshold); bml_deallocate_ellsort(Id); } /** Matrix addition. 
* * A = alpha * A + beta * I * * \ingroup add_group * * \param A Matrix A * \param alpha Scalar factor multiplied by A * \param beta Scalar factor multiplied by I * \param threshold Threshold for matrix addition */ void TYPED_FUNC( bml_scale_add_identity_ellsort) ( bml_matrix_ellsort_t * A, double alpha, double beta, double threshold) { bml_matrix_ellsort_t *Id = TYPED_FUNC(bml_identity_matrix_ellsort) (A->N, A->M, A->distribution_mode); TYPED_FUNC(bml_add_ellsort) (A, Id, alpha, beta, threshold); bml_deallocate_ellsort(Id); }
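bml_add_ellsort and bml_add_norm_ellsort above both rely on a per-row sparse accumulator: a dense value buffer x[], an occupancy stamp ix[], and a list jx[] of touched columns, so only the entries a row actually used get reset afterwards. A minimal sketch of that scatter/gather pattern for one row, with plain index/value arrays standing in for the ELL layout; sparse_row_add is a hypothetical name:

/* Scatter alpha*A_row and beta*B_row into a dense buffer, remember the
   touched columns, then gather back only entries above the threshold.
   x, ix, jx are caller-provided buffers of length N (columns), zeroed
   before first use. Hypothetical helper, not the file's code path. */
#include <math.h>

static int sparse_row_add(int n_a, const int *cols_a, const double *vals_a,
                          int n_b, const int *cols_b, const double *vals_b,
                          double alpha, double beta, double threshold,
                          int *out_cols, double *out_vals,
                          double *x, int *ix, int *jx)
{
    int l = 0;
    for (int j = 0; j < n_a; j++) {            /* scatter alpha * A */
        int k = cols_a[j];
        if (!ix[k]) { x[k] = 0.0; ix[k] = 1; jx[l++] = k; }
        x[k] += alpha * vals_a[j];
    }
    for (int j = 0; j < n_b; j++) {            /* scatter beta * B */
        int k = cols_b[j];
        if (!ix[k]) { x[k] = 0.0; ix[k] = 1; jx[l++] = k; }
        x[k] += beta * vals_b[j];
    }
    int ll = 0;
    for (int j = 0; j < l; j++) {              /* gather + threshold */
        int k = jx[j];
        if (fabs(x[k]) > threshold) { out_cols[ll] = k; out_vals[ll++] = x[k]; }
        x[k] = 0.0; ix[k] = 0;                 /* reset only touched entries */
    }
    return ll;                                 /* new nnz for this row */
}

Resetting only the touched entries keeps the per-row cost proportional to the row's nnz rather than to N, which is why one buffer per thread can be reused across rows. One small wrinkle in the file: memset(x, 0.0, ...) passes a double where memset takes an int byte value; it still zeroes the buffer only because 0.0 converts to 0.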
qnx_fmt_plug.c
/* * This file is part of John the Ripper password cracker. Written to crack * QNX shadow hash passwords. algorithm is func(salt . pass x rounds+1) * func is md5, sha256 or sha512. rounds defaults to 1000, BUT can be specified * in the hash string and thus is not fixed. * * This software is Copyright (c) 2015 JimF, and it is hereby released to the * general public under the following terms: Redistribution and use in source * and binary forms, with or without modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_qnx; #elif FMT_REGISTERS_H john_register_one(&fmt_qnx); #else #include "arch.h" #undef SIMD_COEF_32 #define FORCE_GENERIC_SHA2 1 #include "sha2.h" #include "md5.h" #define _GNU_SOURCE 1 #include <string.h> #include "params.h" #include "common.h" #include "formats.h" #include "johnswap.h" #include "simd-intrinsics.h" #ifdef _OPENMP #ifndef OMP_SCALE #define OMP_SCALE 8 #endif #include <omp.h> #endif #include "memdbg.h" // NOTE, in SSE mode, even if NOT in OMP, we may need to scale, quite a bit, due to needing // to 'group' passwords based upon length of password. #ifdef SIMD_COEF_32 #ifdef _OPENMP #define SIMD_COEF_SCALE (128/SIMD_COEF_32) #else #define SIMD_COEF_SCALE (256/SIMD_COEF_32) #endif #else #define SIMD_COEF_SCALE 1 #endif #define FORMAT_LABEL "qnx" #ifdef SIMD_COEF_32 #define ALGORITHM_NAME SHA256_ALGORITHM_NAME #else #define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB #endif #define PLAINTEXT_LENGTH 48 #define SALT_SIZE sizeof(struct qnx_saltstruct) #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256) #define MAX_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #define __QNX_CREATE_PROPER_TESTS_ARRAY__ #include "qnx_common.h" static int (*saved_len); static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; #ifdef SIMD_COEF_32 static int *(sk_by_len[PLAINTEXT_LENGTH+1]); static int sk_by_lens[PLAINTEXT_LENGTH+1]; #endif static struct qnx_saltstruct { unsigned int len; unsigned int type; // 5 for md5, 256 for sha256, 512 for sha512 unsigned int rounds; unsigned char salt[SALT_LENGTH]; } *cur_salt; static void init(struct fmt_main *self) { int omp_t = 1; int max_crypts; #ifdef _OPENMP omp_t = omp_get_max_threads(); omp_t *= OMP_SCALE; #endif max_crypts = SIMD_COEF_SCALE * omp_t * MAX_KEYS_PER_CRYPT; self->params.max_keys_per_crypt = max_crypts; // we allocate 1 more than needed, and use that 'extra' value as a zero // length PW to fill in the tail groups in MMX mode. 
saved_len = mem_calloc(1 + max_crypts, sizeof(*saved_len)); saved_key = mem_calloc(1 + max_crypts, sizeof(*saved_key)); crypt_out = mem_calloc(1 + max_crypts, sizeof(*crypt_out)); #ifdef SIMD_COEF_32 for (omp_t = 1; omp_t <= PLAINTEXT_LENGTH; ++omp_t) sk_by_len[omp_t] = mem_calloc(1+max_crypts, sizeof(int)); #endif } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); MEM_FREE(saved_len); } static void clear_keys(void) { #ifdef SIMD_COEF_32 memset(sk_by_lens, 0, sizeof(sk_by_lens)); #endif } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_key(char *key, int index) { int len = strlen(key); saved_len[index] = len; if (len > PLAINTEXT_LENGTH) len = saved_len[index] = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, len); saved_key[index][len] = 0; #ifdef SIMD_COEF_32 sk_by_len[len][sk_by_lens[len]++] = index; #endif } static char *get_key(int index) { saved_key[index][saved_len[index]] = 0; return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; int tot_todo = count, inc = 1, *MixOrder = NULL; #ifdef SIMD_COEF_32 int usesse = 0; if (cur_salt->type == 5) { usesse = 1; } #ifdef SIMD_PARA_SHA256 if (cur_salt->type == 256) { usesse = 1; } #endif #ifdef SIMD_PARA_SHA512 if (cur_salt->type == 512) usesse = 1; #endif if (usesse) { int j, k; MixOrder = (int*)mem_calloc((count+PLAINTEXT_LENGTH*MAX_KEYS_PER_CRYPT), sizeof(int)); tot_todo = 0; saved_len[count] = 0; // point all 'tail' MMX buffer elements to this location. for (j = 1; j < PLAINTEXT_LENGTH; ++j) { for (k = 0; k < sk_by_lens[j]; ++k) MixOrder[tot_todo++] = sk_by_len[k]; while (tot_todo % MAX_KEYS_PER_CRYPT) MixOrder[tot_todo++] = count; } } #endif #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < tot_todo; index += inc) { #ifdef SIMD_COEF_32 if (MixOrder) { int len, len_tot=0; switch(cur_salt->type) { case 5: case 256: case 512: } } else #endif { int i, len = saved_len[index]; char *pass = saved_key[index]; switch (cur_salt->type) { case 5: { MD5_CTX ctx; MD5_Init(&ctx); MD5_Update(&ctx, cur_salt->salt, cur_salt->len); for (i = 0; i <= cur_salt->rounds; ++i) MD5_Update(&ctx, pass, len); MD5_Final((unsigned char*)(crypt_out[index]), &ctx); break; } case 256: { SHA256_CTX ctx; SHA256_Init(&ctx); SHA256_Update(&ctx, cur_salt->salt, cur_salt->len); for (i = 0; i <= cur_salt->rounds; ++i) SHA256_Update(&ctx, pass, len); SHA256_Final((unsigned char*)(crypt_out[index]), &ctx); break; } case 512: { SHA512_CTX ctx; SHA512_Init(&ctx); SHA512_Update(&ctx, cur_salt->salt, cur_salt->len); if (len && 128 % len == 0 && cur_salt->len+len*cur_salt->rounds > 256) { // we can optimize this, by filling buffer (after the // first salted buffer), and then simply calling // jtr_sha512_hash_block 'natively' never having to // refill the buffer again. 
int ex; for (i = 0; i <= cur_salt->rounds; ++i) { SHA512_Update(&ctx, pass, len); if (ctx.total > 128+cur_salt->len) break; } ++i; ex = (256-ctx.total)/len; i += ex; ctx.total += ex*len; jtr_sha512_hash_block(&ctx, ctx.buffer, 1); while (i+128/len <= cur_salt->rounds) { ctx.total += 128; jtr_sha512_hash_block(&ctx, ctx.buffer, 1); i += 128/len; } for (;i <= cur_salt->rounds; ++i) ctx.total += len; } else { for (i = 0; i <= cur_salt->rounds; ++i) SHA512_Update(&ctx, pass, len); } ctx.bIsQnxBuggy = 1; SHA512_Final((unsigned char*)(crypt_out[index]), &ctx); break; } default: exit(fprintf(stderr, "Unknown QNX hash type found\n")); } } } MEM_FREE(MixOrder); return count; } static void set_salt(void *salt) { cur_salt = salt; } static void *get_salt(char *ciphertext) { static struct qnx_saltstruct out; char *origptr = strdup(ciphertext), *ct = origptr; memset(&out, 0, sizeof(out)); ct = strtokm(&ct[1], "@"); if (*ct == 'm') out.type = 5; else if (*ct == 's') out.type = 256; else if (*ct == 'S') out.type = 512; if (ct[1] == ',') out.rounds = atoi(&ct[2]); else out.rounds = ROUNDS_DEFAULT; ct = strtokm(NULL, "@"); ct = strtokm(NULL, "@"); out.len = strlen(ct); memcpy(out.salt, ct, out.len); MEM_FREE(origptr); return &out; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { if(cur_salt->type == 5) return !memcmp(binary, crypt_out[index], BINARY_SIZE_MD5); if(cur_salt->type == 256) return !memcmp(binary, crypt_out[index], BINARY_SIZE_SHA256); return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static unsigned int iteration_count(void *salt) { return ((struct qnx_saltstruct *)salt)->rounds; } static unsigned int algorithm_type(void *salt) { return ((struct qnx_saltstruct *)salt)->type; } // Public domain hash function by DJ Bernstein // We are hashing the entire struct static int salt_hash(void *salt) { unsigned char *s = (unsigned char *)salt; unsigned int hash = 5381; unsigned int i; for (i = 0; i < sizeof(struct qnx_saltstruct); i++) hash = ((hash << 5) + hash) ^ s[i]; return hash & (SALT_HASH_SIZE - 1); } struct fmt_main fmt_qnx = { { FORMAT_LABEL, FORMAT_NAME, "QNX " ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", "algorithm (5=md5 256=sha256 512=sha512)", }, { NULL }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { iteration_count, algorithm_type, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
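As the plugin's header comment states, the QNX scheme is func(salt . pass x rounds+1): one digest over the salt followed by rounds+1 copies of the password, with func selected by type (5 = MD5, 256 = SHA-256, 512 = SHA-512). A minimal sketch of the SHA-256 case using the widely available OpenSSL context API; qnx_hash_sha256 is a hypothetical name, not the plugin's code path:

/* One digest over salt || pass || pass || ... with rounds+1 password
   appends, matching the plugin's scalar SHA-256 branch. Hypothetical
   helper built on OpenSSL's SHA-256 API. */
#include <openssl/sha.h>
#include <string.h>

static void qnx_hash_sha256(const unsigned char *salt, size_t salt_len,
                            const char *pass, unsigned rounds,
                            unsigned char out[SHA256_DIGEST_LENGTH])
{
    size_t plen = strlen(pass);
    SHA256_CTX ctx;
    SHA256_Init(&ctx);
    SHA256_Update(&ctx, salt, salt_len);
    for (unsigned i = 0; i <= rounds; i++)   /* rounds+1 appends */
        SHA256_Update(&ctx, pass, plen);
    SHA256_Final(out, &ctx);
}

The plugin's SHA-512 branch additionally exploits the fixed 128-byte block size: once the buffer cycles into a repeating pattern, it re-hashes the same block via jtr_sha512_hash_block instead of re-copying the password, and it sets ctx.bIsQnxBuggy, apparently to reproduce a quirk of QNX's own SHA-512. Note also that in this copy the SIMD branch of crypt_all is a stub (its switch over cur_salt->type has empty cases), so all real work falls through to the scalar path.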
Stmt.h
//===--- Stmt.h - Classes for representing statements -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <string> namespace llvm { class FoldingSetNodeID; } namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class IdentifierInfo; class LabelDecl; class ParmVarDecl; class PrinterHelper; struct PrintingPolicy; class QualType; class RecordDecl; class SourceManager; class StringLiteral; class SwitchStmt; class Token; class VarDecl; //===--------------------------------------------------------------------===// // ExprIterator - Iterators for iterating over Stmt* arrays that contain // only Expr*. This is needed because AST nodes use Stmt* arrays to store // references to children (to be compatible with StmtIterator). //===--------------------------------------------------------------------===// class Stmt; class Expr; class ExprIterator { Stmt** I; public: ExprIterator(Stmt** i) : I(i) {} ExprIterator() : I(nullptr) {} ExprIterator& operator++() { ++I; return *this; } ExprIterator operator-(size_t i) { return I-i; } ExprIterator operator+(size_t i) { return I+i; } Expr* operator[](size_t idx); // FIXME: Verify that this will correctly return a signed distance. signed operator-(const ExprIterator& R) const { return I - R.I; } Expr* operator*() const; Expr* operator->() const; bool operator==(const ExprIterator& R) const { return I == R.I; } bool operator!=(const ExprIterator& R) const { return I != R.I; } bool operator>(const ExprIterator& R) const { return I > R.I; } bool operator>=(const ExprIterator& R) const { return I >= R.I; } }; class ConstExprIterator { const Stmt * const *I; public: ConstExprIterator(const Stmt * const *i) : I(i) {} ConstExprIterator() : I(nullptr) {} ConstExprIterator& operator++() { ++I; return *this; } ConstExprIterator operator+(size_t i) const { return I+i; } ConstExprIterator operator-(size_t i) const { return I-i; } const Expr * operator[](size_t idx) const; signed operator-(const ConstExprIterator& R) const { return I - R.I; } const Expr * operator*() const; const Expr * operator->() const; bool operator==(const ConstExprIterator& R) const { return I == R.I; } bool operator!=(const ConstExprIterator& R) const { return I != R.I; } bool operator>(const ConstExprIterator& R) const { return I > R.I; } bool operator>=(const ConstExprIterator& R) const { return I >= R.I; } }; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
/// class Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: void* operator new(size_t bytes) throw() { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void* data) throw() { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } class StmtBitfields { friend class Stmt; /// \brief The statement class. unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class CompoundStmtBitfields { friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; }; class ExprBitfields { friend class Expr; friend class DeclRefExpr; // computeDependence friend class InitListExpr; // ctor friend class DesignatedInitExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class ASTStmtReader; // deserialization friend class CXXNewExpr; // ctor friend class DependentScopeDeclRefExpr; // ctor friend class CXXConstructExpr; // ctor friend class CallExpr; // ctor friend class OffsetOfExpr; // ctor friend class ObjCMessageExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ShuffleVectorExpr; // ctor friend class ParenListExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class OverloadExpr; // ctor friend class PseudoObjectExpr; // ctor friend class AtomicExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 2; unsigned TypeDependent : 1; unsigned ValueDependent : 1; unsigned InstantiationDependent : 1; unsigned ContainsUnexpandedParameterPack : 1; }; enum { NumExprBits = 16 }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 2; }; enum APFloatSemantics { IEEEhalf, IEEEsingle, IEEEdouble, x87DoubleExtended, IEEEquad, PPCDoubleDouble }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 2; unsigned IsType : 1; // true if operand is a type, false if an expression. 
}; class DeclRefExprBitfields { friend class DeclRefExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingVariableOrCapture : 1; }; class CastExprBitfields { friend class CastExpr; unsigned : NumExprBits; unsigned Kind : 6; unsigned BasePathSize : 32 - 6 - NumExprBits; }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; }; class ExprWithCleanupsBitfields { friend class ExprWithCleanups; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned NumObjects : 32 - NumExprBits; }; class PseudoObjectExprBitfields { friend class PseudoObjectExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; // These don't need to be particularly wide, because they're // strictly limited by the forms of expressions we permit. unsigned NumSubExprs : 8; unsigned ResultIndex : 32 - 8 - NumExprBits; }; class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. This is a temporary marker used by CodeGen. unsigned HadArrayRangeDesignator : 1; }; class TypeTraitExprBitfields { friend class TypeTraitExpr; friend class ASTStmtReader; friend class ASTStmtWriter; unsigned : NumExprBits; /// \brief The kind of type trait, which is a value of a TypeTrait enumerator. unsigned Kind : 8; /// \brief If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// \brief The number of arguments to this type trait. unsigned NumArgs : 32 - 8 - 1 - NumExprBits; }; union { // FIXME: this is wasteful on 64-bit platforms. void *Aligner; StmtBitfields StmtBits; CompoundStmtBitfields CompoundStmtBits; ExprBitfields ExprBits; CharacterLiteralBitfields CharacterLiteralBits; FloatingLiteralBitfields FloatingLiteralBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; DeclRefExprBitfields DeclRefExprBits; CastExprBitfields CastExprBits; CallExprBitfields CallExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; PseudoObjectExprBitfields PseudoObjectExprBits; ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; InitListExprBitfields InitListExprBits; TypeTraitExprBitfields TypeTraitExprBits; }; friend class ASTStmtReader; friend class ASTStmtWriter; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void* operator new(size_t bytes, void* mem) throw() { return mem; } void operator delete(void*, const ASTContext&, unsigned) throw() { } void operator delete(void*, const ASTContext*, unsigned) throw() { } void operator delete(void*, size_t) throw() { } void operator delete(void*, void*) throw() { } public: /// \brief A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell { }; private: /// \brief Whether statistic collection is enabled. 
static bool StatisticsEnabled; protected: /// \brief Construct an empty statement. explicit Stmt(StmtClass SC, EmptyShell) { StmtBits.sClass = SC; if (StatisticsEnabled) Stmt::addStmtClass(SC); } public: Stmt(StmtClass SC) { StmtBits.sClass = SC; if (StatisticsEnabled) Stmt::addStmtClass(SC); } StmtClass getStmtClass() const { return static_cast<StmtClass>(StmtBits.sClass); } const char *getStmtClassName() const; /// SourceLocation tokens are not useful in isolation - they are low level /// value objects created/interpreted by SourceManager. We assume AST /// clients will have a pointer to the respective SourceManager. SourceRange getSourceRange() const LLVM_READONLY; SourceLocation getLocStart() const LLVM_READONLY; SourceLocation getLocEnd() const LLVM_READONLY; // global temp stats (until we have a per-module visitor) static void addStmtClass(const StmtClass s); static void EnableStatistics(); static void PrintStats(); /// \brief Dumps the specified AST fragment and all subtrees to /// \c llvm::errs(). void dump() const; void dump(SourceManager &SM) const; void dump(raw_ostream &OS, SourceManager &SM) const; /// dumpColor - same as dump(), but forces color highlighting. void dumpColor() const; /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST /// back to its original source language syntax. void dumpPretty(const ASTContext &Context) const; void printPretty(raw_ostream &OS, PrinterHelper *Helper, const PrintingPolicy &Policy, unsigned Indentation = 0) const; /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only /// works on systems with GraphViz (Mac OS X) or dot+gv installed. void viewAST() const; /// Skip past any implicit AST nodes which might surround this /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes. Stmt *IgnoreImplicit(); /// \brief Skip no-op (attributed, compound) container stmts and skip captured /// stmt at the top, if \a IgnoreCaptured is true. Stmt *IgnoreContainers(bool IgnoreCaptured = false); const Stmt *stripLabelLikeStatements() const; Stmt *stripLabelLikeStatements() { return const_cast<Stmt*>( const_cast<const Stmt*>(this)->stripLabelLikeStatements()); } /// Child Iterators: All subclasses must implement 'children' /// to permit easy iteration over the substatements/subexpessions of an /// AST node. This permits easy iteration over all nodes in the AST. typedef StmtIterator child_iterator; typedef ConstStmtIterator const_child_iterator; typedef StmtRange child_range; typedef ConstStmtRange const_child_range; child_range children(); const_child_range children() const { return const_cast<Stmt*>(this)->children(); } child_iterator child_begin() { return children().first; } child_iterator child_end() { return children().second; } const_child_iterator child_begin() const { return children().first; } const_child_iterator child_end() const { return children().second; } /// \brief Produce a unique representation of the given statement. /// /// \param ID once the profiling operation is complete, will contain /// the unique representation of the given statement. /// /// \param Context the AST context in which the statement resides /// /// \param Canonical whether the profile should be based on the canonical /// representation of this statement (e.g., where non-type template /// parameters are identified by index/level rather than their /// declaration pointers) or the exact representation of the statement as /// written in the source. 
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, bool Canonical) const; }; /// DeclStmt - Adaptor class for mixing declarations with statements and /// expressions. For example, CompoundStmt mixes statements, expressions /// and declarations (variables, types). Another example is ForStmt, where /// the first statement can be an expression or a declaration. /// class DeclStmt : public Stmt { DeclGroupRef DG; SourceLocation StartLoc, EndLoc; public: DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc) : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {} /// \brief Build an empty declaration statement. explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) { } /// isSingleDecl - This method returns true if this DeclStmt refers /// to a single Decl. bool isSingleDecl() const { return DG.isSingleDecl(); } const Decl *getSingleDecl() const { return DG.getSingleDecl(); } Decl *getSingleDecl() { return DG.getSingleDecl(); } const DeclGroupRef getDeclGroup() const { return DG; } DeclGroupRef getDeclGroup() { return DG; } void setDeclGroup(DeclGroupRef DGR) { DG = DGR; } SourceLocation getStartLoc() const { return StartLoc; } void setStartLoc(SourceLocation L) { StartLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DeclStmtClass; } // Iterators over subexpressions. child_range children() { return child_range(child_iterator(DG.begin(), DG.end()), child_iterator(DG.end(), DG.end())); } typedef DeclGroupRef::iterator decl_iterator; typedef DeclGroupRef::const_iterator const_decl_iterator; typedef llvm::iterator_range<decl_iterator> decl_range; typedef llvm::iterator_range<const_decl_iterator> decl_const_range; decl_range decls() { return decl_range(decl_begin(), decl_end()); } decl_const_range decls() const { return decl_const_range(decl_begin(), decl_end()); } decl_iterator decl_begin() { return DG.begin(); } decl_iterator decl_end() { return DG.end(); } const_decl_iterator decl_begin() const { return DG.begin(); } const_decl_iterator decl_end() const { return DG.end(); } typedef std::reverse_iterator<decl_iterator> reverse_decl_iterator; reverse_decl_iterator decl_rbegin() { return reverse_decl_iterator(decl_end()); } reverse_decl_iterator decl_rend() { return reverse_decl_iterator(decl_begin()); } }; /// NullStmt - This is the null statement ";": C99 6.8.3p3. /// class NullStmt : public Stmt { SourceLocation SemiLoc; /// \brief True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode bool HasLeadingEmptyMacro; public: NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass), SemiLoc(L), HasLeadingEmptyMacro(hasLeadingEmptyMacro) {} /// \brief Build an empty null statement. 
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty), HasLeadingEmptyMacro(false) { } SourceLocation getSemiLoc() const { return SemiLoc; } void setSemiLoc(SourceLocation L) { SemiLoc = L; } bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; } SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == NullStmtClass; } child_range children() { return child_range(); } friend class ASTStmtReader; friend class ASTStmtWriter; }; /// CompoundStmt - This represents a group of statements like { stmt stmt }. /// class CompoundStmt : public Stmt { Stmt** Body; SourceLocation LBraceLoc, RBraceLoc; friend class ASTStmtReader; public: CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts, SourceLocation LB, SourceLocation RB); // \brief Build an empty compound statement with a location. explicit CompoundStmt(SourceLocation Loc) : Stmt(CompoundStmtClass), Body(nullptr), LBraceLoc(Loc), RBraceLoc(Loc) { CompoundStmtBits.NumStmts = 0; } // \brief Build an empty compound statement. explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty), Body(nullptr) { CompoundStmtBits.NumStmts = 0; } void setStmts(const ASTContext &C, Stmt **Stmts, unsigned NumStmts); bool body_empty() const { return CompoundStmtBits.NumStmts == 0; } unsigned size() const { return CompoundStmtBits.NumStmts; } typedef Stmt** body_iterator; typedef llvm::iterator_range<body_iterator> body_range; body_range body() { return body_range(body_begin(), body_end()); } body_iterator body_begin() { return Body; } body_iterator body_end() { return Body + size(); } Stmt *body_back() { return !body_empty() ? Body[size()-1] : nullptr; } void setLastStmt(Stmt *S) { assert(!body_empty() && "setLastStmt"); Body[size()-1] = S; } typedef Stmt* const * const_body_iterator; typedef llvm::iterator_range<const_body_iterator> body_const_range; body_const_range body() const { return body_const_range(body_begin(), body_end()); } const_body_iterator body_begin() const { return Body; } const_body_iterator body_end() const { return Body + size(); } const Stmt *body_back() const { return !body_empty() ? Body[size() - 1] : nullptr; } typedef std::reverse_iterator<body_iterator> reverse_body_iterator; reverse_body_iterator body_rbegin() { return reverse_body_iterator(body_end()); } reverse_body_iterator body_rend() { return reverse_body_iterator(body_begin()); } typedef std::reverse_iterator<const_body_iterator> const_reverse_body_iterator; const_reverse_body_iterator body_rbegin() const { return const_reverse_body_iterator(body_end()); } const_reverse_body_iterator body_rend() const { return const_reverse_body_iterator(body_begin()); } SourceLocation getLocStart() const LLVM_READONLY { return LBraceLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RBraceLoc; } SourceLocation getLBracLoc() const { return LBraceLoc; } SourceLocation getRBracLoc() const { return RBraceLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == CompoundStmtClass; } // Iterators child_range children() { return child_range(Body, Body + CompoundStmtBits.NumStmts); } const_child_range children() const { return child_range(Body, Body + CompoundStmtBits.NumStmts); } }; // SwitchCase is the base class for CaseStmt and DefaultStmt, class SwitchCase : public Stmt { protected: // A pointer to the following CaseStmt or DefaultStmt class, // used by SwitchStmt. 
SwitchCase *NextSwitchCase; SourceLocation KeywordLoc; SourceLocation ColonLoc; SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc) : Stmt(SC), NextSwitchCase(nullptr), KeywordLoc(KWLoc), ColonLoc(ColonLoc) { } SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC), NextSwitchCase(nullptr) {} public: const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; } SwitchCase *getNextSwitchCase() { return NextSwitchCase; } void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; } SourceLocation getKeywordLoc() const { return KeywordLoc; } void setKeywordLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Stmt *getSubStmt(); const Stmt *getSubStmt() const { return const_cast<SwitchCase*>(this)->getSubStmt(); } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY; static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass || T->getStmtClass() == DefaultStmtClass; } }; class CaseStmt : public SwitchCase { enum { LHS, RHS, SUBSTMT, END_EXPR }; Stmt* SubExprs[END_EXPR]; // The expression for the RHS is Non-null for // GNU "case 1 ... 4" extension SourceLocation EllipsisLoc; public: CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc, SourceLocation ellipsisLoc, SourceLocation colonLoc) : SwitchCase(CaseStmtClass, caseLoc, colonLoc) { SubExprs[SUBSTMT] = nullptr; SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs); SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs); EllipsisLoc = ellipsisLoc; } /// \brief Build an empty switch case statement. explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) { } SourceLocation getCaseLoc() const { return KeywordLoc; } void setCaseLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getEllipsisLoc() const { return EllipsisLoc; } void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); } Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); } Stmt *getSubStmt() { return SubExprs[SUBSTMT]; } const Expr *getLHS() const { return reinterpret_cast<const Expr*>(SubExprs[LHS]); } const Expr *getRHS() const { return reinterpret_cast<const Expr*>(SubExprs[RHS]); } const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; } void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; } void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); } void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { // Handle deeply nested case statements with iteration instead of recursion. const CaseStmt *CS = this; while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt())) CS = CS2; return CS->getSubStmt()->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[END_EXPR]); } }; class DefaultStmt : public SwitchCase { Stmt* SubStmt; public: DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {} /// \brief Build an empty default statement. 
explicit DefaultStmt(EmptyShell Empty) : SwitchCase(DefaultStmtClass, Empty) { } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *S) { SubStmt = S; } SourceLocation getDefaultLoc() const { return KeywordLoc; } void setDefaultLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} static bool classof(const Stmt *T) { return T->getStmtClass() == DefaultStmtClass; } // Iterators child_range children() { return child_range(&SubStmt, &SubStmt+1); } }; inline SourceLocation SwitchCase::getLocEnd() const { if (const CaseStmt *CS = dyn_cast<CaseStmt>(this)) return CS->getLocEnd(); return cast<DefaultStmt>(this)->getLocEnd(); } /// LabelStmt - Represents a label, which has a substatement. For example: /// foo: return; /// class LabelStmt : public Stmt { LabelDecl *TheDecl; Stmt *SubStmt; SourceLocation IdentLoc; public: LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt) : Stmt(LabelStmtClass), TheDecl(D), SubStmt(substmt), IdentLoc(IL) { } // \brief Build an empty label statement. explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) { } SourceLocation getIdentLoc() const { return IdentLoc; } LabelDecl *getDecl() const { return TheDecl; } void setDecl(LabelDecl *D) { TheDecl = D; } const char *getName() const; Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setIdentLoc(SourceLocation L) { IdentLoc = L; } void setSubStmt(Stmt *SS) { SubStmt = SS; } SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} child_range children() { return child_range(&SubStmt, &SubStmt+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == LabelStmtClass; } }; /// \brief Represents an attribute applied to a statement. /// /// Represents an attribute applied to a statement. For example: /// [[omp::for(...)]] for (...) { ... } /// class AttributedStmt : public Stmt { Stmt *SubStmt; SourceLocation AttrLoc; unsigned NumAttrs; friend class ASTStmtReader; AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt) : Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc), NumAttrs(Attrs.size()) { memcpy(getAttrArrayPtr(), Attrs.data(), Attrs.size() * sizeof(Attr *)); } explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs) : Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) { memset(getAttrArrayPtr(), 0, NumAttrs * sizeof(Attr *)); } Attr *const *getAttrArrayPtr() const { return reinterpret_cast<Attr *const *>(this + 1); } Attr **getAttrArrayPtr() { return reinterpret_cast<Attr **>(this + 1); } public: static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); // \brief Build an empty attributed statement. 
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs); SourceLocation getAttrLoc() const { return AttrLoc; } ArrayRef<const Attr*> getAttrs() const { return llvm::makeArrayRef(getAttrArrayPtr(), NumAttrs); } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} child_range children() { return child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == AttributedStmtClass; } }; /// IfStmt - This represents an if/then/else. /// class IfStmt : public Stmt { enum { VAR, COND, THEN, ELSE, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation IfLoc; SourceLocation ElseLoc; public: IfStmt(const ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond, Stmt *then, SourceLocation EL = SourceLocation(), Stmt *elsev = nullptr); /// \brief Build an empty if/then/else statement explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "if" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// if (int x = foo()) { /// printf("x is %d", x); /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this IfStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); } const Stmt *getThen() const { return SubExprs[THEN]; } void setThen(Stmt *S) { SubExprs[THEN] = S; } const Stmt *getElse() const { return SubExprs[ELSE]; } void setElse(Stmt *S) { SubExprs[ELSE] = S; } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Stmt *getThen() { return SubExprs[THEN]; } Stmt *getElse() { return SubExprs[ELSE]; } SourceLocation getIfLoc() const { return IfLoc; } void setIfLoc(SourceLocation L) { IfLoc = L; } SourceLocation getElseLoc() const { return ElseLoc; } void setElseLoc(SourceLocation L) { ElseLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; } SourceLocation getLocEnd() const LLVM_READONLY { if (SubExprs[ELSE]) return SubExprs[ELSE]->getLocEnd(); else return SubExprs[THEN]->getLocEnd(); } // Iterators over subexpressions. The iterators will include iterating // over the initialization expression referenced by the condition variable. child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } static bool classof(const Stmt *T) { return T->getStmtClass() == IfStmtClass; } }; /// SwitchStmt - This represents a 'switch' stmt. /// class SwitchStmt : public Stmt { enum { VAR, COND, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // This points to a linked list of case and default statements. SwitchCase *FirstCase; SourceLocation SwitchLoc; /// If the SwitchStmt is a switch on an enum value, this records whether /// all the enum values were covered by CaseStmts. This value is meant to /// be a hint for possible clients. unsigned AllEnumCasesCovered : 1; public: SwitchStmt(const ASTContext &C, VarDecl *Var, Expr *cond); /// \brief Build a empty switch statement. 
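// Since IfStmt::getElse() returns null when no 'else' is present, an
// 'if / else if / ... / else' chain can be walked iteratively.  A sketch
// (hypothetical helper, not part of this header):
//
//   static unsigned countIfChainBranches(const IfStmt *If) {
//     unsigned N = 1;
//     while (const IfStmt *Next = dyn_cast_or_null<IfStmt>(If->getElse())) {
//       ++N;
//       If = Next;
//     }
//     return N; // number of conditional branches in the chain
//   }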
explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "switch" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// switch (int x = foo()) { /// case 0: break; /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this SwitchStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Stmt *getBody() const { return SubExprs[BODY]; } const SwitchCase *getSwitchCaseList() const { return FirstCase; } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); } Stmt *getBody() { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SwitchCase *getSwitchCaseList() { return FirstCase; } /// \brief Set the case list for this switch statement. void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; } SourceLocation getSwitchLoc() const { return SwitchLoc; } void setSwitchLoc(SourceLocation L) { SwitchLoc = L; } void setBody(Stmt *S, SourceLocation SL) { SubExprs[BODY] = S; SwitchLoc = SL; } void addSwitchCase(SwitchCase *SC) { assert(!SC->getNextSwitchCase() && "case/default already added to a switch"); SC->setNextSwitchCase(FirstCase); FirstCase = SC; } /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a /// switch over an enum value then all cases have been explicitly covered. void setAllEnumCasesCovered() { AllEnumCasesCovered = 1; } /// Returns true if the SwitchStmt is a switch of an enum value and all cases /// have been explicitly covered. bool isAllEnumCasesCovered() const { return (bool) AllEnumCasesCovered; } SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY] ? SubExprs[BODY]->getLocEnd() : SubExprs[COND]->getLocEnd(); } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } static bool classof(const Stmt *T) { return T->getStmtClass() == SwitchStmtClass; } }; /// WhileStmt - This represents a 'while' stmt. /// class WhileStmt : public Stmt { enum { VAR, COND, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation WhileLoc; public: WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body, SourceLocation WL); /// \brief Build an empty while statement. explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "while" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// while (int x = random()) { /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this WhileStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. 
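// Because addSwitchCase() pushes each label onto the front of the list,
// traversal with getNextSwitchCase() visits labels in reverse order of
// insertion.  A traversal sketch (hypothetical helper, not part of this
// header):
//
//   static unsigned countCaseLabels(const SwitchStmt *SS) {
//     unsigned N = 0;
//     for (const SwitchCase *SC = SS->getSwitchCaseList(); SC;
//          SC = SC->getNextSwitchCase())
//       ++N;
//     return N; // counts both 'case' and 'default' labels
//   }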
const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == WhileStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// DoStmt - This represents a 'do/while' stmt. /// class DoStmt : public Stmt { enum { BODY, COND, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation DoLoc; SourceLocation WhileLoc; SourceLocation RParenLoc; // Location of final ')' in do stmt condition. public: DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL, SourceLocation RP) : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) { SubExprs[COND] = reinterpret_cast<Stmt*>(cond); SubExprs[BODY] = body; } /// \brief Build an empty do-while statement. explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) { } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getDoLoc() const { return DoLoc; } void setDoLoc(SourceLocation L) { DoLoc = L; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DoStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of /// the init/cond/inc parts of the ForStmt will be null if they were not /// specified in the source. /// class ForStmt : public Stmt { enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt. SourceLocation ForLoc; SourceLocation LParenLoc, RParenLoc; public: ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP); /// \brief Build an empty for statement. explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { } Stmt *getInit() { return SubExprs[INIT]; } /// \brief Retrieve the variable declared in this "for" statement, if any. /// /// In the following example, "y" is the condition variable. /// \code /// for (int x = random(); int y = mangle(x); ++x) { /// // ... 
/// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this ForStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getInit() const { return SubExprs[INIT]; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); } const Stmt *getBody() const { return SubExprs[BODY]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getForLoc() const { return ForLoc; } void setForLoc(SourceLocation L) { ForLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ForStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// GotoStmt - This represents a direct goto. /// class GotoStmt : public Stmt { LabelDecl *Label; SourceLocation GotoLoc; SourceLocation LabelLoc; public: GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL) : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {} /// \brief Build an empty goto statement. explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) { } LabelDecl *getLabel() const { return Label; } void setLabel(LabelDecl *D) { Label = D; } SourceLocation getGotoLoc() const { return GotoLoc; } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getLabelLoc() const { return LabelLoc; } void setLabelLoc(SourceLocation L) { LabelLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GotoStmtClass; } // Iterators child_range children() { return child_range(); } }; /// IndirectGotoStmt - This represents an indirect goto. /// class IndirectGotoStmt : public Stmt { SourceLocation GotoLoc; SourceLocation StarLoc; Stmt *Target; public: IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target) : Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc), Target((Stmt*)target) {} /// \brief Build an empty indirect goto statement. 
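// As noted above, any of a ForStmt's init/cond/inc slots may be null (as in
// 'for (;;)'), so clients must check before dereferencing.  A sketch
// (hypothetical helper, not part of this header):
//
//   static bool hasNoControllingTest(const ForStmt *FS) {
//     return FS->getCond() == nullptr; // e.g. 'for (init;;inc)'
//   }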
explicit IndirectGotoStmt(EmptyShell Empty) : Stmt(IndirectGotoStmtClass, Empty) { } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getGotoLoc() const { return GotoLoc; } void setStarLoc(SourceLocation L) { StarLoc = L; } SourceLocation getStarLoc() const { return StarLoc; } Expr *getTarget() { return reinterpret_cast<Expr*>(Target); } const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);} void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); } /// getConstantTarget - Returns the fixed target of this indirect /// goto, if one exists. LabelDecl *getConstantTarget(); const LabelDecl *getConstantTarget() const { return const_cast<IndirectGotoStmt*>(this)->getConstantTarget(); } SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == IndirectGotoStmtClass; } // Iterators child_range children() { return child_range(&Target, &Target+1); } }; /// ContinueStmt - This represents a continue. /// class ContinueStmt : public Stmt { SourceLocation ContinueLoc; public: ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {} /// \brief Build an empty continue statement. explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) { } SourceLocation getContinueLoc() const { return ContinueLoc; } void setContinueLoc(SourceLocation L) { ContinueLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ContinueStmtClass; } // Iterators child_range children() { return child_range(); } }; /// BreakStmt - This represents a break. /// class BreakStmt : public Stmt { SourceLocation BreakLoc; public: BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) {} /// \brief Build an empty break statement. explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) { } SourceLocation getBreakLoc() const { return BreakLoc; } void setBreakLoc(SourceLocation L) { BreakLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == BreakStmtClass; } // Iterators child_range children() { return child_range(); } }; /// ReturnStmt - This represents a return, optionally of an expression: /// return; /// return 4; /// /// Note that GCC allows return with no argument in a function declared to /// return a value, and it allows returning a value in functions declared to /// return void. We explicitly model this in the AST, which means you can't /// depend on the return type of the function and the presence of an argument. /// class ReturnStmt : public Stmt { Stmt *RetExpr; SourceLocation RetLoc; const VarDecl *NRVOCandidate; public: ReturnStmt(SourceLocation RL) : Stmt(ReturnStmtClass), RetExpr(nullptr), RetLoc(RL), NRVOCandidate(nullptr) {} ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate) : Stmt(ReturnStmtClass), RetExpr((Stmt*) E), RetLoc(RL), NRVOCandidate(NRVOCandidate) {} /// \brief Build an empty return expression. 
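// The operand of a ReturnStmt is optional, so getRetValue() yields null for
// a plain 'return;'.  A sketch (hypothetical helper, not part of this
// header):
//
//   static bool returnsValue(const ReturnStmt *RS) {
//     return RS->getRetValue() != nullptr;
//   }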
explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) { } const Expr *getRetValue() const; Expr *getRetValue(); void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); } SourceLocation getReturnLoc() const { return RetLoc; } void setReturnLoc(SourceLocation L) { RetLoc = L; } /// \brief Retrieve the variable that might be used for the named return /// value optimization. /// /// The optimization itself can only be performed if the variable is /// also marked as an NRVO object. const VarDecl *getNRVOCandidate() const { return NRVOCandidate; } void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; } SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RetExpr ? RetExpr->getLocEnd() : RetLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ReturnStmtClass; } // Iterators child_range children() { if (RetExpr) return child_range(&RetExpr, &RetExpr+1); return child_range(); } }; /// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt. /// class AsmStmt : public Stmt { protected: SourceLocation AsmLoc; /// \brief True if the assembly statement does not have any input or output /// operands. bool IsSimple; /// \brief If true, treat this inline assembly as having side effects. /// This assembly statement should not be optimized, deleted or moved. bool IsVolatile; unsigned NumOutputs; unsigned NumInputs; unsigned NumClobbers; Stmt **Exprs; AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, unsigned numclobbers) : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile), NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) { } friend class ASTStmtReader; public: /// \brief Build an empty inline-assembly statement. explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty), Exprs(nullptr) { } SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); } SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } /// getInputConstraint - Return the specified input constraint. Unlike output /// constraints, these can be empty. 
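// Every output constraint starts with '=' (pure output) or '+' (tied
// input/output).  A counting sketch equivalent in spirit to
// getNumPlusOperands() (hypothetical helper, not part of this header):
//
//   static unsigned countPlusOperands(const AsmStmt *AS) {
//     unsigned N = 0;
//     for (unsigned i = 0, e = AS->getNumOutputs(); i != e; ++i)
//       if (AS->isOutputPlusConstraint(i))
//         ++N;
//     return N;
//   }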
StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. typedef ExprIterator inputs_iterator; typedef ConstExprIterator const_inputs_iterator; typedef llvm::iterator_range<inputs_iterator> inputs_range; typedef llvm::iterator_range<const_inputs_iterator> inputs_const_range; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. typedef ExprIterator outputs_iterator; typedef ConstExprIterator const_outputs_iterator; typedef llvm::iterator_range<outputs_iterator> outputs_range; typedef llvm::iterator_range<const_outputs_iterator> outputs_const_range; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. /// class GCCAsmStmt : public AsmStmt { SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. StringLiteral **Constraints; StringLiteral **Clobbers; IdentifierInfo **Names; friend class ASTStmtReader; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, SourceLocation rparenloc); /// \brief Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty), Constraints(nullptr), Clobbers(nullptr), Names(nullptr) { } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. 
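// The inputs()/outputs() ranges of AsmStmt above support range-based
// iteration over the operand expressions.  A sketch (hypothetical, not part
// of this header), assuming a const AsmStmt *AS:
//
//   for (const Expr *E : AS->outputs())
//     E->dump();
//   for (const Expr *E : AS->inputs())
//     E->dump();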
CharSourceRange Range; public: AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {} AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin, SourceLocation End) : MyKind(Operand), Str(S), OperandNo(OpNo), Range(CharSourceRange::getCharRange(Begin, End)) { } bool isString() const { return MyKind == String; } bool isOperand() const { return MyKind == Operand; } const std::string &getString() const { return Str; } unsigned getOperandNo() const { assert(isOperand()); return OperandNo; } CharSourceRange getRange() const { assert(isOperand() && "Range is currently used only for Operands."); return Range; } /// getModifier - Get the modifier for this operand, if present. This /// returns '\0' if there was no modifier. char getModifier() const; }; /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing /// it into pieces. If the asm string is erroneous, emit errors and return /// true, otherwise return false. This handles canonicalization and /// translation of strings from GCC syntax to LLVM IR syntax, and handles //// flattening of named references like %[foo] to Operand AsmStringPiece's. unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces, const ASTContext &C, unsigned &DiagOffs) const; /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return StringRef(); } StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return StringRef(); } StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } private: void setOutputsAndInputsAndClobbers(const ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, StringLiteral **Clobbers, unsigned NumClobbers); public: //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. 
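// A decomposition sketch for the asm string (hypothetical caller, not part
// of this header; GAS, Ctx and handleOperand are assumed).  Per the
// description above, a nonzero result signals that diagnostics were emitted:
//
//   SmallVector<GCCAsmStmt::AsmStringPiece, 4> Pieces;
//   unsigned DiagOffs;
//   if (!GAS->AnalyzeAsmString(Pieces, Ctx, DiagOffs))
//     for (const auto &P : Pieces)
//       if (P.isOperand())
//         handleOperand(P.getOperandNo(), P.getModifier());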
int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. /// class MSAsmStmt : public AsmStmt { SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks; Token *AsmToks; StringRef *Constraints; StringRef *Clobbers; friend class ASTStmtReader; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// \brief Build an empty MS-style inline-assembly statement. explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty), NumAsmToks(0), AsmToks(nullptr), Constraints(nullptr), Clobbers(nullptr) { } SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return llvm::makeArrayRef(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit 
SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) { } public: static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation getLocStart() const LLVM_READONLY { return getExceptLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getExceptLoc() const { return Loc; } SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); } Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Children[BLOCK]); } child_range children() { return child_range(Children,Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHExceptStmtClass; } }; class SEHFinallyStmt : public Stmt { SourceLocation Loc; Stmt *Block; SEHFinallyStmt(SourceLocation Loc, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) { } public: static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc, Stmt *Block); SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getLocEnd(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); friend class ASTReader; friend class ASTStmtReader; explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) { } public: static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children,Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } }; /// Represents a __leave statement. /// class SEHLeaveStmt : public Stmt { SourceLocation LeaveLoc; public: explicit SEHLeaveStmt(SourceLocation LL) : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {} /// \brief Build an empty __leave statement. 
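// A SEHTryStmt owns exactly one handler, either a __except or a __finally
// block, so exactly one of the accessors above returns non-null.  A dispatch
// sketch (hypothetical helpers visitExcept/visitFinally, not part of this
// header):
//
//   static void visitSEHHandler(const SEHTryStmt *TS) {
//     if (SEHExceptStmt *E = TS->getExceptHandler())
//       visitExcept(E);
//     else if (SEHFinallyStmt *F = TS->getFinallyHandler())
//       visitFinally(F);
//   }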
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) { } SourceLocation getLeaveLoc() const { return LeaveLoc; } void setLeaveLoc(SourceLocation L) { LeaveLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return LeaveLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return LeaveLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHLeaveStmtClass; } // Iterators child_range children() { return child_range(); } }; /// \brief This captures a statement into a function. For example, the following /// pragma annotated compound statement can be represented as a CapturedStmt, /// and this compound statement is the body of an anonymous outlined function. /// @code /// #pragma omp parallel /// { /// compute(); /// } /// @endcode class CapturedStmt : public Stmt { public: /// \brief The different capture forms: by 'this', by reference, capture for /// variable-length array type etc. enum VariableCaptureKind { VCK_This, VCK_ByRef, VCK_VLAType, }; /// \brief Describes the capture of either a variable, or 'this', or /// variable-length array type. class Capture { llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: /// \brief Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. /// Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr) : VarAndKind(Var, Kind), Loc(Loc) { switch (Kind) { case VCK_This: assert(!Var && "'this' capture cannot have a variable!"); break; case VCK_ByRef: assert(Var && "capturing by reference must have a variable!"); break; case VCK_VLAType: assert(!Var && "Variable-length array type capture cannot have a variable!"); break; } } /// \brief Determine the kind of capture. VariableCaptureKind getCaptureKind() const { return VarAndKind.getInt(); } /// \brief Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// \brief Determine whether this capture handles the C++ 'this' pointer. bool capturesThis() const { return getCaptureKind() == VCK_This; } /// \brief Determine whether this capture handles a variable. bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; } /// \brief Determine whether this capture handles a variable-length array /// type. bool capturesVariableArrayType() const { return getCaptureKind() == VCK_VLAType; } /// \brief Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture captures a variable. VarDecl *getCapturedVar() const { assert(capturesVariable() && "No variable available for 'this' or VAT capture"); return VarAndKind.getPointer(); } friend class ASTStmtReader; }; private: /// \brief The number of variables captured, including 'this'. unsigned NumCaptures; /// \brief The pointer part is the implicit outlined function and the /// int part is the captured region kind, 'CR_Default' etc. llvm::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind; /// \brief The record for captured variables, a RecordDecl or CXXRecordDecl. RecordDecl *TheRecordDecl; /// \brief Construct a captured statement. CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// \brief Construct an empty captured statement.
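// A capture-inspection sketch (hypothetical helper, not part of this
// header), using the Capture queries defined above and the captures() range
// accessor:
//
//   static void listCaptures(const CapturedStmt *CS) {
//     for (const CapturedStmt::Capture &C : CS->captures()) {
//       if (C.capturesThis())
//         llvm::errs() << "this\n";
//       else if (C.capturesVariable())
//         llvm::errs() << C.getCapturedVar()->getName() << "\n";
//     }
//   }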
CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() const { return reinterpret_cast<Stmt **>(const_cast<CapturedStmt *>(this) + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// \brief Retrieve the statement being captured. Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return const_cast<CapturedStmt *>(this)->getCapturedStmt(); } /// \brief Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl() { return CapDeclAndKind.getPointer(); } const CapturedDecl *getCapturedDecl() const { return const_cast<CapturedStmt *>(this)->getCapturedDecl(); } /// \brief Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D) { assert(D && "null CapturedDecl"); CapDeclAndKind.setPointer(D); } /// \brief Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const { return CapDeclAndKind.getInt(); } /// \brief Set the captured region kind. void setCapturedRegionKind(CapturedRegionKind Kind) { CapDeclAndKind.setInt(Kind); } /// \brief Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// \brief Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// \brief True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// \brief An iterator that walks over the captures. typedef Capture *capture_iterator; typedef const Capture *const_capture_iterator; typedef llvm::iterator_range<capture_iterator> capture_range; typedef llvm::iterator_range<const_capture_iterator> capture_const_range; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// \brief Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// \brief Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// \brief Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// \brief Iterator that walks over the capture initialization arguments. typedef Expr **capture_init_iterator; typedef llvm::iterator_range<capture_init_iterator> capture_init_range; capture_init_range capture_inits() const { return capture_init_range(capture_init_begin(), capture_init_end()); } /// \brief Retrieve the first initialization argument. capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr **>(getStoredStmts()); } /// \brief Retrieve the iterator pointing one past the last initialization /// argument. 
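// Captures and their initialization arguments are parallel sequences of
// capture_size() elements and can be walked in lock step.  A sketch
// (hypothetical; processCapture is assumed), given a CapturedStmt *CS:
//
//   CapturedStmt::capture_iterator CI = CS->capture_begin();
//   for (Expr *Init : CS->capture_inits())
//     processCapture(*CI++, Init);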
capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getLocStart() const LLVM_READONLY { return getCapturedStmt()->getLocStart(); } SourceLocation getLocEnd() const LLVM_READONLY { return getCapturedStmt()->getLocEnd(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); friend class ASTStmtReader; }; } // end namespace clang #endif
residualbased_newton_raphson_strategy.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ \. // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // #if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_STRATEGY) #define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_STRATEGY // System includes // External includes // Project includes #include "includes/define.h" #include "solving_strategies/strategies/solving_strategy.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "utilities/builtin_timer.h" //default builder and solver #include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedNewtonRaphsonStrategy * @ingroup KratosCore * @brief This is the base Newton Raphson strategy * @details This strategy iterates until the convergence is achieved (or the maximum number of iterations is surpassed) using a Newton Raphson algorithm * @author Riccardo Rossi */ template <class TSparseSpace, class TDenseSpace, // = DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class ResidualBasedNewtonRaphsonStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType; // Counted pointer of ClassName KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedNewtonRaphsonStrategy); typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType; typedef typename BaseType::TDataType TDataType; typedef TSparseSpace SparseSpaceType; typedef typename BaseType::TSchemeType TSchemeType; //typedef typename BaseType::DofSetType DofSetType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; ///@} ///@name Life Cycle ///@{ /** * Default constructor * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of non-linear iterations to be considered when solving the problem * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ ResidualBasedNewtonRaphsonStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, int MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false) : SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, MoveMeshFlag), mpLinearSolver(pNewLinearSolver), 
mpScheme(pScheme), mpConvergenceCriteria(pNewConvergenceCriteria), mReformDofSetAtEachStep(ReformDofSetAtEachStep), mCalculateReactionsFlag(CalculateReactions), mSolutionStepIsInitialized(false), mMaxIterationNumber(MaxIterations), mInitializeWasPerformed(false), mKeepSystemConstantDuringIterations(false) { KRATOS_TRY; // Setting up the default builder and solver mpBuilderAndSolver = typename TBuilderAndSolverType::Pointer( new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(mpLinearSolver)); // Tells to the builder and solver if the reactions have to be Calculated or not GetBuilderAndSolver()->SetCalculateReactionsFlag(mCalculateReactionsFlag); // Tells to the Builder And Solver if the system matrix and vectors need to // be reshaped at each step or not GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep); // Set EchoLevel to the default value (only time is displayed) SetEchoLevel(1); // By default the matrices are rebuilt at each iteration this->SetRebuildLevel(2); mpA = TSparseSpace::CreateEmptyMatrixPointer(); mpDx = TSparseSpace::CreateEmptyVectorPointer(); mpb = TSparseSpace::CreateEmptyVectorPointer(); KRATOS_CATCH(""); } /** * Constructor specifying the builder and solver * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param pNewBuilderAndSolver The builder and solver employed * @param MaxIterations The maximum number of non-linear iterations to be considered when solving the problem * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ ResidualBasedNewtonRaphsonStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, int MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false) : SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, MoveMeshFlag), mpLinearSolver(pNewLinearSolver), mpScheme(pScheme), mpBuilderAndSolver(pNewBuilderAndSolver), mpConvergenceCriteria(pNewConvergenceCriteria), mReformDofSetAtEachStep(ReformDofSetAtEachStep), mCalculateReactionsFlag(CalculateReactions), mSolutionStepIsInitialized(false), mMaxIterationNumber(MaxIterations), mInitializeWasPerformed(false), mKeepSystemConstantDuringIterations(false) { KRATOS_TRY // Tells to the builder and solver if the reactions have to be Calculated or not GetBuilderAndSolver()->SetCalculateReactionsFlag(mCalculateReactionsFlag); // Tells to the Builder And Solver if the system matrix and vectors need to //be reshaped at each step or not GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep); // Set EchoLevel to the default value (only time is displayed) SetEchoLevel(1); // By default the matrices are rebuilt at each iteration this->SetRebuildLevel(2); mpA = TSparseSpace::CreateEmptyMatrixPointer(); mpDx = TSparseSpace::CreateEmptyVectorPointer(); mpb = TSparseSpace::CreateEmptyVectorPointer(); KRATOS_CATCH("") } /** * Constructor with Parameters * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear 
solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param Parameters Settings used in the strategy */ ResidualBasedNewtonRaphsonStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, Parameters Settings) : SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, Settings), mpLinearSolver(pNewLinearSolver), mpScheme(pScheme), mpConvergenceCriteria(pNewConvergenceCriteria), mSolutionStepIsInitialized(false), mInitializeWasPerformed(false), mKeepSystemConstantDuringIterations(false) { KRATOS_TRY; Parameters default_settings = this->GetDefaultSettings(); Settings.AddMissingParameters(default_settings); this->AssignSettings(Settings); // Setting up the default builder and solver mpBuilderAndSolver = typename TBuilderAndSolverType::Pointer( new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(mpLinearSolver)); // Tells to the builder and solver if the reactions have to be Calculated or not GetBuilderAndSolver()->SetCalculateReactionsFlag(mCalculateReactionsFlag); // Tells to the Builder And Solver if the system matrix and vectors need to // be reshaped at each step or not GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep); // Set EchoLevel to the default value (only time is displayed) SetEchoLevel(1); // By default the matrices are rebuilt at each iteration this->SetRebuildLevel(2); mpA = TSparseSpace::CreateEmptyMatrixPointer(); mpDx = TSparseSpace::CreateEmptyVectorPointer(); mpb = TSparseSpace::CreateEmptyVectorPointer(); KRATOS_CATCH(""); } /** * Constructor specifying the builder and solver and using Parameters * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param pNewBuilderAndSolver The builder and solver employed * @param Parameters Settings used in the strategy */ ResidualBasedNewtonRaphsonStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, Parameters Settings) : SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, Settings), mpLinearSolver(pNewLinearSolver), mpScheme(pScheme), mpBuilderAndSolver(pNewBuilderAndSolver), mpConvergenceCriteria(pNewConvergenceCriteria), mSolutionStepIsInitialized(false), mInitializeWasPerformed(false), mKeepSystemConstantDuringIterations(false) { KRATOS_TRY Parameters default_settings = this->GetDefaultSettings(); Settings.AddMissingParameters(default_settings); this->AssignSettings(Settings); // Tells to the builder and solver if the reactions have to be Calculated or not GetBuilderAndSolver()->SetCalculateReactionsFlag(mCalculateReactionsFlag); // Tells to the Builder And Solver if the system matrix and vectors need to //be reshaped at each step or not GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep); // Set EchoLevel to the default value (only time is displayed) SetEchoLevel(1); // By default the matrices are rebuilt at each iteration this->SetRebuildLevel(2); mpA = TSparseSpace::CreateEmptyMatrixPointer(); mpDx = TSparseSpace::CreateEmptyVectorPointer(); mpb = TSparseSpace::CreateEmptyVectorPointer(); KRATOS_CATCH("") } /** * @brief 
Destructor. * @details In the Trilinos third-party library, the linear solver's preconditioner should be freed before the system matrix. We control the deallocation order with Clear(). */ ~ResidualBasedNewtonRaphsonStrategy() override { // If the linear solver has not been deallocated, clean it before // deallocating mpA. This prevents a memory error with the ML // solver (which holds a reference to it). auto p_linear_solver = GetBuilderAndSolver()->GetLinearSystemSolver(); if (p_linear_solver != nullptr) p_linear_solver->Clear(); // Deallocating system vectors to avoid errors in MPI. Clear calls // TrilinosSpace::Clear for the vectors, which preserves the Map of // current vectors, performing MPI calls in the process. Due to the // way Python garbage collection works, this may happen after // MPI_Finalize has already been called and is an error. Resetting // the pointers here prevents Clear from operating with the // (now deallocated) vectors. mpA.reset(); mpDx.reset(); mpb.reset(); Clear(); } /** * @brief Set method for the time scheme * @param pScheme The pointer to the time scheme considered */ void SetScheme(typename TSchemeType::Pointer pScheme) { mpScheme = pScheme; }; /** * @brief Get method for the time scheme * @return mpScheme: The pointer to the time scheme considered */ typename TSchemeType::Pointer GetScheme() { return mpScheme; }; /** * @brief Set method for the builder and solver * @param pNewBuilderAndSolver The pointer to the builder and solver considered */ void SetBuilderAndSolver(typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver) { mpBuilderAndSolver = pNewBuilderAndSolver; }; /** * @brief Get method for the builder and solver * @return mpBuilderAndSolver: The pointer to the builder and solver considered */ typename TBuilderAndSolverType::Pointer GetBuilderAndSolver() { return mpBuilderAndSolver; }; /** * @brief This method sets the flag mInitializeWasPerformed * @param InitializePerformedFlag The flag that tells if the initialize has been computed */ void SetInitializePerformedFlag(bool InitializePerformedFlag = true) { mInitializeWasPerformed = InitializePerformedFlag; } /** * @brief This method gets the flag mInitializeWasPerformed * @return mInitializeWasPerformed: The flag that tells if the initialize has been computed */ bool GetInitializePerformedFlag() { return mInitializeWasPerformed; } /** * @brief This method sets the flag mCalculateReactionsFlag * @param CalculateReactionsFlag The flag that tells if the reactions are computed */ void SetCalculateReactionsFlag(bool CalculateReactionsFlag) { mCalculateReactionsFlag = CalculateReactionsFlag; } /** * @brief This method returns the flag mCalculateReactionsFlag * @return The flag that tells if the reactions are computed */ bool GetCalculateReactionsFlag() { return mCalculateReactionsFlag; } /** * @brief This method sets the flag mReformDofSetAtEachStep * @param Flag The flag that tells if each time step the system is rebuilt */ void SetReformDofSetAtEachStepFlag(bool Flag) { mReformDofSetAtEachStep = Flag; GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep); } /** * @brief This method returns the flag mReformDofSetAtEachStep * @return The flag that tells if each time step the system is rebuilt */ bool GetReformDofSetAtEachStepFlag() { return mReformDofSetAtEachStep; } /** * @brief This method sets the flag mMaxIterationNumber * @param MaxIterationNumber This is the maximum number of non-linear iterations */ void SetMaxIterationNumber(unsigned int MaxIterationNumber) {
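        // Usage sketch (hypothetical driver code, not part of this class):
        //   strategy.SetMaxIterationNumber(60); // allow more non-linear iterations
        //   strategy.SetEchoLevel(2);           // additionally print linear solver data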
mMaxIterationNumber = MaxIterationNumber; } /** * @brief This method gets the flag mMaxIterationNumber * @return mMaxIterationNumber: This is the maximum number of on linear iterations */ unsigned int GetMaxIterationNumber() { return mMaxIterationNumber; } /** * @brief It sets the level of echo for the solving strategy * @param Level The level to set * @details The different levels of echo are: * - 0: Mute... no echo at all * - 1: Printing time and basic informations * - 2: Printing linear solver data * - 3: Print of debug informations: Echo of stiffness matrix, Dx, b... */ void SetEchoLevel(int Level) override { BaseType::mEchoLevel = Level; GetBuilderAndSolver()->SetEchoLevel(Level); } //********************************************************************************* /**OPERATIONS ACCESSIBLE FROM THE INPUT: **/ /** * @brief Operation to predict the solution ... if it is not called a trivial predictor is used in which the values of the solution step of interest are assumed equal to the old values */ void Predict() override { KRATOS_TRY //OPERATIONS THAT SHOULD BE DONE ONCE - internal check to avoid repetitions //if the operations needed were already performed this does nothing if (mInitializeWasPerformed == false) Initialize(); //initialize solution step if (mSolutionStepIsInitialized == false) InitializeSolutionStep(); TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; DofsArrayType& r_dof_set = GetBuilderAndSolver()->GetDofSet(); GetScheme()->Predict(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb); // Applying constraints if needed auto& r_constraints_array = BaseType::GetModelPart().MasterSlaveConstraints(); const std::size_t number_of_constraints = r_constraints_array.size(); if(number_of_constraints != 0) { const auto& r_process_info = BaseType::GetModelPart().GetProcessInfo(); const auto it_const_begin = r_constraints_array.begin(); #pragma omp parallel for for(int i=0; i<static_cast<int>(number_of_constraints); ++i) (it_const_begin + i)->ResetSlaveDofs(r_process_info); #pragma omp parallel for for(int i=0; i<static_cast<int>(number_of_constraints); ++i) (it_const_begin + i)->Apply(r_process_info); // The following is needed since we need to eventually compute time derivatives after applying // Master slave relations TSparseSpace::SetToZero(rDx); this->GetScheme()->Update(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb); } // Move the mesh if needed if (this->MoveMeshFlag() == true) BaseType::MoveMesh(); KRATOS_CATCH("") } /** * @brief Initialization of member variables and prior operations */ void Initialize() override { KRATOS_TRY; if (mInitializeWasPerformed == false) { //pointers needed in the solution typename TSchemeType::Pointer p_scheme = GetScheme(); typename TConvergenceCriteriaType::Pointer p_convergence_criteria = mpConvergenceCriteria; //Initialize The Scheme - OPERATIONS TO BE DONE ONCE if (p_scheme->SchemeIsInitialized() == false) p_scheme->Initialize(BaseType::GetModelPart()); //Initialize The Elements - OPERATIONS TO BE DONE ONCE if (p_scheme->ElementsAreInitialized() == false) p_scheme->InitializeElements(BaseType::GetModelPart()); //Initialize The Conditions - OPERATIONS TO BE DONE ONCE if (p_scheme->ConditionsAreInitialized() == false) p_scheme->InitializeConditions(BaseType::GetModelPart()); //initialisation of the convergence criteria if (p_convergence_criteria->IsInitialized() == false) p_convergence_criteria->Initialize(BaseType::GetModelPart()); mInitializeWasPerformed = true; } KRATOS_CATCH(""); } /** * @brief 
Clears the internal storage */ void Clear() override { KRATOS_TRY; // if the preconditioner is saved between solves, it // should be cleared here. GetBuilderAndSolver()->GetLinearSystemSolver()->Clear(); if (mpA != nullptr) SparseSpaceType::Clear(mpA); if (mpDx != nullptr) SparseSpaceType::Clear(mpDx); if (mpb != nullptr) SparseSpaceType::Clear(mpb); //setting to zero the internal flag to ensure that the dof sets are recalculated GetBuilderAndSolver()->SetDofSetIsInitializedFlag(false); GetBuilderAndSolver()->Clear(); GetScheme()->Clear(); mInitializeWasPerformed = false; mSolutionStepIsInitialized = false; KRATOS_CATCH(""); } /** * @brief This should be considered a "post solution" convergence check which is useful for coupled analysis - the convergence criteria used is the one used inside the "solve" step */ bool IsConverged() override { KRATOS_TRY; TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; if (mpConvergenceCriteria->GetActualizeRHSflag() == true) GetBuilderAndSolver()->BuildRHS(GetScheme(), BaseType::GetModelPart(), rb); return mpConvergenceCriteria->PostCriteria(BaseType::GetModelPart(), GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb); KRATOS_CATCH(""); } /** * @brief These operations should be called before printing the results when non-trivial results * (e.g. stresses) * need to be calculated given the solution of the step * @details These operations should be called only when needed, before printing, as they can involve a non-negligible cost */ void CalculateOutputData() override { TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; GetScheme()->CalculateOutputData(BaseType::GetModelPart(), GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb); } /** * @brief Performs all the required operations that should be done (for each step) before solving the solution step. * @details A member variable should be used as a flag to make sure this function is called only once per step. 
*/ void InitializeSolutionStep() override { KRATOS_TRY; if (!mSolutionStepIsInitialized) { // Pointers needed in the solution typename TSchemeType::Pointer p_scheme = GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver(); ModelPart& r_model_part = BaseType::GetModelPart(); //set up the system, operation performed just once unless it is required //to reform the dof set at each iteration BuiltinTimer system_construction_time; if (p_builder_and_solver->GetDofSetIsInitializedFlag() == false || mReformDofSetAtEachStep == true) { //setting up the list of the DOFs to be solved BuiltinTimer setup_dofs_time; p_builder_and_solver->SetUpDofSet(p_scheme, r_model_part); KRATOS_INFO_IF("Setup Dofs Time", BaseType::GetEchoLevel() > 0) << setup_dofs_time.ElapsedSeconds() << std::endl; //shaping correctly the system BuiltinTimer setup_system_time; p_builder_and_solver->SetUpSystem(r_model_part); KRATOS_INFO_IF("Setup System Time", BaseType::GetEchoLevel() > 0) << setup_system_time.ElapsedSeconds() << std::endl; //setting up the Vectors involved to the correct size BuiltinTimer system_matrix_resize_time; p_builder_and_solver->ResizeAndInitializeVectors(p_scheme, mpA, mpDx, mpb, r_model_part); KRATOS_INFO_IF("System Matrix Resize Time", BaseType::GetEchoLevel() > 0) << system_matrix_resize_time.ElapsedSeconds() << std::endl; } KRATOS_INFO_IF("System Construction Time", BaseType::GetEchoLevel() > 0) << system_construction_time.ElapsedSeconds() << std::endl; TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; // Initial operations ... things that are constant over the Solution Step p_builder_and_solver->InitializeSolutionStep(r_model_part, rA, rDx, rb); // Initial operations ... things that are constant over the Solution Step p_scheme->InitializeSolutionStep(r_model_part, rA, rDx, rb); // Initialisation of the convergence criteria if (mpConvergenceCriteria->GetActualizeRHSflag() == true) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); } mpConvergenceCriteria->InitializeSolutionStep(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb); if (mpConvergenceCriteria->GetActualizeRHSflag() == true) TSparseSpace::SetToZero(rb); mSolutionStepIsInitialized = true; } KRATOS_CATCH(""); } /** * @brief Performs all the required operations that should be done (for each step) after solving the solution step. * @details A member variable should be used as a flag to make sure this function is called only once per step. 
*/ void FinalizeSolutionStep() override { KRATOS_TRY; ModelPart& r_model_part = BaseType::GetModelPart(); typename TSchemeType::Pointer p_scheme = GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver(); TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; //Finalisation of the solution step, //operations to be done after achieving convergence, for example the //Final Residual Vector (mpb) has to be saved there //to avoid error accumulation p_scheme->FinalizeSolutionStep(r_model_part, rA, rDx, rb); p_builder_and_solver->FinalizeSolutionStep(r_model_part, rA, rDx, rb); mpConvergenceCriteria->FinalizeSolutionStep(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb); //Cleaning memory after the solution p_scheme->Clean(); //reset flags for next step mSolutionStepIsInitialized = false; if (mReformDofSetAtEachStep == true) //deallocate the system vectors { SparseSpaceType::Clear(mpA); SparseSpaceType::Clear(mpDx); SparseSpaceType::Clear(mpb); this->Clear(); } KRATOS_CATCH(""); } /** * @brief Solves the current step. This function returns true if a solution has been found, false otherwise. */ bool SolveSolutionStep() override { // Pointers needed in the solution ModelPart& r_model_part = BaseType::GetModelPart(); typename TSchemeType::Pointer p_scheme = GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver(); auto& r_dof_set = p_builder_and_solver->GetDofSet(); TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; //initializing the parameters of the Newton-Raphson cycle unsigned int iteration_number = 1; r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number; bool is_converged = false; bool residual_is_updated = false; p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb); mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); is_converged = mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb); // Function to perform the building and the solving phase. if (BaseType::mRebuildLevel > 0 || BaseType::mStiffnessMatrixIsBuilt == false) { TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb); } else { TSparseSpace::SetToZero(rDx); //Dx=0.00; TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } // Debugging info EchoInfo(iteration_number); // Updating the results stored in the database UpdateDatabase(rA, rDx, rb, BaseType::MoveMeshFlag()); p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb); mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); if (is_converged) { if (mpConvergenceCriteria->GetActualizeRHSflag()) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); } is_converged = mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb); } //Iteration Cycle... 
performed only for NonLinearProblems while (is_converged == false && iteration_number++ < mMaxIterationNumber) { //setting the number of iterations r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number; p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb); mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); is_converged = mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb); //call the linear system solver to find the correction rDx; //it is not called if there is no system to solve if (SparseSpaceType::Size(rDx) != 0) { if (BaseType::mRebuildLevel > 1 || BaseType::mStiffnessMatrixIsBuilt == false) { if (GetKeepSystemConstantDuringIterations() == false) { //A = 0.00; TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb); } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { KRATOS_WARNING("NO DOFS") << "ATTENTION: no free DOFs!! " << std::endl; } // Debugging info EchoInfo(iteration_number); // Updating the results stored in the database UpdateDatabase(rA, rDx, rb, BaseType::MoveMeshFlag()); p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb); mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); residual_is_updated = false; if (is_converged == true) { if (mpConvergenceCriteria->GetActualizeRHSflag() == true) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); residual_is_updated = true; } is_converged = mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb); } } //prints a warning if the maximum number of iterations is exceeded if (iteration_number >= mMaxIterationNumber) { MaxIterationsExceeded(); } else { KRATOS_INFO_IF("NR-Strategy", this->GetEchoLevel() > 0) << "Convergence achieved after " << iteration_number << " / " << mMaxIterationNumber << " iterations" << std::endl; } //recalculate residual if needed //(note that some convergence criteria need it to be recalculated) if (residual_is_updated == false) { // NOTE: // The following part remains commented out because it is time consuming // and there is no obvious reason for it to be here. If someone needs this // part, please notify the community via the mailing list before uncommenting it. // Pooyan. // TSparseSpace::SetToZero(mb); // p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb); } //calculate reactions if required if (mCalculateReactionsFlag == true) p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb); return is_converged; } /** * @brief Function to perform expensive checks. * @details It is designed to be called ONCE to verify that the input is correct. 
*/ int Check() override { KRATOS_TRY BaseType::Check(); GetBuilderAndSolver()->Check(BaseType::GetModelPart()); GetScheme()->Check(BaseType::GetModelPart()); mpConvergenceCriteria->Check(BaseType::GetModelPart()); return 0; KRATOS_CATCH("") } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ ///@} ///@name Access ///@{ /** * @brief This method returns the LHS matrix * @return The LHS matrix */ TSystemMatrixType &GetSystemMatrix() { TSystemMatrixType &mA = *mpA; return mA; } /** * @brief This method returns the RHS vector * @return The RHS vector */ TSystemVectorType& GetSystemVector() { TSystemVectorType& mb = *mpb; return mb; } /** * @brief This method returns the solution vector * @return The Dx vector */ TSystemVectorType& GetSolutionVector() { TSystemVectorType& mDx = *mpDx; return mDx; } /** * @brief Set method for the flag mKeepSystemConstantDuringIterations * @param Value True if the system of equations is to be kept constant during the iterations */ void SetKeepSystemConstantDuringIterations(bool Value) { mKeepSystemConstantDuringIterations = Value; } /** * @brief Get method for the flag mKeepSystemConstantDuringIterations * @return True if the system of equations is kept constant during the iterations, false otherwise */ bool GetKeepSystemConstantDuringIterations() { return mKeepSystemConstantDuringIterations; } ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualBasedNewtonRaphsonStrategy"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} private: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} protected: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ typename TLinearSolver::Pointer mpLinearSolver; /// The pointer to the linear solver considered typename TSchemeType::Pointer mpScheme; /// The pointer to the time scheme employed typename TBuilderAndSolverType::Pointer mpBuilderAndSolver; /// The pointer to the builder and solver employed typename TConvergenceCriteriaType::Pointer mpConvergenceCriteria; /// The pointer to the convergence criteria employed TSystemVectorPointerType mpDx; /// The increment in the solution TSystemVectorPointerType mpb; /// The RHS vector of the system of equations TSystemMatrixPointerType mpA; /// The LHS matrix of the system of equations /** * @brief Flag telling if it is needed to reform the DofSet at each solution step or if it is possible to form it just once * @details Default = false - true : Reform at each time step - false : Form it just once (more efficient) */ bool mReformDofSetAtEachStep; /** * @brief Flag telling if it is needed or not to compute the reactions * @details default = true */ bool mCalculateReactionsFlag; bool mSolutionStepIsInitialized; /// Flag to set as initialized the solution step unsigned int mMaxIterationNumber; /// The maximum number of iterations, 30 by default bool mInitializeWasPerformed; /// Flag to set as initialized the strategy bool mKeepSystemConstantDuringIterations; // Flag to allow keeping system matrix 
constant during iterations ///@} ///@name Private Operators ///@{ /** * @brief Here the database is updated * @param rA The LHS matrix of the system of equations * @param rDx The increment in the solution * @param rb The RHS vector of the system of equations * @param MoveMesh The flag that allows the mesh to be moved */ virtual void UpdateDatabase( TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb, const bool MoveMesh) { typename TSchemeType::Pointer p_scheme = GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver(); p_scheme->Update(BaseType::GetModelPart(), p_builder_and_solver->GetDofSet(), rA, rDx, rb); // Move the mesh if needed if (MoveMesh == true) BaseType::MoveMesh(); } /** * @brief This method returns the components of the system of equations depending on the echo level * @param IterationNumber The non-linear iteration in the solution loop */ virtual void EchoInfo(const unsigned int IterationNumber) { TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; if (this->GetEchoLevel() == 2) //if it is needed to print the debug info { KRATOS_INFO("Dx") << "Solution obtained = " << rDx << std::endl; KRATOS_INFO("RHS") << "RHS = " << rb << std::endl; } else if (this->GetEchoLevel() == 3) //if it is needed to print the debug info { KRATOS_INFO("LHS") << "SystemMatrix = " << rA << std::endl; KRATOS_INFO("Dx") << "Solution obtained = " << rDx << std::endl; KRATOS_INFO("RHS") << "RHS = " << rb << std::endl; } else if (this->GetEchoLevel() == 4) //print to matrix market file { std::stringstream matrix_market_name; matrix_market_name << "A_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << "_" << IterationNumber << ".mm"; TSparseSpace::WriteMatrixMarketMatrix((char *)(matrix_market_name.str()).c_str(), rA, false); std::stringstream matrix_market_vectname; matrix_market_vectname << "b_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << "_" << IterationNumber << ".mm.rhs"; TSparseSpace::WriteMatrixMarketVector((char *)(matrix_market_vectname.str()).c_str(), rb); } } /** * @brief This method prints information after reaching the maximum number of iterations */ virtual void MaxIterationsExceeded() { KRATOS_INFO_IF("NR-Strategy", this->GetEchoLevel() > 0) << "ATTENTION: max iterations ( " << mMaxIterationNumber << " ) exceeded!" << std::endl; } /** * @brief This method returns the default settings */ virtual Parameters GetDefaultSettings() { Parameters default_settings(R"({ "max_iterations" : 30, "reform_dofs_at_each_step" : false, "calculate_reactions" : false })"); return default_settings; } /** * @brief This method assigns settings to member variables * @param Settings Parameters that are assigned to the member variables */ virtual void AssignSettings(Parameters Settings) { mMaxIterationNumber = Settings["max_iterations"].GetInt(); mReformDofSetAtEachStep = Settings["reform_dofs_at_each_step"].GetBool(); mCalculateReactionsFlag = Settings["calculate_reactions"].GetBool(); } ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /** * Copy constructor. */ ResidualBasedNewtonRaphsonStrategy(const ResidualBasedNewtonRaphsonStrategy &Other){}; ///@} }; /* Class ResidualBasedNewtonRaphsonStrategy */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos. */ #endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_STRATEGY defined */
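// ----------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the original header): driving
// one nonlinear solution step with this strategy. The space/solver typedefs
// and the constructor signature below are assumptions based on typical Kratos
// strategy instantiations; only the member functions called here (Initialize,
// InitializeSolutionStep, Predict, SolveSolutionStep, FinalizeSolutionStep)
// are defined above.
//
//     using SparseSpaceType  = UblasSpace<double, CompressedMatrix, Vector>;
//     using LocalSpaceType   = UblasSpace<double, Matrix, Vector>;
//     using LinearSolverType = LinearSolver<SparseSpaceType, LocalSpaceType>;
//     using StrategyType     = ResidualBasedNewtonRaphsonStrategy<
//         SparseSpaceType, LocalSpaceType, LinearSolverType>;
//
//     StrategyType strategy(model_part, p_scheme, p_linear_solver,
//                           p_convergence_criteria,
//                           30,      // maximum number of non-linear iterations
//                           false,   // calculate reactions
//                           false,   // reform the DOF set at each step
//                           true);   // move the mesh after each update
//
//     strategy.Initialize();
//     strategy.InitializeSolutionStep();
//     strategy.Predict();
//     const bool converged = strategy.SolveSolutionStep();
//     strategy.FinalizeSolutionStep();
// ----------------------------------------------------------------------------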
GB_unaryop__ainv_bool_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_bool_int32 // op(A') function: GB_tran__ainv_bool_int32 // C type: bool // A type: int32_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_bool_int32 ( bool *restrict Cx, const int32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_bool_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
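// ----------------------------------------------------------------------------
// Illustrative example (a sketch, not part of the generated file): applying
// GB_unop__ainv_bool_int32 to a small dense array. For the AINV (additive
// inverse) operator with a bool result, the kernel reduces to a typecast:
// each int32 entry is cast to bool (nonzero -> true) and the boolean additive
// inverse is the identity, so cij = (bool) aij, as the macros above define.
//
//     int32_t Ax [4] = { 0, 1, -7, 42 } ;
//     bool Cx [4] ;
//     GrB_Info info = GB_unop__ainv_bool_int32 (Cx, Ax, 4, 1) ;
//     // on success (GrB_SUCCESS), Cx is { false, true, true, true } ;
//     // if the operator is disabled via GB_DISABLE, GrB_NO_VALUE is returned
// ----------------------------------------------------------------------------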
GeometryConverter.h
/* -*-c++-*- IfcPlusPlus - www.ifcplusplus.com - Copyright (C) 2011 Fabian Gerold * * This library is open source and may be redistributed and/or modified under * the terms of the OpenSceneGraph Public License (OSGPL) version 0.0 or * (at your option) any later version. The full license is in LICENSE file * included with this distribution, and on the openscenegraph.org website. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * OpenSceneGraph Public License for more details. */ #pragma once #include <set> #include <sstream> #include <osg/Switch> #include <ifcpp/model/shared_ptr.h> #include <ifcpp/model/IfcPPObject.h> #include <ifcpp/model/IfcPPModel.h> #include <ifcpp/model/StatusCallback.h> #include <ifcpp/IFC4/include/IfcComplexProperty.h> #include <ifcpp/IFC4/include/IfcIdentifier.h> #include <ifcpp/IFC4/include/IfcProject.h> #include <ifcpp/IFC4/include/IfcProduct.h> #include <ifcpp/IFC4/include/IfcPropertySet.h> #include <ifcpp/IFC4/include/IfcPropertySetDefinitionSet.h> #include <ifcpp/IFC4/include/IfcPropertySingleValue.h> #include <ifcpp/IFC4/include/IfcRelAggregates.h> #include <ifcpp/IFC4/include/IfcRelContainedInSpatialStructure.h> #include <ifcpp/IFC4/include/IfcRelDefinesByProperties.h> #include <ifcpp/IFC4/include/IfcSimpleProperty.h> #include "RepresentationConverter.h" #include "ConverterOSG.h" #include "GeometrySettings.h" #include "GeometryInputData.h" class GeometryConverter : public StatusCallback { protected: shared_ptr<IfcPPModel> m_ifc_model; shared_ptr<GeometrySettings> m_geom_settings; shared_ptr<RepresentationConverter> m_representation_converter; shared_ptr<ConverterOSG> m_converter_osg; std::map<int, shared_ptr<ProductShapeInputData> > m_shape_input_data; boost::unordered_map<int, shared_ptr<IfcPPObject> > m_map_outside_spatial_structure; double m_recent_progress; std::map<int, std::vector<shared_ptr<StatusCallback::Message> > > m_messages; #ifdef IFCPP_OPENMP Mutex m_writelock_messages; #endif public: // getters and setters shared_ptr<IfcPPModel>& getIfcPPModel() { return m_ifc_model; } shared_ptr<RepresentationConverter>& getRepresentationConverter() { return m_representation_converter; } shared_ptr<ConverterOSG>& getConverterOSG() { return m_converter_osg; } shared_ptr<GeometrySettings>& getGeomSettings() { return m_geom_settings; } std::map<int, shared_ptr<ProductShapeInputData> >& getShapeInputData() { return m_shape_input_data; } boost::unordered_map<int, shared_ptr<IfcPPObject> >& getObjectsOutsideSpatialStructure() { return m_map_outside_spatial_structure; } virtual void slotMessageWrapper( void* ptr, shared_ptr<StatusCallback::Message> m ) { GeometryConverter* myself = (GeometryConverter*)ptr; if( myself ) { if( m->m_entity ) { #ifdef IFCPP_OPENMP ScopedLock lock( myself->m_writelock_messages ); #endif // make sure that the same message for one entity does not appear several times const int entity_id = m->m_entity->m_id; std::map<int, std::vector<shared_ptr<StatusCallback::Message> > >::iterator it = myself->m_messages.find( entity_id ); if( it != myself->m_messages.end() ) { std::vector<shared_ptr<StatusCallback::Message> >& vec_message_for_entity = it->second; for( size_t i = 0; i < vec_message_for_entity.size(); ++i ) { shared_ptr<StatusCallback::Message>& existing_message = vec_message_for_entity[i]; if( existing_message->m_message_text.compare( m->m_message_text ) == 0 ) { // same message for same entity 
is already there, so ignore message return; } } vec_message_for_entity.push_back( m ); } else { std::vector<shared_ptr<StatusCallback::Message> >& vec = myself->m_messages.insert( std::make_pair( entity_id, std::vector<shared_ptr<StatusCallback::Message> >() ) ).first->second; vec.push_back( m ); } } myself->messageCallback( m ); } } GeometryConverter( shared_ptr<IfcPPModel>& ifc_model ) { m_ifc_model = ifc_model; m_geom_settings = shared_ptr<GeometrySettings>( new GeometrySettings() ); resetNumVerticesPerCircle(); shared_ptr<UnitConverter>& unit_converter = m_ifc_model->getUnitConverter(); m_representation_converter = shared_ptr<RepresentationConverter>( new RepresentationConverter( m_geom_settings, unit_converter ) ); m_converter_osg = shared_ptr<ConverterOSG>( new ConverterOSG( m_geom_settings ) ); // redirect all messages to slotMessageWrapper addCallbackChild( m_ifc_model.get() ); addCallbackChild( m_representation_converter.get() ); addCallbackChild( m_converter_osg.get() ); } virtual ~GeometryConverter() { } void resetModel() { progressTextCallback( L"Unloading model, cleaning up memory..." ); clearInputCache(); m_recent_progress = 0.0; m_ifc_model->clearCache(); m_ifc_model->clearIfcModel(); progressTextCallback( L"Unloading model done" ); progressValueCallback( 0.0, "parse" ); #ifdef _DEBUG GeomDebugUtils::clearMeshsetDump(); #endif } void clearInputCache() { m_shape_input_data.clear(); m_map_outside_spatial_structure.clear(); m_converter_osg->clearAppearanceCache(); m_representation_converter->clearCache(); m_messages.clear(); } void resetNumVerticesPerCircle() { m_geom_settings->resetNumVerticesPerCircle(); } void setModel( shared_ptr<IfcPPModel> model ) { clearInputCache(); m_ifc_model = model; m_representation_converter->clearCache(); m_representation_converter->setUnitConverter( m_ifc_model->getUnitConverter() ); //m_ifc_model->setMessageCallBack( this, &slotMessageWrapper ); addCallbackChild( m_ifc_model.get() ); } /*\brief method createGeometryOSG: Creates geometry for OpenSceneGraph from a previously loaded model. \param[out] parent_group Group to append the resulting geometry. **/ void createGeometryOSG( osg::ref_ptr<osg::Switch> parent_group ) { progressTextCallback( L"Creating geometry..." ); progressValueCallback( 0, "geometry" ); m_shape_input_data.clear(); m_map_outside_spatial_structure.clear(); m_representation_converter->clearCache(); std::vector<shared_ptr<IfcProduct> > vec_products; const double length_to_meter_factor = m_ifc_model->getUnitConverter()->getLengthInMeterFactor(); carve::setEpsilon( 1.4901161193847656e-05*length_to_meter_factor ); const boost::unordered_map<int, shared_ptr<IfcPPEntity> >& map_entities = m_ifc_model->getMapIfcEntities(); for( auto it = map_entities.begin(); it != map_entities.end(); ++it ) { shared_ptr<IfcPPEntity> obj = it->second; shared_ptr<IfcProduct> product = dynamic_pointer_cast<IfcProduct>( obj ); if( product ) { vec_products.push_back( product ); } // TODO: sort vec_products such that products with complex geometry are at the beginning (better scheduling for omp). Rate complexity by number of CSG ops, and number of vertices. Check if sorting pays off. 
} // create geometry for each IfcProduct independently, the spatial structure will be resolved later std::map<int, shared_ptr<ProductShapeInputData> >* map_products_ptr = &m_shape_input_data; const int num_products = (int)vec_products.size(); #ifdef IFCPP_OPENMP Mutex writelock_map; #pragma omp parallel firstprivate(num_products) shared(map_products_ptr) { // time for one product may vary significantly, so don't schedule too many at once #pragma omp for schedule(dynamic,10) #endif for( int i = 0; i < num_products; ++i ) { shared_ptr<IfcProduct> product = vec_products[i]; std::stringstream thread_err; if( dynamic_pointer_cast<IfcFeatureElementSubtraction>( product ) ) { // geometry will be created in method subtractOpenings continue; } if( !product->m_Representation ) { continue; } const int product_id = product->m_id; shared_ptr<ProductShapeInputData> product_geom_input_data( new ProductShapeInputData() ); product_geom_input_data->m_ifc_product = product; try { convertIfcProduct( product_geom_input_data ); m_converter_osg->convertToOSG( product_geom_input_data, length_to_meter_factor ); } #ifdef _DEBUG catch( DebugBreakException& dbge ) { throw dbge; } #endif catch( IfcPPOutOfMemoryException& e ) { throw e; } catch( IfcPPException& e ) { thread_err << e.what(); } catch( carve::exception& e ) { thread_err << e.str(); } catch( std::exception& e ) { thread_err << e.what(); } catch( ... ) { thread_err << "undefined error, product id " << product_id; } { #ifdef IFCPP_OPENMP ScopedLock scoped_lock( writelock_map ); #endif map_products_ptr->insert( std::make_pair( product_id, product_geom_input_data ) ); if( thread_err.tellp() > 0 ) { messageCallback( thread_err.str().c_str(), StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ ); } } // progress callback double progress = (double)i / (double)num_products; if( progress - m_recent_progress > 0.02 ) { #ifdef IFCPP_OPENMP if( omp_get_thread_num() == 0 ) #endif { // leave 10% of progress to openscenegraph internals progressValueCallback( progress*0.9, "geometry" ); m_recent_progress = progress; } } } #ifdef IFCPP_OPENMP } // implicit barrier #endif try { // now resolve spatial structure shared_ptr<IfcProject> ifc_project = m_ifc_model->getIfcProject(); if( ifc_project ) { resolveProjectStructure( ifc_project, parent_group ); } // check if there are entities that are not in spatial structure osg::ref_ptr<osg::Group> group_outside_spatial_structure = new osg::Group(); group_outside_spatial_structure->setName( "Entities not in spatial structure" ); for( auto it_product_shapes = m_shape_input_data.begin(); it_product_shapes != m_shape_input_data.end(); ++it_product_shapes ) { shared_ptr<ProductShapeInputData> product_shape = it_product_shapes->second; if( !product_shape ) { continue; } // check the pointer before dereferencing it shared_ptr<IfcProduct> ifc_product( product_shape->m_ifc_product ); if( !product_shape->m_added_to_node ) { shared_ptr<IfcFeatureElementSubtraction> opening = dynamic_pointer_cast<IfcFeatureElementSubtraction>( ifc_product ); if( opening ) { continue; } if( product_shape->m_product_switch.valid() ) { #ifdef _DEBUG if( product_shape->m_product_switch->getNumParents() > 0 ) { std::cout << __FUNC__ << ": product_shape->m_product_switch->getNumParents() > 0" << std::endl; } #endif group_outside_spatial_structure->addChild( product_shape->m_product_switch ); } product_shape->m_added_to_node = true; } m_map_outside_spatial_structure[ifc_product->m_id] = ifc_product; } if( group_outside_spatial_structure->getNumChildren() > 0 ) { #ifdef _DEBUG if( group_outside_spatial_structure->getNumParents() > 
0 ) { std::cout << __FUNC__ << ": group_outside_spatial_structure->getNumParents() > 0" << std::endl; } #endif parent_group->addChild( group_outside_spatial_structure ); } } catch( IfcPPOutOfMemoryException& e ) { throw e; } catch( IfcPPException& e ) { messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" ); } catch( std::exception& e ) { messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" ); } catch( ... ) { messageCallback( "undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ ); } m_representation_converter->getProfileCache()->clearProfileCache(); progressTextCallback( L"Loading file done" ); progressValueCallback( 1.0, "geometry" ); } bool inParentList( const int entity_id, osg::Group* group ) { if( !group ) { return false; } const osg::Group::ParentList& vec_parents = group->getParents(); for( size_t ii = 0; ii < vec_parents.size(); ++ii ) { osg::Group* parent = vec_parents[ii]; if( parent ) { const std::string parent_name = parent->getName(); if( parent_name.length() > 0 ) { if( parent_name.at( 0 ) == '#' ) { // extract entity id std::string parent_name_id = parent_name.substr( 1 ); size_t last_index = parent_name_id.find_first_not_of( "0123456789" ); std::string id_str = parent_name_id.substr( 0, last_index ); const int id = std::stoi( id_str.c_str() ); if( id == entity_id ) { return true; } bool in_parent_list = inParentList( entity_id, parent ); if( in_parent_list ) { return true; } } } } } return false; } void resolveProjectStructure( const shared_ptr<IfcObjectDefinition>& obj_def, osg::ref_ptr<osg::Switch> group ) { const int entity_id = obj_def->m_id; if( inParentList( entity_id, group ) ) { messageCallback( "Cycle in project structure detected", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__, obj_def.get() ); return; } shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>( obj_def ); if( ifc_product ) { std::map<int, shared_ptr<ProductShapeInputData> >::iterator it_product_map = m_shape_input_data.find( entity_id ); if( it_product_map != m_shape_input_data.end() ) { shared_ptr<ProductShapeInputData>& product_shape = it_product_map->second; if( product_shape ) { if( product_shape->m_product_switch ) { if( product_shape->m_added_to_node ) { // IfcRelContained/RelAggregates relationship is required to be hierarchical (an element can only be contained in exactly one spatial structure element) std::cout << "already product_shape->added_to_node" << std::endl; } #ifdef _DEBUG for( size_t ii_parent = 0; ii_parent < product_shape->m_product_switch->getNumParents(); ++ii_parent ) { osg::Node* parent_node = product_shape->m_product_switch->getParent( ii_parent ); const std::string parent_name = parent_node->getName(); std::cout << __FUNC__ << ": product_shape->m_product_switch->getNumParents() > 0" << std::endl; } #endif group->addChild( product_shape->m_product_switch ); product_shape->m_added_to_node = true; } } } } if( group->getName().size() < 1 ) { std::stringstream switch_name; switch_name << "#" << entity_id << "=" << obj_def->className() << " parent group"; group->setName( switch_name.str().c_str() ); } const std::vector<weak_ptr<IfcRelAggregates> >& vec_IsDecomposedBy = obj_def->m_IsDecomposedBy_inverse; for( size_t ii = 0; ii < vec_IsDecomposedBy.size(); ++ii ) { const weak_ptr<IfcRelAggregates>& rel_aggregates_weak_ptr = vec_IsDecomposedBy[ii]; if( rel_aggregates_weak_ptr.expired() ) { continue; } shared_ptr<IfcRelAggregates> rel_aggregates( rel_aggregates_weak_ptr ); if( rel_aggregates ) { const 
std::vector<shared_ptr<IfcObjectDefinition> >& vec_related_objects = rel_aggregates->m_RelatedObjects; for( size_t jj = 0; jj < vec_related_objects.size(); ++jj ) { const shared_ptr<IfcObjectDefinition>& child_obj_def = vec_related_objects[jj]; if( child_obj_def ) { osg::ref_ptr<osg::Switch> group_subparts = new osg::Switch(); std::stringstream group_subparts_name; group_subparts_name << "#" << child_obj_def->m_id << "=" << child_obj_def->className(); #ifdef _DEBUG group_subparts_name << ", RelatedObjects[" << jj << "]"; #endif group_subparts->setName( group_subparts_name.str().c_str() ); group->addChild( group_subparts ); resolveProjectStructure( child_obj_def, group_subparts ); } } } } shared_ptr<IfcSpatialStructureElement> spatial_ele = dynamic_pointer_cast<IfcSpatialStructureElement>( obj_def ); if( spatial_ele ) { const std::vector<weak_ptr<IfcRelContainedInSpatialStructure> >& vec_contains = spatial_ele->m_ContainsElements_inverse; for( size_t ii = 0; ii < vec_contains.size(); ++ii ) { const weak_ptr<IfcRelContainedInSpatialStructure>& rel_contained_weak_ptr = vec_contains[ii]; if( rel_contained_weak_ptr.expired() ) { continue; } shared_ptr<IfcRelContainedInSpatialStructure> rel_contained( rel_contained_weak_ptr ); if( rel_contained ) { const std::vector<shared_ptr<IfcProduct> >& vec_related_elements = rel_contained->m_RelatedElements; for( size_t jj = 0; jj < vec_related_elements.size(); ++jj ) { const shared_ptr<IfcProduct>& related_product = vec_related_elements[jj]; if( related_product ) { osg::ref_ptr<osg::Switch> group_subparts = new osg::Switch(); std::stringstream group_subparts_name; group_subparts_name << "#" << related_product->m_id << "=" << related_product->className(); #ifdef _DEBUG group_subparts_name << ", RelatedElements[" << jj << "]"; #endif group_subparts->setName( group_subparts_name.str().c_str() ); group->addChild( group_subparts ); resolveProjectStructure( related_product, group_subparts ); } } } } } // TODO: handle IfcRelAssignsToProduct } void readAppearanceFromPropertySet( const shared_ptr<IfcPropertySet>& prop_set, shared_ptr<ProductShapeInputData>& product_shape ) { if( !prop_set ) { return; } for( auto& ifc_property : prop_set->m_HasProperties ) { if( !ifc_property ) { continue; } shared_ptr<IfcSimpleProperty> simple_property = dynamic_pointer_cast<IfcSimpleProperty>( ifc_property ); if( simple_property ) { // ENTITY IfcSimpleProperty ABSTRACT SUPERTYPE OF(ONEOF( IfcPropertyBoundedValue, IfcPropertyEnumeratedValue, IfcPropertyListValue, // IfcPropertyReferenceValue, IfcPropertySingleValue, IfcPropertyTableValue)) shared_ptr<IfcIdentifier> property_name = simple_property->m_Name; std::wstring name_str = property_name->m_value; if( name_str.compare( L"LayerName" ) == 0 ) { // TODO: implement layers } shared_ptr<IfcText> description = simple_property->m_Description; shared_ptr<IfcPropertySingleValue> property_single_value = dynamic_pointer_cast<IfcPropertySingleValue>( simple_property ); if( property_single_value ) { //shared_ptr<IfcValue>& nominal_value = property_single_value->m_NominalValue; //optional //shared_ptr<IfcUnit>& unit = property_single_value->m_Unit; //optional } continue; } shared_ptr<IfcComplexProperty> complex_property = dynamic_pointer_cast<IfcComplexProperty>( ifc_property ); if( complex_property ) { if( !complex_property->m_UsageName ) continue; if( complex_property->m_UsageName->m_value.compare( L"Color" ) == 0 ) { carve::geom::vector<4> vec_color; m_representation_converter->getStylesConverter()->convertIfcComplexPropertyColor( 
complex_property, vec_color ); shared_ptr<AppearanceData> appearance_data( new AppearanceData( -1 ) ); if( !appearance_data ) { throw IfcPPOutOfMemoryException( __FUNC__ ); } appearance_data->m_apply_to_geometry_type = AppearanceData::ANY; appearance_data->m_color_ambient = vec_color; appearance_data->m_color_diffuse = vec_color; appearance_data->m_color_specular = vec_color; appearance_data->m_shininess = 35.f; product_shape->addAppearance( appearance_data ); } } } } //\brief method convertIfcProduct: Creates geometry objects (meshset with connected vertex-edge-face graph) from an IfcProduct object // caution: when using OpenMP, this method runs in parallel threads, so every write access to member variables needs a write lock void convertIfcProduct( shared_ptr<ProductShapeInputData>& product_shape ) { shared_ptr<IfcProduct> ifc_product( product_shape->m_ifc_product ); if( !ifc_product ) { return; } if( !ifc_product->m_Representation ) { return; } //const int product_id = ifc_product->m_id; const double length_factor = m_ifc_model->getUnitConverter()->getLengthInMeterFactor(); product_shape->m_ifc_product = ifc_product; // evaluate IFC geometry shared_ptr<IfcProductRepresentation>& product_representation = ifc_product->m_Representation; std::vector<shared_ptr<IfcRepresentation> >& vec_representations = product_representation->m_Representations; for( size_t i_representations = 0; i_representations < vec_representations.size(); ++i_representations ) { const shared_ptr<IfcRepresentation>& representation = vec_representations[i_representations]; try { shared_ptr<ProductRepresentationData> representation_data( new ProductRepresentationData() ); m_representation_converter->convertIfcRepresentation( representation, representation_data ); product_shape->m_vec_representations.push_back( representation_data ); } catch( IfcPPOutOfMemoryException& e ) { throw e; } catch( IfcPPException& e ) { messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" ); } catch( std::exception& e ) { messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" ); } } // IfcProduct has an ObjectPlacement that can be local or global carve::math::Matrix product_placement_matrix( carve::math::Matrix::IDENT() ); if( ifc_product->m_ObjectPlacement ) { // IfcPlacement2Matrix follows related placements in case of local coordinate systems std::unordered_set<IfcObjectPlacement*> placement_already_applied; PlacementConverter::convertIfcObjectPlacement( ifc_product->m_ObjectPlacement, length_factor, product_placement_matrix, this, placement_already_applied ); product_shape->applyPosition( product_placement_matrix ); } // handle openings std::vector<shared_ptr<ProductShapeInputData> > vec_opening_data; const shared_ptr<IfcElement> ifc_element = dynamic_pointer_cast<IfcElement>( ifc_product ); if( ifc_element ) { m_representation_converter->subtractOpenings( ifc_element, product_shape ); } // Fetch the IFCProduct relationships if( ifc_product->m_IsDefinedBy_inverse.size() > 0 ) { std::vector<weak_ptr<IfcRelDefinesByProperties> >& vec_IsDefinedBy_inverse = ifc_product->m_IsDefinedBy_inverse; for( size_t i = 0; i < vec_IsDefinedBy_inverse.size(); ++i ) { shared_ptr<IfcRelDefinesByProperties> rel_def( vec_IsDefinedBy_inverse[i] ); shared_ptr<IfcPropertySetDefinitionSelect> relating_property_definition_select = rel_def->m_RelatingPropertyDefinition; if( relating_property_definition_select ) { // TYPE IfcPropertySetDefinitionSelect = SELECT (IfcPropertySetDefinition ,IfcPropertySetDefinitionSet); 
shared_ptr<IfcPropertySetDefinition> property_set_def = dynamic_pointer_cast<IfcPropertySetDefinition>( relating_property_definition_select ); if( property_set_def ) { shared_ptr<IfcPropertySet> property_set = dynamic_pointer_cast<IfcPropertySet>( property_set_def ); if( property_set ) { readAppearanceFromPropertySet( property_set, product_shape ); } continue; } shared_ptr<IfcPropertySetDefinitionSet> property_set_def_set = dynamic_pointer_cast<IfcPropertySetDefinitionSet>( relating_property_definition_select ); if( property_set_def_set ) { std::vector<shared_ptr<IfcPropertySetDefinition> >& vec_property_set_def = property_set_def_set->m_vec; std::vector<shared_ptr<IfcPropertySetDefinition> >::iterator it_property_set_def; for( it_property_set_def = vec_property_set_def.begin(); it_property_set_def != vec_property_set_def.end(); ++it_property_set_def ) { shared_ptr<IfcPropertySetDefinition> property_set_def2 = ( *it_property_set_def ); if( property_set_def2 ) { shared_ptr<IfcPropertySet> property_set = dynamic_pointer_cast<IfcPropertySet>( property_set_def2 ); if( property_set ) { readAppearanceFromPropertySet( property_set, product_shape ); } } } continue; } } } } } };
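// ----------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the original header): creating
// an OpenSceneGraph subgraph from a loaded IFC model. How the IfcPPModel is
// populated (e.g. by an IFC/STEP reader) is outside this header and is only
// assumed here; GeometryConverter, createGeometryOSG and resetModel are the
// members defined above.
//
//     shared_ptr<IfcPPModel> ifc_model( new IfcPPModel() );
//     // ... load entities into ifc_model, e.g. with a STEP reader ...
//
//     shared_ptr<GeometryConverter> geometry_converter( new GeometryConverter( ifc_model ) );
//     osg::ref_ptr<osg::Switch> model_switch = new osg::Switch();
//     geometry_converter->createGeometryOSG( model_switch );
//     // model_switch now holds one Switch per resolved spatial structure
//     // element; products outside the spatial structure are collected in a
//     // separate group named "Entities not in spatial structure".
// ----------------------------------------------------------------------------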
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/DarwinSDKInfo.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class 
CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. 
SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Tracks expected type during expression parsing, for use in code completion. /// The type is tied to a particular token; all functions that update or consume /// the type take a start location of the token they are looking at as a /// parameter. This avoids updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Handles e.g. BaseType{ .D = Tok... void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType, const Designation &D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this function, as it stores a /// function_ref; clients should make sure all calls to get() with the same /// location happen while function_ref is alive. /// /// The callback should also emit signature help as a side-effect, but only /// if the completion point has been reached. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); /// Get the expected type associated with this location, if any. /// /// If the location is a function argument, determining the expected type /// involves considering all function overloads and the arguments so far. /// In this case, signature help for these function overloads will be reported /// as a side-effect (only if the completion point has been reached). QualType get(SourceLocation Tok) const { if (!Enabled || Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: bool Enabled; /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. 
QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema final { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; /// Source of additional semantic information. ExternalSemaSource *ExternalSource; /// Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: /// The maximum alignment, same as in llvm::Value. We duplicate them here /// because that allows us not to duplicate the constants in clang code, /// which we would otherwise have to do since we can't directly use the llvm constants. /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. static const unsigned MaxAlignmentExponent = 32; static const uint64_t MaximumAlignment = 1ull << MaxAlignmentExponent; typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions CurFPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; api_notes::APINotesManager APINotes; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. 
llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; // #pragma pack and align. class AlignPackInfo { public: // `Native` represents default align mode, which may vary based on the // platform. enum Mode : unsigned char { Native, Natural, Packed, Mac68k }; // #pragma pack info constructor AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL) : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) { assert(Num == PackNumber && "The pack number has been truncated."); } // #pragma align info constructor AlignPackInfo(AlignPackInfo::Mode M, bool IsXL) : PackAttr(false), AlignMode(M), PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {} explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {} AlignPackInfo() : AlignPackInfo(Native, false) {} // When an AlignPackInfo itself cannot be used, this returns a 32-bit // integer encoding for it. This should only be passed to // AlignPackInfo::getFromRawEncoding; it should not be inspected directly. static uint32_t getRawEncoding(const AlignPackInfo &Info) { std::uint32_t Encoding{}; if (Info.IsXLStack()) Encoding |= IsXLMask; Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1; if (Info.IsPackAttr()) Encoding |= PackAttrMask; Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4; return Encoding; } static AlignPackInfo getFromRawEncoding(unsigned Encoding) { bool IsXL = static_cast<bool>(Encoding & IsXLMask); AlignPackInfo::Mode M = static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1); int PackNumber = (Encoding & PackNumMask) >> 4; if (Encoding & PackAttrMask) return AlignPackInfo(M, PackNumber, IsXL); return AlignPackInfo(M, IsXL); } bool IsPackAttr() const { return PackAttr; } bool IsAlignAttr() const { return !PackAttr; } Mode getAlignMode() const { return AlignMode; } unsigned getPackNumber() const { return PackNumber; } bool IsPackSet() const { // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack // attribute on a decl. return PackNumber != UninitPackVal && PackNumber != 0; } bool IsXLStack() const { return XLStack; } bool operator==(const AlignPackInfo &Info) const { return std::tie(AlignMode, PackNumber, PackAttr, XLStack) == std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr, Info.XLStack); } bool operator!=(const AlignPackInfo &Info) const { return !(*this == Info); } private: /// \brief True if this is a pragma pack attribute, /// not a pragma align attribute. bool PackAttr; /// \brief The alignment mode that is in effect. Mode AlignMode; /// \brief The pack number of the stack. unsigned char PackNumber; /// \brief True if it is an XL #pragma align/pack stack. 
    bool XLStack;

    /// \brief Uninitialized pack value.
    static constexpr unsigned char UninitPackVal = -1;

    // Masks to encode and decode an AlignPackInfo. The layout, starting from
    // the least significant bit, is: XL-stack flag, align mode (2 bits),
    // pack-attribute flag, then the pack number.
    static constexpr uint32_t IsXLMask{0x0000'0001};
    static constexpr uint32_t AlignModeMask{0x0000'0006};
    static constexpr uint32_t PackAttrMask{0x0000'0008};
    static constexpr uint32_t PackNumMask{0x0000'01F0};
  };

  template<typename ValueType>
  struct PragmaStack {
    struct Slot {
      llvm::StringRef StackSlotLabel;
      ValueType Value;
      SourceLocation PragmaLocation;
      SourceLocation PragmaPushLocation;
      Slot(llvm::StringRef StackSlotLabel, ValueType Value,
           SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
          : StackSlotLabel(StackSlotLabel), Value(Value),
            PragmaLocation(PragmaLocation),
            PragmaPushLocation(PragmaPushLocation) {}
    };

    void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
             llvm::StringRef StackSlotLabel, ValueType Value) {
      if (Action == PSK_Reset) {
        CurrentValue = DefaultValue;
        CurrentPragmaLocation = PragmaLocation;
        return;
      }
      if (Action & PSK_Push)
        Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
                           PragmaLocation);
      else if (Action & PSK_Pop) {
        if (!StackSlotLabel.empty()) {
          // If we've got a label, try to find it and jump there.
          auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
            return x.StackSlotLabel == StackSlotLabel;
          });
          // If we found the label, pop from there.
          if (I != Stack.rend()) {
            CurrentValue = I->Value;
            CurrentPragmaLocation = I->PragmaLocation;
            Stack.erase(std::prev(I.base()), Stack.end());
          }
        } else if (!Stack.empty()) {
          // We do not have a label, just pop the last entry.
          CurrentValue = Stack.back().Value;
          CurrentPragmaLocation = Stack.back().PragmaLocation;
          Stack.pop_back();
        }
      }
      if (Action & PSK_Set) {
        CurrentValue = Value;
        CurrentPragmaLocation = PragmaLocation;
      }
    }

    // MSVC seems to add artificial slots to #pragma stacks on entering a C++
    // method body to restore the stacks on exit, so it works like this:
    //
    //   struct S {
    //     #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
    //     void Method() {}
    //     #pragma <name>(pop, InternalPragmaSlot)
    //   };
    //
    // It works even with #pragma vtordisp, although MSVC doesn't support
    //   #pragma vtordisp(push [, id], n)
    // syntax.
    //
    // Push / pop a named sentinel slot.
    void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
      assert((Action == PSK_Push || Action == PSK_Pop) &&
             "Can only push / pop #pragma stack sentinels!");
      Act(CurrentPragmaLocation, Action, Label, CurrentValue);
    }

    // Constructors.
    explicit PragmaStack(const ValueType &Default)
        : DefaultValue(Default), CurrentValue(Default) {}

    bool hasValue() const { return CurrentValue != DefaultValue; }

    SmallVector<Slot, 2> Stack;
    ValueType DefaultValue; // Value used for PSK_Reset action.
    ValueType CurrentValue;
    SourceLocation CurrentPragmaLocation;
  };
  // FIXME: We should serialize / deserialize these if they occur in a PCH (but
  // we shouldn't do so if they're in a module).

  /// Whether to insert vtordisps prior to virtual bases in the Microsoft
  /// C++ ABI. Possible values are 0, 1, and 2, which mean:
  ///
  /// 0: Suppress all vtordisps
  /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
  ///    structors
  /// 2: Always insert vtordisps to support RTTI on partially constructed
  ///    objects
  PragmaStack<MSVtorDispMode> VtorDispStack;
  PragmaStack<AlignPackInfo> AlignPackStack;
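  // An illustrative (non-normative) mapping from source pragmas to the
  // PragmaMsStackAction passed to AlignPackStack.Act above:
  //
  //   #pragma pack(push, lbl, 2)  ->  Act(Loc, PSK_Push_Set, "lbl", <pack 2>)
  //   #pragma pack(pop, lbl)      ->  Act(Loc, PSK_Pop, "lbl", <ignored>)
  //   #pragma pack()              ->  Act(Loc, PSK_Reset, "", <ignored>)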
  // The current #pragma align/pack values and locations at each #include.
  struct AlignPackIncludeState {
    AlignPackInfo CurrentValue;
    SourceLocation CurrentPragmaLocation;
    bool HasNonDefaultValue, ShouldWarnOnInclude;
  };
  SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack;

  // Segment #pragmas.
  PragmaStack<StringLiteral *> DataSegStack;
  PragmaStack<StringLiteral *> BSSSegStack;
  PragmaStack<StringLiteral *> ConstSegStack;
  PragmaStack<StringLiteral *> CodeSegStack;

  // This stack tracks the current state of Sema.CurFPFeatures.
  PragmaStack<FPOptionsOverride> FpPragmaStack;
  FPOptionsOverride CurFPFeatureOverrides() {
    FPOptionsOverride result;
    if (!FpPragmaStack.hasValue()) {
      result = FPOptionsOverride();
    } else {
      result = FpPragmaStack.CurrentValue;
    }
    return result;
  }

  // RAII object to push / pop sentinel slots for all MS #pragma stacks.
  // Actions should be performed only if we enter / exit a C++ method body.
  class PragmaStackSentinelRAII {
  public:
    PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
    ~PragmaStackSentinelRAII();

  private:
    Sema &S;
    StringRef SlotLabel;
    bool ShouldAct;
  };

  /// A mapping that describes the nullability we've seen in each header file.
  FileNullabilityMap NullabilityMap;

  /// Last section used with #pragma init_seg.
  StringLiteral *CurInitSeg;
  SourceLocation CurInitSegLoc;

  /// VisContext - Manages the stack for \#pragma GCC visibility.
  void *VisContext; // Really a "PragmaVisStack*"

  /// This is an attribute introduced by \#pragma clang attribute.
  struct PragmaAttributeEntry {
    SourceLocation Loc;
    ParsedAttr *Attribute;
    SmallVector<attr::SubjectMatchRule, 4> MatchRules;
    bool IsUsed;
  };

  /// A pushed group of PragmaAttributeEntries.
  struct PragmaAttributeGroup {
    /// The location of the push attribute.
    SourceLocation Loc;
    /// The namespace of this push group.
    const IdentifierInfo *Namespace;
    SmallVector<PragmaAttributeEntry, 2> Entries;
  };

  SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;

  /// The declaration that is currently receiving an attribute from the
  /// #pragma attribute stack.
  const Decl *PragmaAttributeCurrentTargetDecl;

  /// This represents the last location of a "#pragma clang optimize off"
  /// directive if such a directive has not been closed by an "on" yet. If
  /// optimizations are currently "on", this is set to an invalid location.
  SourceLocation OptimizeOffPragmaLocation;

  /// Flag indicating if Sema is building a recovery call expression.
  ///
  /// This flag is used to avoid building recovery call expressions
  /// if Sema is already doing so, which would cause infinite recursions.
  bool IsBuildingRecoveryCallExpr;

  /// Used to control the generation of ExprWithCleanups.
  CleanupInfo Cleanup;

  /// ExprCleanupObjects - This is the stack of objects requiring
  /// cleanup that are created by the current full expression.
  SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;

  /// Store a set of either DeclRefExprs or MemberExprs that contain a
  /// reference to a variable (constant) that may or may not be odr-used in
  /// this Expr, and we won't know until all lvalue-to-rvalue and discarded
  /// value conversions have been applied to all subexpressions of the
  /// enclosing full expression. This is cleared at the end of each full
  /// expression.
  using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
                                             llvm::SmallPtrSet<Expr *, 4>>;
  MaybeODRUseExprSet MaybeODRUseExprs;
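  // For intuition, a sketch (not actual Clang usage) of the kind of case
  // MaybeODRUseExprs exists for:
  //
  //   const int N = 4;
  //   int f() { return N; }  // The reference to N undergoes an
  //                          // lvalue-to-rvalue conversion, so it turns out
  //                          // not to be an odr-use; we only know this once
  //                          // the full expression has been processed.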
  std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;

  /// Stack containing information about each of the nested
  /// function, block, and method scopes that are currently active.
  SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;

  /// The index of the first FunctionScope that corresponds to the current
  /// context.
  unsigned FunctionScopesStart = 0;

  ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
    return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
                              FunctionScopes.end());
  }

  /// Stack containing information needed when, in C++2a, an 'auto' is
  /// encountered in a function declaration parameter type specifier in order
  /// to invent a corresponding template parameter in the enclosing abbreviated
  /// function template. This information is also present in LambdaScopeInfo,
  /// stored in the FunctionScopes stack.
  SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;

  /// The index of the first InventedParameterInfo that refers to the current
  /// context.
  unsigned InventedParameterInfosStart = 0;

  ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
    return llvm::makeArrayRef(InventedParameterInfos.begin() +
                                  InventedParameterInfosStart,
                              InventedParameterInfos.end());
  }

  typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadExtVectorDecls, 2, 2>
    ExtVectorDeclsType;

  /// ExtVectorDecls - This is a list of all the extended vector types. This
  /// allows us to associate a raw vector type with one of the ext_vector type
  /// names. This is only necessary for issuing pretty diagnostics.
  ExtVectorDeclsType ExtVectorDecls;

  /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
  std::unique_ptr<CXXFieldCollector> FieldCollector;

  typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;

  /// Set containing all declared private fields that are not used.
  NamedDeclSetType UnusedPrivateFields;

  /// Set containing all typedefs that are likely unused.
  llvm::SmallSetVector<const TypedefNameDecl *, 4>
      UnusedLocalTypedefNameCandidates;

  /// Delete-expressions to be analyzed at the end of translation unit.
  ///
  /// This list contains class members and the locations of delete-expressions
  /// for which we could not yet prove whether they mismatch the new-expression
  /// used in the initializer of the field.
  typedef std::pair<SourceLocation, bool> DeleteExprLoc;
  typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
  llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;

  typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;

  /// PureVirtualClassDiagSet - a set of class declarations which we have
  /// emitted a list of pure virtual functions. Used to prevent emitting the
  /// same list more than once.
  std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;

  /// ParsingInitForAutoVars - a set of declarations with auto types for which
  /// we are currently parsing the initializer.
  llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;

  /// Look for a locally scoped extern "C" declaration by the given name.
  NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);

  typedef LazyVector<VarDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
    TentativeDefinitionsType;

  /// All the tentative definitions encountered in the TU.
  TentativeDefinitionsType TentativeDefinitions;

  /// All the external declarations encountered and used in the TU.
  SmallVector<VarDecl *, 4> ExternalDeclarations;

  typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
    UnusedFileScopedDeclsType;

  /// The set of file scoped decls seen so far that have not been used
  /// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } /// \brief Callback to the parser to parse a type expressed as a string. std::function<TypeResult(StringRef, StringRef, SourceLocation)> ParseTypeFromStringCallback; class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). 
    void popUndelayed(DelayedDiagnosticsState state) {
      assert(CurPool == nullptr);
      CurPool = state.SavedPool;
    }
  } DelayedDiagnostics;

  /// A RAII object to temporarily push a declaration context.
  class ContextRAII {
  private:
    Sema &S;
    DeclContext *SavedContext;
    ProcessingContextState SavedContextState;
    QualType SavedCXXThisTypeOverride;
    unsigned SavedFunctionScopesStart;
    unsigned SavedInventedParameterInfosStart;

  public:
    ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
      : S(S), SavedContext(S.CurContext),
        SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
        SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
        SavedFunctionScopesStart(S.FunctionScopesStart),
        SavedInventedParameterInfosStart(S.InventedParameterInfosStart) {
      assert(ContextToPush && "pushing null context");
      S.CurContext = ContextToPush;
      if (NewThisContext)
        S.CXXThisTypeOverride = QualType();
      // Any saved FunctionScopes do not refer to this context.
      S.FunctionScopesStart = S.FunctionScopes.size();
      S.InventedParameterInfosStart = S.InventedParameterInfos.size();
    }

    void pop() {
      if (!SavedContext) return;
      S.CurContext = SavedContext;
      S.DelayedDiagnostics.popUndelayed(SavedContextState);
      S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
      S.FunctionScopesStart = SavedFunctionScopesStart;
      S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
      SavedContext = nullptr;
    }

    ~ContextRAII() {
      pop();
    }
  };

  /// Whether the AST is currently being rebuilt to correct immediate
  /// invocations. Immediate invocation candidates and references to consteval
  /// functions aren't tracked when this is set.
  bool RebuildingImmediateInvocation = false;

  /// Used to change context to isConstantEvaluated without pushing a heavy
  /// ExpressionEvaluationContextRecord object.
  bool isConstantEvaluatedOverride;

  bool isConstantEvaluated() {
    return ExprEvalContexts.back().isConstantEvaluated() ||
           isConstantEvaluatedOverride;
  }

  /// RAII object to handle the state changes required to synthesize
  /// a function body.
  class SynthesizedFunctionScope {
    Sema &S;
    Sema::ContextRAII SavedContext;
    bool PushedCodeSynthesisContext = false;

  public:
    SynthesizedFunctionScope(Sema &S, DeclContext *DC)
        : S(S), SavedContext(S, DC) {
      S.PushFunctionScope();
      S.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
      if (auto *FD = dyn_cast<FunctionDecl>(DC))
        FD->setWillHaveBody(true);
      else
        assert(isa<ObjCMethodDecl>(DC));
    }

    void addContextNote(SourceLocation UseLoc) {
      assert(!PushedCodeSynthesisContext);

      Sema::CodeSynthesisContext Ctx;
      Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
      Ctx.PointOfInstantiation = UseLoc;
      Ctx.Entity = cast<Decl>(S.CurContext);
      S.pushCodeSynthesisContext(Ctx);

      PushedCodeSynthesisContext = true;
    }

    ~SynthesizedFunctionScope() {
      if (PushedCodeSynthesisContext)
        S.popCodeSynthesisContext();
      if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
        FD->setWillHaveBody(false);
      S.PopExpressionEvaluationContext();
      S.PopFunctionScopeInfo();
    }
  };

  /// WeakUndeclaredIdentifiers - Identifiers contained in \#pragma weak
  /// before being declared. This is rare; such an identifier may alias
  /// another identifier, declared or undeclared.
  llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
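  // An illustrative source pattern (a sketch, not taken from a real header)
  // that populates WeakUndeclaredIdentifiers:
  //
  //   #pragma weak pending_func   // recorded here: not yet declared
  //   void pending_func(void);    // the pending entry applies once declared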
  /// ExtnameUndeclaredIdentifiers - Identifiers contained in
  /// \#pragma redefine_extname before being declared. Used in Solaris system
  /// headers to define functions that occur in multiple standards to call the
  /// version in the currently selected standard.
  llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

  /// Load weak undeclared identifiers from the external source.
  void LoadExternalWeakUndeclaredIdentifiers();

  /// WeakTopLevelDecl - Translation-unit scoped declarations generated by
  /// \#pragma weak during processing of other Decls.
  /// I couldn't figure out a clean way to generate these in-line, so
  /// we store them here and handle separately -- which is a hack.
  /// It would be best to refactor this.
  SmallVector<Decl*,2> WeakTopLevelDecl;

  IdentifierResolver IdResolver;

  /// Translation Unit Scope - useful to Objective-C actions that need
  /// to lookup file scope declarations in the "ordinary" C decl namespace.
  /// For example, user-defined classes, built-in "id" type, etc.
  Scope *TUScope;

  /// The C++ "std" namespace, where the standard library resides.
  LazyDeclPtr StdNamespace;

  /// The C++ "std::bad_alloc" class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdBadAlloc;

  /// The C++ "std::align_val_t" enum class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdAlignValT;

  /// The C++ "std::experimental" namespace, where the experimental parts
  /// of the standard library reside.
  NamespaceDecl *StdExperimentalNamespaceCache;

  /// The C++ "std::initializer_list" template, which is defined in
  /// \<initializer_list>.
  ClassTemplateDecl *StdInitializerList;

  /// The C++ "std::coroutine_traits" template, which is defined in
  /// \<coroutine_traits>
  ClassTemplateDecl *StdCoroutineTraitsCache;

  /// The namespace where coroutine components are defined. The standard
  /// defines them in the std namespace; the previous implementation defined
  /// them in the std::experimental namespace.
  NamespaceDecl *CoroTraitsNamespaceCache;

  /// The C++ "type_info" declaration, which is defined in \<typeinfo>.
  RecordDecl *CXXTypeInfoDecl;

  /// The MSVC "_GUID" struct, which is defined in MSVC header files.
  RecordDecl *MSVCGuidDecl;

  /// Caches identifiers/selectors for NSFoundation APIs.
  std::unique_ptr<NSAPI> NSAPIObj;

  /// The declaration of the Objective-C NSNumber class.
  ObjCInterfaceDecl *NSNumberDecl;

  /// The declaration of the Objective-C NSValue class.
  ObjCInterfaceDecl *NSValueDecl;

  /// Pointer to NSNumber type (NSNumber *).
  QualType NSNumberPointer;

  /// Pointer to NSValue type (NSValue *).
  QualType NSValuePointer;

  /// The Objective-C NSNumber methods used to create NSNumber literals.
  ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

  /// The declaration of the Objective-C NSString class.
  ObjCInterfaceDecl *NSStringDecl;

  /// Pointer to NSString type (NSString *).
  QualType NSStringPointer;

  /// The declaration of the stringWithUTF8String: method.
  ObjCMethodDecl *StringWithUTF8StringMethod;

  /// The declaration of the valueWithBytes:objCType: method.
  ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

  /// The declaration of the Objective-C NSArray class.
  ObjCInterfaceDecl *NSArrayDecl;

  /// The declaration of the arrayWithObjects:count: method.
  ObjCMethodDecl *ArrayWithObjectsMethod;

  /// The declaration of the Objective-C NSDictionary class.
  ObjCInterfaceDecl *NSDictionaryDecl;

  /// The declaration of the dictionaryWithObjects:forKeys:count: method.
  ObjCMethodDecl *DictionaryWithObjectsMethod;

  /// id<NSCopying> type.
  QualType QIDNSCopying;

  /// Will hold the 'respondsToSelector:' selector.
  Selector RespondsToSelectorSel;
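  // For orientation (illustrative only), the Objective-C caches above back
  // literal checking roughly as follows:
  //
  //   @"abc"      -> NSStringDecl / StringWithUTF8StringMethod
  //   @42         -> NSNumberDecl / NSNumberLiteralMethods
  //   @{ k : v }  -> NSDictionaryDecl / DictionaryWithObjectsMethod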
  /// A flag to remember whether the implicit forms of operator new and delete
  /// have been declared.
  bool GlobalNewDeleteDeclared;

  /// Describes how the expressions currently being parsed are
  /// evaluated at run-time, if at all.
  enum class ExpressionEvaluationContext {
    /// The current expression and its subexpressions occur within an
    /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
    /// \c sizeof, where the type of the expression may be significant but
    /// no code will be generated to evaluate the value of the expression at
    /// run time.
    Unevaluated,

    /// The current expression occurs within a braced-init-list within
    /// an unevaluated operand. This is mostly like a regular unevaluated
    /// context, except that we still instantiate constexpr functions that are
    /// referenced here so that we can perform narrowing checks correctly.
    UnevaluatedList,

    /// The current expression occurs within a discarded statement.
    /// This behaves largely similarly to an unevaluated operand in preventing
    /// definitions from being required, but not in other ways.
    DiscardedStatement,

    /// The current expression occurs within an unevaluated
    /// operand that unconditionally permits abstract references to
    /// fields, such as a SIZE operator in MS-style inline assembly.
    UnevaluatedAbstract,

    /// The current context is "potentially evaluated" in C++11 terms,
    /// but the expression is evaluated at compile-time (like the values of
    /// cases in a switch statement).
    ConstantEvaluated,

    /// In addition to being constant evaluated, the current expression
    /// occurs in an immediate function context - either a consteval function
    /// or a consteval if statement.
    ImmediateFunctionContext,

    /// The current expression is potentially evaluated at run time,
    /// which means that code may be generated to evaluate the value of the
    /// expression at run time.
    PotentiallyEvaluated,

    /// The current expression is potentially evaluated, but any
    /// declarations referenced inside that expression are only used if
    /// in fact the current expression is used.
    ///
    /// This value is used when parsing default function arguments, for which
    /// we would like to provide diagnostics (e.g., passing non-POD arguments
    /// through varargs) but do not want to mark declarations as "referenced"
    /// until the default argument is used.
    PotentiallyEvaluatedIfUsed
  };

  using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;

  /// Data structure used to record current or nested
  /// expression evaluation contexts.
  struct ExpressionEvaluationContextRecord {
    /// The expression evaluation context.
    ExpressionEvaluationContext Context;

    /// Whether the enclosing context needed a cleanup.
    CleanupInfo ParentCleanup;

    /// The number of active cleanup objects when we entered
    /// this expression evaluation context.
    unsigned NumCleanupObjects;

    /// The number of typos encountered during this expression evaluation
    /// context (i.e. the number of TypoExprs created).
    unsigned NumTypos;

    MaybeODRUseExprSet SavedMaybeODRUseExprs;

    /// The lambdas that are present within this context, if it
    /// is indeed an unevaluated context.
    SmallVector<LambdaExpr *, 2> Lambdas;

    /// The declaration that provides context for lambda expressions
    /// and block literals if the normal declaration context does not
    /// suffice, e.g., in a default function argument.
    Decl *ManglingContextDecl;

    /// If we are processing a decltype type, a set of call expressions
    /// for which we have deferred checking the completeness of the return
    /// type.
    SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

    /// If we are processing a decltype type, a set of temporary binding
    /// expressions for which we have deferred checking the destructor.
    SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

    llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

    /// Expressions appearing as the LHS of a volatile assignment in this
    /// context. We produce a warning for these when popping the context if
    /// they are neither discarded-value expressions nor unevaluated operands.
    SmallVector<Expr*, 2> VolatileAssignmentLHSs;

    /// Set of candidates for starting an immediate invocation.
    llvm::SmallVector<ImmediateInvocationCandidate, 4>
        ImmediateInvocationCandidates;

    /// Set of DeclRefExprs referencing a consteval function when used in a
    /// context not already known to be immediately invoked.
    llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;

    /// \brief Describes whether we are in an expression context which we have
    /// to handle differently.
    enum ExpressionKind {
      EK_Decltype, EK_TemplateArgument, EK_Other
    } ExprContext;

    // A context can be nested in both a discarded statement context and
    // an immediate function context, so they need to be tracked independently.
    bool InDiscardedStatement;
    bool InImmediateFunctionContext;

    ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                      unsigned NumCleanupObjects,
                                      CleanupInfo ParentCleanup,
                                      Decl *ManglingContextDecl,
                                      ExpressionKind ExprContext)
        : Context(Context), ParentCleanup(ParentCleanup),
          NumCleanupObjects(NumCleanupObjects), NumTypos(0),
          ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext),
          InDiscardedStatement(false), InImmediateFunctionContext(false) {}

    bool isUnevaluated() const {
      return Context == ExpressionEvaluationContext::Unevaluated ||
             Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
             Context == ExpressionEvaluationContext::UnevaluatedList;
    }

    bool isConstantEvaluated() const {
      return Context == ExpressionEvaluationContext::ConstantEvaluated ||
             Context == ExpressionEvaluationContext::ImmediateFunctionContext;
    }

    bool isImmediateFunctionContext() const {
      return Context == ExpressionEvaluationContext::ImmediateFunctionContext ||
             (Context == ExpressionEvaluationContext::DiscardedStatement &&
              InImmediateFunctionContext);
    }

    bool isDiscardedStatementContext() const {
      return Context == ExpressionEvaluationContext::DiscardedStatement ||
             (Context ==
                  ExpressionEvaluationContext::ImmediateFunctionContext &&
              InDiscardedStatement);
    }
  };

  /// A stack of expression evaluation contexts.
  SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
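  // A rough, illustrative mapping (not exhaustive) from source positions to
  // the context pushed onto ExprEvalContexts for them:
  //
  //   sizeof(e)                   -> Unevaluated
  //   case e:                     -> ConstantEvaluated
  //   an ordinary statement       -> PotentiallyEvaluated
  //   a default function argument -> PotentiallyEvaluatedIfUsed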
  /// Emit a warning for all pending noderef expressions that we recorded.
  void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

  /// Compute the mangling number context for a lambda expression or
  /// block literal. Also return the extra mangling decl if any.
  ///
  /// \param DC - The DeclContext containing the lambda expression or
  /// block literal.
  std::tuple<MangleNumberingContext *, Decl *>
  getCurrentMangleNumberContext(const DeclContext *DC);

  /// SpecialMemberOverloadResult - The overloading result for a special member
  /// function.
  ///
  /// This is basically a wrapper around PointerIntPair. The lowest bits of the
  /// integer are used to determine whether overload resolution succeeded.
  class SpecialMemberOverloadResult {
  public:
    enum Kind {
      NoMemberOrDeleted,
      Ambiguous,
      Success
    };

  private:
    llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

  public:
    SpecialMemberOverloadResult() : Pair() {}
    SpecialMemberOverloadResult(CXXMethodDecl *MD)
        : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

    CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
    void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

    Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
    void setKind(Kind K) { Pair.setInt(K); }
  };

  class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode,
                                           public SpecialMemberOverloadResult {
  public:
    SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID) {}
  };

  /// A cache of special member function overload resolution results
  /// for C++ records.
  llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

  /// A cache of the flags available in enumerations with the flag_bits
  /// attribute.
  mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

  /// The kind of translation unit we are processing.
  ///
  /// When we're processing a complete translation unit, Sema will perform
  /// end-of-translation-unit semantic tasks (such as creating
  /// initializers for tentative definitions in C) once parsing has
  /// completed. Modules and precompiled headers perform different kinds of
  /// checks.
  const TranslationUnitKind TUKind;

  llvm::BumpPtrAllocator BumpAlloc;

  /// The number of SFINAE diagnostics that have been trapped.
  unsigned NumSFINAEErrors;

  typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
      UnparsedDefaultArgInstantiationsMap;

  /// A mapping from parameters with unparsed default arguments to the
  /// set of instantiations of each parameter.
  ///
  /// This mapping is a temporary data structure used when parsing
  /// nested class templates or nested classes of class templates,
  /// where we might end up instantiating an inner class before the
  /// default arguments of its methods have been parsed.
  UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

  // Contains the locations of the beginning of each unparsed default
  // argument.
  llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

  /// UndefinedInternals - all the used, undefined objects which require a
  /// definition in this translation unit.
  llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;

  /// Determine if VD, which must be a variable or function, is an external
  /// symbol that nonetheless can't be referenced from outside this translation
  /// unit because its type has no linkage and it's not extern "C".
  bool isExternalWithNoLinkageType(ValueDecl *VD);

  /// Obtain a sorted list of functions that are undefined but ODR-used.
  void getUndefinedButUsed(
      SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

  /// Retrieves list of suspicious delete-expressions that will be checked at
  /// the end of translation unit.
  const llvm::MapVector<FieldDecl *, DeleteLocs> &
  getMismatchingDeleteExpressions() const;

  class GlobalMethodPool {
  public:
    using Lists = std::pair<ObjCMethodList, ObjCMethodList>;
    using iterator = llvm::DenseMap<Selector, Lists>::iterator;
    iterator begin() { return Methods.begin(); }
    iterator end() { return Methods.end(); }
    iterator find(Selector Sel) { return Methods.find(Sel); }
    std::pair<iterator, bool> insert(std::pair<Selector, Lists> &&Val) {
      return Methods.insert(Val);
    }
    int count(Selector Sel) const { return Methods.count(Sel); }
    bool empty() const { return Methods.empty(); }

  private:
    llvm::DenseMap<Selector, Lists> Methods;
  };

  /// Method Pool - allows efficient lookup when typechecking messages to "id".
  /// We need to maintain a list, since selectors can have differing signatures
  /// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
  /// of selectors are "overloaded").
  /// At the head of the list it is recorded whether there were 0, 1, or >= 2
  /// methods inside categories with a particular selector.
  GlobalMethodPool MethodPool;

  /// Method selectors used in a \@selector expression. Used for implementation
  /// of -Wselector.
  llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

  /// List of SourceLocations where 'self' is implicitly retained inside a
  /// block.
  llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
      ImplicitlyRetainedSelfLocs;

  /// Kinds of C++ special members.
  enum CXXSpecialMember {
    CXXDefaultConstructor,
    CXXCopyConstructor,
    CXXMoveConstructor,
    CXXCopyAssignment,
    CXXMoveAssignment,
    CXXDestructor,
    CXXInvalid
  };

  typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
      SpecialMemberDecl;

  /// The C++ special members which we are currently in the process of
  /// declaring. If this process recursively triggers the declaration of the
  /// same special member, we should act as if it is not yet declared.
  llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

  /// Kinds of defaulted comparison operator functions.
  enum class DefaultedComparisonKind : unsigned char {
    /// This is not a defaultable comparison operator.
    None,
    /// This is an operator== that should be implemented as a series of
    /// subobject comparisons.
    Equal,
    /// This is an operator<=> that should be implemented as a series of
    /// subobject comparisons.
    ThreeWay,
    /// This is an operator!= that should be implemented as a rewrite in terms
    /// of a == comparison.
    NotEqual,
    /// This is an <, <=, >, or >= that should be implemented as a rewrite in
    /// terms of a <=> comparison.
    Relational,
  };

  /// The function definitions which were renamed as part of typo-correction
  /// to match their respective declarations. We want to keep track of them
  /// to ensure that we don't emit a "redefinition" error if we encounter a
  /// correctly named definition after the renamed definition.
  llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

  /// Stack of types that correspond to the parameter entities that are
  /// currently being copy-initialized. Can be empty.
  llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

  void ReadMethodPool(Selector Sel);
  void updateOutOfDateSelector(Selector Sel);

  /// Private helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the CurFPFeatures state on entry/exit of compound
  /// statements.
  class FPFeaturesStateRAII {
  public:
    FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
      OldOverrides = S.FpPragmaStack.CurrentValue;
    }
    ~FPFeaturesStateRAII() {
      S.CurFPFeatures = OldFPFeaturesState;
      S.FpPragmaStack.CurrentValue = OldOverrides;
    }
    FPOptionsOverride getOverrides() { return OldOverrides; }

  private:
    Sema& S;
    FPOptions OldFPFeaturesState;
    FPOptionsOverride OldOverrides;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

  bool WarnedStackExhausted = false;

  /// Increment when we find a reference; decrement when we find an ignored
  /// assignment. Ultimately the value is 0 if every reference is an ignored
  /// assignment.
  llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments;

  Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo;

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  /// This virtual key function only exists to limit the emission of debug info
  /// describing the Sema class. GCC and Clang only emit debug info for a class
  /// with a vtable when the vtable is emitted. Sema is final and not
  /// polymorphic, but the debug info size savings are so significant that it
  /// is worth adding a vtable just to take advantage of this optimization.
  virtual void anchor();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getCurFPFeatures() { return CurFPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource* getExternalSource() const { return ExternalSource; }

  DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
                                                         StringRef Platform);

  /// Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  /// \param[in] E - A non-null external sema source.
  ///
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// Warn that the stack is nearly exhausted.
  void warnStackExhausted(SourceLocation Loc);

  /// Run some code with "sufficient" stack space. (Currently, at least 256K
  /// is guaranteed). Produces a warning if we're low on stack space and
  /// allocates more in that case. Use this in code that may recurse deeply
  /// (for example, in template instantiation) to avoid stack overflow.
  void runWithSufficientStackSpace(SourceLocation Loc,
                                   llvm::function_ref<void()> Fn);
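  // A hypothetical call site (the callee name is invented) for the helper
  // declared above:
  //
  //   runWithSufficientStackSpace(Loc, [&] {
  //     instantiateDeeplyNestedTemplate();  // may recurse deeply
  //   });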
  /// Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. ImmediateDiagBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class ImmediateDiagBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
    ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}

    // This is a cunning lie. DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op
    // in that case anyway.
    ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default;

    ~ImmediateDiagBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive()) return;

      // Otherwise, we need to emit the diagnostic. First clear the diagnostic
      // builder itself so it won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor
      // will do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template <typename T>
    friend const ImmediateDiagBuilder &
    operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }

    // It is necessary to limit this to rvalue reference to avoid calling this
    // function with a bitfield lvalue argument since non-const reference to
    // bitfield is not allowed.
    template <typename T, typename = typename std::enable_if<
                              !std::is_lvalue_reference<T>::value>::type>
    const ImmediateDiagBuilder &operator<<(T &&V) const {
      const DiagnosticBuilder &BaseDiag = *this;
      BaseDiag << std::move(V);
      return *this;
    }
  };

  /// A generic diagnostic builder for errors which may or may not be deferred.
  ///
  /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
  /// which are not allowed to appear inside __device__ functions and are
  /// allowed to appear in __host__ __device__ functions only if the
  /// host+device function is never codegen'ed.
  ///
  /// To handle this, we use the notion of "deferred diagnostics", where we
  /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
  ///
  /// This class lets you emit either a regular diagnostic, a deferred
  /// diagnostic, or no diagnostic at all, according to an argument you pass to
  /// its constructor, thus simplifying the process of creating these "maybe
  /// deferred" diagnostics.
  class SemaDiagnosticBuilder {
  public:
    enum Kind {
      /// Emit no diagnostics.
      K_Nop,
      /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
      K_Immediate,
      /// Emit the diagnostic immediately, and, if it's a warning or error,
      /// also emit a call stack showing how this function can be reached by
      /// an a priori known-emitted function.
      K_ImmediateWithCallStack,
      /// Create a deferred diagnostic, which is emitted only if the function
      /// it's attached to is codegen'ed. Also emit a call stack as with
      /// K_ImmediateWithCallStack.
      K_Deferred
    };

    SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                          FunctionDecl *Fn, Sema &S);
    SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
    SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
    ~SemaDiagnosticBuilder();

    bool isImmediate() const { return ImmediateDiag.hasValue(); }

    /// Convertible to bool: True if we immediately emitted an error, false if
    /// we didn't emit an error or we created a deferred error.
    ///
    /// Example usage:
    ///
    ///   if (SemaDiagnosticBuilder(...) << foo << bar)
    ///     return ExprError();
    ///
    /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
    /// want to use these instead of creating a SemaDiagnosticBuilder yourself.
    operator bool() const { return isImmediate(); }

    template <typename T>
    friend const SemaDiagnosticBuilder &
    operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
      if (Diag.ImmediateDiag.hasValue())
        *Diag.ImmediateDiag << Value;
      else if (Diag.PartialDiagId.hasValue())
        Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
            << Value;
      return Diag;
    }

    // It is necessary to limit this to rvalue reference to avoid calling this
    // function with a bitfield lvalue argument since non-const reference to
    // bitfield is not allowed.
    template <typename T, typename = typename std::enable_if<
                              !std::is_lvalue_reference<T>::value>::type>
    const SemaDiagnosticBuilder &operator<<(T &&V) const {
      if (ImmediateDiag.hasValue())
        *ImmediateDiag << std::move(V);
      else if (PartialDiagId.hasValue())
        S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V);
      return *this;
    }

    friend const SemaDiagnosticBuilder &
    operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
      if (Diag.ImmediateDiag.hasValue())
        PD.Emit(*Diag.ImmediateDiag);
      else if (Diag.PartialDiagId.hasValue())
        Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD;
      return Diag;
    }

    void AddFixItHint(const FixItHint &Hint) const {
      if (ImmediateDiag.hasValue())
        ImmediateDiag->AddFixItHint(Hint);
      else if (PartialDiagId.hasValue())
        S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint);
    }

    friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
      return ExprError();
    }
    friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
      return StmtError();
    }
    operator ExprResult() const { return ExprError(); }
    operator StmtResult() const { return StmtError(); }
    operator TypeResult() const { return TypeError(); }
    operator DeclResult() const { return DeclResult(true); }
    operator MemInitResult() const { return MemInitResult(true); }

  private:
    Sema &S;
    SourceLocation Loc;
    unsigned DiagID;
    FunctionDecl *Fn;
    bool ShowCallStack;

    // Invariant: At most one of these Optionals has a value.
    // FIXME: Switch these to a Variant once that exists.
    llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
    llvm::Optional<unsigned> PartialDiagId;
  };

  /// Is the last error level diagnostic immediate. This is used to determine
  /// whether the next info diagnostic should be immediate.
  bool IsLastErrorImmediate = true;

  /// Emit a diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
                             bool DeferHint = false);

  /// Emit a partial diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
                             bool DeferHint = false);

  /// Build a partial diagnostic.
  PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

  /// Whether deferrable diagnostics should be deferred.
  bool DeferDiags = false;
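  // Illustrative only ('diag::err_example' is a placeholder, not a real
  // diagnostic ID): passing DeferHint marks the diagnostic as deferrable.
  //
  //   Diag(Loc, diag::err_example, /*DeferHint=*/true) << SomeDecl;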
  /// RAII class to control scope of DeferDiags.
  class DeferDiagsRAII {
    Sema &S;
    bool SavedDeferDiags = false;

  public:
    DeferDiagsRAII(Sema &S, bool DeferDiags)
        : S(S), SavedDeferDiags(S.DeferDiags) {
      S.DeferDiags = DeferDiags;
    }
    ~DeferDiagsRAII() { S.DeferDiags = SavedDeferDiags; }
  };

  /// Whether an uncompilable error has occurred. This includes errors that
  /// happen in deferred diagnostics.
  bool hasUncompilableErrorOccurred() const;

  bool findMacroSpelling(SourceLocation &loc, StringRef name);

  /// Get a string to suggest for zero-initialization of a type.
  std::string getFixItZeroInitializerForType(QualType T,
                                             SourceLocation Loc) const;
  std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

  /// Calls \c Lexer::getLocForEndOfToken()
  SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

  /// Retrieve the module loader associated with the preprocessor.
  ModuleLoader &getModuleLoader() const;

  /// Invent a new identifier for parameters of abbreviated templates.
  IdentifierInfo *
  InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
                                             unsigned Index);

  void emitAndClearUnusedLocalTypedefWarnings();

private:
  /// Function or variable declarations to be checked for whether the deferred
  /// diagnostics should be emitted.
  llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags;

public:
  // Emit all deferred diagnostics.
  void emitDeferredDiags();

  enum TUFragmentKind {
    /// The global module fragment, between 'module;' and a module-declaration.
    Global,
    /// A normal translation unit fragment. For a non-module unit, this is the
    /// entire translation unit. Otherwise, it runs from the module-declaration
    /// to the private-module-fragment (if any) or the end of the TU (if not).
    Normal,
    /// The private module fragment, between 'module :private;' and the end of
    /// the translation unit.
    Private
  };

  void ActOnStartOfTranslationUnit();
  void ActOnEndOfTranslationUnit();
  void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

  void CheckDelegatingCtorCycles();

  Scope *getScopeForContext(DeclContext *Ctx);

  void PushFunctionScope();
  void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
  sema::LambdaScopeInfo *PushLambdaScope();

  /// This is used to inform Sema what the current TemplateParameterDepth
  /// is during Parsing. Currently it is used to pass on the depth
  /// when parsing generic lambda 'auto' parameters.
  void RecordParsingTemplateParameterDepth(unsigned Depth);

  void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                               RecordDecl *RD, CapturedRegionKind K,
                               unsigned OpenMPCaptureLevel = 0);

  /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
  /// time after they've been popped.
  class PoppedFunctionScopeDeleter {
    Sema *Self;

  public:
    explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
    void operator()(sema::FunctionScopeInfo *Scope) const;
  };

  using PoppedFunctionScopePtr =
      std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;

  PoppedFunctionScopePtr
  PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                       const Decl *D = nullptr,
                       QualType BlockType = QualType());
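  // A sketch of the expected pairing (not a real call site); the popped
  // scope stays alive briefly, see PoppedFunctionScopeDeleter above:
  //
  //   PushFunctionScope();
  //   // ... act on the function body ...
  //   PoppedFunctionScopePtr Info = PopFunctionScopeInfo();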
  sema::FunctionScopeInfo *getCurFunction() const {
    return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
  }

  sema::FunctionScopeInfo *getEnclosingFunction() const;

  void setFunctionHasBranchIntoScope();
  void setFunctionHasBranchProtectedScope();
  void setFunctionHasIndirectGoto();
  void setFunctionHasMustTail();

  void PushCompoundScope(bool IsStmtExpr);
  void PopCompoundScope();

  sema::CompoundScopeInfo &getCurCompoundScope() const;

  bool hasAnyUnrecoverableErrorsInThisFunction() const;

  /// Retrieve the current block, if any.
  sema::BlockScopeInfo *getCurBlock();

  /// Get the innermost lambda enclosing the current location, if any. This
  /// looks through intervening non-lambda scopes such as local functions and
  /// blocks.
  sema::LambdaScopeInfo *getEnclosingLambda() const;

  /// Retrieve the current lambda scope info, if any.
  /// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
  /// lambda scope info ignoring all inner capturing scopes that are not
  /// lambda scopes.
  sema::LambdaScopeInfo *
  getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

  /// Retrieve the current generic lambda info, if any.
  sema::LambdaScopeInfo *getCurGenericLambda();

  /// Retrieve the current captured region, if any.
  sema::CapturedRegionScopeInfo *getCurCapturedRegion();

  /// Retrieve the current function, if any, that should be analyzed for
  /// potential availability violations.
  sema::FunctionScopeInfo *getCurFunctionAvailabilityContext();

  /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
  SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

  /// Called before parsing a function declarator belonging to a function
  /// declaration.
  void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
                                               unsigned TemplateParameterDepth);
  /// Called after parsing a function declarator belonging to a function
  /// declaration.
  void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);

  void ActOnComment(SourceRange Comment);

  //===--------------------------------------------------------------------===//
  // Type Analysis / Processing: SemaType.cpp.
  //

  QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                              const DeclSpec *DS = nullptr);
  QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                              const DeclSpec *DS = nullptr);
  QualType BuildPointerType(QualType T,
                            SourceLocation Loc, DeclarationName Entity);
  QualType BuildReferenceType(QualType T, bool LValueRef,
                              SourceLocation Loc, DeclarationName Entity);
  QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                          Expr *ArraySize, unsigned Quals,
                          SourceRange Brackets, DeclarationName Entity);
  QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
  QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                              SourceLocation AttrLoc);
  QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
                           SourceLocation AttrLoc);

  QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
                                 SourceLocation AttrLoc);

  /// Same as above, but constructs the AddressSpace index if not provided.
  QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
                                 SourceLocation AttrLoc);

  bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);

  bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
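  // For example (a sketch; 'Loc' and 'Name' stand in for real arguments),
  // building the type 'int &' with the helpers above:
  //
  //   QualType IntRef =
  //       BuildReferenceType(Context.IntTy, /*LValueRef=*/true, Loc, Name);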
  /// Build a function type.
  ///
  /// This routine checks the function type according to C++ rules and
  /// under the assumption that the result type and parameter types have
  /// just been instantiated from a template. It therefore duplicates
  /// some of the behavior of GetTypeForDeclarator, but in a much
  /// simpler form that is only suitable for this narrow use case.
  ///
  /// \param T The return type of the function.
  ///
  /// \param ParamTypes The parameter types of the function. This array
  /// will be modified to account for adjustments to the types of the
  /// function parameters.
  ///
  /// \param Loc The location of the entity whose type involves this
  /// function type or, if there is no such entity, the location of the
  /// type that will have function type.
  ///
  /// \param Entity The name of the entity that involves the function
  /// type, if known.
  ///
  /// \param EPI Extra information about the function type. Usually this will
  /// be taken from an existing function with the same prototype.
  ///
  /// \returns A suitable function type, if there are no errors. The
  /// unqualified type will always be a FunctionProtoType.
  /// Otherwise, returns a NULL type.
  QualType BuildFunctionType(QualType T,
                             MutableArrayRef<QualType> ParamTypes,
                             SourceLocation Loc, DeclarationName Entity,
                             const FunctionProtoType::ExtProtoInfo &EPI);

  QualType BuildMemberPointerType(QualType T, QualType Class,
                                  SourceLocation Loc,
                                  DeclarationName Entity);
  QualType BuildBlockPointerType(QualType T,
                                 SourceLocation Loc, DeclarationName Entity);
  QualType BuildParenType(QualType T);
  QualType BuildAtomicType(QualType T, SourceLocation Loc);
  QualType BuildReadPipeType(QualType T, SourceLocation Loc);
  QualType BuildWritePipeType(QualType T, SourceLocation Loc);
  QualType BuildBitIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);

  TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
  TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);

  /// Package the given type and TSI into a ParsedType.
  ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
  DeclarationNameInfo GetNameForDeclarator(Declarator &D);
  DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
  static QualType GetTypeFromParser(ParsedType Ty,
                                    TypeSourceInfo **TInfo = nullptr);
  CanThrowResult canThrow(const Stmt *E);
  /// Determine whether the callee of a particular function call can throw.
  /// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D, SourceLocation Loc = SourceLocation()); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { protected: unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; /// Do a check to make sure \p Name looks like a legal argument for the /// swift_name attribute applied to decl \p D. 
Raise a diagnostic if the name /// is invalid for the given declaration. /// /// \p AL is used to provide caret diagnostics in case of a malformed name. /// /// \returns true if the name is a valid swift name for \p D, false otherwise. bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc, const ParsedAttr &AL, bool IsAsync); /// A derivative of BoundTypeDiagnoser for which the diagnostic's type /// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless. /// For example, a diagnostic with no other parameters would generally have /// the form "...%select{incomplete|sizeless}0 type %1...". template <typename... Ts> class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> { public: SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args) : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID); this->emit(DB, std::index_sequence_for<Ts...>()); DB << T->isSizelessType() << T; } }; enum class CompleteTypeKind { /// Apply the normal rules for complete types. In particular, /// treat all sizeless types as incomplete. Normal, /// Relax the normal rules for complete types so that they include /// sizeless built-in types. AcceptSizeless, // FIXME: Eventually we should flip the default to Normal and opt in // to AcceptSizeless rather than opt out of it. Default = AcceptSizeless }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } /// Helper function to judge if we are in module purview. /// Return false if we are not in a module. bool isCurrentModulePurview() const { return getCurrentModule() ? getCurrentModule()->isModulePurview() : false; } /// Enter the scope of the global module. Module *PushGlobalModuleFragment(SourceLocation BeginLoc, bool IsImplicit); /// Leave the scope of the global module. void PopGlobalModuleFragment(); VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(const Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. 
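///
/// A hedged sketch of the intended pattern (`SemaRef` and `Def` are
/// illustrative names for a Sema instance and a merged NamedDecl pointer):
/// \code
///   if (!SemaRef.isVisible(Def))
///     SemaRef.makeMergedDefinitionVisible(Def);
/// \endcode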
void makeMergedDefinitionVisible(NamedDecl *ND);

bool isModuleVisible(const Module *M, bool ModulePrivate = false);

// When loading non-modular PCH files, this is used to restore module
// visibility.
void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
  VisibleModules.setVisible(Mod, ImportLoc);
}

/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  return D->isUnconditionallyVisible() || isVisibleSlow(D);
}

/// Determine whether any declaration of an entity is visible.
bool hasVisibleDeclaration(const NamedDecl *D,
                           llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules);

bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                          bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
  NamedDecl *Hidden;
  return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}

/// Determine if the template parameter \p D has a visible default argument.
bool hasVisibleDefaultArgument(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                            const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
    SourceLocation Loc, const NamedDecl *D,
    ArrayRef<const NamedDecl *> Equiv);

bool isUsualDeallocationFunction(const CXXMethodDecl *FD);

bool isCompleteType(SourceLocation Loc, QualType T,
                    CompleteTypeKind Kind = CompleteTypeKind::Default) {
  return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser) {
  return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
  return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}

template <typename...
Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser); } /// Get the type of expression E, triggering instantiation to complete the /// type if necessary -- that is, if the expression refers to a templated /// static data member of incomplete array type. /// /// May still return an incomplete type if instantiation was not possible or /// if the type is incomplete for a different reason. Use /// RequireCompleteExprType instead if a diagnostic is expected for an /// incomplete expression type. QualType getCompletedType(Expr *E); void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); // Returns the underlying type of a decltype with the given expression. QualType getDecltypeForExpr(Expr *E); QualType BuildTypeofExprType(Expr *E); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as an overload set, and an expression /// representing that overload set has been formed. /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable /// expression referencing the overload set. NC_OverloadSet, /// The name was classified as a template whose specializations are types. NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. NC_UndeclaredTemplate, /// The name was classified as a concept name. 
NC_Concept, }; class NameClassification { NameClassificationKind Kind; union { ExprResult Expr; NamedDecl *NonTypeDecl; TemplateName Template; ParsedType Type; }; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification OverloadSet(ExprResult E) { NameClassification Result(NC_OverloadSet); Result.Expr = E; return Result; } static NameClassification NonType(NamedDecl *D) { NameClassification Result(NC_NonType); Result.NonTypeDecl = D; return Result; } static NameClassification UndeclaredNonType() { return NameClassification(NC_UndeclaredNonType); } static NameClassification DependentNonType() { return NameClassification(NC_DependentNonType); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification Concept(TemplateName Name) { NameClassification Result(NC_Concept); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ExprResult getExpression() const { assert(Kind == NC_OverloadSet); return Expr; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } NamedDecl *getNonTypeDecl() const { assert(Kind == NC_NonType); return NonTypeDecl; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_Concept || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_Concept: return TNK_Concept_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param CCC The correction callback, if typo correction is desired. 
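///
/// A sketch of the expected parser-side call pattern (illustrative; `Actions`
/// is the parser's Sema reference and `CCC` a correction callback):
/// \code
///   Sema::NameClassification C =
///       Actions.ClassifyName(S, SS, II, NameLoc, NextTok, &CCC);
///   if (C.getKind() == Sema::NC_Type) {
///     // Continue parsing a type; C.getType() holds the ParsedType.
///   }
/// \endcode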
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Act on the result of classifying a name as an overload set. ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); void warnOnReservedIdentifier(const NamedDecl *D); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo, QualType &T, SourceLocation Loc, unsigned FailedFoldDiagID); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range); bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const BindingDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. 
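///
/// For instance (illustrative), the lambda below modifies a local that
/// shadows its capture, which is the pattern this map is used to diagnose:
/// \code
///   int n = 0;
///   auto f = [n] { int n = 1; n = 2; /* modifies the shadow, not the capture */ };
/// \endcode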
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;

public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                  TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);

NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                  TypeSourceInfo *TInfo,
                                  LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
                                LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope,
                                   ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
                             MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
                                   Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);

enum class CheckConstexprKind {
  /// Diagnose issues that are non-constant or that are extensions.
  Diagnose,
  /// Identify whether this function satisfies the formal rules for constexpr
  /// functions in the current language mode (with no extensions).
CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); QualType adjustParameterTypeForObjCAutoRefCount(QualType T, SourceLocation NameLoc, TypeSourceInfo *TSInfo); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
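///
/// A hedged call sketch (illustrative; `SemaRef`, `ParamTy`, and `Param` are
/// assumed from the caller's context):
/// \code
///   SemaRef.checkNonTrivialCUnion(ParamTy, Param->getLocation(),
///                                 Sema::NTCUC_FunctionParam,
///                                 Sema::NTCUK_Destruct | Sema::NTCUK_Copy);
/// \endcode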
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); ExprResult ActOnRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. 
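///
/// A minimal sketch (illustrative; `FD` is a FunctionDecl pointer with an
/// attached body):
/// \code
///   SemaRef.DiagnoseUnusedParameters(FD->parameters());
/// \endcode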
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);

/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
                                            QualType ReturnTy, NamedDecl *D);

void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc,
                            SourceLocation RParenLoc);

/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
                            SourceLocation SemiLoc);

enum class ModuleDeclKind {
  Interface,      ///< 'export module X;'
  Implementation, ///< 'module X;'
};

/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
                               SourceLocation ModuleLoc, ModuleDeclKind MDK,
                               ModuleIdPath Path, bool IsFirstDecl);

/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);

/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                              SourceLocation PrivateLoc);

/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, Module *M,
                             ModuleIdPath Path = {});

/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                Module *Mod);

/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
  Declaration,
  Definition,
  DefaultArgument,
  ExplicitSpecialization,
  PartialSpecialization
};

/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
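///
/// A hedged sketch (illustrative names):
/// \code
///   // Suggest importing the module that owns D's definition.
///   SemaRef.diagnoseMissingImport(UseLoc, D,
///                                 Sema::MissingImportKind::Definition,
///                                 /*Recover=*/true);
/// \endcode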
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
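///
/// A sketch of the intended diagnostic use (illustrative; the exact argument
/// order of err_tag_reference_non_tag may differ):
/// \code
///   Diag(NameLoc, diag::err_tag_reference_non_tag)
///       << PrevDecl << SemaRef.getNonTagTypeDeclKind(PrevDecl, TTK) << TTK;
/// \endcode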
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
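///
/// Callers typically branch on it like this (illustrative sketch):
/// \code
///   Sema::DefaultedFunctionKind DFK = SemaRef.getDefaultedFunctionKind(FD);
///   if (DFK.isSpecialMember()) {
///     // Handle a defaulted special member (DFK.asSpecialMember()).
///   } else if (DFK.isComparison()) {
///     // Handle a defaulted comparison (DFK.asComparison()).
///   }
/// \endcode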
class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, bool IsAbstract, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. 
/// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Enter a template parameter scope, after it's been associated with a particular /// DeclContext. Causes lookup within the scope to chain through enclosing contexts /// in the correct order. void EnterTemplatedContext(Scope *S, DeclContext *DC); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. 
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                              TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
  /// Don't merge availability attributes at all.
  AMK_None,
  /// Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,
  /// Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override,
  /// Merge availability attributes for an implementation of
  /// a protocol requirement.
  AMK_ProtocolImplementation,
  /// Merge availability attributes for an implementation of
  /// an optional protocol requirement.
  AMK_OptionalProtocolImplementation
};

/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
  /// The availability attribute was specified explicitly next to the
  /// declaration.
  AP_Explicit = 0,
  /// The availability attribute was applied using '#pragma clang attribute'.
  AP_PragmaClangAttribute = 1,
  /// The availability attribute for a specific platform was inferred from
  /// an availability attribute for another platform.
  AP_InferredFromOtherPlatform = 2
};

/// Attribute merging methods. Return true if a new attribute was added.
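///
/// Worked example for the Priority argument below (illustrative): an
/// availability attribute inferred from another platform and applied via
/// '#pragma clang attribute' gets priority
/// AP_PragmaClangAttribute + AP_InferredFromOtherPlatform = 1 + 2 = 3, so it
/// loses to an explicitly written attribute with priority AP_Explicit = 0.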
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef UuidAsWritten, MSGuidDecl *GuidDecl); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); ErrorAttr *mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI, StringRef NewUserDiagnostic); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA, StringRef Name); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); WebAssemblyImportNameAttr *mergeImportNameAttr( Decl *D, const WebAssemblyImportNameAttr &AL); WebAssemblyImportModuleAttr *mergeImportModuleAttr( Decl *D, const WebAssemblyImportModuleAttr &AL); EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL); EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D, const EnforceTCBLeafAttr &AL); BTFDeclTagAttr *mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. 
enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(QualType Param, QualType Arg); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool IsStringInit(Expr *Init, const ArrayType *AT); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. 
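///
/// For example (illustrative), a temporary bound to a reference member in
/// aggregate initialization is lifetime-extended, and this check verifies
/// such cases:
/// \code
///   struct A { const int &r; };
///   A a{0};  // OK: the temporary's lifetime is extended to that of 'a'
/// \endcode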
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_ArrayBound, ///< Array bound in array declarator or new-expression. CCEK_ExplicitBool, ///< Condition in an explicit(bool) specifier. CCEK_Noexcept ///< Condition in a noexcept(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE, NamedDecl *Dest = nullptr); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. 
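///
/// A hedged sketch (illustrative; `MyDiagnoser` is a hypothetical
/// ContextualImplicitConverter subclass):
/// \code
///   MyDiagnoser Diagnoser;
///   ExprResult Converted =
///       SemaRef.PerformContextualImplicitConversion(Loc, E, Diagnoser);
/// \endcode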
ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false, OverloadCandidateParamOrder PO = {}); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, OverloadCandidateParamOrder PO = {}); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, OverloadCandidateParamOrder PO = {}); bool CheckNonDependentConversions( FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}, OverloadCandidateParamOrder PO = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, 
QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-template candidates // identified by the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal.
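///
/// A hypothetical call site sketch:
/// \code
///   if (!S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true, Loc))
///     return ExprError(); // a diagnostic has already been emitted
/// \endcode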
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfSingleOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); void AddOverloadedCallCandidates( LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass, NestedNameSpecifierLoc NNSLoc, DeclarationNameInfo DNI, const UnresolvedSetImpl &Fns, bool PerformADL = true); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc,
MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false, bool AllowRecovery = false); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria are specified via the LookupNameKind enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up a name following ~ in a destructor name. This is an ordinary /// lookup, but prefers tags to typedefs. LookupDestructorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1).
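///
/// For example (illustrative only):
/// \code
///   N::C::m(); // 'N' and 'C' are looked up as nested-name-specifiers,
///              // so any function or variable also named 'N' is ignored
/// \endcode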
LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. 
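///
/// For example (illustrative only), such an operator is declared as:
/// \code
///   template <typename CharT, CharT... Cs>
///   constexpr auto operator""_tag(); // "ab"_tag passes <char, 'a', 'b'>
/// \endcode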
LOLR_StringTemplatePack, }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, SourceLocation TypoLoc); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloading.
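///
/// A typical call (hypothetical sketch):
/// \code
///   if (NamedDecl *ND = S.LookupSingleName(CurScope, Name, Loc,
///                                          Sema::LookupOrdinaryName)) {
///     // exactly one visible declaration was found
///   }
/// \endcode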
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupBuiltin(LookupResult &R); void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id, bool IsUDSuffix); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing, StringLiteral *StringLit = nullptr); bool isKnownName(StringRef name); /// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs. enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl, bool Final = false); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. 
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param RecoverUncorrectedTypos If true, when typo correction fails, it /// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr( Expr *E, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr( ExprResult ER, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} /// Attempts to produce a RecoveryExpr after some AST node cannot be created. 
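///
/// A hypothetical use: when building an expression fails but its operands
/// are still useful (e.g. for tooling), wrap them instead of dropping them.
/// \code
///   ExprResult R = S.CreateRecoveryExpr(E->getBeginLoc(), E->getEndLoc(),
///                                       {E}); // result type left unspecified
/// \endcode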
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End, ArrayRef<Expr *> SubExprs, QualType T = QualType()); ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID, SourceLocation Loc); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction( FunctionDecl *FD); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Handles semantic checking for features that are common to all attributes, /// such as checking whether a parameter was properly specified, or the /// correct number of arguments were passed, etc. Returns true if the /// attribute has been diagnosed. bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A); bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A); /// Map any API notes provided for this declaration to attributes on the /// declaration. /// /// Triggered by declaration-attribute processing. void ProcessAPINotes(Decl *D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); llvm::Error isValidSectionSpecifier(StringRef Str); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkTargetClonesAttrString(SourceLocation LiteralLoc, StringRef Str, const StringLiteral *Literal, bool &HasDefault, bool &HasCommas, SmallVectorImpl<StringRef> &Strings); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. 
// The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type through some means not written in source (e.g. API notes). /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param diagLoc The location to use for diagnostics. /// /// \param allowArrayTypes Whether to accept nullability specifiers on an /// array type (e.g., because it will decay to a pointer). /// /// \param overrideExisting Whether to override an existing, locally-specified /// nullability specifier rather than complaining about the conflict. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkImplicitNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation diagLoc, bool allowArrayTypes, bool overrideExisting); /// Process the attributes before creating an attributed statement. Returns /// the semantic attributes that have been processed. void ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesWithRange &InAttrs, SmallVectorImpl<const Attr *> &OutAttrs); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if a method /// implementation declaration exactly matches that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is the main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar /// which backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If the method is a property setter/getter /// and its property has a backing ivar, returns that ivar; otherwise, returns /// NULL. It also returns the ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjCPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when an atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks whether two methods' types match and /// returns true or false accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in an interface or /// protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// a category match those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); /// Returns the default address space for method qualifiers. LangAS getDefaultCXXMethodAddrSpace() const; private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool.
See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in the global method pool for /// the given selector. It checks the desired kind first; if none is found and /// the parameter CheckTheOther is set, it then checks the other kind. If no /// such method or only one method is found, the function returns false; /// otherwise, it returns true. bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns the method which best matches the given argument list, or /// nullptr if none could be found. ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the /// global pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnAfterCompoundStatementLeadingPragmas(); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// An RAII object to enter the scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops a function scope on exit.
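///
/// Usage sketch (hypothetical): the pushed function scope is popped on every
/// exit path unless the success path disables the helper first.
/// \code
///   FunctionScopeRAII FuncScope(SemaRef);
///   // ... work that may return early on error ...
///   FuncScope.disable(); // success: do not pop on destruction
/// \endcode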
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult BuildAttributedStmt(SourceLocation AttrsLoc, ArrayRef<const Attr *> Attrs, Stmt *SubStmt); StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, SourceLocation LParenLoc, Stmt *InitStmt, ConditionResult Cond, SourceLocation RParenLoc); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc, ConditionResult Cond, SourceLocation RParenLoc, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); struct NamedReturnInfo { const VarDecl *Candidate; enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable }; Status S; bool isMoveEligible() const { return S != None; } bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; } }; enum class SimplerImplicitMoveMode { ForceOff, Normal, ForceOn }; NamedReturnInfo getNamedReturnInfo( Expr *&E, SimplerImplicitMoveMode Mode = SimplerImplicitMoveMode::Normal); NamedReturnInfo getNamedReturnInfo(const VarDecl *VD); const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info, QualType ReturnType); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const NamedReturnInfo &NRInfo, Expr *Value, bool SuppressSimplerImplicitMoves = false); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, NamedReturnInfo &NRInfo, bool SuppressSimplerImplicitMoves); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl
*BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// If VD is set but not otherwise used, diagnose (for a parameter or a /// variable). void DiagnoseUnusedButSetDecl(const VarDecl *VD); /// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr.
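///
/// For example (illustrative only):
/// \code
///   void *p = 0; // the literal 0 converts via CK_NullToPointer
/// \endcode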
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { ParsingClassDepth++; return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { ParsingClassDepth--; DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD. void DiagnoseUnguardedAvailabilityViolations(Decl *FD); void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier.
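//
// For example (illustrative only): a pure virtual function named without an
// explicit nested-name-specifier is not odr-used, so given
//   struct B { virtual void f() = 0; };
//   void g(B &b) { b.f(); }    // need not odr-use B::f
//   void h(B &b) { b.B::f(); } // odr-uses B::f
// the unqualified case would pass MightBeOdrUse = false.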
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely checks whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false, ArrayRef<const Expr *> StopAt = None); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid.
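///
/// A hypothetical call site sketch ('PD' is a PartialDiagnostic prepared by
/// the caller):
/// \code
///   ExprResult Res = E;
///   if (S.tryToRecoverWithCall(Res, PD, /*ForceComplain=*/false))
///     return Res; // Res may now be a call expression, or invalid
/// \endcode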
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Try to convert an expression \p E to type \p Ty. Returns the result of the /// conversion. ExprResult tryConvertExprToType(Expr *E, QualType Ty); /// Conditionally issue a diagnostic based on the statements' reachability /// analysis. /// /// \param Stmts If Stmts is non-empty, delay reporting the diagnostic until /// the function body is parsed, and then do a basic reachability analysis to /// determine if the statement is reachable. If it is unreachable, the /// diagnostic will not be emitted. bool DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts, const PartialDiagnostic &PD); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but the diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseDependentMemberLookup(LookupResult &R); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None.
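///
/// For example (illustrative only):
/// \code
///   constexpr int N = 4;
///   int f() { return N; } // reading N here need not be an odr-use
///                         // (NOUR_Constant)
/// \endcode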
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr( const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, UnresolvedLookupExpr *AsULE = nullptr); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, TypeSourceInfo *TSI); ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, ParsedType ParsedTy); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx, Expr *ColumnIdx, SourceLocation RBLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length, Expr *Stride, SourceLocation RBLoc); ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc, ArrayRef<Expr *> Dims, ArrayRef<SourceRange> Brackets); /// Data structure for iterator expression. struct OMPIteratorData { IdentifierInfo *DeclIdent = nullptr; SourceLocation DeclIdentLoc; ParsedType Type; OMPIteratorExpr::IteratorRange Range; SourceLocation AssignLoc; SourceLocation ColonLoc; SourceLocation SecColonLoc; }; ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc, ArrayRef<OMPIteratorData> Data); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
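///
/// Parser-side sketch (names such as \c CurScope, \c Fn and \c ArgExprs are
/// illustrative stand-ins for values the parser supplies):
/// \code
///   ExprResult Call =
///       S.ActOnCallExpr(CurScope, Fn, LParenLoc, ArgExprs, RParenLoc);
///   if (Call.isInvalid())
///     return ExprError();
/// \endcode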
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr, bool IsExecConfig = false,
                         bool AllowRecovery = false);
Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
                           MultiExprArg CallArgs);

enum class AtomicArgumentOrder { API, AST };
ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
                           SourceLocation RParenLoc, MultiExprArg Args,
                           AtomicExpr::AtomicOp Op,
                           AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);

ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
                                 SourceLocation LParenLoc, ArrayRef<Expr *> Arg,
                                 SourceLocation RParenLoc,
                                 Expr *Config = nullptr,
                                 bool IsExecConfig = false,
                                 ADLCallKind UsesADL = ADLCallKind::NotADL);

ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                   MultiExprArg ExecConfig,
                                   SourceLocation GGGLoc);

ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D,
                         ParsedType &Ty, SourceLocation RParenLoc,
                         Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty,
                               SourceLocation RParenLoc, Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);

/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                              SourceLocation RParenLoc, Expr *E,
                              TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty,
                                SourceLocation RParenLoc, Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                    TypeSourceInfo *TInfo,
                                    SourceLocation RParenLoc,
                                    Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                      SourceLocation EqualOrColonLoc,
                                      bool GNUSyntax, ExprResult Init);

private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                              Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
                 UnresolvedSetImpl &Functions);

void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                              SourceLocation ColonLoc, Expr *CondExpr,
                              Expr *LHSExpr, Expr *RHSExpr);

/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                          LabelDecl *TheDecl);

void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
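///
/// Source-level form handled here (element-wise conversion between vector
/// types of the same arity), sketched with ext_vector_type typedefs:
/// \code
///   typedef int   int4   __attribute__((ext_vector_type(4)));
///   typedef float float4 __attribute__((ext_vector_type(4)));
///   float4 widen(int4 v) { return __builtin_convertvector(v, float4); }
/// \endcode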
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                  SourceLocation BuiltinLoc,
                                  SourceLocation RParenLoc);

//===---------------------------- OpenCL Features -----------------------===//

/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                           SourceLocation BuiltinLoc,
                           SourceLocation RParenLoc);
ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy,
                           SourceLocation BuiltinLoc,
                           SourceLocation RParenLoc);

//===---------------------------- C++ Features --------------------------===//

// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                             SourceLocation NamespaceLoc,
                             SourceLocation IdentLoc, IdentifierInfo *Ident,
                             SourceLocation LBrace,
                             const ParsedAttributesView &AttrList,
                             UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();

NamespaceDecl *lookupStdExperimentalNamespace();
NamespaceDecl *getCachedCoroNamespace() { return CoroTraitsNamespaceCache; }

CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;

private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;

ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                       CXXScopeSpec &SS,
                                       ParsedType TemplateTypeTy,
                                       IdentifierInfo *MemberOrBase);

public:
enum class ComparisonCategoryUsage {
  /// The '<=>' operator was used in an expression and a builtin operator
  /// was selected.
  OperatorInExpression,
  /// A defaulted 'operator<=>' needed the comparison category. This
  /// typically only applies to 'std::strong_ordering', due to the implicit
  /// fallback return value.
  DefaultedOperator,
};

/// Look up the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                     SourceLocation Loc,
                                     ComparisonCategoryUsage Usage);

/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);

/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
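///
/// For example, per [dcl.init.list]p2 the first two constructors below are
/// initializer-list constructors and the third is not:
/// \code
///   struct S {
///     S(std::initializer_list<int>);          // initializer-list constructor
///     S(std::initializer_list<int>, int = 0); // trailing default: still one
///     S(int, std::initializer_list<int>);     // not one
///   };
/// \endcode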
bool isInitListConstructor(const FunctionDecl *Ctor);

Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                          SourceLocation NamespcLoc, CXXScopeSpec &SS,
                          SourceLocation IdentLoc,
                          IdentifierInfo *NamespcName,
                          const ParsedAttributesView &AttrList);

void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                             SourceLocation AliasLoc, IdentifierInfo *Alias,
                             CXXScopeSpec &SS, SourceLocation IdentLoc,
                             IdentifierInfo *Ident);

void FilterUsingLookup(Scope *S, LookupResult &lookup);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target,
                          const LookupResult &PreviousDecls,
                          UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
                                      NamedDecl *Target,
                                      UsingShadowDecl *PrevDecl);

bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                 bool HasTypenameKeyword,
                                 const CXXScopeSpec &SS,
                                 SourceLocation NameLoc,
                                 const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
                             const CXXScopeSpec &SS,
                             const DeclarationNameInfo &NameInfo,
                             SourceLocation NameLoc,
                             const LookupResult *R = nullptr,
                             const UsingDecl *UD = nullptr);

NamedDecl *BuildUsingDeclaration(
    Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
    bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
    DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
    const ParsedAttributesView &AttrList, bool IsInstantiation,
    bool IsUsingIfExists);
NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
                                     SourceLocation UsingLoc,
                                     SourceLocation EnumLoc,
                                     SourceLocation NameLoc, EnumDecl *ED);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                              ArrayRef<NamedDecl *> Expansions);

bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                          ConstructorUsingShadowDecl *DerivedShadow);

Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                            SourceLocation UsingLoc,
                            SourceLocation TypenameLoc, CXXScopeSpec &SS,
                            UnqualifiedId &Name, SourceLocation EllipsisLoc,
                            const ParsedAttributesView &AttrList);
Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS,
                                SourceLocation UsingLoc,
                                SourceLocation EnumLoc, const DeclSpec &);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                            MultiTemplateParamsArg TemplateParams,
                            SourceLocation UsingLoc, UnqualifiedId &Name,
                            const ParsedAttributesView &AttrList,
                            TypeResult Type, Decl *DeclFromDeclSpec);

/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult BuildCXXConstructExpr(
    SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl,
    CXXConstructorDecl *Constructor, MultiExprArg Exprs,
    bool HadMultipleCandidates, bool IsListInitialization,
    bool IsStdInitListInitialization, bool RequiresZeroInit,
    unsigned ConstructKind, SourceRange ParenRange);

/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E) { CalledStmt(E); } /// Integrate an invoked statement into the collected data. void CalledStmt(Stmt *S); /// Overwrite an EPI's exception specification with this /// computed exception specification. 
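///
/// Typical use, sketched (here \c EPI is a FunctionProtoType::ExtProtoInfo
/// being assembled by the caller, and \c CalledMethods a hypothetical
/// collection of methods invoked by the implicit definition):
/// \code
///   Sema::ImplicitExceptionSpecification Spec(S);
///   for (const CXXMethodDecl *M : CalledMethods)
///     Spec.CalledDecl(Loc, M);
///   EPI.ExceptionSpec = Spec.getExceptionSpec();
/// \endcode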
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. 
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);

/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
                                 CXXConstructorDecl *Constructor);

/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
                                   CXXConstructorDecl *Constructor);

/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
                                   CXXConstructorDecl *Constructor);

/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);

/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);

/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);

/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

/// MaybeBindToTemporary - If the passed-in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed-in expression.
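///
/// Sketch of the intended effect:
/// \code
///   ExprResult Bound = S.MaybeBindToTemporary(CallE);
///   // Bound wraps CallE in a CXXBindTemporaryExpr only when CallE's
///   // record type has a non-trivial destructor; otherwise Bound == CallE.
/// \endcode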
ExprResult MaybeBindToTemporary(Expr *E); /// Wrap the expression in a ConstantExpr if it is a potential immediate /// invocation. ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, QualType DeclInitType, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr *> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); // Checks that the vector type should be initialized from a scalar // by splatting the value rather than populating a single element. // This is the case for AltiVecVector types as well as with // AltiVecPixel and AltiVecBool when -faltivec-src-compat=xl is specified. bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy); // Checks if the -faltivec-src-compat=gcc option is specified. // If so, AltiVecVector, AltiVecBool and AltiVecPixel types are // treated the same way as they are when trying to initialize // these vectors on gcc (an error is emitted). bool CheckAltivecInitFromScalar(SourceRange R, QualType VecTy, QualType SrcTy); /// ActOnCXXNamedCast - Parse /// {dynamic,static,reinterpret,const,addrspace}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). 
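///
/// The source forms recognized here are:
/// \code
///   ( pack op ... )          // unary right fold
///   ( ... op pack )          // unary left fold
///   ( pack op ... op init )  // binary right fold
///   ( init op ... op pack )  // binary left fold
/// \endcode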
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
                            tok::TokenKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
                            SourceLocation LParenLoc, Expr *LHS,
                            BinaryOperatorKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc,
                            Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                 BinaryOperatorKind Operator);

/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);

/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);

/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();

/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class), along with the given qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
///
/// \returns true if the capture failed, false if it succeeded.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
                         bool BuildAndDiagnose = true,
                         const unsigned *const FunctionScopeIndexToStopAt = nullptr,
                         bool ByCopy = false);

/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);

/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

ExprResult ActOnObjCAvailabilityCheckExpr(
    llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc,
    SourceLocation RParen);

/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);

/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
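///
/// Caller-side sketch (all names are illustrative; this assumes the
/// conventional true-on-failure return used by the surrounding interfaces):
/// \code
///   FunctionDecl *OperatorNew = nullptr, *OperatorDelete = nullptr;
///   bool PassAlignment = false;
///   if (S.FindAllocationFunctions(StartLoc, Range, Sema::AFS_Both,
///                                 Sema::AFS_Both, AllocType,
///                                 /*IsArray=*/false, PassAlignment,
///                                 PlacementArgs, OperatorNew, OperatorDelete))
///     return ExprError(); // lookup failed and was diagnosed
/// \endcode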
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
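///
/// Sketch:
/// \code
///   Expr *Full = S.MaybeCreateExprWithCleanups(Sub);
///   // Full == Sub when no cleanups are pending; otherwise Sub is
///   // wrapped in an ExprWithCleanups node.
/// \endcode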
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); // Complete an enum decl, maybe without a scope spec. bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L, CXXScopeSpec *SS = nullptr); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. 
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, do not emit an error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
                                 bool EnteringContext, CXXScopeSpec &SS,
                                 bool ErrorRecoveryLookup = false,
                                 bool *IsCorrectedToColon = nullptr,
                                 bool OnlyNamespace = false);

ExprResult ActOnDecltypeExpression(Expr *E);

bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS,
                                         SourceLocation ColonColonLoc);

bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
                               NestedNameSpecInfo &IdInfo,
                               bool EnteringContext);

/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
                                 SourceLocation TemplateKWLoc,
                                 TemplateTy TemplateName,
                                 SourceLocation TemplateNameLoc,
                                 SourceLocation LAngleLoc,
                                 ASTTemplateArgsPtr TemplateArgs,
                                 SourceLocation RAngleLoc,
                                 SourceLocation CCLoc, bool EnteringContext);

/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);

/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
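///
/// Round-trip sketch with \c SaveNestedNameSpecifierAnnotation() (locations
/// and scope specs are illustrative):
/// \code
///   void *Ann = S.SaveNestedNameSpecifierAnnotation(SS);
///   // ... the annotated tokens are cached and revisited later ...
///   CXXScopeSpec Restored;
///   S.RestoreNestedNameSpecifierAnnotation(Ann, AnnotationRange, Restored);
/// \endcode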
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
                                          SourceRange AnnotationRange,
                                          CXXScopeSpec &SS);

bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);

/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);

/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope() is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);

/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);

/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);

/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
                                       TypeSourceInfo *Info,
                                       bool KnownDependent,
                                       LambdaCaptureDefault CaptureDefault);

/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
                                     SourceRange IntroducerRange,
                                     TypeSourceInfo *MethodType,
                                     SourceLocation EndLoc,
                                     ArrayRef<ParmVarDecl *> Params,
                                     ConstexprSpecKind ConstexprKind,
                                     Expr *TrailingRequiresClause);

/// Number the lambda for linkage purposes if necessary.
void handleLambdaNumbering(
    CXXRecordDecl *Class, CXXMethodDecl *Method,
    Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None);

/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator,
                      SourceRange IntroducerRange,
                      LambdaCaptureDefault CaptureDefault,
                      SourceLocation CaptureDefaultLoc, bool ExplicitParams,
                      bool ExplicitResultType, bool Mutable);

/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  return ParsedType::make(buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, None, Id,
      InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
    Expr *&Init);

/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
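///
/// The source construct this supports is the C++14 init-capture:
/// \code
///   auto L = [x = compute()] { return x; }; // 'x' gets a dummy VarDecl
/// \endcode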
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc, ExprResult RequiresClause); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType, CallingConv CC); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. 
/// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); /// Check whether the given expression is a valid constraint expression. /// A diagnostic is emitted if it is not, false is returned, and /// PossibleNonPrimary will be set to true if the failure might be due to a /// non-primary expression being used as an atomic constraint. bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(), bool *PossibleNonPrimary = nullptr, bool IsTrailingRequiresClause = false); private: /// Caches pairs of template-like decls whose associated constraints were /// checked for subsumption and whether or not the first's constraints did in /// fact subsume the second's. llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache; /// Caches the normalized associated constraints of declarations (concepts or /// constrained declarations). If an error occurred while normalizing the /// associated constraints of the template or concept, nullptr will be cached /// here. llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache; llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &> SatisfactionCache; public: const NormalizedConstraint * getNormalizedAssociatedConstraints( NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints); /// \brief Check whether the given declaration's associated constraints are /// at least as constrained as another declaration's, according to the /// partial ordering of constraints. /// /// \param Result If no error occurred, receives true if D1 is at least as /// constrained as D2, and false otherwise. /// /// \returns true if an error occurred, false otherwise. bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2, bool &Result); /// Diagnose the case where D1 is not at least as constrained as D2, but /// would have been if a pair of the atomic constraints involved had been /// declared in a concept rather than repeated in two separate places in /// code. /// \returns true if such a diagnostic was emitted, false otherwise. bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2); /// \brief Check whether the given list of constraint expressions is /// satisfied (as if in a 'conjunction') given template arguments. /// \param Template the template-like entity that triggered the constraints /// check (either a concept or a constrained entity). /// \param ConstraintExprs a list of constraint expressions, treated as if /// they were 'AND'ed together. /// \param TemplateArgs the list of template arguments to substitute into the /// constraint expression. /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// \param Satisfaction if true is returned, will contain details of the /// satisfaction, with enough information to diagnose an unsatisfied /// expression. /// \returns true if an error occurred and satisfaction could not be checked, /// false otherwise.
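/// /// For illustration, a minimal sketch of the kind of constraints such a /// check evaluates: /// \code /// template<typename T> concept Addable = requires(T a) { a + a; }; /// template<Addable T> T twice(T t) { return t + t; } /// auto n = twice(21); // associated constraints checked with T = int /// \endcode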
bool CheckConstraintSatisfaction( const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); /// \brief Check whether the given non-dependent constraint expression is /// satisfied. Returns false and updates Satisfaction with the satisfaction /// verdict if successful; emits a diagnostic and returns true if an error /// occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction); /// Check whether the given function decl's trailing requires clause is /// satisfied, if any. Returns false and updates Satisfaction with the /// satisfaction verdict if successful; emits a diagnostic and returns true if /// an error occurred and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckFunctionConstraints(const FunctionDecl *FD, ConstraintSatisfaction &Satisfaction, SourceLocation UsageLoc = SourceLocation()); /// \brief Ensure that the given template arguments satisfy the constraints /// associated with the given template, emitting a diagnostic if they do not. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateArgs The converted, canonicalized template arguments. /// /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// /// \returns true if the constraints are not satisfied or could not be checked /// for satisfaction, false if the constraints are satisfied. bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. /// \param First whether this is the first time an unsatisfied constraint is /// diagnosed for this error. void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction, bool First = true); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool 
SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// Mark destructors of virtual bases of this class referenced. In the Itanium /// C++ ABI, this is done when emitting a destructor for any non-abstract /// class. In the Microsoft C++ ABI, this is done any time a class's /// destructor is referenced. void MarkVirtualBaseDestructorsReferenced( SourceLocation Location, CXXRecordDecl *ClassDecl, llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr); /// Do semantic checks to allow the complete destructor variant to be emitted /// when the destructor is defined in another translation unit. In the Itanium /// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they /// can be emitted in separate TUs. To emit the complete variant, run a subset /// of the checks performed when emitting a regular destructor. void CheckCompleteDestructorVariant(SourceLocation CurrentLocation, CXXDestructorDecl *Dtor); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called at some point later /// when all outer classes of Class are complete.
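/// /// For illustration, the kind of class-level attribute being checked (under /// MS semantics, exporting the class exports its members): /// \code /// class __declspec(dllexport) Widget { /// public: ///   void frob(); // exported along with the class /// }; /// \endcode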
void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Decl *Template, llvm::function_ref<Scope *()> EnterScope); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK); void DeclareImplicitEqualityComparison(CXXRecordDecl
*RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
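/// /// For illustration: /// \code /// struct A { virtual void f() final; }; /// struct B : A { void f(); }; // error: overrides a 'final' function /// \endcode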
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. static NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum TemplateNameIsRequiredTag { TemplateNameIsRequired }; /// Whether and why a template name is required in this lookup. class RequiredTemplateKind { public: /// Template name is required if TemplateKWLoc is valid. RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation()) : TemplateKW(TemplateKWLoc) {} /// Template name is unconditionally required. RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {} SourceLocation getTemplateKeywordLoc() const { return TemplateKW.getValueOr(SourceLocation()); } bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); } bool isRequired() const { return TemplateKW != SourceLocation(); } explicit operator bool() const { return isRequired(); } private: llvm::Optional<SourceLocation> TemplateKW; }; enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName( LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, RequiredTemplateKind RequiredTemplate = SourceLocation(), AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool Disambiguation = false); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. 
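/// /// For illustration (hypothetical names), the kind of typo this recovers: /// \code /// template<typename T> struct my_vector {}; /// my_vectr<int> v; // lookup finds nothing; the name may be corrected to /// // the type template 'my_vector' /// \endcode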
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool BuildTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc, bool AllowUnexpandedPack); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool RequireStructuralType(QualType T, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template<int X>, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); /// Get the specialization of the given variable template corresponding to /// the specified argument list, or a null-but-valid result if the arguments /// are dependent. DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); /// Form a reference to the specialization of the given variable template /// corresponding to the specified argument list, or a null-but-valid result /// if the arguments are dependent. 
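/// /// For illustration, the kind of reference formed: /// \code /// template<typename T> constexpr T pi = T(3.1415926535897932385); /// double d = pi<double>; // reference to the specialization pi<double> /// \endcode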
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. 
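/// /// For illustration, a template argument deduced from an array bound /// (CTAK_DeducedFromArrayBound below): /// \code /// template<typename T, unsigned N> void f(T (&)[N]); /// int a[4]; /// void g() { f(a); } // N = 4 is deduced from the bound of 'a' /// \endcode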
enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \param ConstraintsNotSatisfied If provided, and an error occurred, will /// receive true if the cause for the error is the associated constraints of /// the template not being satisfied by the template arguments. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true, bool *ConstraintsNotSatisfied = nullptr); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param, TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations.
/// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack 
might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block, /// A type constraint. UPPC_TypeConstraint, /// A requirement in a requires-expression. UPPC_Requirement, /// A requires-clause. UPPC_RequiresClause, }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given requires-expression contains an unexpanded reference to one /// of its own parameter packs, diagnose the error. /// /// \param RE The requires-expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise.
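/// /// For illustration, the kind of error these routines diagnose: /// \code /// template<typename... Ts> struct X { ///   std::tuple<Ts> bad;   // error: 'Ts' is an unexpanded parameter pack ///   std::tuple<Ts...> ok; // OK: the pack is expanded /// }; /// \endcode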
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. 
/// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. 
/// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. 
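/// For example: /// \code /// template<typename T> void f(T, T); /// void g() { f(1, 2.5); } // T deduced as both 'int' and 'double' /// \endcode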
TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// The deduced arguments did not satisfy the constraints associated /// with the template. TDK_ConstraintsNotSatisfied, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// A function argument from which we performed template argument /// deduction for a call.
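/// Each entry records the parameter type as written, whether the parameter was /// produced by decomposing an initializer list, the index of the call argument, /// and the argument's original type, for use when re-checking argument /// conversions after deduction succeeds (see \c FinishTemplateArgumentDeduction /// below; summary ours, inferred from the surrounding declarations).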
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Substitute auto in TypeWithAuto for a dependent auto type QualType SubstAutoTypeDependent(QualType TypeWithAuto); /// Substitute auto in TypeWithAuto for a dependent auto type TypeSourceInfo * SubstAutoTypeSourceInfoDependent(TypeSourceInfo *TypeWithAuto); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so.
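/// For illustration (example ours): class template argument deduction relies /// on such guides, e.g. /// \code /// template<typename T> struct S { S(T); }; /// S s(42); // an implicit deduction guide deduces S<int> /// \endcode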
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. 
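/// (For illustration, example ours: given 'template<typename T, typename U = T*> /// class X;', naming 'X<int>' instantiates the default argument 'U = T*' with /// 'T = int'.)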
DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provide the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template arguments determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are instantiating a requirement of a requires expression. RequirementInstantiation, /// We are checking the satisfaction of a nested requirement of a requires /// expression. NestedRequirementConstraintsCheck, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are declaring an implicit 'operator==' for a defaulted /// 'operator<=>'. DeclaringImplicitEqualityComparison, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// We are checking the constraints associated with a constrained entity or /// the constraint expression of a concept. This includes the checks that /// atomic constraints have the type 'bool' and that they can be constant /// evaluated. ConstraintsCheck, /// We are substituting template arguments into a constraint expression. ConstraintSubstitution, /// We are normalizing a constraint expression. ConstraintNormalization, /// We are substituting into the parameter mapping of an atomic constraint /// during normalization. ParameterMappingSubstitution, /// We are rewriting a comparison operator in terms of an operator<=>. RewritingOperatorAsSpaceship, /// We are initializing a structured binding. InitializingStructuredBinding, /// We are marking a class as __dllexport. MarkingClassDllexported, /// Added for template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity.
const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that causes /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// These callbacks are used to print, trace, or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and the object evaluates to true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction.
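/// A hypothetical usage sketch (ours; 'Loc', 'FnTmpl', 'Args', and 'Info' are /// assumed locals, mirroring how these RAII objects are typically used): /// \code /// InstantiatingTemplate Inst(*this, Loc, FnTmpl, Args, /// CodeSynthesisContext::ExplicitTemplateArgumentSubstitution, Info); /// if (Inst.isInvalid()) /// return TDK_InstantiationDepth; /// \endcode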
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); struct ConstraintNormalization {}; /// \brief Note that we are normalizing a constraint expression. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintNormalization, NamedDecl *Template, SourceRange InstantiationRange); struct ParameterMappingSubstitution {}; /// \brief Note that we are substituting into the parameter mapping of an /// atomic constraint during constraint normalization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParameterMappingSubstitution, NamedDecl *Template, SourceRange InstantiationRange); /// \brief Note that we are substituting template arguments into a part of /// a requirement of a requires expression. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::Requirement *Req, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are checking the satisfaction of the constraint /// expression inside of a nested requirement. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, concepts::NestedRequirement *Req, ConstraintsCheck, SourceRange InstantiationRange = SourceRange()); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum number of /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5.
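/// For illustration (example ours): the operand of \c sizeof is one such /// context: /// \code /// int f(); /// int n = sizeof(f()); // f() is named but never evaluated /// \endcode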
bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } bool isImmediateFunctionContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isImmediateFunctionContext(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) { assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } else { // Template instantiations in the PCH may be delayed until the TU. S.PendingInstantiations.swap(SavedPendingInstantiations); S.PendingInstantiations.insert(S.PendingInstantiations.end(), SavedPendingInstantiations.begin(), SavedPendingInstantiations.end()); } } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos.
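/// A hypothetical usage sketch (ours; 'EPI' and 'NumParams' are assumed to be /// a FunctionProtoType::ExtProtoInfo and a parameter count in scope): /// \code /// ExtParameterInfoBuilder ParamInfos; /// ParamInfos.set(0, FunctionProtoType::ExtParameterInfo().withIsNoEscape(true)); /// EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(NumParams); /// \endcode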
class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index. void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='.
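/// For illustration (example ours): in /// \code /// struct X { auto operator<=>(const X&) const = default; }; /// \endcode /// the defaulted 'operator<=>' also gives rise to an implicit defaulted /// 'operator==' with the same parameter type.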
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTypeConstraint(TemplateTypeParmDecl *Inst, const TypeConstraint *TC, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef<TemplateArgument> TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired 
= false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( 
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type.
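/// (For illustration, examples ours: the kinds of types written as /// 'NSArray<NSString *> *' with type arguments, or 'id<NSCopying>' with /// protocol qualifiers.)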
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyPtrTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed. void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier.
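/// For illustration (examples ours): in '[super init]' the receiver is /// 'super'; in '[view frame]' the identifier names an instance; in /// '[NSString string]' it names a class.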
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
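/// (For illustration, example ours: an 'init' method declared to return /// 'instancetype' stays compatible with each subclass's related result type, /// whereas a hard-coded return class may not.)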
ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaAlignPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaAlignPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispMode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, NamedDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// Are precise floating point semantics currently enabled? 
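/// Precise semantics hold only while reassociation, the no-signed-zeros /// assumption, reciprocal approximation, and approximate math functions are /// all disabled, which is exactly the conjunction checked below.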
bool isPreciseFPEnabled() { return !CurFPFeatures.getAllowFPReassociate() && !CurFPFeatures.getNoSignedZero() && !CurFPFeatures.getAllowReciprocal() && !CurFPFeatures.getAllowApproxFunc(); } /// ActOnPragmaFloatControl - Called on well-formed \#pragma float_control void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action, PragmaFloatControlKind Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC); /// Called on well formed /// \#pragma clang fp reassociate void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled); /// ActOnPragmaFEnvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled); /// Called on well formed '\#pragma clang fp' that has option 'exceptions'. void ActOnPragmaFPExceptions(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// Called to set constant rounding mode for floating point operations. void setRoundingMode(SourceLocation Loc, llvm::RoundingMode); /// Called to set exception behavior for floating point operations. void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext.
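///
/// The visibility stack maintained by the hooks above corresponds to source
/// such as (an illustrative sketch):
/// \code
/// #pragma GCC visibility push(hidden)
/// void helper(); // receives hidden visibility from the pushed context
/// #pragma GCC visibility pop
/// \endcode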
void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D. void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Annot, MutableArrayRef<Expr *> Args); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration.
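///
/// Illustrative use of the attribute this adds (an OpenCL kernel sketch):
/// \code
/// __attribute__((amdgpu_waves_per_eu(2, 4))) // Min = 2, Max = 4
/// __kernel void k(__global float *p);
/// \endcode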
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); /// Lookup 'coroutine_traits' in std namespace and std::experimental /// namespace. The namespace found is recorded in Namespace. ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc, NamespaceDecl *&Namespace); /// Check that the expression co_await promise.final_suspend() shall not be /// potentially-throwing. bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; struct DeclareTargetContextInfo { struct MapInfo { OMPDeclareTargetDeclAttr::MapTypeTy MT; SourceLocation Loc; }; /// Explicitly listed variables and functions in a 'to' or 'link' clause. llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped; /// The 'device_type' as parsed from the clause. OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any; /// The directive kind, `begin declare target` or `declare target`. OpenMPDirectiveKind Kind; /// The directive location. SourceLocation Loc; DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc) : Kind(Kind), Loc(Loc) {} }; /// Number of nested '#pragma omp declare target' directives. SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true, bool SuppressExprDiags = false); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Analyzes and checks a loop nest for use by a loop transformation. /// /// \param Kind The loop transformation directive kind. /// \param NumLoops How many nested loops the directive is expecting. 
/// \param AStmt Associated statement of the transformation directive. /// \param LoopHelpers [out] The loop analysis result. /// \param Body [out] The body code nested in \p NumLoops loops. /// \param OriginalInits [out] Collection of statements and declarations that /// must have been executed/declared before entering the /// loop. /// /// \return Whether there was any error. bool checkTransformableLoopNest( OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops, SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers, Stmt *&Body, SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>> &OriginalInits); /// Helper to keep information about the current `omp begin/end declare /// variant` nesting. struct OMPDeclareVariantScope { /// The associated OpenMP context selector. OMPTraitInfo *TI; /// The associated OpenMP context selector mangling. std::string NameSuffix; OMPDeclareVariantScope(OMPTraitInfo &TI); }; /// Return the OMPTraitInfo for the surrounding scope, if any. OMPTraitInfo *getOMPTraitInfoForSurroundingScope() { return OMPDeclareVariantScopes.empty() ? nullptr : OMPDeclareVariantScopes.back().TI; } /// The current `omp begin/end declare variant` scopes. SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes; /// The current `omp begin/end assumes` scopes. SmallVector<AssumptionAttr *, 4> OMPAssumeScoped; /// All `omp assumes` we encountered so far. SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal; public: /// The declarator \p D defines a function in the scope \p S which is nested /// in an `omp begin/end declare variant` scope. In this method we create a /// declaration for \p D and rename \p D according to the OpenMP context /// selector of the surrounding scope. Return all base functions in \p Bases. void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope( Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, SmallVectorImpl<FunctionDecl *> &Bases); /// Register \p D as specialization of all base functions in \p Bases in the /// current `omp begin/end declare variant` scope. void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( Decl *D, SmallVectorImpl<FunctionDecl *> &Bases); /// Act on \p D, a function definition inside of an `omp [begin/end] assumes` /// region. void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D); /// Can we exit an OpenMP declare variant scope at the moment? bool isInOpenMPDeclareVariantScope() const { return !OMPDeclareVariantScopes.empty(); } /// Given the potential call expression \p Call, determine if there is a /// specialization via the OpenMP declare variant mechanism available. If /// there is, return the specialized call expression, otherwise return the /// original \p Call. ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig); /// Handle an `omp begin declare variant`. void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI); /// Handle an `omp end declare variant`. void ActOnOpenMPEndDeclareVariant(); /// Checks if the variant/multiversion functions are compatible.
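///
/// Illustrative sketch of the kind of multiversioned pair being checked
/// (the function name 'dot' is hypothetical):
/// \code
/// __attribute__((target("avx2")))    int dot(void);
/// __attribute__((target("default"))) int dot(void);
/// \endcode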
bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Tries to capture a lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a D should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in a 'private' clause. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, unsigned CapLevel) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; /// Check if the specified global variable must be captured by outer capture /// regions. /// \param Level Relative level of nested OpenMP construct for which /// the check is performed. bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); /// Called on well-formed '\#pragma omp metadirective' after parsing /// of the associated statement.
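///
/// Illustrative metadirective this handles (an OpenMP 5.x sketch):
/// \code
/// #pragma omp metadirective when(device = {arch(nvptx64)} : target teams distribute parallel for) default(parallel for)
/// for (int i = 0; i < n; ++i) body(i);
/// \endcode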
StmtResult ActOnOpenMPMetaDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp [begin] assume[s]'. void ActOnOpenMPAssumesDirective(SourceLocation Loc, OpenMPDirectiveKind DKind, ArrayRef<std::string> Assumptions, bool SkippedClauses); /// Check if there is an active global `omp begin assumes` directive. bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); } /// Check if there is an active global `omp assumes` directive. bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); } /// Called on well-formed '#pragma omp end assumes'. void ActOnOpenMPEndAssumesDirective(); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on the Requires directive. OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct combiner. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct combiner. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'.
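///
/// Illustrative 'declare mapper' that the start hook introduces (the names
/// 'id' and 'Vec' are hypothetical):
/// \code
/// struct Vec { int len; double *data; };
/// #pragma omp declare mapper(id : struct Vec v) map(v.len, v.data[0:v.len])
/// \endcode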
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const; const ValueDecl *getOpenMPDeclareMapperVarName() const; /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Called at the end of target region i.e. '#pragma omp end declare target'. const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective(); /// Called once a target context is completed, that can be when a /// '#pragma omp end declare target' was encountered or when a /// '#pragma omp declare target' without declaration-definition-seq was /// encountered. void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, OMPDeclareTargetDeclAttr::DevTypeTy DT); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Finishes analysis of the deferred functions calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, const FunctionDecl *Callee, SourceLocation Loc); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return !DeclareTargetNesting.empty(); } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to /// an OpenMP loop directive. StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt); /// Process a canonical OpenMP loop nest that can either be a canonical /// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an /// OpenMP loop transformation construct. StmtResult ActOnOpenMPLoopnest(Stmt *AStmt); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. 
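///
/// For example (a sketch), for
/// \code
/// #pragma omp parallel num_threads(4)
/// { work(); }
/// \endcode
/// the compound statement arrives as \p AStmt together with the parsed
/// 'num_threads' clause in \p Clauses.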
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '#pragma omp tile' after parsing of its clauses and /// the associated statement. StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '#pragma omp unroll' after parsing of its clauses /// and the associated statement. StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp depobj'. StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp scan'. StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. 
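///
/// Illustrative nesting (a sketch); a 'teams' region must be closely nested
/// inside a 'target' region:
/// \code
/// #pragma omp target
/// #pragma omp teams num_teams(8)
/// { work(); }
/// \endcode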
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp interop'. StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp dispatch' after parsing of the /// associated statement. StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp masked' after parsing of the /// associated statement. StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp loop' after parsing of the /// associated statement. StmtResult ActOnOpenMPGenericLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type, bool IsDeclareSimd = false); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); /// Checks '\#pragma omp declare variant' variant function and original /// functions after parsing of the associated method/function. /// \param DG Function declaration to which the declare variant directive is /// applied. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The trait info object representing the match clause. /// \param NumAppendArgs The number of omp_interop_t arguments to account for /// in checking. /// \returns None, if the function/variant function are not compatible with /// the pragma, pair of original function/variant ref expression otherwise. Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef, OMPTraitInfo &TI, unsigned NumAppendArgs, SourceRange SR); /// Called on well-formed '\#pragma omp declare variant' after parsing of /// the associated method/function. /// \param FD Function declaration to which the declare variant directive is /// applied. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The context traits associated with the function variant. /// \param AdjustArgsNothing The list of 'nothing' arguments. /// \param AdjustArgsNeedDevicePtr The list of 'need_device_ptr' arguments. /// \param AppendArgs The list of 'append_args' arguments. /// \param AdjustArgsLoc The Location of an 'adjust_args' clause. /// \param AppendArgsLoc The Location of an 'append_args' clause. /// \param SR The SourceRange of the 'declare variant' directive.
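///
/// Illustrative directive being acted on (the function names are
/// hypothetical):
/// \code
/// #pragma omp declare variant(saxpy_avx) match(device = {isa(avx)})
/// void saxpy(int n, float a, float *x, float *y);
/// \endcode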
void ActOnOpenMPDeclareVariantDirective( FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI, ArrayRef<Expr *> AdjustArgsNothing, ArrayRef<Expr *> AdjustArgsNeedDevicePtr, ArrayRef<OMPDeclareVariantAttr::InteropType> AppendArgs, SourceLocation AdjustArgsLoc, SourceLocation AppendArgsLoc, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'align' clause. OMPClause *ActOnOpenMPAlignClause(Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'sizes' clause. OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'full' clause. OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'partial' clause. OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'detach' clause. OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'when' clause.
OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'order' clause. OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'compare' clause. OMPClause *ActOnOpenMPCompareClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acq_rel' clause. OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acquire' clause. OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'release' clause. OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'relaxed' clause. OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'init' clause. 
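///
/// Illustrative 'init' clause on an 'interop' directive (a sketch):
/// \code
/// omp_interop_t obj = omp_interop_none;
/// #pragma omp interop init(targetsync : obj)
/// \endcode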
OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs, bool IsTarget, bool IsTargetSync, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'use' clause. OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'destroy' clause. OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Called on well-formed 'novariants' clause. OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'nocontext' clause. OMPClause *ActOnOpenMPNocontextClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'filter' clause. OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_shared_memory' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause. OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit, SourceLocation ExtraModifierLoc, ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc); /// Called on well-formed 'inclusive' clause. OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'exclusive' clause. OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause.
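///
/// For example (a sketch), each variable listed in
/// \code
/// #pragma omp parallel for private(tmp)
/// for (int i = 0; i < n; ++i) { tmp = f(i); use(tmp); }
/// \endcode
/// reaches this handler through \p VarList.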
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause( ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depobj' pseudo clause. OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. 
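///
/// Illustrative 'device' clause with its optional modifier (a sketch):
/// \code
/// #pragma omp target device(device_num : 1)
/// { work(); }
/// \endcode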
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause *ActOnOpenMPMapClause( ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, bool NoDiagnose = false, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause * ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'use_device_addr' clause. OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'nontemporal' clause. OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Data for list of allocators. struct UsesAllocatorsData { /// Allocator. Expr *Allocator = nullptr; /// Allocator traits. Expr *AllocatorTraits = nullptr; /// Locations of '(' and ')' symbols. SourceLocation LParenLoc, RParenLoc; }; /// Called on well-formed 'uses_allocators' clause. 
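///
/// Illustrative 'uses_allocators' clause (a sketch; 'my_alloc' and
/// 'my_traits' are hypothetical); each allocator and its optional traits
/// arrive here as one UsesAllocatorsData entry:
/// \code
/// #pragma omp target uses_allocators(omp_default_mem_alloc, my_alloc(my_traits))
/// { work(); }
/// \endcode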
  OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
                                            SourceLocation LParenLoc,
                                            SourceLocation EndLoc,
                                            ArrayRef<UsesAllocatorsData> Data);
  /// Called on well-formed 'affinity' clause.
  OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation ColonLoc,
                                       SourceLocation EndLoc, Expr *Modifier,
                                       ArrayRef<Expr *> Locators);
  /// Called on a well-formed 'bind' clause.
  OMPClause *ActOnOpenMPBindClause(OpenMPBindClauseKind Kind,
                                   SourceLocation KindLoc,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);

  /// The kind of conversion being performed.
  enum CheckedConversionKind {
    /// An implicit conversion.
    CCK_ImplicitConversion,
    /// A C-style cast.
    CCK_CStyleCast,
    /// A functional-style cast.
    CCK_FunctionalCast,
    /// A cast other than a C-style cast.
    CCK_OtherCast,
    /// A conversion for an operand of a builtin overloaded operator.
    CCK_ForBuiltinOverloadedOp
  };

  static bool isCast(CheckedConversionKind CCK) {
    return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
           CCK == CCK_OtherCast;
  }

  /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
  /// cast. If there is already an implicit cast, merge into the existing one.
  /// If isLvalue, the result of the cast is an lvalue.
  ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                               ExprValueKind VK = VK_PRValue,
                               const CXXCastPath *BasePath = nullptr,
                               CheckedConversionKind CCK
                                  = CCK_ImplicitConversion);

  /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
  /// to the conversion from scalar type ScalarTy to the Boolean type.
  static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

  /// IgnoredValueConversions - Given that an expression's result is
  /// syntactically ignored, perform any conversions that are
  /// required.
  ExprResult IgnoredValueConversions(Expr *E);

  // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
  // functions and arrays to their respective pointers (C99 6.3.2.1).
  ExprResult UsualUnaryConversions(Expr *E);

  /// CallExprUnaryConversions - a special case of a unary conversion
  /// performed on a function designator of a call expression.
  ExprResult CallExprUnaryConversions(Expr *E);

  // DefaultFunctionArrayConversion - converts functions and arrays
  // to their respective pointers (C99 6.3.2.1).
  ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);

  // DefaultFunctionArrayLvalueConversion - converts functions and
  // arrays to their respective pointers and performs the
  // lvalue-to-rvalue conversion.
  ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
                                                  bool Diagnose = true);

  // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
  // the operand. This function is a no-op if the operand has a function type
  // or an array type.
  ExprResult DefaultLvalueConversion(Expr *E);

  // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
  // do not have a prototype. Integer promotions are performed on each
  // argument, and arguments that have type float are promoted to double.
  ExprResult DefaultArgumentPromotion(Expr *E);

  /// If \p E is a prvalue denoting an unmaterialized temporary, materialize
  /// it as an xvalue. In C++98, the result will still be a prvalue, because
  /// we don't have xvalues there.
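  ///
  /// Example (illustrative only): given
  ///   struct S {};  S f();
  /// materializing the prvalue 'f()' so that it can be bound to
  ///   const S &r = f();
  /// produces a MaterializeTemporaryExpr xvalue in C++11 and later.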
  ExprResult TemporaryMaterializationConversion(Expr *E);

  // Used for emitting the right warning by DefaultVariadicArgumentPromotion
  enum VariadicCallType {
    VariadicFunction,
    VariadicBlock,
    VariadicMethod,
    VariadicConstructor,
    VariadicDoesNotApply
  };

  VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                       const FunctionProtoType *Proto,
                                       Expr *Fn);

  // Used for determining in which context a type is allowed to be passed to a
  // vararg function.
  enum VarArgKind {
    VAK_Valid,
    VAK_ValidInCXX11,
    VAK_Undefined,
    VAK_MSVCUndefined,
    VAK_Invalid
  };

  // Determines which VarArgKind fits an expression.
  VarArgKind isValidVarArgType(const QualType &Ty);

  /// Check to see if the given expression is a valid argument to a variadic
  /// function, issuing a diagnostic if not.
  void checkVariadicArgument(const Expr *E, VariadicCallType CT);

  /// Check whether the given statement can have musttail applied to it,
  /// issuing a diagnostic and returning false if not. In the success case,
  /// the statement is rewritten to remove implicit nodes from the return
  /// value.
  bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA);

private:
  /// Check whether the given statement can have musttail applied to it,
  /// issuing a diagnostic and returning false if not.
  bool checkMustTailAttr(const Stmt *St, const Attr &MTA);

public:
  /// Check to see if a given expression could have '.c_str()' called on it.
  bool hasCStrMethod(const Expr *E);

  /// GatherArgumentsForCall - Collects argument expressions for various
  /// forms of call prototypes.
  bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                              const FunctionProtoType *Proto,
                              unsigned FirstParam, ArrayRef<Expr *> Args,
                              SmallVectorImpl<Expr *> &AllArgs,
                              VariadicCallType CallType = VariadicDoesNotApply,
                              bool AllowExplicit = false,
                              bool IsListInitialization = false);

  // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
  // will create a runtime trap if the resulting type is not a POD type.
  ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                              FunctionDecl *FDecl);

  /// Context in which we're performing a usual arithmetic conversion.
  enum ArithConvKind {
    /// An arithmetic operation.
    ACK_Arithmetic,
    /// A bitwise operation.
    ACK_BitwiseOp,
    /// A comparison.
    ACK_Comparison,
    /// A conditional (?:) operator.
    ACK_Conditional,
    /// A compound assignment expression.
    ACK_CompAssign,
  };

  // UsualArithmeticConversions - performs the UsualUnaryConversions on its
  // operands and then handles various conversions that are common to binary
  // operators (C99 6.3.1.8). If both operands aren't arithmetic, this
  // routine returns the first non-arithmetic type found. The client is
  // responsible for emitting appropriate error diagnostics.
  QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc, ArithConvKind ACK);

  /// AssignConvertType - All of the 'assignment' semantic checks return this
  /// enum to indicate whether the assignment was allowed. These checks are
  /// done for simple assignments, as well as initialization, return from
  /// function, argument passing, etc. The query is phrased in terms of a
  /// source and destination type.
  enum AssignConvertType {
    /// Compatible - the types are compatible according to the standard.
    Compatible,

    /// PointerToInt - The assignment converts a pointer to an int, which we
    /// accept as an extension.
    PointerToInt,

    /// IntToPointer - The assignment converts an int to a pointer, which we
    /// accept as an extension.
    IntToPointer,

    /// FunctionVoidPointer - The assignment is between a function pointer and
    /// void*, which the standard doesn't allow, but we accept as an extension.
    FunctionVoidPointer,

    /// IncompatiblePointer - The assignment is between two pointer types that
    /// are not compatible, but we accept them as an extension.
    IncompatiblePointer,

    /// IncompatibleFunctionPointer - The assignment is between two function
    /// pointer types that are not compatible, but we accept them as an
    /// extension.
    IncompatibleFunctionPointer,

    /// IncompatiblePointerSign - The assignment is between two pointer types
    /// which point to integers which have a different sign, but are otherwise
    /// identical. This is a subset of the above, but broken out because it's
    /// by far the most common case of incompatible pointers.
    IncompatiblePointerSign,

    /// CompatiblePointerDiscardsQualifiers - The assignment discards
    /// c/v/r qualifiers, which we accept as an extension.
    CompatiblePointerDiscardsQualifiers,

    /// IncompatiblePointerDiscardsQualifiers - The assignment
    /// discards qualifiers that we don't permit to be discarded,
    /// like address spaces.
    IncompatiblePointerDiscardsQualifiers,

    /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
    /// changes address spaces in nested pointer types which is not allowed.
    /// For instance, converting __private int ** to __generic int ** is
    /// illegal even though __private could be converted to __generic.
    IncompatibleNestedPointerAddressSpaceMismatch,

    /// IncompatibleNestedPointerQualifiers - The assignment is between two
    /// nested pointer types, and the qualifiers other than the first two
    /// levels differ e.g. char ** -> const char **, but we accept them as an
    /// extension.
    IncompatibleNestedPointerQualifiers,

    /// IncompatibleVectors - The assignment is between two vector types that
    /// have the same size, which we accept as an extension.
    IncompatibleVectors,

    /// IntToBlockPointer - The assignment converts an int to a block
    /// pointer. We disallow this.
    IntToBlockPointer,

    /// IncompatibleBlockPointer - The assignment is between two block
    /// pointer types that are not compatible.
    IncompatibleBlockPointer,

    /// IncompatibleObjCQualifiedId - The assignment is between a qualified
    /// id type and something else (that is incompatible with it). For
    /// example, "id <XXX>" = "Foo *", where "Foo *" doesn't implement the
    /// XXX protocol.
    IncompatibleObjCQualifiedId,

    /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
    /// object with __weak qualifier.
    IncompatibleObjCWeakRef,

    /// Incompatible - We reject this conversion outright, it is invalid to
    /// represent it in the AST.
    Incompatible
  };

  /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
  /// assignment conversion type specified by ConvTy. This returns true if
  /// the conversion was invalid or false if the conversion was accepted.
  bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
                                SourceLocation Loc,
                                QualType DstType, QualType SrcType,
                                Expr *SrcExpr, AssignmentAction Action,
                                bool *Complained = nullptr);

  /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
  /// enum. If AllowMask is true, then we also allow the complement of a valid
  /// value, to be used as a mask.
  bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                         bool AllowMask) const;

  /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
  /// integer not in the range of enum values.
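  ///
  /// Example of the diagnosed pattern (illustrative; see -Wassign-enum):
  ///   enum Color { Red, Green, Blue };
  ///   enum Color C = (enum Color)42; // 42 is outside the range of Color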
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
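  ///
  /// A typical caller follows this convention (illustrative sketch):
  ///   QualType ResultTy = CheckAdditionOperands(LHS, RHS, OpLoc, BO_Add);
  ///   if (ResultTy.isNull())
  ///     return ExprError(); // a diagnostic has already been emitted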
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
  QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                               SourceLocation Loc, bool IsCompAssign,
                               bool AllowBothBool, bool AllowBoolConversion);
  QualType GetSignedVectorType(QualType V);
  QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc,
                                      BinaryOperatorKind Opc);
  QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc);

  /// Type checking for matrix binary operators.
  QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
                                          SourceLocation Loc,
                                          bool IsCompAssign);
  QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
                                       SourceLocation Loc, bool IsCompAssign);

  bool isValidSveBitcast(QualType srcType, QualType destType);

  bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy);

  bool areVectorTypesSameSize(QualType srcType, QualType destType);
  bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
  bool isLaxVectorConversion(QualType srcType, QualType destType);

  /// type checking declaration initializers (C99 6.7.8)
  bool CheckForConstantInitializer(Expr *e, QualType t);

  // type checking C++ declaration initializers (C++ [dcl.init]).

  /// ReferenceCompareResult - Expresses the result of comparing two
  /// types (cv1 T1 and cv2 T2) to determine their compatibility for the
  /// purposes of initialization by reference (C++ [dcl.init.ref]p4).
  enum ReferenceCompareResult {
    /// Ref_Incompatible - The two types are incompatible, so direct
    /// reference binding is not possible.
    Ref_Incompatible = 0,
    /// Ref_Related - The two types are reference-related, which means
    /// that their unqualified forms (T1 and T2) are either the same
    /// or T1 is a base class of T2.
    Ref_Related,
    /// Ref_Compatible - The two types are reference-compatible.
    Ref_Compatible
  };

  // Fake up a scoped enumeration that still contextually converts to bool.
  struct ReferenceConversionsScope {
    /// The conversions that would be performed on an lvalue of type T2 when
    /// binding a reference of type T1 to it, as determined when evaluating
    /// whether T1 is reference-compatible with T2.
    enum ReferenceConversions {
      Qualification = 0x1,
      NestedQualification = 0x2,
      Function = 0x4,
      DerivedToBase = 0x8,
      ObjC = 0x10,
      ObjCLifetime = 0x20,

      LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
    };
  };
  using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;

  ReferenceCompareResult
  CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
                               ReferenceConversions *Conv = nullptr);

  ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                                 Expr *CastExpr, CastKind &CastKind,
                                 ExprValueKind &VK, CXXCastPath &Path);

  /// Force an expression with unknown-type to an expression of the
  /// given type.
  ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

  /// Type-check an expression that's being passed to an
  /// __unknown_anytype parameter.
  ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result,
                                QualType &paramType);

  // CheckMatrixCast - Check type constraints for matrix casts.
  // We allow casting between matrices of the same dimensions, i.e. when they
  // have the same number of rows and columns. Returns true if the cast is
  // invalid.
  bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy,
                       CastKind &Kind);

  // CheckVectorCast - check type constraints for vectors.
  // Since vectors are an extension, there is no C standard reference for this.
  // We allow casting between vectors and integer datatypes of the same size.
  // returns true if the cast is invalid
  bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                       CastKind &Kind);

  /// Prepare `SplattedExpr` for a vector splat operation, adding
  /// implicit casts if necessary.
  ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);

  // CheckExtVectorCast - check type constraints for extended vectors.
  // Since vectors are an extension, there is no C standard reference for this.
  // We allow casting between vectors and integer datatypes of the same size,
  // or vectors and the element type of that vector.
  // returns the cast expr
  ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                                CastKind &Kind);

  ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
                                        SourceLocation LParenLoc,
                                        Expr *CastExpr,
                                        SourceLocation RParenLoc);

  enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };

  /// Checks for invalid conversions and casts between
  /// retainable pointers and other pointer kinds for ARC and Weak.
  ARCConversionResult CheckObjCConversion(SourceRange castRange,
                                          QualType castType, Expr *&op,
                                          CheckedConversionKind CCK,
                                          bool Diagnose = true,
                                          bool DiagnoseCFAudited = false,
                                          BinaryOperatorKind Opc = BO_PtrMemD);

  Expr *stripARCUnbridgedCast(Expr *e);
  void diagnoseARCUnbridgedCast(Expr *e);

  bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                             QualType ExprType);

  /// checkRetainCycles - Check whether an Objective-C message send
  /// might create an obvious retain cycle.
  void checkRetainCycles(ObjCMessageExpr *msg);
  void checkRetainCycles(Expr *receiver, Expr *argument);
  void checkRetainCycles(VarDecl *Var, Expr *Init);

  /// checkUnsafeAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained type.
  bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);

  /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained expression.
  void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);

  /// CheckMessageArgumentTypes - Check types in an Obj-C message send.
  /// \param Method - May be null.
  /// \param [out] ReturnType - The return type of the send.
  /// \return true iff there were any incompatible types.
  bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
                                 MultiExprArg Args, Selector Sel,
                                 ArrayRef<SourceLocation> SelectorLocs,
                                 ObjCMethodDecl *Method, bool isClassMessage,
                                 bool isSuperMessage, SourceLocation lbrac,
                                 SourceLocation rbrac, SourceRange RecRange,
                                 QualType &ReturnType, ExprValueKind &VK);

  /// Determine the result of a message send expression based on
  /// the type of the receiver, the method expected to receive the message,
  /// and the form of the message send.
  QualType getMessageSendResultType(const Expr *Receiver,
                                    QualType ReceiverType,
                                    ObjCMethodDecl *Method,
                                    bool isClassMessage, bool isSuperMessage);

  /// If the given expression involves a message send to a method
  /// with a related result type, emit a note describing what happened.
  void EmitRelatedResultTypeNote(const Expr *E);

  /// Given that we had incompatible pointer types in a return
  /// statement, check whether we're in a method with a related result
  /// type, and if so, emit a note describing what happened.
  void EmitRelatedResultTypeNoteForReturn(QualType destType);

  class ConditionResult {
    Decl *ConditionVar;
    FullExprArg Condition;
    bool Invalid;
    bool HasKnownValue;
    bool KnownValue;

    friend class Sema;
    ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                    bool IsConstexpr)
        : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
          HasKnownValue(IsConstexpr && Condition.get() &&
                        !Condition.get()->isValueDependent()),
          KnownValue(HasKnownValue &&
                     !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
    explicit ConditionResult(bool Invalid)
        : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
          HasKnownValue(false), KnownValue(false) {}

  public:
    ConditionResult() : ConditionResult(false) {}
    bool isInvalid() const { return Invalid; }
    std::pair<VarDecl *, Expr *> get() const {
      return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                            Condition.get());
    }
    llvm::Optional<bool> getKnownValue() const {
      if (!HasKnownValue)
        return None;
      return KnownValue;
    }
  };
  static ConditionResult ConditionError() { return ConditionResult(true); }

  enum class ConditionKind {
    Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
    ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
    Switch       ///< An integral condition for a 'switch' statement.
  };

  ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr,
                                 ConditionKind CK);
  ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                         SourceLocation StmtLoc,
                                         ConditionKind CK);

  DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

  ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                    SourceLocation StmtLoc, ConditionKind CK);
  ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

  /// CheckBooleanCondition - Diagnose problems involving the use of
  /// the given expression as a boolean condition (e.g. in an if
  /// statement). Also performs the standard function and array
  /// decays, possibly changing the input variable.
  ///
  /// \param Loc - A location associated with the condition, e.g. the
  /// 'if' keyword.
  /// \return the possibly-converted condition expression, or ExprError()
  /// if any errors occurred.
  ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
                                   bool IsConstexpr = false);

  /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an
  /// expression found in an explicit(bool) specifier.
  ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);

  /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
  /// Returns true if the explicit specifier is now resolved.
  bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);

  /// DiagnoseAssignmentAsCondition - Given that an expression is
  /// being used as a boolean condition, warn if it's an assignment.
  void DiagnoseAssignmentAsCondition(Expr *E);

  /// Redundant parentheses over an equality comparison can indicate
  /// that the user intended an assignment used as condition.
  void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

  /// CheckCXXBooleanCondition - Returns ExprError() if conversion to bool
  /// is invalid.
  ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);

  /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
  /// the specified width and sign. If an overflow occurs, detect it and emit
  /// the specified diagnostic.
  void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                          unsigned NewWidth, bool NewSign,
                                          SourceLocation Loc, unsigned DiagID);

  /// Checks that the Objective-C declaration is declared in the global scope.
  /// Emits an error and marks the declaration as invalid if it's not declared
  /// in the global scope.
  bool CheckObjCDeclScope(Decl *D);

  /// Abstract base class used for diagnosing integer constant
  /// expression violations.
  class VerifyICEDiagnoser {
  public:
    bool Suppress;

    VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

    virtual SemaDiagnosticBuilder
    diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T);
    virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
                                                 SourceLocation Loc) = 0;
    virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
    virtual ~VerifyICEDiagnoser() {}
  };

  enum AllowFoldKind {
    NoFold,
    AllowFold,
  };

  /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
  /// and reports the appropriate diagnostics. Returns ExprError() on failure.
  /// Can optionally return the value of the expression.
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             VerifyICEDiagnoser &Diagnoser,
                                             AllowFoldKind CanFold = NoFold);
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             unsigned DiagID,
                                             AllowFoldKind CanFold = NoFold);
  ExprResult VerifyIntegerConstantExpression(Expr *E,
                                             llvm::APSInt *Result = nullptr,
                                             AllowFoldKind CanFold = NoFold);
  ExprResult VerifyIntegerConstantExpression(Expr *E,
                                             AllowFoldKind CanFold = NoFold) {
    return VerifyIntegerConstantExpression(E, nullptr, CanFold);
  }

  /// VerifyBitField - verifies that a bit field expression is an ICE and has
  /// the correct width, and that the field type is valid.
  /// Returns ExprError() on failure.
  /// Can optionally return whether the bit-field is of width 0
  ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                            QualType FieldTy, bool IsMsStruct, Expr *BitWidth,
                            bool *ZeroWidth = nullptr);

private:
  unsigned ForceCUDAHostDeviceDepth = 0;

public:
  /// Increments our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. So long as this count is greater
  /// than zero, all functions encountered will be __host__ __device__.
  void PushForceCUDAHostDevice();

  /// Decrements our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. Returns false if the count is 0
  /// before decrementing, so you can emit an error.
  bool PopForceCUDAHostDevice();

  /// Diagnostics that are emitted only if we discover that the given function
  /// must be codegen'ed. Because handling these correctly adds overhead to
  /// compilation, this is currently only enabled for CUDA compilations.
  llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
                 std::vector<PartialDiagnosticAt>>
      DeviceDeferredDiags;

  /// A pair of a canonical FunctionDecl and a SourceLocation. When used as
  /// the key in a hashtable, both the FD and location are hashed.
  struct FunctionDeclAndLoc {
    CanonicalDeclPtr<FunctionDecl> FD;
    SourceLocation Loc;
  };

  /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
  /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting
  /// the same deferred diag twice.
  llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

  /// An inverse call graph, mapping known-emitted functions to one of their
  /// known-emitted callers (plus the location of the call).
  ///
  /// Functions that we can tell a priori must be emitted aren't added to this
  /// map.
  llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
                 /* Caller = */ FunctionDeclAndLoc>
      DeviceKnownEmittedFns;

  /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
  /// context is "used as device code".
  ///
  /// - If CurContext is a __host__ function, does not emit any diagnostics
  ///   unless \p EmitOnBothSides is true.
  /// - If CurContext is a __device__ or __global__ function, emits the
  ///   diagnostics immediately.
  /// - If CurContext is a __host__ __device__ function and we are compiling
  ///   for the device, creates a diagnostic which is emitted if and when we
  ///   realize that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in CUDA device code.
  ///  if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc,
                                             unsigned DiagID);

  /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
  /// context is "used as host code".
  ///
  /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
  SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc,
                                           unsigned DiagID);

  /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
  /// context is "used as device code".
  ///
  /// - If CurContext is a `declare target` function or it is known that the
  ///   function is emitted for the device, emits the diagnostics immediately.
  /// - If CurContext is a non-`declare target` function and we are compiling
  ///   for the device, creates a diagnostic which is emitted if and when we
  ///   realize that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in NVPTX device code.
  ///  if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  SemaDiagnosticBuilder
  diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID,
                         FunctionDecl *FD);

  /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
  /// context is "used as host code".
  ///
  /// - If CurContext is a `declare target` function or it is known that the
  ///   function is emitted for the host, emits the diagnostics immediately.
  /// - If CurContext is a non-host function, just ignore it.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in NVPTX device code.
  ///  if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc,
                                             unsigned DiagID,
                                             FunctionDecl *FD);

  SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID,
                                   FunctionDecl *FD = nullptr);
  SemaDiagnosticBuilder targetDiag(SourceLocation Loc,
                                   const PartialDiagnostic &PD,
                                   FunctionDecl *FD = nullptr) {
    return targetDiag(Loc, PD.getDiagID(), FD) << PD;
  }

  /// Check if the type is allowed to be used for the current target.
  void checkTypeSupport(QualType Ty, SourceLocation Loc,
                        ValueDecl *D = nullptr);

  enum CUDAFunctionTarget {
    CFT_Device,
    CFT_Global,
    CFT_Host,
    CFT_HostDevice,
    CFT_InvalidTarget
  };

  /// Determines whether the given function is a CUDA device/host/kernel/etc.
  /// function.
  ///
  /// Use this rather than examining the function's attributes yourself -- you
  /// will get it wrong. Returns CFT_Host if D is null.
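  ///
  /// Example usage (an illustrative sketch; FD stands for some FunctionDecl
  /// already seen by Sema):
  ///
  ///  if (getLangOpts().CUDA && IdentifyCUDATarget(FD) == CFT_Global)
  ///    ; // FD is a __global__ kernel.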
  CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
                                        bool IgnoreImplicitHDAttr = false);
  CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);

  enum CUDAVariableTarget {
    CVT_Device,  /// Emitted on device side with a shadow variable on host side
    CVT_Host,    /// Emitted on host side only
    CVT_Both,    /// Emitted on both sides with different addresses
    CVT_Unified, /// Emitted as a unified address, e.g. managed variables
  };
  /// Determines whether the given variable is emitted on host or device side.
  CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D);

  /// Gets the CUDA target for the current context.
  CUDAFunctionTarget CurrentCUDATarget() {
    return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
  }

  static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);

  // CUDA function call preference. Must be ordered numerically from
  // worst to best.
  enum CUDAFunctionPreference {
    CFP_Never,      // Invalid caller/callee combination.
    CFP_WrongSide,  // Calls from host-device to host or device
                    // function that do not match current compilation
                    // mode.
    CFP_HostDevice, // Any calls to host/device functions.
    CFP_SameSide,   // Calls from host-device to host or device
                    // function matching current compilation mode.
    CFP_Native,     // host-to-host or device-to-device calls.
  };

  /// Identifies relative preference of a given Caller/Callee
  /// combination, based on their host/device attributes.
  /// \param Caller function which needs address of \p Callee.
  ///               nullptr in case of global context.
  /// \param Callee target function
  ///
  /// \returns preference value for particular Caller/Callee combination.
  CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                                const FunctionDecl *Callee);

  /// Determines whether Caller may invoke Callee, based on their CUDA
  /// host/device attributes. Returns false if the call is not allowed.
  ///
  /// Note: Will return true for CFP_WrongSide calls. These may appear in
  /// semantically correct CUDA programs, but only if they're never
  /// codegen'ed.
  bool IsAllowedCUDACall(const FunctionDecl *Caller,
                         const FunctionDecl *Callee) {
    return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
  }

  /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
  /// depending on FD and the current compilation settings.
  void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
                                   const LookupResult &Previous);

  /// May add implicit CUDAConstantAttr attribute to VD, depending on VD
  /// and current compilation settings.
  void MaybeAddCUDAConstantAttr(VarDecl *VD);

public:
  /// Check whether we're allowed to call Callee from the current context.
  ///
  /// - If the call is never allowed in a semantically-correct program
  ///   (CFP_Never), emits an error and returns false.
  ///
  /// - If the call is allowed in semantically-correct programs, but only if
  ///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic
  ///   to be emitted if and when the caller is codegen'ed, and returns true.
  ///
  ///   Will only create deferred diagnostics for a given SourceLocation once,
  ///   so you can safely call this multiple times without generating
  ///   duplicate deferred errors.
  ///
  /// - Otherwise, returns true without emitting any diagnostics.
  bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);

  void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);

  /// Set __device__ or __host__ __device__ attributes on the given lambda
  /// operator() method.
  ///
  /// CUDA lambdas are, by default, host device functions unless they have an
  /// explicit host or device attribute.
  void CUDASetLambdaAttrs(CXXMethodDecl *Method);

  /// Finds a function in \p Matches with highest calling priority
  /// from \p Caller context and erases all functions with lower
  /// calling priority.
  void EraseUnwantedCUDAMatches(
      const FunctionDecl *Caller,
      SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);

  /// Given an implicit special member, infer its CUDA target from the
  /// calls it needs to make to underlying base/field special members.
  /// \param ClassDecl the class for which the member is being created.
  /// \param CSM the kind of special member.
  /// \param MemberDecl the special member itself.
  /// \param ConstRHS true if this is a copy operation with a const object
  ///        on its RHS.
  /// \param Diagnose true if this call should emit diagnostics.
  /// \return true if there was an error inferring.
  /// The result of this call is implicit CUDA target attribute(s) attached to
  /// the member declaration.
  bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                               CXXSpecialMember CSM,
                                               CXXMethodDecl *MemberDecl,
                                               bool ConstRHS,
                                               bool Diagnose);

  /// \return true if \p CD can be considered empty according to CUDA
  /// (E.2.3.1 in CUDA 7.5 Programming guide).
  bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
  bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);

  // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
  // case of error emits appropriate diagnostic and invalidates \p Var.
  //
  // \details CUDA allows only empty constructors as initializers for global
  // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to
  // all __shared__ variables whether they are local or not (they all are
  // implicitly static in CUDA). One exception is that CUDA allows constant
  // initializers for __constant__ and __device__ variables.
  void checkAllowedCUDAInitializer(VarDecl *VD);

  /// Check whether NewFD is a valid overload for CUDA. Emits
  /// diagnostics and invalidates NewFD if not.
  void checkCUDATargetOverload(FunctionDecl *NewFD,
                               const LookupResult &Previous);
  /// Copies target attributes from the template TD to the function FD.
  void inheritCUDATargetAttrs(FunctionDecl *FD,
                              const FunctionTemplateDecl &TD);

  /// Returns the name of the launch configuration function. This is the name
  /// of the function that will be called to configure kernel call, with the
  /// parameters specified via <<<>>>.
  std::string getCudaConfigureFuncName() const;

  /// \name Code completion
  //@{

  /// Describes the context in which code completion occurs.
  enum ParserCompletionContext {
    /// Code completion occurs at top-level or namespace context.
    PCC_Namespace,
    /// Code completion occurs within a class, struct, or union.
    PCC_Class,
    /// Code completion occurs within an Objective-C interface, protocol,
    /// or category.
    PCC_ObjCInterface,
    /// Code completion occurs within an Objective-C implementation or
    /// category implementation
    PCC_ObjCImplementation,
    /// Code completion occurs within the list of instance variables
    /// in an Objective-C interface, protocol, category, or implementation.
    PCC_ObjCInstanceVariableList,
    /// Code completion occurs following one or more template
    /// headers.
    PCC_Template,
    /// Code completion occurs following one or more template
    /// headers within a class.
    PCC_MemberTemplate,
    /// Code completion occurs within an expression.
    PCC_Expression,
    /// Code completion occurs within a statement, which may
    /// also be an expression or a declaration.
PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); enum class AttributeCompletion { Attribute, Scope, None, }; void CodeCompleteAttribute( AttributeCommonInfo::Syntax Syntax, AttributeCompletion Completion = AttributeCompletion::Attribute, const IdentifierInfo *Scope = nullptr); /// Determines the preferred type of the current function argument, by /// examining the signatures of all possible overloads. /// Returns null if unknown or ambiguous, or if code completion is off. /// /// If the code completion point has been reached, also reports the function /// signatures that were considered. /// /// FIXME: rename to GuessCallArgumentType to reduce confusion. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. \p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. 
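  ///
  /// Example trigger (illustrative; '^' marks the completion point):
  ///   struct Point { int x, y; };
  ///   Point P = {.x = 1, .^};   // suggests the remaining member 'y'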
void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} 
//===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, StringRef ParamName, QualType ArgTy, QualType ParamTy); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg, bool WantCDE); bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> 
ArgNums); bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum); bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinComplex(CallExpr *TheCall); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); bool SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinArithmeticFence(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, const char *TypeDesc); bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc); bool SemaBuiltinElementwiseMath(CallExpr *TheCall); bool PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall); bool PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall); // Matrix builtin handling. 
  ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
                                        ExprResult CallResult);
  ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
                                              ExprResult CallResult);
  ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
                                               ExprResult CallResult);

public:
  enum FormatStringType {
    FST_Scanf,
    FST_Printf,
    FST_NSString,
    FST_Strftime,
    FST_Strfmon,
    FST_Kprintf,
    FST_FreeBSDKPrintf,
    FST_OSTrace,
    FST_OSLog,
    FST_Unknown
  };
  static FormatStringType GetFormatStringType(const FormatAttr *Format);

  bool FormatStringHasSArg(const StringLiteral *FExpr);

  static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);

private:
  bool CheckFormatArguments(const FormatAttr *Format,
                            ArrayRef<const Expr *> Args, bool IsCXXMember,
                            VariadicCallType CallType, SourceLocation Loc,
                            SourceRange Range,
                            llvm::SmallBitVector &CheckedVarArgs);
  bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg,
                            unsigned format_idx, unsigned firstDataArg,
                            FormatStringType Type, VariadicCallType CallType,
                            SourceLocation Loc, SourceRange range,
                            llvm::SmallBitVector &CheckedVarArgs);

  void CheckAbsoluteValueFunction(const CallExpr *Call,
                                  const FunctionDecl *FDecl);

  void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);

  void CheckMemaccessArguments(const CallExpr *Call, unsigned BId,
                               IdentifierInfo *FnName);

  void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName);

  void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName);
  void CheckFreeArguments(const CallExpr *E);

  void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
                          SourceLocation ReturnLoc, bool isObjCMethod = false,
                          const AttrVec *Attrs = nullptr,
                          const FunctionDecl *FD = nullptr);

public:
  void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);

private:
  void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
  void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
  void CheckForIntOverflow(Expr *E);
  void CheckUnsequencedOperations(const Expr *E);

  /// Perform semantic checks on a completed expression. This will either
  /// be a full-expression or a default argument expression.
  void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
                          bool IsConstexpr = false);

  void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
                                   Expr *Init);

  /// Check if there is a field shadowing.
  void CheckShadowInheritedFields(const SourceLocation &Loc,
                                  DeclarationName FieldName,
                                  const CXXRecordDecl *RD,
                                  bool DeclIsField = true);

  /// Check if the given expression contains a 'break' or 'continue'
  /// statement that produces control flow different from GCC.
  void CheckBreakContinueBinding(Expr *E);

  /// Check whether the receiver is a mutable ObjC container which
  /// attempts to add itself into the container.
  void CheckObjCCircularContainer(ObjCMessageExpr *Message);

  void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee);

  void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
  void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                                 bool DeleteWasArrayForm);

public:
  /// Register a magic integral constant to be used as a type tag.
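  ///
  /// Example usage (an illustrative sketch; the "mpi" kind and magic value
  /// are hypothetical):
  ///
  ///  // Make calls tagged with the magic value 1 of kind "mpi" check that
  ///  // the associated pointer argument points to int.
  ///  RegisterTypeTagForDatatype(&Context.Idents.get("mpi"),
  ///                             /*MagicValue=*/1, Context.IntTy,
  ///                             /*LayoutCompatible=*/false,
  ///                             /*MustBeNull=*/false);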
  void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                  uint64_t MagicValue, QualType Type,
                                  bool LayoutCompatible, bool MustBeNull);

  struct TypeTagData {
    TypeTagData() {}

    TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
        : Type(Type), LayoutCompatible(LayoutCompatible),
          MustBeNull(MustBeNull) {}

    QualType Type;

    /// If true, \c Type should be compared with other expression's types for
    /// layout-compatibility.
    unsigned LayoutCompatible : 1;
    unsigned MustBeNull : 1;
  };

  /// A pair of ArgumentKind identifier and magic value. This uniquely
  /// identifies the magic value.
  typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
  /// A map from magic value to type information.
  std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
      TypeTagForDatatypeMagicValues;

  /// Perform checks on a call of a function with argument_with_type_tag
  /// or pointer_with_type_tag attributes.
  void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                const ArrayRef<const Expr *> ExprArgs,
                                SourceLocation CallSiteLoc);

  /// Check if we are taking the address of a packed field
  /// as this may be a problem if the pointer value is dereferenced.
  void CheckAddressOfPackedMember(Expr *rhs);

  /// The parser's current scope.
  ///
  /// The parser maintains this state here.
  Scope *CurScope;

  mutable IdentifierInfo *Ident_super;
  mutable IdentifierInfo *Ident___float128;

  /// Nullability type specifiers.
  IdentifierInfo *Ident__Nonnull = nullptr;
  IdentifierInfo *Ident__Nullable = nullptr;
  IdentifierInfo *Ident__Nullable_result = nullptr;
  IdentifierInfo *Ident__Null_unspecified = nullptr;

  IdentifierInfo *Ident_NSError = nullptr;

  /// The handler for the FileChanged preprocessor events.
  ///
  /// Used for diagnostics that implement custom semantic analysis for
  /// #include directives, like -Wpragma-pack.
  sema::SemaPPCallbacks *SemaPPCallbackHandler;

protected:
  friend class Parser;
  friend class InitializationSequence;
  friend class ASTReader;
  friend class ASTDeclReader;
  friend class ASTWriter;

public:
  /// Retrieve the keyword associated with the given nullability kind.
  IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

  /// The struct behind the CFErrorRef pointer.
  RecordDecl *CFError = nullptr;
  bool isCFError(RecordDecl *D);

  /// Retrieve the identifier "NSError".
  IdentifierInfo *getNSErrorIdent();

  /// Retrieve the parser's current scope.
  ///
  /// This routine must only be used when it is certain that semantic analysis
  /// and the parser are in precisely the same context, which is not the case
  /// when, e.g., we are performing any kind of template instantiation.
  /// Therefore, the only safe places to use this scope are in the parser
  /// itself and in routines directly invoked from the parser and *never* from
  /// template substitution or instantiation.
  Scope *getCurScope() const { return CurScope; }

  void incrementMSManglingNumber() const {
    return CurScope->incrementMSManglingNumber();
  }

  IdentifierInfo *getSuperIdentifier() const;
  IdentifierInfo *getFloat128Identifier() const;

  Decl *getObjCDeclContext() const;

  DeclContext *getCurLexicalContext() const {
    return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
  }

  const DeclContext *getCurObjCLexicalContext() const {
    const DeclContext *DC = getCurLexicalContext();
    // A category implicitly has the attribute of the interface.
    if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
      DC = CatD->getClassInterface();
    return DC;
  }

  /// Determine the number of levels of enclosing template parameters. This is
  /// only usable while parsing.
Note that this does not include dependent /// contexts in which no template parameters have yet been declared, such as /// in a terse function template or generic lambda before the first 'auto' is /// encountered. unsigned getTemplateDepth(Scope *S) const; /// To be used for checking whether the arguments being passed to a /// function exceed the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: int ParsingClassDepth = 0; class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the set of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurLexicalContext is a kernel function or it is known that the /// function will be emitted for the device, emits the diagnostics /// immediately. /// - If CurLexicalContext is a function and we are compiling /// for the device, but we don't know that this function will be codegen'ed /// for device yet, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// Diagnose __float128 type usage only from SYCL device code if the current /// target doesn't support it /// if (!S.Context.getTargetInfo().hasFloat128Type() && /// S.getLangOpts().SYCLIsDevice) /// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128"; SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed, creates a deferred diagnostic to be emitted if /// and when the caller is codegen'ed, and returns true. /// /// - Otherwise, returns true without emitting any diagnostics. /// /// Adds Callee to DeviceCallGraph if we don't know if its caller will be /// codegen'ed yet. bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context.
if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; template <> void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, AlignPackInfo Value); } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getHashValue()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
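The EnterExpressionEvaluationContext class above is a plain RAII guard: it pushes an evaluation context in its constructor and pops it in its destructor, with an Entered flag so a conditionally-entered guard only pops what it actually pushed. Below is a minimal, self-contained sketch of that pattern; MiniSema, Ctx, and EnterCtx are hypothetical stand-ins, not clang API.

#include <cassert>
#include <vector>

struct MiniSema {
  enum class Ctx { PotentiallyEvaluated, Unevaluated };
  std::vector<Ctx> Stack{Ctx::PotentiallyEvaluated};
  // Analogues of Push/PopExpressionEvaluationContext in the header above.
  void push(Ctx C) { Stack.push_back(C); }
  void pop() { assert(Stack.size() > 1); Stack.pop_back(); }
  bool isUnevaluated() const { return Stack.back() == Ctx::Unevaluated; }
};

class EnterCtx {
  MiniSema &S;
  bool Entered;
public:
  EnterCtx(MiniSema &S, MiniSema::Ctx C, bool ShouldEnter = true)
      : S(S), Entered(ShouldEnter) {
    if (Entered) S.push(C);   // push only when actually entering
  }
  ~EnterCtx() {
    if (Entered) S.pop();     // pop exactly what was pushed
  }
};

int main() {
  MiniSema S;
  {
    EnterCtx Guard(S, MiniSema::Ctx::Unevaluated);
    assert(S.isUnevaluated());
  } // Guard's destructor pops the context here
  assert(!S.isUnevaluated());
  return 0;
}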
GB_binop__plus_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__plus_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__plus_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__plus_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__plus_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_fp32) // A*D function (colscale): GB (_AxD__plus_fp32) // D*A function (rowscale): GB (_DxB__plus_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__plus_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__plus_fp32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_fp32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_fp32) // C=scalar+B GB (_bind1st__plus_fp32) // C=scalar+B' GB (_bind1st_tran__plus_fp32) // C=A+scalar GB (_bind2nd__plus_fp32) // C=A'+scalar GB (_bind2nd_tran__plus_fp32) // C type: float // A type: float // A pattern? 0 // B type: float // B pattern? 0 // BinaryOp: cij = (aij + bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x + y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PLUS || GxB_NO_FP32 || GxB_NO_PLUS_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__plus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__plus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__plus_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__plus_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__plus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__plus_fp32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__plus_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; float alpha_scalar ; float beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((float *) alpha_scalar_in)) ; beta_scalar = 
(*((float *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__plus_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__plus_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__plus_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__plus_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__plus_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (x + bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__plus_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (aij + y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x + aij) ; \ } GrB_Info GB (_bind1st_tran__plus_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses 
GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij + y) ; \ } GrB_Info GB (_bind2nd_tran__plus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
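For reference, here is a minimal standalone sketch (not part of SuiteSparse) of what the _bind1st__plus_fp32 kernel above computes: Cx [p] = x + Bx [p] for every entry present in the bitmap Bb, where a NULL Bb means all entries are present, matching the convention of the GBB macro.

/* bind1st_sketch.c -- hypothetical file, illustration only */
#include <stdio.h>
#include <stdint.h>

static void bind1st_plus_fp32 (float *Cx, float x, const float *Bx,
    const int8_t *Bb, int64_t bnz)
{
    for (int64_t p = 0 ; p < bnz ; p++)
    {
        if (Bb != NULL && !Bb [p]) continue ;   /* entry p not present */
        Cx [p] = x + Bx [p] ;                   /* cij = (x + bij) */
    }
}

int main (void)
{
    float  Bx [4] = { 1, 2, 3, 4 } ;
    int8_t Bb [4] = { 1, 0, 1, 1 } ;            /* entry 1 is absent */
    float  Cx [4] = { 0, 0, 0, 0 } ;
    bind1st_plus_fp32 (Cx, 10, Bx, Bb, 4) ;
    for (int p = 0 ; p < 4 ; p++) printf ("%g ", Cx [p]) ;  /* 11 0 13 14 */
    printf ("\n") ;
    return (0) ;
}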
explicit_solver_strategy.h
// // Authors: // Miguel Angel Celigueta maceli@cimne.upc.edu // Miquel Santasusana msantasusana@cimne.upc.edu // #if !defined(KRATOS_EXPLICIT_SOLVER_STRATEGY) #define KRATOS_EXPLICIT_SOLVER_STRATEGY // Project includes #include "utilities/timer.h" #include "custom_elements/Particle_Contact_Element.h" #include "includes/variables.h" #include "includes/deprecated_variables.h" /* System includes */ #include <limits> #include <iostream> #include <iomanip> #include <time.h> /* External includes */ #ifdef _OPENMP #include <omp.h> #endif #define CUSTOMTIMER 0 // ACTIVATES AND DISABLES ::TIMER::::: #include "includes/define.h" #include "utilities/openmp_utils.h" #include "includes/model_part.h" #include "solving_strategies/strategies/solving_strategy.h" #include "solving_strategies/schemes/scheme.h" #include "custom_strategies/schemes/dem_integration_scheme.h" #include "custom_utilities/create_and_destroy.h" #include "custom_utilities/dem_fem_utilities.h" #include "custom_utilities/GeometryFunctions.h" #include "custom_utilities/inlet.h" #include "custom_elements/cluster3D.h" #include "custom_elements/rigid_body_element.h" ////Cfeng #include "custom_utilities/dem_fem_search.h" #include "custom_utilities/discrete_particle_configure.h" #include "custom_utilities/rigid_face_geometrical_object_configure.h" #ifdef USING_CGAL #include <CGAL/spatial_sort.h> #endif /* Timer defines */ #ifdef CUSTOMTIMER #define KRATOS_TIMER_START(t) Timer::Start(t); #define KRATOS_TIMER_STOP(t) Timer::Stop(t); #else #define KRATOS_TIMER_START(t) #define KRATOS_TIMER_STOP(t) #endif namespace Kratos { class ExplicitSolverSettings { public: KRATOS_CLASS_POINTER_DEFINITION(ExplicitSolverSettings); ExplicitSolverSettings() { } ~ExplicitSolverSettings() { } ModelPart* r_model_part; ModelPart* contact_model_part; ModelPart* fem_model_part; ModelPart* cluster_model_part; ModelPart* inlet_model_part; }; class KRATOS_API(DEM_APPLICATION) ExplicitSolverStrategy { public: typedef ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ElementsContainerType ElementsArrayType; typedef ElementsArrayType::iterator ElementsIterator; typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef ModelPart::NodesContainerType::ContainerType NodesContainerType; typedef ModelPart::ElementsContainerType::ContainerType ElementsContainerType; typedef ModelPart::ConditionsContainerType::ContainerType ConditionsContainerType; typedef SpatialSearch::ResultElementsContainerType ResultElementsContainerType; typedef SpatialSearch::VectorResultElementsContainerType VectorResultElementsContainerType; typedef SpatialSearch::RadiusArrayType RadiusArrayType; typedef SpatialSearch::DistanceType DistanceType; typedef SpatialSearch::VectorDistanceType VectorDistanceType; typedef SpatialSearch::ResultConditionsContainerType ResultConditionsContainerType; typedef SpatialSearch::VectorResultConditionsContainerType VectorResultConditionsContainerType; typedef PointerVectorSet<Properties, IndexedObject> PropertiesContainerType; typedef PropertiesContainerType::iterator PropertiesIterator; typedef DiscreteParticleConfigure<3> ElementConfigureType; typedef RigidFaceGeometricalObjectConfigure<3> RigidFaceGeometricalConfigureType; typedef Variable<double> ComponentOf3ComponentsVariableType; /// Pointer definition of ExplicitSolverStrategy KRATOS_CLASS_POINTER_DEFINITION(ExplicitSolverStrategy); ExplicitSolverStrategy() { } ExplicitSolverStrategy(ExplicitSolverSettings& settings, const double max_delta_time, const int n_step_search, const double 
safety_factor, const int delta_option, ParticleCreatorDestructor::Pointer p_creator_destructor, DEM_FEM_Search::Pointer p_dem_fem_search, SpatialSearch::Pointer pSpSearch, Parameters strategy_parameters) { mParameters = strategy_parameters; mDeltaOption = delta_option; mpParticleCreatorDestructor = p_creator_destructor; mpDemFemSearch = p_dem_fem_search; mpSpSearch = pSpSearch; if(mParameters["do_search_neighbours"].GetBool()) mDoSearchNeighbourElements = true; else mDoSearchNeighbourElements = false; p_creator_destructor->SetDoSearchNeighbourElements(mDoSearchNeighbourElements); mMaxTimeStep = max_delta_time; mNStepSearch = n_step_search; mSafetyFactor = safety_factor; mpDem_model_part = &(*(settings.r_model_part)); KRATOS_ERROR_IF(mpDem_model_part == NULL) << "Undefined settings.r_model_part in ExplicitSolverStrategy constructor" << std::endl; mpContact_model_part = &(*(settings.contact_model_part)); KRATOS_ERROR_IF(mpContact_model_part == NULL) << "Undefined settings.contact_model_part in ExplicitSolverStrategy constructor" << std::endl; mpFem_model_part = &(*(settings.fem_model_part)); KRATOS_ERROR_IF(mpFem_model_part == NULL) << "Undefined settings.fem_model_part in ExplicitSolverStrategy constructor" << std::endl; mpCluster_model_part = &(*(settings.cluster_model_part)); KRATOS_ERROR_IF(mpCluster_model_part == NULL) << "Undefined settings.cluster_model_part in ExplicitSolverStrategy constructor" << std::endl; mpInlet_model_part = &(*(settings.inlet_model_part)); KRATOS_ERROR_IF(mpInlet_model_part == NULL) << "Undefined settings.inlet_model_part in ExplicitSolverStrategy constructor" << std::endl; if(mParameters["RemoveBallsInitiallyTouchingWalls"].GetBool()) mRemoveBallsInitiallyTouchingWallsOption = true; else mRemoveBallsInitiallyTouchingWallsOption = false; } /// Destructor. 
virtual ~ExplicitSolverStrategy() { //Timer::SetOuputFile("TimesPartialRelease"); //Timer::PrintTimingInformation(); } struct LessX { bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[0] < q->GetGeometry()[0].Coordinates()[0];} }; struct LessY { bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[1] < q->GetGeometry()[0].Coordinates()[1];} }; struct LessZ { bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[2] < q->GetGeometry()[0].Coordinates()[2];} }; struct SpatialSortingTraits { typedef SphericParticle* Point_2; typedef LessX Less_x_2; typedef LessY Less_y_2; typedef LessZ Less_z_2; Less_x_2 less_x_2_object() const {return Less_x_2();} Less_y_2 less_y_2_object() const {return Less_y_2();} Less_z_2 less_z_2_object() const { return Less_z_2();} }; #ifdef USING_CGAL void ReorderParticles() { SpatialSortingTraits sst; CGAL::spatial_sort(mListOfSphericParticles.begin(), mListOfSphericParticles.end(), sst); } #endif template <class T> void RebuildListOfSphericParticles(ElementsArrayType& pElements, std::vector<T*>& rCustomListOfParticles){ KRATOS_TRY rCustomListOfParticles.resize(pElements.size()); #pragma omp parallel for for (int k = 0; k < (int)pElements.size(); k++){ ElementsArrayType::iterator particle_pointer_it = pElements.ptr_begin() + k; T* spheric_particle = dynamic_cast<T*>(&(*particle_pointer_it)); rCustomListOfParticles[k] = spheric_particle; } return; KRATOS_CATCH("") } void RebuildListOfDiscontinuumSphericParticles() { RebuildListOfSphericParticles<SphericParticle>(GetModelPart().GetCommunicator().LocalMesh().Elements(), mListOfSphericParticles); } void RebuildPropertiesProxyPointers(std::vector<SphericParticle*>& rCustomListOfSphericParticles); void SendProcessInfoToClustersModelPart(); void UpdateMaxIdOfCreatorDestructor(); void RepairPointersToNormalProperties(std::vector<SphericParticle*>& rCustomListOfSphericParticles); virtual void Initialize(); virtual void AttachSpheresToStickyWalls(); virtual void DisplayThreadInfo(); virtual void CalculateMaxTimeStep(); double CalculateMaxInletTimeStep(); virtual void InitializeClusters(); virtual void GetClustersForce(); virtual void GetRigidBodyElementsForce(); virtual double SolveSolutionStep(); void SearchDEMOperations(ModelPart& r_model_part, bool has_mpi = true); void SearchFEMOperations(ModelPart& r_model_part, bool has_mpi = true) ; virtual void ForceOperations(ModelPart& r_model_part); void InitialTimeStepCalculation(); //TODO: remove this one void GetForce(); void FastGetForce(); virtual void PerformTimeIntegrationOfMotion(int StepFlag = 0); void InitializeSolutionStep(); virtual void BoundingBoxUtility(bool is_time_to_mark_and_remove = true); virtual void FinalizeSolutionStep(); void InitializeElements(); void InitializeDEMElements(); void InitializeFEMElements(); //void InitializeRigidBodyElements(); void InitializeFEMWallsAsRigidBodyElements(ModelPart::SubModelPartsContainerType::iterator& sub_model_part); void MarkToDeleteAllSpheresInitiallyIndentedWithFEM(ModelPart& rSpheresModelPart); void ComputeNodalArea(); void ComputeNormalPressureVectorField(); virtual void CalculateConditionsRHSAndAdd(); void ClearFEMForces(); void CalculateNodalPressuresAndStressesOnWalls(); void SetFlagAndVariableToNodes(const Kratos::Flags& r_flag_name, ComponentOf3ComponentsVariableType& r_variable_to_set, const double value, NodesArrayType& 
r_nodes_array); void SetVariableToNodes(ComponentOf3ComponentsVariableType& r_variable_to_set, const double value, NodesArrayType& r_nodes_array); void ResetPrescribedMotionFlagsRespectingImposedDofs(); void ApplyPrescribedBoundaryConditions(); void ApplyInitialConditions(); void SetSearchRadiiOnAllParticles(ModelPart& r_model_part, const double added_search_distance = 0.0, const double amplification = 1.0); void SetNormalRadiiOnAllParticles(ModelPart& r_model_part); void SetSearchRadiiWithFemOnAllParticles(ModelPart& r_model_part, const double added_search_distance = 0.0, const double amplification = 1.0); virtual void SearchNeighbours(); virtual void ComputeNewNeighboursHistoricalData(); virtual void CreateContactElements(); void InitializeContactElements(); // void ContactInitializeSolutionStep(); void PrepareContactElementsForPrinting(); virtual void ComputeNewRigidFaceNeighboursHistoricalData(); virtual void SearchRigidFaceNeighbours(); void CheckHierarchyWithCurrentNeighbours(); /* This should work with only one iteration, but with MPI it does not */ void CalculateInitialMaxIndentations(ProcessInfo& r_process_info); void PrepareContactModelPart(ModelPart& r_model_part, ModelPart& mcontacts_model_part); void PrepareElementsForPrinting(); void SynchronizeHistoricalVariables(ModelPart& r_model_part); void SynchronizeRHS(ModelPart& r_model_part); void CleanEnergies(); ModelPart& GetModelPart() { return (*mpDem_model_part);} ModelPart& GetFemModelPart() { return (*mpFem_model_part);} ModelPart& GetContactModelPart() { return (*mpContact_model_part);} ModelPart& GetClusterModelPart() { return (*mpCluster_model_part);} ModelPart& GetInletModelPart() { return (*mpInlet_model_part);} ModelPart& GetRigidBodyModelPart() { return (*mpRigidBody_model_part);} VectorResultElementsContainerType& GetResults() { return (mResults);} VectorDistanceType& GetResultsDistances() { return (mResultsDistances);} RadiusArrayType& GetArrayOfAmplifiedRadii() { return (mArrayOfAmplifiedRadii);} int& GetNStepSearch() { return (mNStepSearch);} int& GetSearchControl() { return mSearchControl;} int& GetNumberOfThreads() { return (mNumberOfThreads);} double& GetMaxTimeStep() { return (mMaxTimeStep);} double& GetSafetyFactor() { return (mSafetyFactor);} int& GetDeltaOption() { return (mDeltaOption);} std::vector<unsigned int>& GetElementPartition() { return (mElementPartition);} ParticleCreatorDestructor::Pointer& GetParticleCreatorDestructor() { return (mpParticleCreatorDestructor);} SpatialSearch::Pointer& GetSpSearch() { return (mpSpSearch);} VectorResultConditionsContainerType& GetRigidFaceResults() { return (mRigidFaceResults);} VectorDistanceType& GetRigidFaceResultsDistances() { return (mRigidFaceResultsDistances);} std::vector<unsigned int>& GetConditionPartition() { return (mConditionPartition);} DEM_FEM_Search::Pointer& GetDemFemSearch() { return (mpDemFemSearch);} virtual ElementsArrayType& GetElements(ModelPart& r_model_part) { return r_model_part.GetCommunicator().LocalMesh().Elements();} virtual ElementsArrayType& GetAllElements(ModelPart& r_model_part) { return r_model_part.Elements(); } protected: Parameters mParameters; bool mRemoveBallsInitiallyTouchingWallsOption; VectorResultElementsContainerType mResults; VectorDistanceType mResultsDistances; RadiusArrayType mArrayOfAmplifiedRadii; int mNStepSearch; int mSearchControl; int mNumberOfThreads; double mMaxTimeStep; double mSafetyFactor; int mDeltaOption; std::vector<unsigned int> mElementPartition; ParticleCreatorDestructor::Pointer
mpParticleCreatorDestructor; DEM_FEM_Search::Pointer mpDemFemSearch; SpatialSearch::Pointer mpSpSearch; bool mDoSearchNeighbourElements; VectorResultConditionsContainerType mRigidFaceResults; VectorDistanceType mRigidFaceResultsDistances; std::vector<unsigned int> mConditionPartition; ModelPart *mpFem_model_part; ModelPart *mpDem_model_part; ModelPart *mpInlet_model_part; ModelPart *mpContact_model_part; ModelPart *mpCluster_model_part; ModelPart *mpRigidBody_model_part; std::vector<SphericParticle*> mListOfSphericParticles; std::vector<SphericParticle*> mListOfGhostSphericParticles; }; // Class ExplicitSolverStrategy } // namespace Kratos. #endif // KRATOS_EXPLICIT_SOLVER_STRATEGY defined
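The RebuildListOfSphericParticles template above follows a simple pattern: resize a raw-pointer list to match an element container, then fill it in an OpenMP parallel loop with dynamic_cast'ed pointers. A minimal standalone sketch of that pattern (not Kratos code; Base and Derived are hypothetical stand-ins for Element and SphericParticle):

#include <cassert>
#include <memory>
#include <vector>

struct Base { virtual ~Base() = default; };
struct Derived : Base { int id = 0; };

template <class T>
void RebuildList(std::vector<std::unique_ptr<Base>>& elements,
                 std::vector<T*>& list)
{
    list.resize(elements.size());
    // Each iteration writes a distinct slot, so the loop is trivially
    // parallel, as in the Kratos template above.
    #pragma omp parallel for
    for (int k = 0; k < (int)elements.size(); k++) {
        // dynamic_cast yields nullptr for elements of the wrong type.
        list[k] = dynamic_cast<T*>(elements[k].get());
    }
}

int main()
{
    std::vector<std::unique_ptr<Base>> elems;
    elems.push_back(std::make_unique<Derived>());
    elems.push_back(std::make_unique<Derived>());
    std::vector<Derived*> list;
    RebuildList(elems, list);
    assert(list.size() == 2 && list[0] != nullptr && list[1] != nullptr);
    return 0;
}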
build_list.h
#ifndef build_list_h #define build_list_h #include <unordered_map> #include <unordered_set> #include <set> #include <queue> #include "exafmm_t.h" #include "geometry.h" #include "hilbert.h" #include "fmm_base.h" namespace exafmm_t { using std::abs; using std::max; using std::unordered_map; using std::unordered_set; using std::set; using std::queue; /** * @brief Generate the mapping from Hilbert keys to node indices in the tree. * * @param nodes Tree. * @return Keys to indices mapping. */ template <typename T> unordered_map<uint64_t, size_t> get_key2id(const Nodes<T>& nodes) { unordered_map<uint64_t, size_t> key2id; for (size_t i=0; i<nodes.size(); ++i) { key2id[nodes[i].key] = nodes[i].idx; } return key2id; } /** * @brief Generate the set of keys of all leaf nodes. * * @param nodes Tree. * @return Set of all leaf keys with level offset. */ template <typename T> unordered_set<uint64_t> get_leaf_keys(const Nodes<T>& nodes) { // we cannot use leafs to generate leaf keys, since it does not include // empty leaf nodes where ntrgs and nsrcs are 0. unordered_set<uint64_t> leaf_keys; for (size_t i=0; i<nodes.size(); ++i) { if (nodes[i].is_leaf) { leaf_keys.insert(nodes[i].key); } } return leaf_keys; } /** * @brief Given the 3D index of an octant and its depth, return the key of * the leaf that contains the octant. If such leaf does not exist, return the * key of the original octant. * * @param iX Integer index of the octant. * @param level The level of the octant. * * @return Hilbert index with level offset. */ uint64_t find_key(const ivec3& iX, int level, const unordered_set<uint64_t>& leaf_keys) { uint64_t orig_key = getKey(iX, level, true); uint64_t curr_key = orig_key; while (level>0) { if (leaf_keys.find(curr_key) != leaf_keys.end()) { // if key is leaf return curr_key; } else { // else go 1 level up curr_key = getParent(curr_key); level--; } } return orig_key; } /** * @brief Check the adjacency of two nodes. * * @param key_a, key_b Hilbert keys with level offset. */ bool is_adjacent(uint64_t key_a, uint64_t key_b) { int level_a = getLevel(key_a); int level_b = getLevel(key_b); int max_level = max(level_a, level_b); ivec3 iX_a = get3DIndex(key_a); ivec3 iX_b = get3DIndex(key_b); ivec3 iX_ac = (iX_a*2 + 1) * (1 << (max_level-level_a)); // center coordinates ivec3 iX_bc = (iX_b*2 + 1) * (1 << (max_level-level_b)); // center coordinates ivec3 diff = iX_ac - iX_bc; int max_diff = -1; // L-infinity norm of diff for (int d=0; d<3; ++d) { diff[d] = abs(diff[d]); max_diff = max(max_diff, diff[d]); } int sum_radius = (1 << (max_level-level_a)) + (1 << (max_level-level_b)); return (diff[0] <= sum_radius) && (diff[1] <= sum_radius) && (diff[2] <= sum_radius) && (max_diff == sum_radius); } /** * @brief Build lists for P2P, P2L and M2P operators for a given node. * * @param node Node. * @param nodes Tree. * @param leaf_keys The set of all leaf keys. * @param key2id The mapping from a node's key to its index in the tree. 
*/ template <typename T> void build_other_list(Node<T>* node, Nodes<T>& nodes, const FmmBase<T>& fmm, const unordered_set<uint64_t>& leaf_keys, const unordered_map<uint64_t, size_t>& key2id) { set<Node<T>*> P2P_set, M2P_set, P2L_set; Node<T>* curr = node; if (curr->key != 0) { Node<T>* parent = curr->parent; ivec3 min_iX = 0; ivec3 max_iX = 1 << node->level; ivec3 curr_iX = get3DIndex(curr->key); ivec3 parent_iX = get3DIndex(parent->key); // search in every direction for (int i=-2; i<4; i++) { for (int j=-2; j<4; j++) { for (int k=-2; k<4; k++) { ivec3 direction; direction[0] = i; direction[1] = j; direction[2] = k; direction += parent_iX * 2; if (direction >= min_iX && direction < max_iX && direction != curr_iX) { uint64_t res_key = find_key(direction, curr->level, leaf_keys); bool adj = is_adjacent(res_key, curr->key); Node<T>* res = &nodes[key2id.at(res_key)]; if (res->level < curr->level) { // when res node is a leaf if (adj) { if (curr->is_leaf) { P2P_set.insert(res); } } else { if (curr->is_leaf && curr->ntrgs<=fmm.nsurf) { P2P_set.insert(res); } else { P2L_set.insert(res); } } } if (res->level == curr->level) { // when res is a colleague if (adj) { if (curr->is_leaf) { queue<Node<T>*> buffer; buffer.push(res); while (!buffer.empty()) { Node<T>* temp = buffer.front(); buffer.pop(); if (!is_adjacent(temp->key, curr->key)) { if (temp->is_leaf && temp->nsrcs<=fmm.nsurf) { P2P_set.insert(temp); } else { M2P_set.insert(temp); } } else { if (temp->is_leaf) { P2P_set.insert(temp); } else { for (int i=0; i<8; i++) { Node<T>* child = temp->children[i]; if (child != nullptr) { buffer.push(child); } } } } } } } } } } } } } if (curr->is_leaf) { P2P_set.insert(curr); } for (typename set<Node<T>*>::iterator i=P2P_set.begin(); i!=P2P_set.end(); i++) { if ((*i) != nullptr) { curr->P2P_list.push_back(*i); } } for (typename set<Node<T>*>::iterator i=P2L_set.begin(); i!=P2L_set.end(); i++) { if ((*i) != nullptr) { curr->P2L_list.push_back(*i); } } for (typename set<Node<T>*>::iterator i=M2P_set.begin(); i!=M2P_set.end(); i++) { if ((*i) != nullptr) { curr->M2P_list.push_back(*i); } } } /** * @brief Build M2L interaction list for a given node. * * @param node Node. * @param nodes Tree. * @param key2id The mapping from a node's key to its index in the tree. */ template <typename T> void build_M2L_list(Node<T>* node, Nodes<T>& nodes, const unordered_map<uint64_t, size_t>& key2id) { node->M2L_list.resize(REL_COORD[M2L_Type].size(), nullptr); Node<T>* curr = node; ivec3 min_iX = 0; ivec3 max_iX = 1 << curr->level; if (!node->is_leaf) { ivec3 curr_iX = get3DIndex(curr->key); ivec3 col_iX; ivec3 rel_coord; for (int i=-1; i<=1; i++) { rel_coord[0] = i; for (int j=-1; j<=1; j++) { rel_coord[1] = j; for (int k=-1; k<=1; k++) { rel_coord[2] = k; if (i || j || k) { // exclude current node itself col_iX = curr_iX + rel_coord; if (col_iX >= min_iX && col_iX < max_iX) { uint64_t col_key = getKey(col_iX, curr->level, true); if (key2id.find(col_key) != key2id.end()) { Node<T>* col = &nodes[key2id.at(col_key)]; if (!col->is_leaf) { int c_hash = hash(rel_coord); int idx = HASH_LUT[M2L_Type][c_hash]; curr->M2L_list[idx] = col; } } } } } } } } } /** * @brief Build lists for all operators for all nodes in the tree. * * @param nodes Tree. * @param fmm The FMM instance. 
*/ template <typename T> void build_list(Nodes<T>& nodes, const FmmBase<T>& fmm) { unordered_map<uint64_t, size_t> key2id = get_key2id(nodes); unordered_set<uint64_t> leaf_keys = get_leaf_keys(nodes); #pragma omp parallel for schedule(dynamic) for (size_t i=0; i<nodes.size(); i++) { Node<T>* node = &nodes[i]; build_M2L_list(node, nodes, key2id); build_other_list(node, nodes, fmm, leaf_keys, key2id); } } } #endif
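A small worked example of the adjacency test used by is_adjacent() above, as a standalone sketch independent of exafmm_t's ivec3 and key types: an octant with integer index i at a given level has half-width r = 2^(max_level - level) and center (2*i + 1)*r, and two octants are adjacent exactly when every per-axis center distance is at most the sum of their half-widths and the largest such distance equals that sum, i.e. the boxes touch without overlapping.

// adjacency_sketch.cpp -- hypothetical file, illustration only
#include <algorithm>
#include <cstdio>
#include <cstdlib>

// Half-width of an octant at `level` in a tree of depth `max_level`.
static int half_width(int level, int max_level) { return 1 << (max_level - level); }

static bool adjacent(const int a[3], int level_a, const int b[3], int level_b,
                     int max_level) {
  int ra = half_width(level_a, max_level);
  int rb = half_width(level_b, max_level);
  int sum = ra + rb, max_diff = -1;
  for (int d = 0; d < 3; ++d) {
    int ca = (2 * a[d] + 1) * ra;   // center coordinate along axis d
    int cb = (2 * b[d] + 1) * rb;
    int diff = std::abs(ca - cb);
    if (diff > sum) return false;   // separated along this axis
    max_diff = std::max(max_diff, diff);
  }
  return max_diff == sum;           // touching, not overlapping
}

int main() {
  int a[3] = {0, 0, 0}, b[3] = {1, 0, 0}, c[3] = {2, 0, 0};
  // At level 2 of a depth-2 tree: a and b share a face -> 1; a and c are
  // one box apart -> 0; a is not adjacent to itself (max_diff == 0) -> 0.
  std::printf("%d %d %d\n", adjacent(a, 2, b, 2, 2), adjacent(a, 2, c, 2, 2),
              adjacent(a, 2, a, 2, 2));
  return 0;
}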
fac_setup2.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision: 2.19 $ ***********************************************************************EHEADER*/ #include "_hypre_sstruct_ls.h" #include "fac.h" /*-------------------------------------------------------------------------- * hypre_FacSetup2: Constructs the level composite structures. * Each consists only of two levels, the refinement patches and the * coarse parent base grids. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_FacSetup2( void *fac_vdata, hypre_SStructMatrix *A_in, hypre_SStructVector *b, hypre_SStructVector *x ) { hypre_FACData *fac_data = fac_vdata; HYPRE_Int *plevels = (fac_data-> plevels); hypre_Index *rfactors = (fac_data-> prefinements); MPI_Comm comm; HYPRE_Int ndim; HYPRE_Int npart; HYPRE_Int nparts_level = 2; HYPRE_Int part_crse = 0; HYPRE_Int part_fine = 1; hypre_SStructPMatrix *A_pmatrix; hypre_StructMatrix *A_smatrix; hypre_Box *A_smatrix_dbox; hypre_SStructGrid **grid_level; hypre_SStructGraph **graph_level; HYPRE_Int part, level; HYPRE_Int nvars; hypre_SStructGraph *graph; hypre_SStructGrid *grid; hypre_SStructPGrid *pgrid; hypre_StructGrid *sgrid; hypre_BoxArray *sgrid_boxes; hypre_Box *sgrid_box; hypre_SStructStencil *stencils; hypre_BoxArray *iboxarray; hypre_Index *refine_factors; hypre_IndexRef box_start; hypre_IndexRef box_end; hypre_SStructUVEntry **Uventries; HYPRE_Int nUventries; HYPRE_Int *iUventries; hypre_SStructUVEntry *Uventry; hypre_SStructUEntry *Uentry; hypre_Index index, to_index, stride; HYPRE_Int var, to_var, to_part, level_part, level_topart; HYPRE_Int var1, var2; HYPRE_Int i, j, k, to_rank, row_coord, nUentries; hypre_BoxManEntry *boxman_entry; hypre_SStructMatrix *A_rap; hypre_SStructMatrix **A_level; hypre_SStructVector **b_level; hypre_SStructVector **x_level; hypre_SStructVector **r_level; hypre_SStructVector **e_level; hypre_SStructPVector **tx_level; hypre_SStructVector *tx; void **matvec_data_level; void **pmatvec_data_level; void *matvec_data; void **relax_data_level; void **interp_data_level; void **restrict_data_level; /* coarsest grid solver */ HYPRE_Int csolver_type =(fac_data-> csolver_type); HYPRE_SStructSolver crse_solver; HYPRE_SStructSolver crse_precond; HYPRE_Int max_level = hypre_FACDataMaxLevels(fac_data); HYPRE_Int relax_type = fac_data -> relax_type; HYPRE_Int usr_jacobi_weight= fac_data -> usr_jacobi_weight; double jacobi_weight = fac_data -> jacobi_weight; HYPRE_Int *levels; HYPRE_Int *part_to_level; HYPRE_Int box, box_volume; HYPRE_Int max_box_volume; HYPRE_Int stencil_size; hypre_Index stencil_shape_i, loop_size; HYPRE_Int *stencil_vars; double *values; double *A_smatrix_value; HYPRE_Int iA; HYPRE_Int *nrows; HYPRE_Int **ncols; HYPRE_Int **rows; HYPRE_Int **cols; HYPRE_Int *cnt; double *vals; HYPRE_Int *level_rows; HYPRE_Int *level_cols; HYPRE_Int level_cnt; HYPRE_IJMatrix ij_A; HYPRE_Int matrix_type; HYPRE_Int max_cycles; HYPRE_Int ierr = 0; /*hypre_SStructMatrix *nested_A; nested_A= hypre_TAlloc(hypre_SStructMatrix , 1); nested_A= hypre_CoarsenAMROp(fac_vdata, A);*/ /* generate the composite 
operator with the computed coarse-grid operators */ hypre_AMR_RAP(A_in, rfactors, &A_rap); (fac_data -> A_rap)= A_rap; comm = hypre_SStructMatrixComm(A_rap); ndim = hypre_SStructMatrixNDim(A_rap); npart= hypre_SStructMatrixNParts(A_rap); graph= hypre_SStructMatrixGraph(A_rap); grid = hypre_SStructGraphGrid(graph); ij_A = hypre_SStructMatrixIJMatrix(A_rap); matrix_type= hypre_SStructMatrixObjectType(A_rap); /*-------------------------------------------------------------------------- * logging arrays. *--------------------------------------------------------------------------*/ if ((fac_data -> logging) > 0) { max_cycles = (fac_data -> max_cycles); (fac_data -> norms) = hypre_TAlloc(double, max_cycles); (fac_data -> rel_norms)= hypre_TAlloc(double, max_cycles); } /*-------------------------------------------------------------------------- * Extract the amr/sstruct level/part structure and refinement factors. *--------------------------------------------------------------------------*/ levels = hypre_CTAlloc(HYPRE_Int, npart); part_to_level = hypre_CTAlloc(HYPRE_Int, npart); refine_factors= hypre_CTAlloc(hypre_Index, npart); for (part= 0; part< npart; part++) { part_to_level[part] = plevels[part]; levels[plevels[part]]= part; for (i= 0; i< ndim; i++) { refine_factors[plevels[part]][i]= rfactors[part][i]; } for (i= ndim; i< 3; i++) { refine_factors[plevels[part]][i]= 1; } } (fac_data -> level_to_part) = levels; (fac_data -> part_to_level) = part_to_level; (fac_data -> refine_factors)= refine_factors; /*-------------------------------------------------------------------------- * Create the level SStructGrids using the original composite grid. *--------------------------------------------------------------------------*/ grid_level= hypre_TAlloc(hypre_SStructGrid *, max_level+1); for (level= max_level; level >= 0; level--) { HYPRE_SStructGridCreate(comm, ndim, nparts_level, &grid_level[level]); } for (level= max_level; level >= 0; level--) { /*-------------------------------------------------------------------------- * Create the fine part of the finest level SStructGrids using the original * composite grid. *--------------------------------------------------------------------------*/ if (level == max_level) { pgrid = hypre_SStructGridPGrid(grid, levels[level]); iboxarray= hypre_SStructPGridCellIBoxArray(pgrid); for (box = 0; box < hypre_BoxArraySize(iboxarray); box++) { HYPRE_SStructGridSetExtents(grid_level[level], part_fine, hypre_BoxIMin( hypre_BoxArrayBox(iboxarray,box) ), hypre_BoxIMax( hypre_BoxArrayBox(iboxarray,box) )); } HYPRE_SStructGridSetVariables( grid_level[level], part_fine, hypre_SStructPGridNVars(pgrid), hypre_SStructPGridVarTypes(pgrid) ); /*----------------------------------------------------------------------- * Create the coarsest level grid if A has only 1 level *-----------------------------------------------------------------------*/ if (level == 0) { for (box = 0; box < hypre_BoxArraySize(iboxarray); box++) { HYPRE_SStructGridSetExtents(grid_level[level], part_crse, hypre_BoxIMin( hypre_BoxArrayBox(iboxarray,box) ), hypre_BoxIMax( hypre_BoxArrayBox(iboxarray,box) )); } HYPRE_SStructGridSetVariables( grid_level[level], part_crse, hypre_SStructPGridNVars(pgrid), hypre_SStructPGridVarTypes(pgrid) ); } } /*-------------------------------------------------------------------------- * Create the coarse part of level SStructGrids using the original composite * grid, the coarsest part SStructGrid, and the fine part if level < max_level. 
*--------------------------------------------------------------------------*/ if (level > 0) { pgrid = hypre_SStructGridPGrid(grid, levels[level-1]); iboxarray= hypre_SStructPGridCellIBoxArray(pgrid); for (box = 0; box < hypre_BoxArraySize(iboxarray); box++) { HYPRE_SStructGridSetExtents(grid_level[level], part_crse, hypre_BoxIMin( hypre_BoxArrayBox(iboxarray,box) ), hypre_BoxIMax( hypre_BoxArrayBox(iboxarray,box) )); HYPRE_SStructGridSetExtents(grid_level[level-1], part_fine, hypre_BoxIMin( hypre_BoxArrayBox(iboxarray,box) ), hypre_BoxIMax( hypre_BoxArrayBox(iboxarray,box) )); if (level == 1) { HYPRE_SStructGridSetExtents(grid_level[level-1], part_crse, hypre_BoxIMin( hypre_BoxArrayBox(iboxarray,box) ), hypre_BoxIMax( hypre_BoxArrayBox(iboxarray,box) )); } } HYPRE_SStructGridSetVariables( grid_level[level], part_crse, hypre_SStructPGridNVars(pgrid), hypre_SStructPGridVarTypes(pgrid) ); HYPRE_SStructGridSetVariables( grid_level[level-1], part_fine, hypre_SStructPGridNVars(pgrid), hypre_SStructPGridVarTypes(pgrid) ); /* coarsest SStructGrid */ if (level == 1) { HYPRE_SStructGridSetVariables( grid_level[level-1], part_crse, hypre_SStructPGridNVars(pgrid), hypre_SStructPGridVarTypes(pgrid) ); } } HYPRE_SStructGridAssemble(grid_level[level]); } (fac_data -> grid_level)= grid_level; /*----------------------------------------------------------- * Set up the graph. Create only the structured components * first. *-----------------------------------------------------------*/ graph_level= hypre_TAlloc(hypre_SStructGraph *, max_level+1); for (level= max_level; level >= 0; level--) { HYPRE_SStructGraphCreate(comm, grid_level[level], &graph_level[level]); } for (level= max_level; level >= 0; level--) { /*----------------------------------------------------------------------- * Create the fine part of the finest level structured graph connection. *-----------------------------------------------------------------------*/ if (level == max_level) { pgrid = hypre_SStructGridPGrid(grid, levels[level]); nvars = hypre_SStructPGridNVars(pgrid); for (var1 = 0; var1 < nvars; var1++) { stencils= hypre_SStructGraphStencil(graph, levels[level], var1); HYPRE_SStructGraphSetStencil(graph_level[level], part_fine, var1, stencils); if (level == 0) { HYPRE_SStructGraphSetStencil(graph_level[level], part_crse, var1, stencils); } } } /*-------------------------------------------------------------------------- * Create the coarse part of the graph_level using the graph of A, and the * fine part if level < max_level. *--------------------------------------------------------------------------*/ if (level > 0) { pgrid = hypre_SStructGridPGrid(grid, levels[level-1]); nvars = hypre_SStructPGridNVars(pgrid); for (var1 = 0; var1 < nvars; var1++) { stencils= hypre_SStructGraphStencil(graph, levels[level-1], var1); HYPRE_SStructGraphSetStencil(graph_level[level], part_crse, var1, stencils ); HYPRE_SStructGraphSetStencil(graph_level[level-1], part_fine, var1, stencils ); if (level == 1) { HYPRE_SStructGraphSetStencil(graph_level[level-1], part_crse, var1, stencils ); } } } } /*----------------------------------------------------------- * Extract the non-stencil graph structure: assuming only like * variables connect. Also count the number of unstructured * connections per part. * * THE COARSEST COMPOSITE MATRIX DOES NOT HAVE ANY NON-STENCIL * CONNECTIONS.
*-----------------------------------------------------------*/ Uventries = hypre_SStructGraphUVEntries(graph); nUventries= hypre_SStructGraphNUVEntries(graph); iUventries= hypre_SStructGraphIUVEntries(graph); nrows = hypre_CTAlloc(HYPRE_Int, max_level+1); for (i= 0; i< nUventries; i++) { Uventry= Uventries[iUventries[i]]; part = hypre_SStructUVEntryPart(Uventry); hypre_CopyIndex(hypre_SStructUVEntryIndex(Uventry), index); var = hypre_SStructUVEntryVar(Uventry); nUentries= hypre_SStructUVEntryNUEntries(Uventry); for (k= 0; k< nUentries; k++) { Uentry = hypre_SStructUVEntryUEntry(Uventry, k); to_part = hypre_SStructUEntryToPart(Uentry); hypre_CopyIndex(hypre_SStructUEntryToIndex(Uentry), to_index); to_var = hypre_SStructUEntryToVar(Uentry); if ( part_to_level[part] >= part_to_level[to_part] ) { level = part_to_level[part]; level_part = part_fine; level_topart = part_crse; } else { level = part_to_level[to_part]; level_part = part_crse; level_topart = part_fine; } nrows[level]++; HYPRE_SStructGraphAddEntries(graph_level[level], level_part, index, var, level_topart, to_index, to_var); } } for (level= 0; level <= max_level; level++) { HYPRE_SStructGraphAssemble(graph_level[level]); } (fac_data -> graph_level)= graph_level; /*--------------------------------------------------------------- * Create the level SStruct_Vectors, and temporary global * sstruct_vector. *---------------------------------------------------------------*/ b_level= hypre_TAlloc(hypre_SStructVector *, max_level+1); x_level= hypre_TAlloc(hypre_SStructVector *, max_level+1); r_level= hypre_TAlloc(hypre_SStructVector *, max_level+1); e_level= hypre_TAlloc(hypre_SStructVector *, max_level+1); tx_level= hypre_TAlloc(hypre_SStructPVector *, max_level+1); for (level= 0; level<= max_level; level++) { HYPRE_SStructVectorCreate(comm, grid_level[level], &b_level[level]); HYPRE_SStructVectorInitialize(b_level[level]); HYPRE_SStructVectorAssemble(b_level[level]); HYPRE_SStructVectorCreate(comm, grid_level[level], &x_level[level]); HYPRE_SStructVectorInitialize(x_level[level]); HYPRE_SStructVectorAssemble(x_level[level]); HYPRE_SStructVectorCreate(comm, grid_level[level], &r_level[level]); HYPRE_SStructVectorInitialize(r_level[level]); HYPRE_SStructVectorAssemble(r_level[level]); HYPRE_SStructVectorCreate(comm, grid_level[level], &e_level[level]); HYPRE_SStructVectorInitialize(e_level[level]); HYPRE_SStructVectorAssemble(e_level[level]); /* temporary vector for fine patch relaxation */ hypre_SStructPVectorCreate(comm, hypre_SStructGridPGrid(grid_level[level], part_fine), &tx_level[level]); hypre_SStructPVectorInitialize(tx_level[level]); hypre_SStructPVectorAssemble(tx_level[level]); } /* temp SStructVectors */ HYPRE_SStructVectorCreate(comm, grid, &tx); HYPRE_SStructVectorInitialize(tx); HYPRE_SStructVectorAssemble(tx); (fac_data -> b_level) = b_level; (fac_data -> x_level) = x_level; (fac_data -> r_level) = r_level; (fac_data -> e_level) = e_level; (fac_data -> tx_level)= tx_level; (fac_data -> tx) = tx; /*----------------------------------------------------------- * Set up the level composite sstruct_matrices.
*-----------------------------------------------------------*/ A_level= hypre_TAlloc(hypre_SStructMatrix *, max_level+1); hypre_SetIndex(stride, 1, 1, 1); for (level= 0; level <= max_level; level++) { HYPRE_SStructMatrixCreate(comm, graph_level[level], &A_level[level]); HYPRE_SStructMatrixInitialize(A_level[level]); max_box_volume= 0; pgrid = hypre_SStructGridPGrid(grid, levels[level]); nvars = hypre_SStructPGridNVars(pgrid); for (var1 = 0; var1 < nvars; var1++) { sgrid= hypre_SStructPGridSGrid(pgrid, var1); sgrid_boxes= hypre_StructGridBoxes(sgrid); hypre_ForBoxI(i, sgrid_boxes) { sgrid_box = hypre_BoxArrayBox(sgrid_boxes, i); box_volume= hypre_BoxVolume(sgrid_box); max_box_volume= hypre_max(max_box_volume, box_volume); } } values = hypre_TAlloc(double, max_box_volume); A_pmatrix= hypre_SStructMatrixPMatrix(A_rap, levels[level]); /*----------------------------------------------------------- * extract stencil values for all fine levels. *-----------------------------------------------------------*/ for (var1 = 0; var1 < nvars; var1++) { sgrid= hypre_SStructPGridSGrid(pgrid, var1); sgrid_boxes= hypre_StructGridBoxes(sgrid); stencils= hypre_SStructGraphStencil(graph, levels[level], var1); stencil_size= hypre_SStructStencilSize(stencils); stencil_vars= hypre_SStructStencilVars(stencils); for (i = 0; i < stencil_size; i++) { var2= stencil_vars[i]; A_smatrix= hypre_SStructPMatrixSMatrix(A_pmatrix, var1, var2); hypre_CopyIndex(hypre_SStructStencilEntry(stencils, i), stencil_shape_i); hypre_ForBoxI(j, sgrid_boxes) { sgrid_box= hypre_BoxArrayBox(sgrid_boxes, j); box_start= hypre_BoxIMin(sgrid_box); box_end = hypre_BoxIMax(sgrid_box); A_smatrix_dbox= hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A_smatrix), j); A_smatrix_value= hypre_StructMatrixExtractPointerByIndex(A_smatrix, j, stencil_shape_i); hypre_BoxGetSize(sgrid_box, loop_size); hypre_BoxLoop2Begin(ndim, loop_size, sgrid_box, box_start, stride, k, A_smatrix_dbox, box_start, stride, iA); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,k,iA) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(k, iA) { values[k]= A_smatrix_value[iA]; } hypre_BoxLoop2End(k, iA); HYPRE_SStructMatrixSetBoxValues(A_level[level], part_fine, box_start, box_end, var1, 1, &i, values); } /* hypre_ForBoxI */ } /* for i */ } /* for var1 */ hypre_TFree(values); /*----------------------------------------------------------- * Extract the coarse part *-----------------------------------------------------------*/ if (level > 0) { max_box_volume= 0; pgrid = hypre_SStructGridPGrid(grid, levels[level-1]); nvars = hypre_SStructPGridNVars(pgrid); for (var1 = 0; var1 < nvars; var1++) { sgrid = hypre_SStructPGridSGrid( pgrid, var1 ); sgrid_boxes= hypre_StructGridBoxes(sgrid); hypre_ForBoxI( i, sgrid_boxes ) { sgrid_box = hypre_BoxArrayBox(sgrid_boxes, i); box_volume= hypre_BoxVolume(sgrid_box); max_box_volume= hypre_max(max_box_volume, box_volume ); } } values = hypre_TAlloc(double, max_box_volume); A_pmatrix= hypre_SStructMatrixPMatrix(A_rap, levels[level-1]); /*----------------------------------------------------------- * extract stencil values *-----------------------------------------------------------*/ for (var1 = 0; var1 < nvars; var1++) { sgrid = hypre_SStructPGridSGrid(pgrid, var1); sgrid_boxes= hypre_StructGridBoxes(sgrid); stencils= hypre_SStructGraphStencil(graph, levels[level-1], var1); stencil_size= hypre_SStructStencilSize(stencils); stencil_vars= hypre_SStructStencilVars(stencils); for (i = 0; i < stencil_size; i++) { var2= stencil_vars[i]; 
A_smatrix= hypre_SStructPMatrixSMatrix(A_pmatrix, var1, var2); hypre_CopyIndex(hypre_SStructStencilEntry(stencils, i), stencil_shape_i); hypre_ForBoxI( j, sgrid_boxes ) { sgrid_box= hypre_BoxArrayBox(sgrid_boxes, j); box_start= hypre_BoxIMin(sgrid_box); box_end = hypre_BoxIMax(sgrid_box); A_smatrix_dbox= hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A_smatrix), j); A_smatrix_value= hypre_StructMatrixExtractPointerByIndex(A_smatrix, j, stencil_shape_i); hypre_BoxGetSize(sgrid_box, loop_size); hypre_BoxLoop2Begin(ndim, loop_size, sgrid_box, box_start, stride, k, A_smatrix_dbox, box_start, stride, iA); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,k,iA) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(k, iA) { values[k]= A_smatrix_value[iA]; } hypre_BoxLoop2End(k, iA); HYPRE_SStructMatrixSetBoxValues(A_level[level], part_crse, box_start, box_end, var1, 1, &i, values); } /* hypre_ForBoxI */ } /* for i */ } /* for var1 */ hypre_TFree(values); } /* if level > 0 */ } /* for level */ /*----------------------------------------------------------- * extract the non-stencil values for all but the coarsest * level sstruct_matrix. Use the HYPRE_IJMatrixGetValues * for each level of A. *-----------------------------------------------------------*/ Uventries = hypre_SStructGraphUVEntries(graph); nUventries= hypre_SStructGraphNUVEntries(graph); iUventries= hypre_SStructGraphIUVEntries(graph); /*----------------------------------------------------------- * Allocate memory for arguments of HYPRE_IJMatrixGetValues. *-----------------------------------------------------------*/ ncols = hypre_TAlloc(HYPRE_Int *, max_level+1); rows = hypre_TAlloc(HYPRE_Int *, max_level+1); cols = hypre_TAlloc(HYPRE_Int *, max_level+1); cnt = hypre_CTAlloc(HYPRE_Int, max_level+1); ncols[0]= NULL; rows[0] = NULL; cols[0] = NULL; for (level= 1; level<= max_level; level++) { ncols[level]= hypre_TAlloc(HYPRE_Int, nrows[level]); for (i=0; i< nrows[level]; i++) { ncols[level][i]= 1; } rows[level] = hypre_TAlloc(HYPRE_Int, nrows[level]); cols[level] = hypre_TAlloc(HYPRE_Int, nrows[level]); } for (i= 0; i< nUventries; i++) { Uventry = Uventries[iUventries[i]]; part = hypre_SStructUVEntryPart(Uventry); hypre_CopyIndex(hypre_SStructUVEntryIndex(Uventry), index); var = hypre_SStructUVEntryVar(Uventry); hypre_SStructGridFindBoxManEntry(grid, part, index, var, &boxman_entry); hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, index, &row_coord, matrix_type); nUentries= hypre_SStructUVEntryNUEntries(Uventry); for (k= 0; k< nUentries; k++) { to_part = hypre_SStructUVEntryToPart(Uventry, k); to_rank = hypre_SStructUVEntryToRank(Uventry, k); /*----------------------------------------------------------- * store the row & col indices in the correct level. *-----------------------------------------------------------*/ level = hypre_max( part_to_level[part], part_to_level[to_part] ); rows[level][ cnt[level] ]= row_coord; cols[level][ cnt[level]++ ]= to_rank; } } hypre_TFree(cnt); for (level= 1; level<= max_level; level++) { vals = hypre_CTAlloc(double, nrows[level]); level_rows= hypre_TAlloc(HYPRE_Int, nrows[level]); level_cols= hypre_TAlloc(HYPRE_Int, nrows[level]); HYPRE_IJMatrixGetValues(ij_A, nrows[level], ncols[level], rows[level], cols[level], vals); Uventries = hypre_SStructGraphUVEntries(graph_level[level]); /*----------------------------------------------------------- * Find the rows & cols of the level ij_matrices where the * extracted data must be placed. 
    * Note that because the order in which the HYPRE_SStructGraphAddEntries
    * calls were made for the graph_level's is the same order in which
    * rows[level] & cols[level] were formed, the coefficients in vals are
    * in the correct order.
    *-----------------------------------------------------------*/
   level_cnt= 0;
   for (i= 0; i< hypre_SStructGraphNUVEntries(graph_level[level]); i++)
   {
      j = hypre_SStructGraphIUVEntry(graph_level[level], i);
      Uventry= Uventries[j];

      part= hypre_SStructUVEntryPart(Uventry);
      hypre_CopyIndex(hypre_SStructUVEntryIndex(Uventry), index);
      var = hypre_SStructUVEntryVar(Uventry);

      hypre_SStructGridFindBoxManEntry(grid_level[level], part, index, var,
                                       &boxman_entry);
      hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, index, &row_coord,
                                            matrix_type);

      nUentries= hypre_SStructUVEntryNUEntries(Uventry);
      for (k= 0; k< nUentries; k++)
      {
         to_rank = hypre_SStructUVEntryToRank(Uventry, k);
         level_rows[level_cnt]  = row_coord;
         level_cols[level_cnt++]= to_rank;
      }
   }

   /*-----------------------------------------------------------
    * Place the extracted ij coefficients into the level ij
    * matrices.
    *-----------------------------------------------------------*/
   HYPRE_IJMatrixSetValues( hypre_SStructMatrixIJMatrix(A_level[level]),
                            nrows[level], ncols[level],
                            (const HYPRE_Int *) level_rows,
                            (const HYPRE_Int *) level_cols,
                            (const double *) vals );

   hypre_TFree(ncols[level]);
   hypre_TFree(rows[level]);
   hypre_TFree(cols[level]);
   hypre_TFree(vals);
   hypre_TFree(level_rows);
   hypre_TFree(level_cols);
   }

   hypre_TFree(ncols);
   hypre_TFree(rows);
   hypre_TFree(cols);
   hypre_TFree(nrows);

   /*---------------------------------------------------------------
    * Construct the fine grid (part 1) SStruct_PMatrix for all
    * levels except for max_level. This involves coarsening the
    * finer level SStruct_Matrix. Coarsening involves interpolation,
    * matvec, and restriction (to obtain the "row-sum").
 *---------------------------------------------------------------*/
   matvec_data_level  = hypre_TAlloc(void *, max_level+1);
   pmatvec_data_level = hypre_TAlloc(void *, max_level+1);
   interp_data_level  = hypre_TAlloc(void *, max_level+1);
   restrict_data_level= hypre_TAlloc(void *, max_level+1);

   for (level= 0; level<= max_level; level++)
   {
      if (level < max_level)
      {
         hypre_FacSemiInterpCreate2(&interp_data_level[level]);
         hypre_FacSemiInterpSetup2(interp_data_level[level],
                                   x_level[level+1],
                                   hypre_SStructVectorPVector(x_level[level], part_fine),
                                   refine_factors[level+1]);
      }
      else
      {
         interp_data_level[level]= NULL;
      }

      if (level > 0)
      {
         hypre_FacSemiRestrictCreate2(&restrict_data_level[level]);
         hypre_FacSemiRestrictSetup2(restrict_data_level[level],
                                     x_level[level], part_crse, part_fine,
                                     hypre_SStructVectorPVector(x_level[level-1], part_fine),
                                     refine_factors[level]);
      }
      else
      {
         restrict_data_level[level]= NULL;
      }
   }

   for (level= max_level; level> 0; level--)
   {
      /*
      hypre_FacZeroCFSten(hypre_SStructMatrixPMatrix(A_level[level], part_fine),
                          hypre_SStructMatrixPMatrix(A_level[level], part_crse),
                          grid_level[level], part_fine, refine_factors[level]);
      hypre_FacZeroFCSten(hypre_SStructMatrixPMatrix(A_level[level], part_fine),
                          grid_level[level], part_fine);
      */
      hypre_ZeroAMRMatrixData(A_level[level], part_crse, refine_factors[level]);
      HYPRE_SStructMatrixAssemble(A_level[level]);

      /*------------------------------------------------------------
       * create data structures that are needed for coarsening
       *------------------------------------------------------------*/
      hypre_SStructMatvecCreate(&matvec_data_level[level]);
      hypre_SStructMatvecSetup(matvec_data_level[level], A_level[level],
                               x_level[level]);

      hypre_SStructPMatvecCreate(&pmatvec_data_level[level]);
      hypre_SStructPMatvecSetup(pmatvec_data_level[level],
                                hypre_SStructMatrixPMatrix(A_level[level], part_fine),
                                hypre_SStructVectorPVector(x_level[level], part_fine));
   }

   /*---------------------------------------------------------------
    * To avoid memory leaks, we cannot reference the coarsest level
    * SStructPMatrix. We need only copy the structured coefs.
*---------------------------------------------------------------*/ pgrid= hypre_SStructGridPGrid(grid_level[0], part_fine); nvars= hypre_SStructPGridNVars(pgrid); A_pmatrix= hypre_SStructMatrixPMatrix(A_level[0], part_fine); for (var1 = 0; var1 < nvars; var1++) { sgrid= hypre_SStructPGridSGrid(pgrid, var1); sgrid_boxes= hypre_StructGridBoxes(sgrid); max_box_volume= 0; hypre_ForBoxI(i, sgrid_boxes) { sgrid_box = hypre_BoxArrayBox(sgrid_boxes, i); box_volume= hypre_BoxVolume(sgrid_box); max_box_volume= hypre_max(max_box_volume, box_volume); } values = hypre_TAlloc(double, max_box_volume); stencils= hypre_SStructGraphStencil(graph_level[0], part_fine, var1); stencil_size= hypre_SStructStencilSize(stencils); stencil_vars= hypre_SStructStencilVars(stencils); for (i = 0; i < stencil_size; i++) { var2= stencil_vars[i]; A_smatrix= hypre_SStructPMatrixSMatrix(A_pmatrix, var1, var2); hypre_CopyIndex(hypre_SStructStencilEntry(stencils, i), stencil_shape_i); hypre_ForBoxI(j, sgrid_boxes) { sgrid_box= hypre_BoxArrayBox(sgrid_boxes, j); box_start= hypre_BoxIMin(sgrid_box); box_end = hypre_BoxIMax(sgrid_box); A_smatrix_dbox= hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A_smatrix), j); A_smatrix_value= hypre_StructMatrixExtractPointerByIndex(A_smatrix, j, stencil_shape_i); hypre_BoxGetSize(sgrid_box, loop_size); hypre_BoxLoop2Begin(ndim, loop_size, sgrid_box, box_start, stride, k, A_smatrix_dbox, box_start, stride, iA); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,k,iA) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(k, iA) { values[k]= A_smatrix_value[iA]; } hypre_BoxLoop2End(k, iA); HYPRE_SStructMatrixSetBoxValues(A_level[0], part_crse, box_start, box_end, var1, 1, &i, values); } /* hypre_ForBoxI */ } /* for i */ hypre_TFree(values); } /* for var1 */ HYPRE_SStructMatrixAssemble(A_level[0]); hypre_SStructMatvecCreate(&matvec_data_level[0]); hypre_SStructMatvecSetup(matvec_data_level[0], A_level[0], x_level[0]); hypre_SStructPMatvecCreate(&pmatvec_data_level[0]); hypre_SStructPMatvecSetup(pmatvec_data_level[0], hypre_SStructMatrixPMatrix(A_level[0],part_fine), hypre_SStructVectorPVector(x_level[0],part_fine)); hypre_SStructMatvecCreate(&matvec_data); hypre_SStructMatvecSetup(matvec_data, A_rap, x); /*HYPRE_SStructVectorPrint("sstruct.out.b_l", b_level[max_level], 0);*/ /*HYPRE_SStructMatrixPrint("sstruct.out.A_l", A_level[max_level-2], 0);*/ (fac_data -> A_level) = A_level; (fac_data -> matvec_data_level) = matvec_data_level; (fac_data -> pmatvec_data_level) = pmatvec_data_level; (fac_data -> matvec_data) = matvec_data; (fac_data -> interp_data_level) = interp_data_level; (fac_data -> restrict_data_level) = restrict_data_level; /*--------------------------------------------------------------- * Create the fine patch relax_data structure. 
 *---------------------------------------------------------------*/
   relax_data_level = hypre_TAlloc(void *, max_level+1);

   for (level= 0; level<= max_level; level++)
   {
      relax_data_level[level]= hypre_SysPFMGRelaxCreate(comm);
      hypre_SysPFMGRelaxSetTol(relax_data_level[level], 0.0);
      hypre_SysPFMGRelaxSetType(relax_data_level[level], relax_type);
      if (usr_jacobi_weight)
      {
         hypre_SysPFMGRelaxSetJacobiWeight(relax_data_level[level], jacobi_weight);
      }
      hypre_SysPFMGRelaxSetTempVec(relax_data_level[level], tx_level[level]);
      hypre_SysPFMGRelaxSetup(relax_data_level[level],
                              hypre_SStructMatrixPMatrix(A_level[level], part_fine),
                              hypre_SStructVectorPVector(b_level[level], part_fine),
                              hypre_SStructVectorPVector(x_level[level], part_fine));
   }
   (fac_data -> relax_data_level) = relax_data_level;

   /*---------------------------------------------------------------
    * Create the coarsest composite level preconditioned solver.
    *  csolver_type= 1  multigrid-pcg
    *  csolver_type= 2  multigrid
    *---------------------------------------------------------------*/
   if (csolver_type == 1)
   {
      HYPRE_SStructPCGCreate(comm, &crse_solver);
      HYPRE_PCGSetMaxIter((HYPRE_Solver) crse_solver, 1);
      HYPRE_PCGSetTol((HYPRE_Solver) crse_solver, 1.0e-6);
      HYPRE_PCGSetTwoNorm((HYPRE_Solver) crse_solver, 1);

      /* use SysPFMG solver as preconditioner */
      HYPRE_SStructSysPFMGCreate(comm, &crse_precond);
      HYPRE_SStructSysPFMGSetMaxIter(crse_precond, 1);
      HYPRE_SStructSysPFMGSetTol(crse_precond, 0.0);
      HYPRE_SStructSysPFMGSetZeroGuess(crse_precond);
      /* weighted Jacobi = 1; red-black GS = 2 */
      HYPRE_SStructSysPFMGSetRelaxType(crse_precond, 3);
      if (usr_jacobi_weight)
      {
         HYPRE_SStructSysPFMGSetJacobiWeight(crse_precond, jacobi_weight);
      }
      HYPRE_SStructSysPFMGSetNumPreRelax(crse_precond, 1);
      HYPRE_SStructSysPFMGSetNumPostRelax(crse_precond, 1);

      HYPRE_PCGSetPrecond((HYPRE_Solver) crse_solver,
                          (HYPRE_PtrToSolverFcn) HYPRE_SStructSysPFMGSolve,
                          (HYPRE_PtrToSolverFcn) HYPRE_SStructSysPFMGSetup,
                          (HYPRE_Solver) crse_precond);

      HYPRE_PCGSetup((HYPRE_Solver) crse_solver, (HYPRE_Matrix) A_level[0],
                     (HYPRE_Vector) b_level[0], (HYPRE_Vector) x_level[0]);
   }
   else if (csolver_type == 2)
   {
      crse_precond= NULL;

      HYPRE_SStructSysPFMGCreate(comm, &crse_solver);
      HYPRE_SStructSysPFMGSetMaxIter(crse_solver, 1);
      HYPRE_SStructSysPFMGSetTol(crse_solver, 1.0e-6);
      HYPRE_SStructSysPFMGSetZeroGuess(crse_solver);
      /* weighted Jacobi = 1; red-black GS = 2 */
      HYPRE_SStructSysPFMGSetRelaxType(crse_solver, relax_type);
      if (usr_jacobi_weight)
      {
         /* crse_precond is NULL in this branch: the weight belongs to the
            SysPFMG solver itself */
         HYPRE_SStructSysPFMGSetJacobiWeight(crse_solver, jacobi_weight);
      }
      HYPRE_SStructSysPFMGSetNumPreRelax(crse_solver, 1);
      HYPRE_SStructSysPFMGSetNumPostRelax(crse_solver, 1);

      HYPRE_SStructSysPFMGSetup(crse_solver, A_level[0], b_level[0], x_level[0]);
   }
   (fac_data -> csolver)  = crse_solver;
   (fac_data -> cprecond) = crse_precond;

   hypre_FacZeroCData(fac_vdata, A_rap);

   return ierr;
}
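/* Usage sketch (illustrative, not part of this file): the setup above is the
 * internal half of hypre's FAC solver and is reached through the public
 * HYPRE_SStructFAC* interface.  `A`, `b`, `x` are an assembled SStruct system
 * on nparts = max_level+1 parts; `plevels` and `rfactors` (part levels and
 * refinement factors) are assumed, user-supplied arrays.  Assumes
 * HYPRE_sstruct_ls.h is on the include path; check the exact
 * SetPRefinements signature against your hypre version. */
#include "HYPRE_sstruct_ls.h"

void fac_solve_sketch(HYPRE_SStructMatrix A, HYPRE_SStructVector b,
                      HYPRE_SStructVector x, int max_level,
                      HYPRE_Int *plevels, HYPRE_Int (*rfactors)[3])
{
   HYPRE_SStructSolver solver;

   HYPRE_SStructFACCreate(MPI_COMM_WORLD, &solver);
   HYPRE_SStructFACSetMaxLevels(solver, max_level + 1);
   HYPRE_SStructFACSetPLevels(solver, max_level + 1, plevels);
   HYPRE_SStructFACSetPRefinements(solver, max_level + 1, rfactors);
   HYPRE_SStructFACSetMaxIter(solver, 50);
   HYPRE_SStructFACSetTol(solver, 1.0e-8);
   HYPRE_SStructFACSetNumPreRelax(solver, 1);
   HYPRE_SStructFACSetNumPostRelax(solver, 1);

   HYPRE_SStructFACSetup2(solver, A, b, x);  /* drives the setup code above */
   HYPRE_SStructFACSolve3(solver, A, b, x);

   HYPRE_SStructFACDestroy2(solver);
}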
OMParrayMulti.c
// Array multiplication using OpenMP and MPI
// Author: Dimitris Gravanis, 2017
/* work in progress */

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "mpi.h"

int main(int argc, char *argv[])
{
    int *A, *B, *C;      // array pointers
    int N = 0;           // array size
    int i;               // iterator
    double start, stop;  // timer start, stop

    /* MPI variables */
    int id, P; // rank, size

    /* MPI Start */
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &id);
    MPI_Comm_size(MPI_COMM_WORLD, &P);

    if (id == 0)
    {
        printf("\nInsert array size N: ");
        fflush(stdout); /* flush so the prompt appears before scanf blocks */
        if (scanf("%d", &N) != 1 || N <= 0)
        {
            fprintf(stderr, "invalid array size\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
    }
    MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD);

    A = malloc(N * sizeof(int));
    B = malloc(N * sizeof(int));
    C = malloc(N * sizeof(int));
    if (A == NULL || B == NULL || C == NULL)
    {
        fprintf(stderr, "allocation failed\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    for (i = 0; i < N; i++)
    {
        A[i] = rand() % 10;
        B[i] = rand() % 10;
    }

    if (id == 0)
    {
        start = omp_get_wtime(); // timer start
    }

    /* note: every rank still forms the full product; the OpenMP threads
       split the loop, the MPI ranks do not (work in progress) */
    #pragma omp parallel for private(i)
    for (i = 0; i < N; i++)
    {
        C[i] = A[i] * B[i];
    }

    if (id == 0)
    {
        stop = omp_get_wtime(); // timer stop

        /* display results */
        printf("\n\n");
        printf("Input arrays");
        printf("\nA:\n");
        for (i = 0; i < N; i++)
        {
            printf("%4d", A[i]);
            if (i == N - 1) { printf("\n"); }
        }
        printf("\nB:\n");
        for (i = 0; i < N; i++)
        {
            printf("%4d", B[i]);
            if (i == N - 1) { printf("\n"); }
        }
        printf("\n\n");
        printf("Output array");
        printf("\nC:\n");
        for (i = 0; i < N; i++)
        {
            printf("%4d", C[i]);
            if (i == N - 1) { printf("\n"); }
        }
        printf("\n\n");
        printf("Total run time: %.6fs", stop - start);
        printf("\n\n");
    }

    /* release the arrays before shutting MPI down */
    free(A);
    free(B);
    free(C);

    MPI_Finalize();
    return 0;
}
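/* Sketch (not the author's finished version): the program above is marked
 * "work in progress", and every rank forms the entire product.  One way the
 * ranks could actually split the work, assuming N % P == 0 for brevity:
 * scatter blocks from rank 0, multiply each block with OpenMP threads, and
 * gather the results back.  A, B, C need only be valid on rank 0. */
static void multiply_distributed(const int *A, const int *B, int *C, int N, int P)
{
    int chunk = N / P;
    int *locA = (int *) malloc(chunk * sizeof(int));
    int *locB = (int *) malloc(chunk * sizeof(int));
    int *locC = (int *) malloc(chunk * sizeof(int));

    /* cast away const: the send buffer is only significant on the root */
    MPI_Scatter((void *) A, chunk, MPI_INT, locA, chunk, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Scatter((void *) B, chunk, MPI_INT, locB, chunk, MPI_INT, 0, MPI_COMM_WORLD);

    /* each rank multiplies its own block; the loop index is private */
    #pragma omp parallel for
    for (int i = 0; i < chunk; i++)
    {
        locC[i] = locA[i] * locB[i];
    }

    MPI_Gather(locC, chunk, MPI_INT, C, chunk, MPI_INT, 0, MPI_COMM_WORLD);

    free(locA);
    free(locB);
    free(locC);
}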
par_csr_matop_device.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_utilities.h"
#include "_hypre_parcsr_mv.h"
#include "_hypre_utilities.hpp"

#if defined(HYPRE_USING_CUDA)

HYPRE_Int
hypre_ParcsrGetExternalRowsDeviceInit( hypre_ParCSRMatrix   *A,
                                       HYPRE_Int             indices_len,
                                       HYPRE_Int            *indices,
                                       hypre_ParCSRCommPkg  *comm_pkg,
                                       HYPRE_Int             want_data,
                                       void                **request_ptr)
{
   HYPRE_Int  i, j;
   HYPRE_Int  num_sends, num_rows_send, num_nnz_send, num_recvs, num_rows_recv, num_nnz_recv;
   HYPRE_Int *d_send_i, *send_i, *d_send_map, *d_recv_i, *recv_i;
   HYPRE_BigInt *d_send_j, *d_recv_j;
   HYPRE_Int *send_jstarts, *recv_jstarts;
   HYPRE_Complex *d_send_a = NULL, *d_recv_a = NULL;
   hypre_ParCSRCommPkg    *comm_pkg_j;
   hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;

   /* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */

   /* diag part of A */
   hypre_CSRMatrix *A_diag   = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex   *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); */

   /* off-diag part of A */
   hypre_CSRMatrix *A_offd   = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex   *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);

   /* HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A); */
   /* HYPRE_Int first_row = hypre_ParCSRMatrixFirstRowIndex(A); */
   HYPRE_Int     first_col        = hypre_ParCSRMatrixFirstColDiag(A);
   HYPRE_BigInt *col_map_offd_A   = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int     num_cols_A_offd  = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *d_col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A);

   MPI_Comm   comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int  num_procs;
   HYPRE_Int  my_id;
   void     **vrequest;
   hypre_CSRMatrix *A_ext;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* number of sends (#procs) */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* number of rows to send */
   num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
   /* number of recvs (#procs) */
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   /* number of rows to recv */
   num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);

   /* must be true if indices contains proper offd indices */
   hypre_assert(indices_len == num_rows_recv);

   /* send_i/recv_i:
    * the arrays to send and recv: we first send and recv the row lengths */
   d_send_i   = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_DEVICE);
   d_send_map = hypre_TAlloc(HYPRE_Int, num_rows_send,     HYPRE_MEMORY_DEVICE);
   send_i     = hypre_TAlloc(HYPRE_Int, num_rows_send,     HYPRE_MEMORY_HOST);
   recv_i     = hypre_TAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST);
   d_recv_i   = hypre_TAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_DEVICE);

   /* fill the send array with row lengths */
   hypre_TMemcpy(d_send_map, hypre_ParCSRCommPkgSendMapElmts(comm_pkg), HYPRE_Int,
                 num_rows_send, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
   hypre_Memset(d_send_i, 0, sizeof(HYPRE_Int), HYPRE_MEMORY_DEVICE);
   hypreDevice_GetRowNnz(num_rows_send, d_send_map, A_diag_i, A_offd_i, d_send_i+1);

   /* send array send_i out: device-to-host first, then MPI (async);
    * note the shift in recv_i by one */
   hypre_TMemcpy(send_i, d_send_i+1, HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST,
                 HYPRE_MEMORY_DEVICE);
   comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i+1);

   hypreDevice_IntegerInclusiveScan(num_rows_send + 1, d_send_i);

   /* total number of nnz to send */
   hypre_TMemcpy(&num_nnz_send, d_send_i+num_rows_send, HYPRE_Int, 1,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);

   /* prepare data to send out. overlap with the above communication */
   d_send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_DEVICE);
   if (want_data)
   {
      d_send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_DEVICE);
   }

   if (d_col_map_offd_A == NULL)
   {
      d_col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(d_col_map_offd_A, col_map_offd_A, HYPRE_BigInt, num_cols_A_offd,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixDeviceColMapOffd(A) = d_col_map_offd_A;
   }

   /* job == 2, d_send_i is input that contains row ptrs (length num_rows_send) */
   hypreDevice_CopyParCSRRows(num_rows_send, d_send_map, 2, num_procs > 1,
                              first_col, d_col_map_offd_A,
                              A_diag_i, A_diag_j, A_diag_a,
                              A_offd_i, A_offd_j, A_offd_a,
                              d_send_i, d_send_j, d_send_a);

   /* pointers to each proc in send_j */
   send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
   send_jstarts[0] = 0;
   for (i = 1; i <= num_sends; i++)
   {
      send_jstarts[i] = send_jstarts[i-1];
      for ( j = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i-1);
            j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            j++ )
      {
         send_jstarts[i] += send_i[j];
      }
   }
   hypre_assert(send_jstarts[num_sends] == num_nnz_send);

   /* finish the above communication: send_i/recv_i */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* adjust recv_i to ptrs */
   recv_i[0] = 0;
   for (i = 1; i <= num_rows_recv; i++)
   {
      recv_i[i] += recv_i[i-1];
   }
   num_nnz_recv = recv_i[num_rows_recv];

   /* allocate device memory for j and a */
   d_recv_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_DEVICE);
   if (want_data)
   {
      d_recv_a = hypre_TAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_DEVICE);
   }

   recv_jstarts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   recv_jstarts[0] = 0;
   for (i = 1; i <= num_recvs; i++)
   {
      j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
      recv_jstarts[i] = recv_i[j];
   }

   /* ready to send and recv: create a communication package for data */
   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm         (comm_pkg_j) = comm;
   hypre_ParCSRCommPkgNumSends     (comm_pkg_j) = num_sends;
   hypre_ParCSRCommPkgSendProcs    (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg);
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts;
   hypre_ParCSRCommPkgNumRecvs     (comm_pkg_j) = num_recvs;
   hypre_ParCSRCommPkgRecvProcs    (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts;

   /* init communication */
   /* ja */
   comm_handle_j = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg_j,
                                                   HYPRE_MEMORY_DEVICE, d_send_j,
                                                   HYPRE_MEMORY_DEVICE, d_recv_j);
   if (want_data)
   {
      /* a */
      comm_handle_a = hypre_ParCSRCommHandleCreate_v2(1, comm_pkg_j,
                                                      HYPRE_MEMORY_DEVICE, d_send_a,
                                                      HYPRE_MEMORY_DEVICE, d_recv_a);
   }
   else
   {
      comm_handle_a = NULL;
   }

   hypre_TMemcpy(d_recv_i, recv_i, HYPRE_Int, num_rows_recv+1, HYPRE_MEMORY_DEVICE,
                 HYPRE_MEMORY_HOST);

   /* create A_ext: on device */
   A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv);
   hypre_CSRMatrixI   (A_ext) = d_recv_i;
   hypre_CSRMatrixBigJ(A_ext) = d_recv_j;
   hypre_CSRMatrixData(A_ext) =
d_recv_a;
   hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_DEVICE;

   /* output */
   vrequest = hypre_TAlloc(void *, 3, HYPRE_MEMORY_HOST);
   vrequest[0] = (void *) comm_handle_j;
   vrequest[1] = (void *) comm_handle_a;
   vrequest[2] = (void *) A_ext;

   *request_ptr = (void *) vrequest;

   /* free */
   hypre_TFree(send_i,     HYPRE_MEMORY_HOST);
   hypre_TFree(recv_i,     HYPRE_MEMORY_HOST);
   hypre_TFree(d_send_i,   HYPRE_MEMORY_DEVICE);
   hypre_TFree(d_send_map, HYPRE_MEMORY_DEVICE);
   hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

hypre_CSRMatrix*
hypre_ParcsrGetExternalRowsDeviceWait(void *vrequest)
{
   void **request = (void **) vrequest;

   hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
   hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
   hypre_CSRMatrix        *A_ext         = (hypre_CSRMatrix *)        request[2];

   HYPRE_BigInt  *send_j = comm_handle_j ? (HYPRE_BigInt *)  hypre_ParCSRCommHandleSendData(comm_handle_j) : NULL;
   HYPRE_Complex *send_a = comm_handle_a ? (HYPRE_Complex *) hypre_ParCSRCommHandleSendData(comm_handle_a) : NULL;

   hypre_ParCSRCommHandleDestroy(comm_handle_j);
   hypre_ParCSRCommHandleDestroy(comm_handle_a);

   hypre_TFree(send_j, HYPRE_MEMORY_DEVICE);
   hypre_TFree(send_a, HYPRE_MEMORY_DEVICE);
   hypre_TFree(request, HYPRE_MEMORY_HOST);

   return A_ext;
}

hypre_CSRMatrix*
hypre_MergeDiagAndOffdDevice(hypre_ParCSRMatrix *A)
{
   MPI_Comm         comm     = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag   = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex   *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd   = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex   *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);

   HYPRE_Int     local_num_rows   = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt  global_num_cols  = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_BigInt  first_col        = hypre_ParCSRMatrixFirstColDiag(A);
   HYPRE_Int     num_cols_A_offd  = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *col_map_offd_A   = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_BigInt *d_col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A);

   hypre_CSRMatrix *B;
   HYPRE_Int        B_nrows = local_num_rows;
   HYPRE_BigInt     B_ncols = global_num_cols;
   HYPRE_Int       *B_i = hypre_TAlloc(HYPRE_Int, B_nrows + 1, HYPRE_MEMORY_DEVICE);
   HYPRE_BigInt    *B_j;
   HYPRE_Complex   *B_a;
   HYPRE_Int        B_nnz;

   HYPRE_Int num_procs;
   hypre_MPI_Comm_size(comm, &num_procs);

   hypre_Memset(B_i, 0, sizeof(HYPRE_Int), HYPRE_MEMORY_DEVICE);

   hypreDevice_GetRowNnz(B_nrows, NULL, A_diag_i, A_offd_i, B_i+1);

   hypreDevice_IntegerInclusiveScan(B_nrows+1, B_i);

   /* total number of nnz */
   hypre_TMemcpy(&B_nnz, B_i+B_nrows, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);

   B_j = hypre_TAlloc(HYPRE_BigInt,  B_nnz, HYPRE_MEMORY_DEVICE);
   B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE);

   if (d_col_map_offd_A == NULL)
   {
      d_col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(d_col_map_offd_A, col_map_offd_A, HYPRE_BigInt, num_cols_A_offd,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixDeviceColMapOffd(A) = d_col_map_offd_A;
   }

   hypreDevice_CopyParCSRRows(B_nrows, NULL, 2, num_procs > 1,
                              first_col, d_col_map_offd_A,
                              A_diag_i, A_diag_j, A_diag_a,
                              A_offd_i, A_offd_j, A_offd_a,
                              B_i, B_j, B_a);

   /* output */
   B = hypre_CSRMatrixCreate(B_nrows, B_ncols, B_nnz);
   hypre_CSRMatrixI   (B) = B_i;
   hypre_CSRMatrixBigJ(B) = B_j;
   hypre_CSRMatrixData(B) = B_a;
   hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE;

   hypre_SyncCudaComputeStream(hypre_handle());

   return B;
}

HYPRE_Int
hypre_ExchangeExternalRowsDeviceInit( hypre_CSRMatrix      *B_ext,
                                      hypre_ParCSRCommPkg  *comm_pkg_A,
                                      void                **request_ptr)
{
   MPI_Comm   comm            = hypre_ParCSRCommPkgComm(comm_pkg_A);
   HYPRE_Int  num_recvs       = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A);
   HYPRE_Int *recv_procs      = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A);
   HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A);
   HYPRE_Int  num_sends       = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
   HYPRE_Int *send_procs      = hypre_ParCSRCommPkgSendProcs(comm_pkg_A);
   HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);

   HYPRE_Int num_elmts_send = send_map_starts[num_sends];
   HYPRE_Int num_elmts_recv = recv_vec_starts[num_recvs];

   HYPRE_Int     *B_ext_i_d   = hypre_CSRMatrixI(B_ext);
   HYPRE_BigInt  *B_ext_j_d   = hypre_CSRMatrixBigJ(B_ext);
   HYPRE_Complex *B_ext_a_d   = hypre_CSRMatrixData(B_ext);
   HYPRE_Int      B_ext_ncols = hypre_CSRMatrixNumCols(B_ext);
   HYPRE_Int      B_ext_nrows = hypre_CSRMatrixNumRows(B_ext);
   HYPRE_Int      B_ext_nnz   = hypre_CSRMatrixNumNonzeros(B_ext);

   HYPRE_Int *B_ext_rownnz_d = hypre_TAlloc(HYPRE_Int, B_ext_nrows + 1, HYPRE_MEMORY_DEVICE);
   HYPRE_Int *B_ext_rownnz_h = hypre_TAlloc(HYPRE_Int, B_ext_nrows,     HYPRE_MEMORY_HOST);
   HYPRE_Int *B_ext_i_h      = hypre_TAlloc(HYPRE_Int, B_ext_nrows + 1, HYPRE_MEMORY_HOST);

   hypre_assert(num_elmts_recv == B_ext_nrows);

   /* output matrix */
   hypre_CSRMatrix *B_int_d;
   HYPRE_Int        B_int_nrows = num_elmts_send;
   HYPRE_Int        B_int_ncols = B_ext_ncols;
   HYPRE_Int       *B_int_i_h   = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST);
   HYPRE_Int       *B_int_i_d   = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_DEVICE);
   HYPRE_BigInt    *B_int_j_d   = NULL;
   HYPRE_Complex   *B_int_a_d   = NULL;
   HYPRE_Int        B_int_nnz;

   hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
   hypre_ParCSRCommPkg    *comm_pkg_j;

   HYPRE_Int *jdata_recv_vec_starts;
   HYPRE_Int *jdata_send_map_starts;

   HYPRE_Int i;
   HYPRE_Int num_procs, my_id;
   void    **vrequest;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);

   /*--------------------------------------------------------------------------
    * B_ext_rownnz contains the number of elements of row j
    * (to be determined through send_map_elmnts on the receiving end)
    *--------------------------------------------------------------------------*/
   HYPRE_THRUST_CALL(adjacent_difference, B_ext_i_d, B_ext_i_d + B_ext_nrows + 1, B_ext_rownnz_d);
   hypre_TMemcpy(B_ext_rownnz_h, B_ext_rownnz_d + 1, HYPRE_Int, B_ext_nrows,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);

   /*--------------------------------------------------------------------------
    * initialize communication: send/recv the row nnz
    * (note the use of comm_pkg_A, mode 12, as in transpose matvec)
    *--------------------------------------------------------------------------*/
   comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz_h, B_int_i_h + 1);

   jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   jdata_recv_vec_starts[0] = 0;

   B_ext_i_h[0] = 0;
   hypre_TMemcpy(B_ext_i_h + 1, B_ext_rownnz_h, HYPRE_Int, B_ext_nrows,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   for (i = 1; i <= B_ext_nrows; i++)
   {
      B_ext_i_h[i] += B_ext_i_h[i-1];
   }
   hypre_assert(B_ext_i_h[B_ext_nrows] == B_ext_nnz);

   for (i =
1; i <= num_recvs; i++) { jdata_recv_vec_starts[i] = B_ext_i_h[recv_vec_starts[i]]; } comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(comm_pkg_j) = comm; hypre_ParCSRCommPkgNumSends(comm_pkg_j) = num_recvs; hypre_ParCSRCommPkgNumRecvs(comm_pkg_j) = num_sends; hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs; hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs; hypre_ParCSRCommHandleDestroy(comm_handle); /*-------------------------------------------------------------------------- * compute B_int: row nnz to row ptrs *--------------------------------------------------------------------------*/ B_int_i_h[0] = 0; for (i = 1; i <= B_int_nrows; i++) { B_int_i_h[i] += B_int_i_h[i-1]; } B_int_nnz = B_int_i_h[B_int_nrows]; B_int_j_d = hypre_TAlloc(HYPRE_BigInt, B_int_nnz, HYPRE_MEMORY_DEVICE); B_int_a_d = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_DEVICE); for (i = 0; i <= num_sends; i++) { jdata_send_map_starts[i] = B_int_i_h[send_map_starts[i]]; } /* note the order of send/recv is reversed */ hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts; /* send/recv CSR rows */ comm_handle_a = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg_j, HYPRE_MEMORY_DEVICE, B_ext_a_d, HYPRE_MEMORY_DEVICE, B_int_a_d ); comm_handle_j = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg_j, HYPRE_MEMORY_DEVICE, B_ext_j_d, HYPRE_MEMORY_DEVICE, B_int_j_d ); hypre_TMemcpy(B_int_i_d, B_int_i_h, HYPRE_Int, B_int_nrows+1, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); /* create CSR: on device */ B_int_d = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz); hypre_CSRMatrixI(B_int_d) = B_int_i_d; hypre_CSRMatrixBigJ(B_int_d) = B_int_j_d; hypre_CSRMatrixData(B_int_d) = B_int_a_d; hypre_CSRMatrixMemoryLocation(B_int_d) = HYPRE_MEMORY_DEVICE; /* output */ vrequest = hypre_TAlloc(void *, 3, HYPRE_MEMORY_HOST); vrequest[0] = (void *) comm_handle_j; vrequest[1] = (void *) comm_handle_a; vrequest[2] = (void *) B_int_d; *request_ptr = (void *) vrequest; /* free */ hypre_TFree(B_ext_rownnz_d, HYPRE_MEMORY_DEVICE); hypre_TFree(B_ext_rownnz_h, HYPRE_MEMORY_HOST); hypre_TFree(B_ext_i_h, HYPRE_MEMORY_HOST); hypre_TFree(B_int_i_h, HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST); return hypre_error_flag; } hypre_CSRMatrix* hypre_ExchangeExternalRowsDeviceWait(void *vrequest) { void **request = (void **) vrequest; hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0]; hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1]; hypre_CSRMatrix *B_int_d = (hypre_CSRMatrix *) request[2]; /* communication done */ hypre_ParCSRCommHandleDestroy(comm_handle_j); hypre_ParCSRCommHandleDestroy(comm_handle_a); hypre_TFree(request, HYPRE_MEMORY_HOST); return B_int_d; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ HYPRE_Int hypre_ParCSRMatrixExtractBExtDeviceInit( hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A, HYPRE_Int want_data, void **request_ptr) { hypre_assert( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B)) == 
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(B)) ); /* hypre_assert( hypre_GetActualMemLocation( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B))) == HYPRE_MEMORY_DEVICE ); */ hypre_ParcsrGetExternalRowsDeviceInit(B, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)), hypre_ParCSRMatrixColMapOffd(A), hypre_ParCSRMatrixCommPkg(A), want_data, request_ptr); return hypre_error_flag; } hypre_CSRMatrix* hypre_ParCSRMatrixExtractBExtDeviceWait(void *request) { return hypre_ParcsrGetExternalRowsDeviceWait(request); } hypre_CSRMatrix* hypre_ParCSRMatrixExtractBExtDevice( hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A, HYPRE_Int want_data ) { void *request; hypre_ParCSRMatrixExtractBExtDeviceInit(B, A, want_data, &request); return hypre_ParCSRMatrixExtractBExtDeviceWait(request); } /* return B = [Adiag, Aoffd] */ #if 1 __global__ void hypreCUDAKernel_ConcatDiagAndOffd(HYPRE_Int nrows, HYPRE_Int diag_ncol, HYPRE_Int *d_diag_i, HYPRE_Int *d_diag_j, HYPRE_Complex *d_diag_a, HYPRE_Int *d_offd_i, HYPRE_Int *d_offd_j, HYPRE_Complex *d_offd_a, HYPRE_Int *cols_offd_map, HYPRE_Int *d_ib, HYPRE_Int *d_jb, HYPRE_Complex *d_ab) { const HYPRE_Int row = hypre_cuda_get_grid_warp_id<1,1>(); if (row >= nrows) { return; } /* lane id inside the warp */ const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>(); HYPRE_Int i, j, k, p, istart, iend, bstart; /* diag part */ if (lane_id < 2) { j = read_only_load(d_diag_i + row + lane_id); } if (lane_id == 0) { k = read_only_load(d_ib + row); } istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0); iend = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1); bstart = __shfl_sync(HYPRE_WARP_FULL_MASK, k, 0); p = bstart - istart; for (i = istart + lane_id; i < iend; i += HYPRE_WARP_SIZE) { d_jb[p+i] = read_only_load(d_diag_j + i); d_ab[p+i] = read_only_load(d_diag_a + i); } /* offd part */ if (lane_id < 2) { j = read_only_load(d_offd_i + row + lane_id); } bstart += iend - istart; istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0); iend = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1); p = bstart - istart; for (i = istart + lane_id; i < iend; i += HYPRE_WARP_SIZE) { const HYPRE_Int t = read_only_load(d_offd_j + i); d_jb[p+i] = (cols_offd_map ? 
read_only_load(&cols_offd_map[t]) : t) + diag_ncol; d_ab[p+i] = read_only_load(d_offd_a + i); } } hypre_CSRMatrix* hypre_ConcatDiagAndOffdDevice(hypre_ParCSRMatrix *A) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *B = hypre_CSRMatrixCreate( hypre_CSRMatrixNumRows(A_diag), hypre_CSRMatrixNumCols(A_diag) + hypre_CSRMatrixNumCols(A_offd), hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd) ); hypre_CSRMatrixInitialize_v2(B, 0, HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(hypre_CSRMatrixNumRows(B), NULL, hypre_CSRMatrixI(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixI(B)); HYPRE_THRUST_CALL( exclusive_scan, hypre_CSRMatrixI(B), hypre_CSRMatrixI(B) + hypre_CSRMatrixNumRows(B) + 1, hypre_CSRMatrixI(B) ); const dim3 bDim = hypre_GetDefaultCUDABlockDimension(); const dim3 gDim = hypre_GetDefaultCUDAGridDimension(hypre_CSRMatrixNumRows(A_diag), "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim, hypre_CSRMatrixNumRows(A_diag), hypre_CSRMatrixNumCols(A_diag), hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixJ(A_offd), hypre_CSRMatrixData(A_offd), NULL, hypre_CSRMatrixI(B), hypre_CSRMatrixJ(B), hypre_CSRMatrixData(B) ); return B; } #else hypre_CSRMatrix* hypre_ConcatDiagAndOffdDevice(hypre_ParCSRMatrix *A) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd); hypre_CSRMatrix *B; HYPRE_Int B_nrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int B_ncols = hypre_CSRMatrixNumCols(A_diag) + hypre_CSRMatrixNumCols(A_offd); HYPRE_Int B_nnz = A_diag_nnz + A_offd_nnz; HYPRE_Int *B_ii = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE); HYPRE_Int *B_j = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE); HYPRE_Complex *B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE); // Adiag HYPRE_Int *A_diag_ii = hypreDevice_CsrRowPtrsToIndices(B_nrows, A_diag_nnz, A_diag_i); HYPRE_THRUST_CALL( copy_n, thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)), A_diag_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_j, B_a)) ); hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE); // Aoffd HYPRE_Int *A_offd_ii = hypreDevice_CsrRowPtrsToIndices(B_nrows, A_offd_nnz, A_offd_i); HYPRE_THRUST_CALL( copy_n, thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, A_offd_a)), A_offd_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_a)) + A_diag_nnz ); hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( transform, A_offd_j, A_offd_j + A_offd_nnz, thrust::make_constant_iterator(hypre_CSRMatrixNumCols(A_diag)), B_j + A_diag_nnz, thrust::plus<HYPRE_Int>() ); // B HYPRE_THRUST_CALL( stable_sort_by_key, B_ii, B_ii + B_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_j, B_a)) ); HYPRE_Int *B_i = hypreDevice_CsrRowIndicesToPtrs(B_nrows, B_nnz, B_ii); hypre_TFree(B_ii, HYPRE_MEMORY_DEVICE); B = hypre_CSRMatrixCreate(B_nrows, B_ncols, B_nnz); hypre_CSRMatrixI(B) = B_i; hypre_CSRMatrixJ(B) = B_j; hypre_CSRMatrixData(B) = B_a; 
hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE; return B; } #endif /* return B = [Adiag, Aoffd; E] */ #if 1 HYPRE_Int hypre_ConcatDiagOffdAndExtDevice(hypre_ParCSRMatrix *A, hypre_CSRMatrix *E, hypre_CSRMatrix **B_ptr, HYPRE_Int *num_cols_offd_ptr, HYPRE_BigInt **cols_map_offd_ptr) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *E_diag, *E_offd, *B; HYPRE_Int *cols_offd_map, num_cols_offd; HYPRE_BigInt *cols_map_offd; hypre_CSRMatrixSplitDevice(E, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A), hypre_CSRMatrixNumCols(A_offd), hypre_ParCSRMatrixDeviceColMapOffd(A), &cols_offd_map, &num_cols_offd, &cols_map_offd, &E_diag, &E_offd); B = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumRows(A) + hypre_CSRMatrixNumRows(E), hypre_ParCSRMatrixNumCols(A) + num_cols_offd, hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd) + hypre_CSRMatrixNumNonzeros(E)); hypre_CSRMatrixInitialize_v2(B, 0, HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(hypre_ParCSRMatrixNumRows(A), NULL, hypre_CSRMatrixI(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixI(B)); HYPRE_THRUST_CALL( exclusive_scan, hypre_CSRMatrixI(B), hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, hypre_CSRMatrixI(B) ); dim3 bDim = hypre_GetDefaultCUDABlockDimension(); dim3 gDim = hypre_GetDefaultCUDAGridDimension(hypre_ParCSRMatrixNumRows(A), "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim, hypre_CSRMatrixNumRows(A_diag), hypre_CSRMatrixNumCols(A_diag), hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixJ(A_offd), hypre_CSRMatrixData(A_offd), cols_offd_map, hypre_CSRMatrixI(B), hypre_CSRMatrixJ(B), hypre_CSRMatrixData(B) ); hypre_TFree(cols_offd_map, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, hypre_CSRMatrixI(E) + 1, HYPRE_Int, hypre_CSRMatrixNumRows(E), HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( transform, hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + hypre_CSRMatrixNumRows(E) + 1, thrust::make_constant_iterator(hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd)), hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, thrust::plus<HYPRE_Int>() ); gDim = hypre_GetDefaultCUDAGridDimension(hypre_CSRMatrixNumRows(E), "warp", bDim); hypre_assert(hypre_CSRMatrixNumCols(E_diag) == hypre_CSRMatrixNumCols(A_diag)); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim, hypre_CSRMatrixNumRows(E_diag), hypre_CSRMatrixNumCols(E_diag), hypre_CSRMatrixI(E_diag), hypre_CSRMatrixJ(E_diag), hypre_CSRMatrixData(E_diag), hypre_CSRMatrixI(E_offd), hypre_CSRMatrixJ(E_offd), hypre_CSRMatrixData(E_offd), NULL, hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A), hypre_CSRMatrixJ(B), hypre_CSRMatrixData(B) ); hypre_CSRMatrixDestroy(E_diag); hypre_CSRMatrixDestroy(E_offd); *B_ptr = B; *num_cols_offd_ptr = num_cols_offd; *cols_map_offd_ptr = cols_map_offd; return hypre_error_flag; } #else HYPRE_Int hypre_ConcatDiagOffdAndExtDevice(hypre_ParCSRMatrix *A, hypre_CSRMatrix *E, hypre_CSRMatrix **B_ptr, HYPRE_Int *num_cols_offd_ptr, HYPRE_BigInt **cols_map_offd_ptr) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int A_nrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int A_ncols = hypre_CSRMatrixNumCols(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = 
hypre_CSRMatrixJ(A_diag); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd); HYPRE_BigInt first_col_A = hypre_ParCSRMatrixFirstColDiag(A); HYPRE_BigInt last_col_A = hypre_ParCSRMatrixLastColDiag(A); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A); HYPRE_Int *E_i = hypre_CSRMatrixI(E); HYPRE_BigInt *E_bigj = hypre_CSRMatrixBigJ(E); HYPRE_Complex *E_a = hypre_CSRMatrixData(E); HYPRE_Int E_nrows = hypre_CSRMatrixNumRows(E); HYPRE_Int E_nnz = hypre_CSRMatrixNumNonzeros(E); HYPRE_Int E_diag_nnz, E_offd_nnz; hypre_CSRMatrix *B; HYPRE_Int B_nnz = A_diag_nnz + A_offd_nnz + E_nnz; HYPRE_Int *B_ii = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE); HYPRE_Int *B_j = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE); HYPRE_Complex *B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE); // E hypre_CSRMatrixSplitDevice_core(0, E_nrows, E_nnz, NULL, E_bigj, NULL, NULL, first_col_A, last_col_A, num_cols_offd_A, NULL, NULL, NULL, NULL, &E_diag_nnz, NULL, NULL, NULL, NULL, &E_offd_nnz, NULL, NULL, NULL, NULL); HYPRE_Int *cols_offd_map, num_cols_offd; HYPRE_BigInt *cols_map_offd; HYPRE_Int *E_ii = hypreDevice_CsrRowPtrsToIndices(E_nrows, E_nnz, E_i); hypre_CSRMatrixSplitDevice_core(1, E_nrows, E_nnz, E_ii, E_bigj, E_a, NULL, first_col_A, last_col_A, num_cols_offd_A, col_map_offd_A, &cols_offd_map, &num_cols_offd, &cols_map_offd, &E_diag_nnz, B_ii + A_diag_nnz + A_offd_nnz, B_j + A_diag_nnz + A_offd_nnz, B_a + A_diag_nnz + A_offd_nnz, NULL, &E_offd_nnz, B_ii + A_diag_nnz + A_offd_nnz + E_diag_nnz, B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz, B_a + A_diag_nnz + A_offd_nnz + E_diag_nnz, NULL); hypre_TFree(E_ii, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( transform, B_ii + A_diag_nnz + A_offd_nnz, B_ii + B_nnz, thrust::make_constant_iterator(A_nrows), B_ii + A_diag_nnz + A_offd_nnz, thrust::plus<HYPRE_Int>() ); // Adiag HYPRE_Int *A_diag_ii = hypreDevice_CsrRowPtrsToIndices(A_nrows, A_diag_nnz, A_diag_i); HYPRE_THRUST_CALL( copy_n, thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)), A_diag_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_j, B_a)) ); hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE); // Aoffd HYPRE_Int *A_offd_ii = hypreDevice_CsrRowPtrsToIndices(A_nrows, A_offd_nnz, A_offd_i); HYPRE_THRUST_CALL( copy_n, thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, A_offd_a)), A_offd_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_a)) + A_diag_nnz ); hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( gather, A_offd_j, A_offd_j + A_offd_nnz, cols_offd_map, B_j + A_diag_nnz); hypre_TFree(cols_offd_map, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( transform, B_j + A_diag_nnz, B_j + A_diag_nnz + A_offd_nnz, thrust::make_constant_iterator(A_ncols), B_j + A_diag_nnz, thrust::plus<HYPRE_Int>() ); HYPRE_THRUST_CALL( transform, B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz, B_j + B_nnz, thrust::make_constant_iterator(A_ncols), B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz, thrust::plus<HYPRE_Int>() ); // B HYPRE_THRUST_CALL( stable_sort_by_key, B_ii, B_ii + B_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_j, B_a)) ); HYPRE_Int *B_i = 
hypreDevice_CsrRowIndicesToPtrs(A_nrows + E_nrows, B_nnz, B_ii); hypre_TFree(B_ii, HYPRE_MEMORY_DEVICE); B = hypre_CSRMatrixCreate(A_nrows + E_nrows, A_ncols + num_cols_offd, B_nnz); hypre_CSRMatrixI(B) = B_i; hypre_CSRMatrixJ(B) = B_j; hypre_CSRMatrixData(B) = B_a; hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE; *B_ptr = B; *num_cols_offd_ptr = num_cols_offd; *cols_map_offd_ptr = cols_map_offd; return hypre_error_flag; } #endif HYPRE_Int hypre_ParCSRMatrixGetRowDevice( hypre_ParCSRMatrix *mat, HYPRE_BigInt row, HYPRE_Int *size, HYPRE_BigInt **col_ind, HYPRE_Complex **values ) { HYPRE_Int nrows, local_row; HYPRE_BigInt row_start, row_end; hypre_CSRMatrix *Aa; hypre_CSRMatrix *Ba; if (!mat) { hypre_error_in_arg(1); return hypre_error_flag; } Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat); Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat); if (hypre_ParCSRMatrixGetrowactive(mat)) { return(-1); } hypre_ParCSRMatrixGetrowactive(mat) = 1; #ifdef HYPRE_NO_GLOBAL_PARTITION row_start = hypre_ParCSRMatrixFirstRowIndex(mat); row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1; #else HYPRE_Int my_id; hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(mat), &my_id); row_end = hypre_ParCSRMatrixRowStarts(mat)[ my_id + 1 ]; row_start = hypre_ParCSRMatrixRowStarts(mat)[ my_id ]; #endif nrows = row_end - row_start; if (row < row_start || row >= row_end) { return(-1); } local_row = row - row_start; /* if buffer is not allocated and some information is requested, allocate buffer with the max row_nnz */ if ( !hypre_ParCSRMatrixRowvalues(mat) && (col_ind || values) ) { HYPRE_Int max_row_nnz; HYPRE_Int *row_nnz = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(nrows, NULL, hypre_CSRMatrixI(Aa), hypre_CSRMatrixI(Ba), row_nnz); hypre_TMemcpy(size, row_nnz + local_row, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); max_row_nnz = HYPRE_THRUST_CALL(reduce, row_nnz, row_nnz + nrows, 0, thrust::maximum<HYPRE_Int>()); /* HYPRE_Int *max_row_nnz_d = HYPRE_THRUST_CALL(max_element, row_nnz, row_nnz + nrows); hypre_TMemcpy( &max_row_nnz, max_row_nnz_d, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE ); */ hypre_TFree(row_nnz, HYPRE_MEMORY_DEVICE); hypre_ParCSRMatrixRowvalues(mat) = (HYPRE_Complex *) hypre_TAlloc(HYPRE_Complex, max_row_nnz, hypre_ParCSRMatrixMemoryLocation(mat)); hypre_ParCSRMatrixRowindices(mat) = (HYPRE_BigInt *) hypre_TAlloc(HYPRE_BigInt, max_row_nnz, hypre_ParCSRMatrixMemoryLocation(mat)); } else { HYPRE_Int *size_d = hypre_TAlloc(HYPRE_Int, 1, HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(1, NULL, hypre_CSRMatrixI(Aa) + local_row, hypre_CSRMatrixI(Ba) + local_row, size_d); hypre_TMemcpy(size, size_d, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_TFree(size_d, HYPRE_MEMORY_DEVICE); } if (col_ind || values) { if (hypre_ParCSRMatrixDeviceColMapOffd(mat) == NULL) { hypre_ParCSRMatrixDeviceColMapOffd(mat) = hypre_TAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(Ba), HYPRE_MEMORY_DEVICE); hypre_TMemcpy( hypre_ParCSRMatrixDeviceColMapOffd(mat), hypre_ParCSRMatrixColMapOffd(mat), HYPRE_BigInt, hypre_CSRMatrixNumCols(Ba), HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST ); } hypreDevice_CopyParCSRRows( 1, NULL, -1, Ba != NULL, hypre_ParCSRMatrixFirstColDiag(mat), hypre_ParCSRMatrixDeviceColMapOffd(mat), hypre_CSRMatrixI(Aa) + local_row, hypre_CSRMatrixJ(Aa), hypre_CSRMatrixData(Aa), hypre_CSRMatrixI(Ba) + local_row, hypre_CSRMatrixJ(Ba), hypre_CSRMatrixData(Ba), NULL, hypre_ParCSRMatrixRowindices(mat), hypre_ParCSRMatrixRowvalues(mat) ); } if (col_ind) { *col_ind = 
hypre_ParCSRMatrixRowindices(mat); } if (values) { *values = hypre_ParCSRMatrixRowvalues(mat); } hypre_SyncCudaComputeStream(hypre_handle()); return hypre_error_flag; } /* abs == 1, use absolute values * option == 0, drop all the entries that are smaller than tol * TODO more options */ HYPRE_Int hypre_ParCSRMatrixDropSmallEntriesDevice( hypre_ParCSRMatrix *A, HYPRE_Complex tol, HYPRE_Int abs, HYPRE_Int option) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *h_col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A); if (col_map_offd_A == NULL) { col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(col_map_offd_A, h_col_map_offd_A, HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDeviceColMapOffd(A) = col_map_offd_A; } hypre_CSRMatrixDropSmallEntriesDevice(A_diag, tol, abs, option); hypre_CSRMatrixDropSmallEntriesDevice(A_offd, tol, abs, option); hypre_ParCSRMatrixSetNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A); /* squeeze out zero columns of A_offd */ HYPRE_Int *tmp_j, *tmp_end, num_cols_A_offd_new; tmp_j = hypre_TAlloc(HYPRE_Int, hypre_CSRMatrixNumNonzeros(A_offd), HYPRE_MEMORY_DEVICE); hypre_TMemcpy(tmp_j, hypre_CSRMatrixJ(A_offd), HYPRE_Int, hypre_CSRMatrixNumNonzeros(A_offd), HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( sort, tmp_j, tmp_j + hypre_CSRMatrixNumNonzeros(A_offd) ); tmp_end = HYPRE_THRUST_CALL( unique, tmp_j, tmp_j + hypre_CSRMatrixNumNonzeros(A_offd) ); num_cols_A_offd_new = tmp_end - tmp_j; hypre_assert(num_cols_A_offd_new <= num_cols_A_offd); if (num_cols_A_offd_new < num_cols_A_offd) { hypre_CSRMatrixNumCols(A_offd) = num_cols_A_offd_new; HYPRE_Int *offd_mark = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_DEVICE); HYPRE_BigInt *col_map_offd_A_new = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd_new, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( scatter, thrust::counting_iterator<HYPRE_Int>(0), thrust::counting_iterator<HYPRE_Int>(num_cols_A_offd_new), tmp_j, offd_mark ); HYPRE_THRUST_CALL( gather, hypre_CSRMatrixJ(A_offd), hypre_CSRMatrixJ(A_offd) + hypre_CSRMatrixNumNonzeros(A_offd), offd_mark, hypre_CSRMatrixJ(A_offd) ); HYPRE_THRUST_CALL( gather, tmp_j, tmp_j + num_cols_A_offd_new, col_map_offd_A, col_map_offd_A_new ); hypre_TFree(offd_mark, HYPRE_MEMORY_DEVICE); hypre_TFree(col_map_offd_A, HYPRE_MEMORY_DEVICE); hypre_TFree(h_col_map_offd_A, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDeviceColMapOffd(A) = col_map_offd_A_new; hypre_ParCSRMatrixColMapOffd(A) = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd_new, HYPRE_MEMORY_HOST); hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(A), col_map_offd_A_new, HYPRE_BigInt, num_cols_A_offd_new, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); } hypre_TFree(tmp_j, HYPRE_MEMORY_DEVICE); return hypre_error_flag; } #endif // #if defined(HYPRE_USING_CUDA) /*-------------------------------------------------------------------------- * HYPRE_ParCSRDiagScale *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRDiagScale( HYPRE_ParCSRMatrix HA, HYPRE_ParVector Hy, HYPRE_ParVector Hx ) { hypre_ParCSRMatrix *A = (hypre_ParCSRMatrix *) HA; hypre_ParVector *y = (hypre_ParVector *) Hy; hypre_ParVector *x = (hypre_ParVector *) Hx; HYPRE_Real *x_data = 
hypre_VectorData(hypre_ParVectorLocalVector(x)); HYPRE_Real *y_data = hypre_VectorData(hypre_ParVectorLocalVector(y)); HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A)); HYPRE_Int *A_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A)); HYPRE_Int local_size = hypre_VectorSize(hypre_ParVectorLocalVector(x)); HYPRE_Int ierr = 0; #if defined(HYPRE_USING_CUDA) hypreDevice_DiagScaleVector(local_size, A_i, A_data, y_data, x_data); //hypre_SyncCudaComputeStream(hypre_handle()); #else /* #if defined(HYPRE_USING_CUDA) */ HYPRE_Int i; #if defined(HYPRE_USING_DEVICE_OPENMP) #pragma omp target teams distribute parallel for private(i) is_device_ptr(x_data,y_data,A_data,A_i) #elif defined(HYPRE_USING_OPENMP) #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < local_size; i++) { x_data[i] = y_data[i]/A_data[A_i[i]]; } #endif /* #if defined(HYPRE_USING_CUDA) */ return ierr; }
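/* Sketch (illustrative, not part of this file) of the Init/Wait split used
 * above: Init posts the device-side packing and the asynchronous MPI
 * exchange, then returns a request; unrelated computation can overlap the
 * communication before Wait hands back the assembled external rows.  Both
 * functions are defined in this file. */
hypre_CSRMatrix *
get_bext_overlapped(hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A)
{
   void *request;

   hypre_ParCSRMatrixExtractBExtDeviceInit(B, A, 1 /* want_data */, &request);

   /* ... any work that does not need B_ext overlaps the exchange here ... */

   /* returns a device-resident CSR with big-J (global) column indices */
   return hypre_ParCSRMatrixExtractBExtDeviceWait(request);
}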
gm_order.h
#ifndef GM_ORDER_H
#define GM_ORDER_H

#include <list>

#include "gm_internal.h"
#include "gm_bitmap.h"

template<typename T>
class gm_order
{
public:
    gm_order(int _max_sz, int _max_thread = 16) :
            max_thread(_max_thread), max_sz(_max_sz)
    {
        local_Q_front = new std::list<T>[max_thread];
        local_Q_back = new std::list<T>[max_thread];
        bitmap = new unsigned char[(max_sz + 7) / 8];
        for (int i = 0; i < (max_sz + 7) / 8; i++)
            bitmap[i] = 0;
    }

    virtual ~gm_order()
    {
        delete[] local_Q_front;
        delete[] local_Q_back;
        delete[] bitmap;
    }

    //------------------------------------------------------------
    // API
    //   push_back/front, pop_back/front, clear, get_size
    //   push has separate parallel interface
    //------------------------------------------------------------
    void push_back(T e) // sequential
    {
        if (!_gm_get_bit(bitmap, e)) {
            _gm_set_bit(bitmap, e);
            Q.push_back(e);
        }
    }

    void push_front(T e)
    {
        if (!_gm_get_bit(bitmap, e)) {
            _gm_set_bit(bitmap, e);
            Q.push_front(e);
        }
    }

    void pop_back()
    {
        T e = Q.back();
        _gm_clear_bit(bitmap, e);
        Q.pop_back();
    }

    void pop_front()
    {
        T e = Q.front();
        _gm_clear_bit(bitmap, e);
        Q.pop_front();
    }

    void clear()
    {
        Q.clear();
#pragma omp parallel for
        for (int i = 0; i < (max_sz + 7) / 8; i++)
            bitmap[i] = 0;
    }

    size_t get_size()
    {
        return Q.size();
    }

    bool is_in(T e)
    {
        return (_gm_get_bit(bitmap, e) == 1);
    }

    // for parallel execution
    void push_back_par(T e, int tid)
    {
        if (!_gm_get_bit(bitmap, e)) {
            // test and atomic
            if (_gm_set_bit_atomic(bitmap, e)) {
                local_Q_back[tid].push_back(e);
            }
        }
    }

    void push_front_par(T e, int tid)
    {
        if (!_gm_get_bit(bitmap, e)) {
            // test and atomic
            if (_gm_set_bit_atomic(bitmap, e)) {
                // front-pushes must land in the front lists so that merge()
                // splices them before the main queue
                local_Q_front[tid].push_front(e);
            }
        }
    }

    //-------------------------------------------
    // called when parallel addition is finished
    //-------------------------------------------
    void merge()
    {
        for (int i = 0; i < max_thread; i++) {
            if (local_Q_front[i].size() > 0) Q.splice(Q.begin(), local_Q_front[i]);
            if (local_Q_back[i].size() > 0) Q.splice(Q.end(), local_Q_back[i]);
        }
    }

    // for sequential iteration
    typename std::list<T>& get_list()
    {
        return Q;
    }

    //-----------------------------------------------
    // for iteration
    //-----------------------------------------------
    // todo, correctly use nested template def
#define ITERATOR_CLASS(CLASS_NAME, LIST_ITER_TYPE) \
    class CLASS_NAME {\
    public: \
        CLASS_NAME(typename LIST_ITER_TYPE I, typename LIST_ITER_TYPE E) \
        : ITER(I), END_ITER(E) {} \
        inline bool has_next() { \
            return (ITER != END_ITER); \
        } \
        inline T get_next() \
        { T t = *ITER; ITER++; return t;} \
    private: \
        typename LIST_ITER_TYPE ITER; \
        typename LIST_ITER_TYPE END_ITER; \
    };

    ITERATOR_CLASS(seq_iter, std::list<T>::iterator);
    ITERATOR_CLASS(rev_iter, std::list<T>::reverse_iterator);
#undef ITERATOR_CLASS

    class par_iter
    {
    public:
        // small instance: iterate the list directly
        par_iter(typename std::list<T>::iterator I, typename std::list<T>::iterator E) :
                is_small(true), bitmap(NULL), ITER(I), END_ITER(E), IDX(0), END_IDX(0)
        {
        }
        // large instance: scan the bitmap over the ID range [I, E)
        par_iter(unsigned char* B, T I, T E) :
                is_small(false), bitmap(B), IDX(I), END_IDX(E)
        {
        }

        inline bool has_next()
        {
            if (is_small)
                return (ITER != END_ITER);
            else {
                while (IDX < END_IDX) {
                    if (_gm_check_bit(bitmap, IDX) != 0) return true; // stop at a set bit
                    IDX++;
                }
                return false;
            }
        }

        inline T get_next()
        {
            if (is_small) {
                T t = *ITER;
                ITER++;
                return t;
            } else {
                return IDX++;
            }
        }

    private:
        bool is_small;
        unsigned char* bitmap;
        typename std::list<T>::iterator ITER;     // for small instance use
        typename std::list<T>::iterator END_ITER; // for small instance use
        T IDX;
        T END_IDX;
    };

    seq_iter prepare_seq_iteration()
    {
        seq_iter I(Q.begin(), Q.end());
        return I;
    }
    rev_iter prepare_rev_iteration()
    {
        rev_iter I(Q.rbegin(), Q.rend());
        return I;
    }

    par_iter prepare_par_iteration(int thread_id, int max_threads)
    {
        bool is_small = (Q.size() < THRESHOLD_LARGE);
        if (is_small) {
            // for small instance, use single thread
            if (thread_id == 0) {
                par_iter I(Q.begin(), Q.end());
                return I;
            } else {
                par_iter I(Q.end(), Q.end());
                return I;
            }
        } else {
            size_t cnt = max_sz / max_threads;
            T begin = cnt * thread_id;
            T end = (thread_id == (max_threads - 1)) ? max_sz : begin + cnt;
            par_iter I(bitmap, begin, end);
            return I;
        }
    }

private:
    gm_order() :
            local_Q_front(NULL), local_Q_back(NULL), max_thread(-1), max_sz(-1), bitmap(NULL)
    {
    } // initialize without size is prohibited

    typename std::list<T> Q;
    typename std::list<T>* local_Q_front;
    typename std::list<T>* local_Q_back;
    int max_thread;
    int max_sz;
    unsigned char* bitmap;

    static const int THRESHOLD_LARGE = 4096;
};

typedef gm_order<node_t> gm_node_order;
typedef gm_order<edge_t> gm_edge_order;

#endif
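// Usage sketch (illustrative, not part of the library): a worklist over node
// IDs.  `num_nodes` and the IDs below are made up; assumes num_nodes is large
// enough for the pushed IDs and that the gm headers are on the include path.
#include <omp.h>
#include "gm_order.h"

void worklist_demo(int num_nodes)
{
    // size the bitmap for all node IDs, one local list per OpenMP thread
    gm_node_order order(num_nodes, omp_get_max_threads());

    order.push_back(3); // sets bit 3 and enqueues
    order.push_back(3); // duplicate: bit already set, silently ignored

#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        order.push_back_par(tid, tid); // goes to a per-thread list
    }
    order.merge(); // splice the per-thread lists into the main queue

    gm_node_order::seq_iter it = order.prepare_seq_iteration();
    while (it.has_next()) {
        node_t n = it.get_next();
        (void) n; // visit n ...
    }
}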
GB_binop__plus_uint64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__plus_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_01__plus_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_02__plus_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_03__plus_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__plus_uint64)
// A*D function (colscale):         GB (_AxD__plus_uint64)
// D*A function (rowscale):         GB (_DxB__plus_uint64)
// C+=B function (dense accum):     GB (_Cdense_accumB__plus_uint64)
// C+=b function (dense accum):     GB (_Cdense_accumb__plus_uint64)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__plus_uint64)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__plus_uint64)
// C=scalar+B                       GB (_bind1st__plus_uint64)
// C=scalar+B'                      GB (_bind1st_tran__plus_uint64)
// C=A+scalar                       GB (_bind2nd__plus_uint64)
// C=A'+scalar                      GB (_bind2nd_tran__plus_uint64)

// C type:   uint64_t
// A type:   uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij + bij)

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x + y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PLUS || GxB_NO_UINT64 || GxB_NO_PLUS_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__plus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__plus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__plus_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__plus_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__plus_uint64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__plus_uint64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__plus_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK 
; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__plus_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__plus_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__plus_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__plus_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__plus_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = Bx [p] ; Cx [p] = (x + bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__plus_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = Ax [p] ; Cx [p] = (aij + y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = (x + aij) ; \ } GrB_Info GB (_bind1st_tran__plus_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = (aij + y) ; \ } GrB_Info GB (_bind2nd_tran__plus_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
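For orientation, here is a minimal sketch (not part of the generated file) of how a kernel in this family is reached from the user-level API: an eWiseAdd on uint64 matrices with GrB_PLUS_UINT64 can dispatch to GB (_AaddB__plus_uint64) above when GBCOMPACT is not defined. It assumes the user-level GraphBLAS.h header from SuiteSparse:GraphBLAS; error checking is omitted.

// Illustrative sketch only
#include "GraphBLAS.h"

void ewiseadd_plus_uint64_demo (void)
{
    GrB_Matrix A, B, C ;
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Matrix_new (&A, GrB_UINT64, 4, 4) ;
    GrB_Matrix_new (&B, GrB_UINT64, 4, 4) ;
    GrB_Matrix_new (&C, GrB_UINT64, 4, 4) ;
    GrB_Matrix_setElement_UINT64 (A, 2, 0, 0) ;   // A(0,0) = 2
    GrB_Matrix_setElement_UINT64 (B, 3, 0, 0) ;   // B(0,0) = 3
    // C = A+B: cij = aij + bij on the union of the two patterns
    GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GrB_PLUS_UINT64, A, B, NULL) ;
    GrB_Matrix_free (&A) ;
    GrB_Matrix_free (&B) ;
    GrB_Matrix_free (&C) ;
    GrB_finalize ( ) ;
}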
singlenode_fspgemm.h
/****************************************************************************** * ** Copyright (c) 2016, Intel Corporation ** * ** All rights reserved. ** * ** ** * ** Redistribution and use in source and binary forms, with or without ** * ** modification, are permitted provided that the following conditions ** * ** are met: ** * ** 1. Redistributions of source code must retain the above copyright ** * ** notice, this list of conditions and the following disclaimer. ** * ** 2. Redistributions in binary form must reproduce the above copyright ** * ** notice, this list of conditions and the following disclaimer in the ** * ** documentation and/or other materials provided with the distribution. ** * ** 3. Neither the name of the copyright holder nor the names of its ** * ** contributors may be used to endorse or promote products derived ** * ** from this software without specific prior written permission. ** * ** ** * ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** * ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** * ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** * ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** * ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** * ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** * ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** * ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** * ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** * ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** * ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * ******************************************************************************/ /* Michael Anderson (Intel Corp.) 
 * *
 ******************************************************************************/

#ifndef SRC_SINGLENODE_FSPGEMM_H_
#define SRC_SINGLENODE_FSPGEMM_H_

#include <algorithm>

#ifdef FSPGEMM_PARALLEL_SPA
bool cmp_int_fspgemm(int i1, int i2) { return i1 < i2; }

template <typename Ta, typename Tb, typename Tc, typename Tf>
void my_fcsrmultcsr(int m, int n, int k,
                    Ta* a, int* ja, int* ia,
                    Tb* b, int* jb, int* ib,
                    Tc** c, int** jc, int** ic,
                    Tc* c_in, int* jc_in, int* ic_in,
                    Tf* f_in, int* jf_in, int* if_in,
                    void (*mul_fp)(Ta, Tb, Tc*, void*),
                    void (*add_fp)(Tc, Tc, Tc*, void*),
                    void* vsp) {
  int num_threads = omp_get_max_threads();
  assert(num_threads <= omp_get_max_threads());
  Tc** Crows = new Tc* [num_threads];
  int** Cidxs = new int* [num_threads];
  bool** Cflags = new bool* [num_threads];
  bool** Fflags = new bool* [num_threads];
  (*ic) = reinterpret_cast<int*>(_mm_malloc((m + 1) * sizeof(int), 64));
  int nchunks = num_threads * 5;
  int chunksize = (m + nchunks - 1) / nchunks;
  int* nnzs = reinterpret_cast<int*>(
      _mm_malloc((nchunks + 1) * sizeof(int), 64));
  // clear the whole prefix-sum array; the original cleared only
  // num_threads entries of this (nchunks + 1)-entry allocation
  memset(nnzs, 0, (nchunks + 1) * sizeof(int));

  // Flag indicating that we should union the result with another CSR mat
  bool Cin = (c_in != NULL) && (jc_in != NULL) && (ic_in != NULL);

  #pragma omp parallel num_threads(num_threads)
  {
    int tid = omp_get_thread_num();
    Crows[tid] = reinterpret_cast<Tc*>(_mm_malloc(n * sizeof(Tc), 64));
    Cidxs[tid] = reinterpret_cast<int*>(_mm_malloc(n * sizeof(int), 64));
    Cflags[tid] = reinterpret_cast<bool*>(_mm_malloc(n * sizeof(bool), 64));
    memset(Cflags[tid], 0, n * sizeof(bool));
    Fflags[tid] = reinterpret_cast<bool*>(_mm_malloc(n * sizeof(bool), 64));
    memset(Fflags[tid], 0, n * sizeof(bool));

    #pragma omp for schedule(dynamic)
    for (int chunk = 0; chunk < nchunks; chunk++) {
      int start_row = chunk * chunksize;
      int end_row = (chunk + 1) * chunksize;
      if (end_row > m) end_row = m;

      // Determine number of nonzeros
      int nnzmax = 0;
      for (int Arow = start_row; Arow < end_row; Arow++) {
        int Arow_nnz = 0;
        // Load values from F into dense row vector
        for (int Fnz_id = if_in[Arow]; Fnz_id < if_in[Arow + 1]; Fnz_id++) {
          int Fcol = jf_in[Fnz_id - 1];
          Fflags[tid][Fcol - 1] = true;
        }
        // Load values from C_in into dense row vector
        if (Cin) {
          int row_nnz = 0;
          for (int Cnz_id = ic_in[Arow]; Cnz_id < ic_in[Arow + 1]; Cnz_id++) {
            int Ccol = jc_in[Cnz_id - 1];
            Cidxs[tid][Arow_nnz] = Ccol - 1;
            Cflags[tid][Ccol - 1] = true;
            row_nnz++;
            Arow_nnz++;
          }
          nnzmax += row_nnz;
        }
        for (int Anz_id = ia[Arow]; Anz_id < ia[Arow + 1]; Anz_id++) {
          int Acol = ja[Anz_id - 1];
          int row_nnz = 0;
          for (int Bnz_id = ib[Acol - 1]; Bnz_id < ib[Acol]; Bnz_id++) {
            int Bcol = jb[Bnz_id - 1];
            if (Fflags[tid][Bcol - 1] && !Cflags[tid][Bcol - 1]) {
              Cidxs[tid][Arow_nnz] = Bcol - 1;
              Cflags[tid][Bcol - 1] = true;
              row_nnz++;
              Arow_nnz++;
            }
          }
          nnzmax += row_nnz;
        }
        for (int idx = 0; idx < Arow_nnz; idx++) {
          Cflags[tid][Cidxs[tid][idx]] = false;
        }
        for (int Fnz_id = if_in[Arow]; Fnz_id < if_in[Arow + 1]; Fnz_id++) {
          int Fcol = jf_in[Fnz_id - 1];
          Fflags[tid][Fcol - 1] = false;
        }
      }
      nnzs[chunk] = nnzmax;
    }
    _mm_free(Cidxs[tid]);

    #pragma omp barrier
    #pragma omp master
    {
      int nnzc = 0;
      for (int chunk = 0; chunk < nchunks; chunk++) {
        int tmp = nnzs[chunk];
        nnzs[chunk] = nnzc;
        nnzc += tmp;
      }
      nnzs[nchunks] = nnzc;
      (*c) = reinterpret_cast<Tc*>(
          _mm_malloc((uint64_t)(nnzc) * (uint64_t)sizeof(Tc), 64));
      (*jc) = reinterpret_cast<int*>(
          _mm_malloc((uint64_t)(nnzc) * (uint64_t)sizeof(int), 64));
    }
    #pragma omp barrier

    #pragma omp for schedule(dynamic)
    for (int chunk = 0; chunk < nchunks; chunk++) {
      int start_row = chunk * chunksize;
      int end_row = (chunk + 1) * chunksize;
      if (end_row > m) end_row = m;

      // Perform multiplication
      int cnz_cnt = nnzs[chunk];
      for (int Arow = start_row; Arow < end_row; Arow++) {
        int c_row_nz_start = cnz_cnt;
        (*ic)[Arow] = cnz_cnt + 1;
        // Load values from F into dense row vector
        for (int Fnz_id = if_in[Arow]; Fnz_id < if_in[Arow + 1]; Fnz_id++) {
          int Fcol = jf_in[Fnz_id - 1];
          Fflags[tid][Fcol - 1] = true;
        }
        // Load values from C_in into dense row vector
        if (Cin) {
          for (int Cnz_id = ic_in[Arow]; Cnz_id < ic_in[Arow + 1]; Cnz_id++) {
            int Ccol = jc_in[Cnz_id - 1];
            (*jc)[cnz_cnt] = Ccol;
            cnz_cnt++;
            Cflags[tid][Ccol - 1] = true;
            Crows[tid][Ccol - 1] = c_in[Cnz_id - 1];
          }
        }
        for (int Anz_id = ia[Arow]; Anz_id < ia[Arow + 1]; Anz_id++) {
          int Acol = ja[Anz_id - 1];
          for (int Bnz_id = ib[Acol - 1]; Bnz_id < ib[Acol]; Bnz_id++) {
            int Bcol = jb[Bnz_id - 1];
            if (Fflags[tid][Bcol - 1] && !Cflags[tid][Bcol - 1]) {
              (*jc)[cnz_cnt] = Bcol;
              mul_fp(a[Anz_id - 1], b[Bnz_id - 1], &(Crows[tid][Bcol-1]), vsp);
              Cflags[tid][Bcol - 1] = true;
              cnz_cnt++;
            } else if (Fflags[tid][Bcol - 1]) {
              Tc tmp_mul;
              mul_fp(a[Anz_id - 1], b[Bnz_id - 1], &tmp_mul, vsp);
              Tc tmp_add = Crows[tid][Bcol-1];
              add_fp(tmp_add, tmp_mul, &(Crows[tid][Bcol-1]), vsp);
              Cflags[tid][Bcol - 1] = true;
            }
          }
        }
#ifdef SORTED
        std::sort(*(jc) + c_row_nz_start, (*jc) + cnz_cnt, cmp_int_fspgemm);
#endif
        int num_del = 0;
        for (int Cnz_id = c_row_nz_start; Cnz_id < cnz_cnt; Cnz_id++) {
          num_del++;
          int Ccol = (*jc)[Cnz_id];
          (*c)[Cnz_id] = Crows[tid][Ccol - 1];
          Cflags[tid][Ccol - 1] = false;
        }
        for (int Fnz_id = if_in[Arow]; Fnz_id < if_in[Arow + 1]; Fnz_id++) {
          int Fcol = jf_in[Fnz_id - 1];
          Fflags[tid][Fcol - 1] = false;
        }
      }
    }  // for each chunk
    _mm_free(Crows[tid]);
    _mm_free(Cflags[tid]);
    _mm_free(Fflags[tid]);
  }  // pragma omp parallel
  (*ic)[m] = nnzs[nchunks] + 1;
  // the pointer arrays were allocated with new[]; the original used plain
  // delete here and never freed Cidxs at all
  delete[] Crows;
  delete[] Cidxs;
  delete[] Cflags;
  delete[] Fflags;
  _mm_free(nnzs);
}
#endif

#endif  // SRC_SINGLENODE_FSPGEMM_H_
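A possible call shape for this kernel, illustrative only: the helper names and call site below are hypothetical, not part of this header. It assumes 1-based CSR arrays (the kernel indexes ja[nz-1]/ia[row], MKL style), a mandatory mask matrix F whose pattern filters the output, no C_in term to union with, and that FSPGEMM_PARALLEL_SPA is defined at compile time. The explicit template arguments are needed because Tc cannot be deduced from the NULL c_in pointers.

// Hypothetical usage sketch: C = F .* (A*B) over the conventional (+, *)
// semiring, with C allocated inside the kernel.
static void mul_double(double x, double y, double* z, void*) { *z = x * y; }
static void add_double(double x, double y, double* z, void*) { *z = x + y; }

void fspgemm_demo(int m, int n, int k,
                  double* a, int* ja, int* ia,    // A: m x k, 1-based CSR
                  double* b, int* jb, int* ib,    // B: k x n, 1-based CSR
                  double* f, int* jf, int* if_,   // F: mask, m x n, 1-based CSR
                  double** c, int** jc, int** ic) // output C
{
    my_fcsrmultcsr<double, double, double, double>(
        m, n, k, a, ja, ia, b, jb, ib, c, jc, ic,
        (double*) NULL, (int*) NULL, (int*) NULL,  // no C_in to union with
        f, jf, if_, mul_double, add_double, NULL);
}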
ddd_out_h.h
//**************************************************************************************** // // Copyright (c) 2015-2020, Yoshifumi Nakamura <nakamura@riken.jp> // Copyright (c) 2015-2020, Yuta Mukai <mukai.yuta@fujitsu.com> // Copyright (c) 2018-2020, Ken-Ichi Ishikawa <ishikawa@theo.phys.sci.hirosima-u.ac.jp> // Copyright (c) 2019-2020, Issaku Kanamori <kanamori-i@riken.jp> // // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer listed // in this license in the documentation and/or other materials // provided with the distribution. // // * Neither the name of the copyright holders nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // //---------------------------------------------------------------------------------------- // ACKNOWLEDGMENT // // This software has been developed in a co-design working group for the lattice QCD // supported by MEXT's programs for the Development and Improvement for the Next // Generation Ultra High-Speed Computer System, under its Subsidies for Operating the // Specific Advanced Large Research Facilities, and Priority Issue 9 // (Elucidation of the Fundamental Laws and Evolution of the Universe) to be tackled by // using the Supercomputer Fugaku. // //**************************************************************************************** #ifndef _DDD_OUT_H_H #define _DDD_OUT_H_H #include "ddd_out_h_0_inline.h" //---------------------------------------------------------------------------------------- preprocess mult D for boundary void ddd_out_pre_h_(const sch_t * __restrict__ in, const int *idomain) // // Send for Wilson/Clover operator, Deo or Doe, in a even/odd domain block // // oute = oute + factor Meo ine // // or // // outo = outo + factor Moe ino // // Note: Deo = -kappa Meo, Doe = -kappa Moe // // in : quark field in an even/odd domain (odd for idomain = 0, even for idomain = 1). 
// input domain is opposite to idomain // idomain : domain index, 0 for even output, 1 for odd output // { // // pack data for send // _S_MULT_WD_DEO_OUT_SEND_HPC_TIC_; int xdmn, ydmn, zdmn, tdmn; xdmn = 1 - *idomain; if (npe[1] == 1) { ydmn = *idomain; } else { ydmn = 1 - *idomain; } if (npe[2] == 1) { zdmn = *idomain; } else { zdmn = 1 - *idomain; } if (npe[3] == 1) { tdmn = *idomain; } else { tdmn = 1 - *idomain; } pgluh_t __restrict__ gx = &gluh[vols*0 + NDIM*vols*xdmn]; pgluh_t __restrict__ gy = &gluh[vols*1 + NDIM*vols*ydmn]; pgluh_t __restrict__ gz = &gluh[vols*2 + NDIM*vols*zdmn]; pgluh_t __restrict__ gt = &gluh[vols*3 + NDIM*vols*tdmn]; #pragma omp parallel { #pragma omp for collapse(3) nowait for (int t = 0; t < nt; ++t) { for (int z = 0; z < nz; ++z) { for (int y = 0; y < ny; ++y) { int it = nxs*ny*nz*t; int iz = nxs*ny*z; int iy = nxs*y; // X-forward { //int x = nxs-1; int ixf = 0 + iy + iz + it; int i0 = y + ny*z + ny*nz*t; __mult_x_forw_pre_3_(xfh_send[i0],in[vols*xdmn+ixf]); } // X-backward { //int x = 0; int ixb = nxs-1 + iy + iz + it; int i1 = y + ny*z + ny*nz*t; projsch1_t a __attribute__((aligned(_ALIGN_SIZE))); __mult_x_back_pre_3_(a,in[vols*xdmn+ixb]); __mult_udag_y_3_(xbh_send[i1],a,(*(gx + ixb))); } } } } #pragma omp for collapse(3) nowait for (int t = 0; t < nt; ++t) { for (int z = 0; z < nz; ++z) { for (int x = 0; x < nxs; ++x) { int it = nxs*ny*nz*t; int iz = nxs*ny*z; // // Y-forward send // { //int y = ny-1; int iyf = x + iz + it; int i2 = x + nxs*z + nxs*nz*t; __mult_y_forw_pre_(yfh_send[i2],in[vols*ydmn+iyf]); } // // Y-backward send // { //int y = 0; int iyb = x + iz + it + (ny-1)*nxs; int i3 = x + nxs*z + nxs*nz*t; __attribute__((aligned(_ALIGN_SIZE))) projsch_t a; __mult_y_back_pre_(a,in[vols*ydmn+iyb]); __mult_udag_y_(ybh_send[i3],a,(*(gy + iyb))); } } } } #pragma omp for collapse(3) nowait for (int t = 0; t < nt; ++t) { for (int y = 0; y < ny; ++y) { for (int x = 0; x < nxs; ++x) { int it = nxs*ny*nz*t; int iy = nxs*y; // // Z-forward send // { //int z = nz-1; int izf = x + iy + it; int i4 = x + nxs*y + nxs*ny*t; __mult_z_forw_pre_(zfh_send[i4],in[vols*zdmn+izf]); } // // Z-backward send // { //int z = 0; int izb = x + iy + it + (nz-1)*nxs*ny; int i5 = x + nxs*y + nxs*ny*t; __attribute__((aligned(_ALIGN_SIZE))) projsch_t a; __mult_z_back_pre_(a,in[vols*zdmn+izb]); __mult_udag_y_(zbh_send[i5],a,(*(gz + izb))); } } } } #pragma omp for collapse(3) nowait for (int z = 0; z < nz; ++z) { for (int y = 0; y < ny; ++y) { for (int x = 0; x < nxs; ++x) { int iz = nxs*ny*z; int iy = nxs*y; // // T-forward send // { //int t = nt-1; int itf = x + iy + iz; int i6 = x + nxs*y + nxs*ny*z; __mult_t_forw_pre_(tfh_send[i6],in[vols*tdmn+itf]); } // // T-backward send // { //int t = 0; int itb = x + iy + iz + (nt-1)*nxs*ny*nz; int i7 = x + nxs*y + nxs*ny*z; __attribute__((aligned(_ALIGN_SIZE))) projsch_t a; __mult_t_back_pre_(a,in[vols*tdmn+itb]); __mult_udag_y_(tbh_send[i7],a,(*(gt + itb))); } } } } #pragma omp barrier } // end of omp parallel _S_MULT_WD_DEO_OUT_SEND_HPC_TOC_; #pragma omp single nowait { if (*idomain == 0) { memcpy(xfh_recv, xfh_send, sizeof(half)*12*ny*nz*nt); } else { xbound_h_start(0); } if (*idomain == 1) { memcpy(xbh_recv, xbh_send, sizeof(half)*12*ny*nz*nt); } else { xbound_h_start(1); } xbound_h_start(2); xbound_h_start(3); xbound_h_start(4); xbound_h_start(5); xbound_h_start(6); xbound_h_start(7); } } //---------------------------------------------------------------------------------------- postprocess mult D for boundary void ddd_out_pos_h_(sch_t * 
__restrict__ out, const sch_t * __restrict__ in, const int *idomain, float factor) // // Multiply Wilson/Clover operator from odd/even to even/odd domain // // oute = oute + factor Meo ino // or // outo = outo + factor Moe ine // // Note: Deo = -kappa Meo, Doe = -kappa Moe // // out : quark field in an even/odd domain (output) // in : quark field in an odd/even domain (input) // idomain : domain index, 0 for even output, 1 for odd output // factor : a factor for hopping parameter kappa or -kappa // { pgluh_t __restrict__ gx = &gluh[vols*0 + NDIM*vols*(*idomain)]; pgluh_t __restrict__ gy = &gluh[vols*1 + NDIM*vols*(*idomain)]; pgluh_t __restrict__ gz = &gluh[vols*2 + NDIM*vols*(*idomain)]; pgluh_t __restrict__ gt = &gluh[vols*3 + NDIM*vols*(*idomain)]; half hfactor = half(factor); #pragma omp single { xbound_h_wait(0); xbound_h_wait(1); xbound_h_wait(2); xbound_h_wait(3); xbound_h_wait(4); xbound_h_wait(5); xbound_h_wait(6); xbound_h_wait(7); } _S_MULT_WD_DEO_OUT_RECV_HPC_CALC_TIC_; #pragma omp parallel for collapse(4) for (int t = 0; t < nt; t++) { for (int z = 0; z < nz; z++) { for (int y = 0; y < ny; y++) { for (int x = 0; x < nxs; x++) { if (x == nxs-1 || y == ny-1 || z == nz-1 || t == nt-1 || x==0 || y == 0 || z == 0 || t == 0 ) { int it = nxs*ny*nz*t; int iz = nxs*ny*z; int iy = nxs*y; int i0 = x + iy + iz + it; sch_t tmp __attribute__((aligned(_ALIGN_SIZE))) ; for(int c = 0; c < 3; c++) { for(int s = 0; s < 4; s++) { for(int ri = 0; ri < 2; ri++) { for(int j = 0; j < VLENS; j++) { tmp.c[c][s][ri][j] = half(0.0f); } } } } // // X-forward // if ( x == nxs-1 ) { _mult_forw_x_recv_; } // // X-backward // if ( x == 0 ) { _mult_back_x_recv_; } // // Y-forward // if ( y == ny-1 ) { _mult_forw_y_recv_; } // // Y-backward // if ( y == 0 ) { _mult_back_y_recv_; } // // Z-forward // if ( z == nz-1 ) { _mult_forw_z_recv_; } // // Z-backward // if ( z == 0 ) { _mult_back_z_recv_; } // // T-forward // if ( t == nt-1 ) { _mult_forw_t_recv_; } // // T-backward // if ( t == 0 ) { _mult_back_t_recv_; } __mult_clvh( tmp.cv, clvh[i0 + vols*(*idomain)].cv); sch_t *outi = out + i0; //#pragma loop norecurrence for (int c = 0; c < 3; c++) { for (int s = 0; s < 4; s++) { for (int j = 0; j < VLENS; j++) { outi->c[c][s][0][j] += tmp.c[c][s][0][j] * hfactor; outi->c[c][s][1][j] += tmp.c[c][s][1][j] * hfactor; } } } } // if edge sites } } } } _S_MULT_WD_DEO_OUT_RECV_HPC_CALC_TOC_; #pragma omp single { xbound_h_send_waitall(); } } #endif
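The two routines above are the pack/send and wait/accumulate halves of a communication-overlapped application of the domain-decomposed operator. A rough calling sketch follows; it is an illustration only, and the interior-multiply step it alludes to (named ddd_in_h_ here) is an assumption about the rest of this library, not taken from this header.

// Hypothetical calling pattern (illustration only):
void ddd_apply_boundary_overlap(sch_t* out, const sch_t* in,
                                const int* idomain, float factor)
{
    ddd_out_pre_h_(in, idomain);   // pack faces, start the halo exchange
    // ... the interior multiply (an assumed ddd_in_h_-style kernel) runs
    // here, overlapped with the non-blocking communication ...
    ddd_out_pos_h_(out, in, idomain, factor); // wait, add boundary terms
}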
DRB049-fprintf-orig-no.c
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.

This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the disclaimer below.

* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the disclaimer (as noted below)
  in the documentation and/or other materials provided with the
  distribution.

* Neither the name of the LLNS/LLNL nor the names of its contributors
  may be used to endorse or promote products derived from this
  software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/* Example use of fprintf */
#include <stdio.h>

int main(int argc, char* argv[])
{
  int i;
  int ret;
  FILE* pfile;
  int len = 1000;
  int A[1000];

#pragma omp parallel for
  for (i = 0; i < len; i++)
    A[i] = i;

  pfile = fopen("mytempfile.txt", "a+");
  if (pfile == NULL) {
    fprintf(stderr, "Error in fopen()\n");
    return 1; /* was missing: writing to a NULL FILE* below would crash */
  }

  for (i = 0; i < len; ++i) {
    fprintf(pfile, "%d\n", A[i]);
  }
  fclose(pfile);

  ret = remove("mytempfile.txt");
  if (ret != 0) {
    fprintf(stderr, "Error: unable to delete mytempfile.txt\n");
  }
  return 0;
}
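As a contrast, not part of the benchmark: if the fprintf were moved inside the parallel region, each stdio call would still be internally locked and therefore atomic (no data race), but the line order in the file would become nondeterministic. A sketch, assuming the same A and len as above:

/* Illustrative variant only */
#include <stdio.h>

static void write_unordered(FILE* pfile, const int* A, int len)
{
  int i;
#pragma omp parallel for
  for (i = 0; i < len; i++)
    fprintf(pfile, "%d\n", A[i]); /* atomic per call, order unspecified */
}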
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/property.h" #include "magick/blob.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/configure.h" #include "magick/constitute.h" #include "magick/decorate.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/effect.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/montage.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resize.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/shear.h" #include "magick/signature-private.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/transform.h" #include "magick/xml-tree.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* Static declarations. 
*/ static const char *MinimalThresholdMap = "<?xml version=\"1.0\"?>" "<thresholds>" " <threshold map=\"threshold\" alias=\"1x1\">" " <description>Threshold 1x1 (non-dither)</description>" " <levels width=\"1\" height=\"1\" divisor=\"2\">" " 1" " </levels>" " </threshold>" " <threshold map=\"checks\" alias=\"2x1\">" " <description>Checkerboard 2x1 (dither)</description>" " <levels width=\"2\" height=\"2\" divisor=\"3\">" " 1 2" " 2 1" " </levels>" " </threshold>" "</thresholds>"; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveThresholdImage() selects an individual threshold for each pixel % based on the range of intensity values in its local neighborhood. This % allows for thresholding of an image whose global intensity histogram % doesn't contain distinctive peaks. % % The format of the AdaptiveThresholdImage method is: % % Image *AdaptiveThresholdImage(const Image *image, % const size_t width,const size_t height, % const ssize_t offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the local neighborhood. % % o height: the height of the local neighborhood. % % o offset: the mean offset. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveThresholdImage(const Image *image, const size_t width,const size_t height,const ssize_t offset, ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view, *threshold_view; Image *threshold_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; MagickRealType number_pixels; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); threshold_image=CloneImage(image,0,0,MagickTrue,exception); if (threshold_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(threshold_image,DirectClass) == MagickFalse) { InheritException(exception,&threshold_image->exception); threshold_image=DestroyImage(threshold_image); return((Image *) NULL); } /* Local adaptive threshold. 
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&zero); number_pixels=(MagickRealType) (width*height); image_view=AcquireVirtualCacheView(image,exception); threshold_view=AcquireAuthenticCacheView(threshold_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,threshold_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket channel_bias, channel_sum; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p, *magick_restrict r; register IndexPacket *magick_restrict threshold_indexes; register PixelPacket *magick_restrict q; register ssize_t x; ssize_t u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) height/2L,image->columns+width,height,exception); q=GetCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); threshold_indexes=GetCacheViewAuthenticIndexQueue(threshold_view); channel_bias=zero; channel_sum=zero; r=p; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { if (u == (ssize_t) (width-1)) { channel_bias.red+=r[u].red; channel_bias.green+=r[u].green; channel_bias.blue+=r[u].blue; channel_bias.opacity+=r[u].opacity; if (image->colorspace == CMYKColorspace) channel_bias.index=(MagickRealType) GetPixelIndex(indexes+(r-p)+u); } channel_sum.red+=r[u].red; channel_sum.green+=r[u].green; channel_sum.blue+=r[u].blue; channel_sum.opacity+=r[u].opacity; if (image->colorspace == CMYKColorspace) channel_sum.index=(MagickRealType) GetPixelIndex(indexes+(r-p)+u); } r+=image->columns+width; } for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket mean; mean=zero; r=p; channel_sum.red-=channel_bias.red; channel_sum.green-=channel_bias.green; channel_sum.blue-=channel_bias.blue; channel_sum.opacity-=channel_bias.opacity; channel_sum.index-=channel_bias.index; channel_bias=zero; for (v=0; v < (ssize_t) height; v++) { channel_bias.red+=r[0].red; channel_bias.green+=r[0].green; channel_bias.blue+=r[0].blue; channel_bias.opacity+=r[0].opacity; if (image->colorspace == CMYKColorspace) channel_bias.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+0); channel_sum.red+=r[width-1].red; channel_sum.green+=r[width-1].green; channel_sum.blue+=r[width-1].blue; channel_sum.opacity+=r[width-1].opacity; if (image->colorspace == CMYKColorspace) channel_sum.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+ width-1); r+=image->columns+width; } mean.red=(MagickRealType) (channel_sum.red/number_pixels+offset); mean.green=(MagickRealType) (channel_sum.green/number_pixels+offset); mean.blue=(MagickRealType) (channel_sum.blue/number_pixels+offset); mean.opacity=(MagickRealType) (channel_sum.opacity/number_pixels+offset); if (image->colorspace == CMYKColorspace) mean.index=(MagickRealType) (channel_sum.index/number_pixels+offset); SetPixelRed(q,((MagickRealType) GetPixelRed(q) <= mean.red) ? 0 : QuantumRange); SetPixelGreen(q,((MagickRealType) GetPixelGreen(q) <= mean.green) ? 0 : QuantumRange); SetPixelBlue(q,((MagickRealType) GetPixelBlue(q) <= mean.blue) ? 0 : QuantumRange); SetPixelOpacity(q,((MagickRealType) GetPixelOpacity(q) <= mean.opacity) ? 
0 : QuantumRange);
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(threshold_indexes+x,(((MagickRealType) GetPixelIndex(
          threshold_indexes+x) <= mean.index) ? 0 : QuantumRange));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(threshold_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);
  return(threshold_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B i l e v e l I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BilevelImage() changes the value of individual pixels based on the
%  intensity of each pixel channel.  The result is a high-contrast image.
%
%  More precisely, each channel value of the image is 'thresholded' so that
%  if it is equal to or less than the given value it is set to zero, while
%  any value greater than that given is set to its maximum, QuantumRange.
%
%  This function is used to implement the "-threshold" operator for the
%  command line API.
%
%  If the default channel setting is given, the image is thresholded using
%  just the gray 'intensity' of the image, rather than the individual
%  channels.
%
%  The format of the BilevelImageChannel method is:
%
%      MagickBooleanType BilevelImage(Image *image,const double threshold)
%      MagickBooleanType BilevelImageChannel(Image *image,
%        const ChannelType channel,const double threshold)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o threshold: define the threshold values.
%
%  Aside: You can get the same results as this operator by using
%  LevelImageChannels() with the 'threshold' value for both the black_point
%  and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold)
{
  MagickBooleanType
    status;

  status=BilevelImageChannel(image,DefaultChannels,threshold);
  return(status);
}

MagickExport MagickBooleanType BilevelImageChannel(Image *image,
  const ChannelType channel,const double threshold)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Bilevel threshold image.
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); if ((channel & SyncChannels) != 0) { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelIntensity(image,q) <= threshold ? 0 : QuantumRange); SetPixelGreen(q,GetPixelRed(q)); SetPixelBlue(q,GetPixelRed(q)); q++; } } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold ? 0 : QuantumRange); if ((channel & GreenChannel) != 0) SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold ? 0 : QuantumRange); if ((channel & BlueChannel) != 0) SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold ? 0 : QuantumRange); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <= threshold ? 0 : QuantumRange); else SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <= threshold ? OpaqueOpacity : TransparentOpacity); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <= threshold ? 0 : QuantumRange); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_BilevelImageChannel) #endif proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l a c k T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlackThresholdImage() is like ThresholdImage() but forces all pixels below % the threshold into black while leaving all pixels at or above the threshold % unchanged. % % The format of the BlackThresholdImage method is: % % MagickBooleanType BlackThresholdImage(Image *image,const char *threshold) % MagickBooleanType BlackThresholdImageChannel(Image *image, % const ChannelType channel,const char *threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. % % o threshold: Define the threshold value. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType BlackThresholdImage(Image *image, const char *threshold) { MagickBooleanType status; status=BlackThresholdImageChannel(image,DefaultChannels,threshold, &image->exception); return(status); } MagickExport MagickBooleanType BlackThresholdImageChannel(Image *image, const ChannelType channel,const char *thresholds,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (thresholds == (const char *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); GetMagickPixelPacket(image,&threshold); flags=ParseGeometry(thresholds,&geometry_info); threshold.red=geometry_info.rho; threshold.green=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold.green=threshold.red; threshold.blue=geometry_info.xi; if ((flags & XiValue) == 0) threshold.blue=threshold.red; threshold.opacity=geometry_info.psi; if ((flags & PsiValue) == 0) threshold.opacity=threshold.red; threshold.index=geometry_info.chi; if ((flags & ChiValue) == 0) threshold.index=threshold.red; if ((flags & PercentValue) != 0) { threshold.red*=(MagickRealType) (QuantumRange/100.0); threshold.green*=(MagickRealType) (QuantumRange/100.0); threshold.blue*=(MagickRealType) (QuantumRange/100.0); threshold.opacity*=(MagickRealType) (QuantumRange/100.0); threshold.index*=(MagickRealType) (QuantumRange/100.0); } if ((IsMagickGray(&threshold) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace); /* Black threshold image. 
*/
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (((channel & RedChannel) != 0) &&
          ((MagickRealType) GetPixelRed(q) < threshold.red))
        SetPixelRed(q,0);
      if (((channel & GreenChannel) != 0) &&
          ((MagickRealType) GetPixelGreen(q) < threshold.green))
        SetPixelGreen(q,0);
      if (((channel & BlueChannel) != 0) &&
          ((MagickRealType) GetPixelBlue(q) < threshold.blue))
        SetPixelBlue(q,0);
      if (((channel & OpacityChannel) != 0) &&
          ((MagickRealType) GetPixelOpacity(q) < threshold.opacity))
        SetPixelOpacity(q,0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          ((MagickRealType) GetPixelIndex(indexes+x) < threshold.index))
        SetPixelIndex(indexes+x,0);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlackThresholdImageChannel)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l a m p I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampImage() sets each pixel whose value is below zero to zero and any
%  pixel whose value is above the quantum range to the quantum range (e.g.
%  65535); otherwise the pixel value remains unchanged.
%
%  The format of the ClampImageChannel method is:
%
%      MagickBooleanType ClampImage(Image *image)
%      MagickBooleanType ClampImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image)
{
  MagickBooleanType
    status;

  status=ClampImageChannel(image,DefaultChannels);
  return(status);
}

MagickExport MagickBooleanType ClampImageChannel(Image *image,
  const ChannelType channel)
{
#define ClampImageTag  "Clamp/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelPacket
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        SetPixelRed(q,ClampPixel(GetPixelRed(q)));
        SetPixelGreen(q,ClampPixel(GetPixelGreen(q)));
        SetPixelBlue(q,ClampPixel(GetPixelBlue(q)));
        SetPixelOpacity(q,ClampPixel(GetPixelOpacity(q)));
        q++;
      }
      return(SyncImage(image));
    }
  /*
    Clamp image.
*/
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampPixel(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampPixel(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampPixel(GetPixelBlue(q)));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampPixel(GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampPixel(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClampImageChannel)
#endif
        proceed=SetImageProgress(image,ClampImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y T h r e s h o l d M a p                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyThresholdMap() de-allocates the given ThresholdMap.
%
%  The format of the DestroyThresholdMap method is:
%
%      ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
%  A description of each parameter follows.
%
%    o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  assert(map != (ThresholdMap *) NULL);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  map=(ThresholdMap *) RelinquishMagickMemory(map);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     G e t T h r e s h o l d M a p F i l e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMapFile() looks for a given threshold map name or alias in the
%  given XML file data, and returns the allocated map when found.
%
%  The format of the GetThresholdMapFile method is:
%
%      ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
%        const char *map_id,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o xml: The threshold map list in XML format.
%
%    o filename: The threshold map XML filename.
%
%    o map_id: ID of the map to look for in XML list.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMapFile(const char *xml,
  const char *filename,const char *map_id,ExceptionInfo *exception)
{
  const char
    *attribute,
    *content;

  double
    value;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  map = (ThresholdMap *) NULL;
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(map);
  for (threshold = GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold = GetNextXMLTreeTag(threshold) )
  {
    attribute=GetXMLTreeAttribute(threshold, "map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold, "alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    The map has been found -- allocate a Threshold Map to return
  */
  map=(ThresholdMap *) AcquireMagickMemory(sizeof(ThresholdMap));
  if (map == (ThresholdMap *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  /*
    Assign basic attributes.
*/ attribute=GetXMLTreeAttribute(threshold,"map"); if (attribute != (char *) NULL) map->map_id=ConstantString(attribute); content=GetXMLTreeContent(description); if (content != (char *) NULL) map->description=ConstantString(content); attribute=GetXMLTreeAttribute(levels,"width"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->width=StringToUnsignedLong(attribute); if (map->width == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels width>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } attribute=GetXMLTreeAttribute(levels,"height"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels height>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->height=StringToUnsignedLong(attribute); if (map->height == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels height>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } attribute=GetXMLTreeAttribute(levels, "divisor"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels divisor>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->divisor=(ssize_t) StringToLong(attribute); if (map->divisor < 2) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels divisor>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } /* Allocate threshold levels array. */ content=GetXMLTreeContent(levels); if (content == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent", "<levels>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height* sizeof(*map->levels)); if (map->levels == (ssize_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap"); { char *p; register ssize_t i; /* Parse levels into integer array.
*/ for (i=0; i< (ssize_t) (map->width*map->height); i++) { map->levels[i]=(ssize_t) strtol(content,&p,10); if (p == content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> too few values, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } if ((map->levels[i] < 0) || (map->levels[i] > map->divisor)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"", (double) map->levels[i],map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } content=p; } value=(double) strtol(content,&p,10); (void) value; if (p != content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> too many values, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } } thresholds=DestroyXMLTree(thresholds); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t T h r e s h o l d M a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetThresholdMap() loads and searches one or more threshold map files for % a map matching the given name or alias. % % The format of the GetThresholdMap method is: % % ThresholdMap *GetThresholdMap(const char *map_id, % ExceptionInfo *exception) % % A description of each parameter follows. % % o map_id: ID of the map to look for. % % o exception: return any errors or warnings in this structure. % */ MagickExport ThresholdMap *GetThresholdMap(const char *map_id, ExceptionInfo *exception) { const StringInfo *option; LinkedListInfo *options; ThresholdMap *map; map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception); if (map != (ThresholdMap *) NULL) return(map); options=GetConfigureOptions(ThresholdsFilename,exception); option=(const StringInfo *) GetNextValueInLinkedList(options); while (option != (const StringInfo *) NULL) { map=GetThresholdMapFile((const char *) GetStringInfoDatum(option), GetStringInfoPath(option),map_id,exception); if (map != (ThresholdMap *) NULL) break; option=(const StringInfo *) GetNextValueInLinkedList(options); } options=DestroyConfigureOptions(options); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + L i s t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ListThresholdMapFile() lists the threshold maps and their descriptions % in the given XML file data. % % The format of the ListThresholdMapFile method is: % % MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml, % const char *filename,ExceptionInfo *exception) % % A description of each parameter follows. % % o file: A pointer to the output FILE. % % o xml: The threshold map list in XML format. % % o filename: The threshold map XML filename. % % o exception: return any errors or warnings in this structure.
% */ MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml, const char *filename,ExceptionInfo *exception) { XMLTreeInfo *thresholds,*threshold,*description; const char *map,*alias,*content; assert( xml != (char *) NULL ); assert( file != (FILE *) NULL ); (void) LogMagickEvent(ConfigureEvent,GetMagickModule(), "Loading threshold map file \"%s\" ...",filename); thresholds=NewXMLTree(xml,exception); if ( thresholds == (XMLTreeInfo *) NULL ) return(MagickFalse); (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description"); (void) FormatLocaleFile(file, "----------------------------------------------------\n"); for( threshold = GetXMLTreeChild(thresholds,"threshold"); threshold != (XMLTreeInfo *) NULL; threshold = GetNextXMLTreeTag(threshold) ) { map = GetXMLTreeAttribute(threshold, "map"); if (map == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<map>"); thresholds=DestroyXMLTree(thresholds); return(MagickFalse); } alias = GetXMLTreeAttribute(threshold, "alias"); /* alias is optional, no if test needed */ description=GetXMLTreeChild(threshold,"description"); if ( description == (XMLTreeInfo *) NULL ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<description>, map \"%s\"", map); thresholds=DestroyXMLTree(thresholds); return(MagickFalse); } content=GetXMLTreeContent(description); if ( content == (char *) NULL ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent", "<description>, map \"%s\"", map); thresholds=DestroyXMLTree(thresholds); return(MagickFalse); } (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "", content); } thresholds=DestroyXMLTree(thresholds); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i s t T h r e s h o l d M a p s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ListThresholdMaps() lists the threshold maps and their descriptions % as defined by "threshold.xml" to a file. % % The format of the ListThresholdMaps method is: % % MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception) % % A description of each parameter follows. % % o file: A pointer to the output FILE. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ListThresholdMaps(FILE *file, ExceptionInfo *exception) { const StringInfo *option; LinkedListInfo *options; MagickStatusType status; status=MagickTrue; if (file == (FILE *) NULL) file=stdout; options=GetConfigureOptions(ThresholdsFilename,exception); (void) FormatLocaleFile(file, "\n Threshold Maps for Ordered Dither Operations\n"); option=(const StringInfo *) GetNextValueInLinkedList(options); while (option != (const StringInfo *) NULL) { (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option)); status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option), GetStringInfoPath(option),exception); option=(const StringInfo *) GetNextValueInLinkedList(options); } options=DestroyConfigureOptions(options); return(status != 0 ?
MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O r d e r e d D i t h e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OrderedDitherImage() uses the ordered dithering technique of reducing color % images to monochrome using positional information to retain as much % information as possible. % % WARNING: This function is deprecated, and is now just a call to % the more powerful OrderedPosterizeImage() function. % % The format of the OrderedDitherImage method is: % % MagickBooleanType OrderedDitherImage(Image *image) % MagickBooleanType OrderedDitherImageChannel(Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType OrderedDitherImage(Image *image) { MagickBooleanType status; status=OrderedDitherImageChannel(image,DefaultChannels,&image->exception); return(status); } MagickExport MagickBooleanType OrderedDitherImageChannel(Image *image, const ChannelType channel,ExceptionInfo *exception) { MagickBooleanType status; /* Call the augmented function OrderedPosterizeImage() */ status=OrderedPosterizeImageChannel(image,channel,"o8x8",exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O r d e r e d P o s t e r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OrderedPosterizeImage() will perform an ordered dither based on a number % of pre-defined dithering threshold maps, but over multiple intensity % levels, which can be different for different channels, according to the % input argument. % % The format of the OrderedPosterizeImage method is: % % MagickBooleanType OrderedPosterizeImage(Image *image, % const char *threshold_map,ExceptionInfo *exception) % MagickBooleanType OrderedPosterizeImageChannel(Image *image, % const ChannelType channel,const char *threshold_map, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. % % o threshold_map: A string containing the name of the threshold dither % map to use, followed by zero or more numbers representing the number % of color levels to dither between. % % Any level number less than 2 will be equivalent to 2, and means only % binary dithering will be applied to each color channel. % % No numbers also means a 2 level (bitmap) dither will be applied to all % channels, while a single number is the number of levels applied to each % channel in sequence. More numbers will be applied in turn to each of % the color channels. % % For example: "o3x3,6" will generate a 6 level posterization of the % image with an ordered 3x3 diffused pixel dither being applied between % each level. While checker,8,8,4 will produce a 332 colormapped image % with only a single checkerboard hash pattern (50% grey) between each % color level, to basically double the number of color levels with % a bare minimum of dithering. % % o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType OrderedPosterizeImage(Image *image, const char *threshold_map,ExceptionInfo *exception) { MagickBooleanType status; status=OrderedPosterizeImageChannel(image,DefaultChannels,threshold_map, exception); return(status); } MagickExport MagickBooleanType OrderedPosterizeImageChannel(Image *image, const ChannelType channel,const char *threshold_map,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; LongPixelPacket levels; MagickBooleanType status; MagickOffsetType progress; ssize_t y; ThresholdMap *map; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if (threshold_map == (const char *) NULL) return(MagickTrue); { char token[MaxTextExtent]; register const char *p; p=(char *)threshold_map; while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) && (*p != '\0')) p++; threshold_map=p; while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) && (*p != '\0')) { if ((p-threshold_map) >= (MaxTextExtent-1)) break; token[p-threshold_map] = *p; p++; } token[p-threshold_map] = '\0'; map = GetThresholdMap(token, exception); if ( map == (ThresholdMap *) NULL ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","ordered-dither",threshold_map); return(MagickFalse); } } /* Set channel levels from extra comma separated arguments Default to 2, the single value given, or individual channel values */ #if 1 { /* parse directly as a comma separated list of integers */ char *p; p = strchr((char *) threshold_map,','); if ( p != (char *) NULL && isdigit((int) ((unsigned char) *(++p))) ) levels.index = (unsigned int) strtoul(p, &p, 10); else levels.index = 2; levels.red = ((channel & RedChannel ) != 0) ? levels.index : 0; levels.green = ((channel & GreenChannel) != 0) ? levels.index : 0; levels.blue = ((channel & BlueChannel) != 0) ? levels.index : 0; levels.opacity = ((channel & OpacityChannel) != 0) ? levels.index : 0; levels.index = ((channel & IndexChannel) != 0 && (image->colorspace == CMYKColorspace)) ? levels.index : 0; /* if more than a single number, each channel has a separate value */ if ( p != (char *) NULL && *p == ',' ) { p=strchr((char *) threshold_map,','); p++; if ((channel & RedChannel) != 0) levels.red = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & GreenChannel) != 0) levels.green = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & BlueChannel) != 0) levels.blue = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & IndexChannel) != 0 && image->colorspace == CMYKColorspace) levels.index=(unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & OpacityChannel) != 0) levels.opacity = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); } } #else /* Parse level values as a geometry */ /* This is difficult! * How to map GeometryInfo structure elements into * LongPixelPacket structure elements, but according to channel? * Note the channels list may skip elements!!!! * EG -channel BA -ordered-dither map,2,3 * will need to map g.rho -> l.blue, and g.sigma -> l.opacity * A simpler way is needed, probably converting geometry to a temporary * array, then using channel to advance the index into ssize_t pixel packet.
*/ #endif #if 0 printf("DEBUG levels r=%u g=%u b=%u a=%u i=%u\n", levels.red, levels.green, levels.blue, levels.opacity, levels.index); #endif { /* Do the posterized ordered dithering of the image */ ssize_t d; /* d = number of pseudo-level divisions added between color levels */ d = map->divisor-1; /* reduce levels to levels - 1 */ levels.red = levels.red ? levels.red-1 : 0; levels.green = levels.green ? levels.green-1 : 0; levels.blue = levels.blue ? levels.blue-1 : 0; levels.opacity = levels.opacity ? levels.opacity-1 : 0; levels.index = levels.index ? levels.index-1 : 0; if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); return(MagickFalse); } status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t threshold, t, l; /* Figure out the dither threshold for this pixel This must be an integer from 1 to map->divisor-1 */ threshold = map->levels[(x%map->width) +map->width*(y%map->height)]; /* Dither each channel in the image as appropriate Notes on the integer Math... total number of divisions = (levels-1)*(divisor-1)+1 t1 = this color's pseudo_level = q->red * total_divisions / (QuantumRange+1) l = posterization level 0..levels t = dither threshold level 0..divisor-1 NB: 0 only on last Each color_level is of size QuantumRange / (levels-1) NB: All input levels and the divisor have already had 1 subtracted Opacity is inverted so 'off' represents transparent.
*/ if (levels.red) { t = (ssize_t) (QuantumScale*GetPixelRed(q)*(levels.red*d+1)); l = t/d; t = t-l*d; SetPixelRed(q,ClampToQuantum((MagickRealType) ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.red))); } if (levels.green) { t = (ssize_t) (QuantumScale*GetPixelGreen(q)* (levels.green*d+1)); l = t/d; t = t-l*d; SetPixelGreen(q,ClampToQuantum((MagickRealType) ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.green))); } if (levels.blue) { t = (ssize_t) (QuantumScale*GetPixelBlue(q)* (levels.blue*d+1)); l = t/d; t = t-l*d; SetPixelBlue(q,ClampToQuantum((MagickRealType) ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.blue))); } if (levels.opacity) { t = (ssize_t) ((1.0-QuantumScale*GetPixelOpacity(q))* (levels.opacity*d+1)); l = t/d; t = t-l*d; SetPixelOpacity(q,ClampToQuantum((MagickRealType) ((1.0-l-(t >= threshold))*(MagickRealType) QuantumRange/ levels.opacity))); } if (levels.index) { t = (ssize_t) (QuantumScale*GetPixelIndex(indexes+x)* (levels.index*d+1)); l = t/d; t = t-l*d; SetPixelIndex(indexes+x,ClampToQuantum((MagickRealType) ((l+ (t>=threshold))*(MagickRealType) QuantumRange/levels.index))); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OrderedPosterizeImageChannel) #endif proceed=SetImageProgress(image,DitherImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); } map=DestroyThresholdMap(map); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P e r c e p t i b l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PerceptibleImage() sets each pixel whose value is less than |epsilon| to % epsilon or -epsilon (whichever is closer); otherwise the pixel value remains % unchanged. % % The format of the PerceptibleImage method is: % % MagickBooleanType PerceptibleImage(Image *image,const double epsilon) % MagickBooleanType PerceptibleImageChannel(Image *image, % const ChannelType channel,const double epsilon) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o epsilon: the epsilon threshold (e.g. 1.0e-9). % */ static inline Quantum PerceptibleThreshold(const Quantum quantum, const double epsilon) { double sign; sign=(double) quantum < 0.0 ?
-1.0 : 1.0; if ((sign*quantum) >= epsilon) return(quantum); return((Quantum) (sign*epsilon)); } MagickExport MagickBooleanType PerceptibleImage(Image *image, const double epsilon) { MagickBooleanType status; status=PerceptibleImageChannel(image,DefaultChannels,epsilon); return(status); } MagickExport MagickBooleanType PerceptibleImageChannel(Image *image, const ChannelType channel,const double epsilon) { #define PerceptibleImageTag "Perceptible/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { register ssize_t i; register PixelPacket *magick_restrict q; q=image->colormap; for (i=0; i < (ssize_t) image->colors; i++) { SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon)); SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon)); SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon)); SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon)); q++; } return(SyncImage(image)); } /* Perceptible image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,PerceptibleThreshold(GetPixelIndex(indexes+x), epsilon)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_PerceptibleImageChannel) #endif proceed=SetImageProgress(image,PerceptibleImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n d o m T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RandomThresholdImage() changes the value of individual pixels based on the % intensity of each pixel compared to a random threshold. The result is a % low-contrast, two color image. 
% % The format of the RandomThresholdImage method is: % % MagickBooleanType RandomThresholdImage(Image *image, % const char *thresholds,ExceptionInfo *exception) % MagickBooleanType RandomThresholdImageChannel(Image *image, % const ChannelType channel,const char *thresholds, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. % % o thresholds: a geometry string containing low,high thresholds. If the % string contains 2x2, 3x3, or 4x4, an ordered dither of order 2, 3, or 4 % is performed instead. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RandomThresholdImage(Image *image, const char *thresholds,ExceptionInfo *exception) { MagickBooleanType status; status=RandomThresholdImageChannel(image,DefaultChannels,thresholds, exception); return(status); } MagickExport MagickBooleanType RandomThresholdImageChannel(Image *image, const ChannelType channel,const char *thresholds,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; GeometryInfo geometry_info; MagickStatusType flags; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket threshold; MagickRealType min_threshold, max_threshold; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if (thresholds == (const char *) NULL) return(MagickTrue); GetMagickPixelPacket(image,&threshold); min_threshold=0.0; max_threshold=(MagickRealType) QuantumRange; flags=ParseGeometry(thresholds,&geometry_info); min_threshold=geometry_info.rho; max_threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) max_threshold=min_threshold; if (strchr(thresholds,'%') != (char *) NULL) { max_threshold*=(MagickRealType) (0.01*QuantumRange); min_threshold*=(MagickRealType) (0.01*QuantumRange); } else if (((max_threshold == min_threshold) || (max_threshold == 1)) && (min_threshold <= 8)) { /* Backward Compatibility -- ordered-dither -- IM v 6.2.9-6. */ status=OrderedPosterizeImageChannel(image,channel,thresholds,exception); return(status); } /* Random threshold image.
*/ status=MagickTrue; progress=0; if (channel == CompositeChannels) { if (AcquireImageColormap(image,2) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); random_info=AcquireRandomInfoThreadSet(); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { IndexPacket index; MagickRealType intensity; intensity=GetPixelIntensity(image,q); if (intensity < min_threshold) threshold.index=min_threshold; else if (intensity > max_threshold) threshold.index=max_threshold; else threshold.index=(MagickRealType)(QuantumRange* GetPseudoRandomValue(random_info[id])); index=(IndexPacket) (intensity <= threshold.index ? 0 : 1); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RandomThresholdImageChannel) #endif proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); return(MagickFalse); } random_info=AcquireRandomInfoThreadSet(); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) { if ((MagickRealType) GetPixelRed(q) < min_threshold) threshold.red=min_threshold; else if ((MagickRealType) GetPixelRed(q) > max_threshold) threshold.red=max_threshold; else threshold.red=(MagickRealType) (QuantumRange* GetPseudoRandomValue(random_info[id])); } if ((channel & GreenChannel) != 0) { if ((MagickRealType) GetPixelGreen(q) < min_threshold) threshold.green=min_threshold; else if ((MagickRealType) GetPixelGreen(q) > max_threshold) threshold.green=max_threshold; else threshold.green=(MagickRealType) (QuantumRange* GetPseudoRandomValue(random_info[id])); } if ((channel & BlueChannel) != 0) { if ((MagickRealType) 
GetPixelBlue(q) < min_threshold) threshold.blue=min_threshold; else if ((MagickRealType) GetPixelBlue(q) > max_threshold) threshold.blue=max_threshold; else threshold.blue=(MagickRealType) (QuantumRange* GetPseudoRandomValue(random_info[id])); } if ((channel & OpacityChannel) != 0) { if ((MagickRealType) GetPixelOpacity(q) < min_threshold) threshold.opacity=min_threshold; else if ((MagickRealType) GetPixelOpacity(q) > max_threshold) threshold.opacity=max_threshold; else threshold.opacity=(MagickRealType) (QuantumRange* GetPseudoRandomValue(random_info[id])); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if ((MagickRealType) GetPixelIndex(indexes+x) < min_threshold) threshold.index=min_threshold; else if ((MagickRealType) GetPixelIndex(indexes+x) > max_threshold) threshold.index=max_threshold; else threshold.index=(MagickRealType) (QuantumRange* GetPseudoRandomValue(random_info[id])); } if ((channel & RedChannel) != 0) SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold.red ? 0 : QuantumRange); if ((channel & GreenChannel) != 0) SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold.green ? 0 : QuantumRange); if ((channel & BlueChannel) != 0) SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold.blue ? 0 : QuantumRange); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <= threshold.opacity ? 0 : QuantumRange); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <= threshold.index ? 0 : QuantumRange); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RandomThresholdImageChannel) #endif proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteThresholdImage() is like ThresholdImage() but forces all pixels above % the threshold into white while leaving all pixels at or below the threshold % unchanged. % % The format of the WhiteThresholdImage method is: % % MagickBooleanType WhiteThresholdImage(Image *image,const char *threshold) % MagickBooleanType WhiteThresholdImageChannel(Image *image, % const ChannelType channel,const char *threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. % % o threshold: Define the threshold value. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType WhiteThresholdImage(Image *image, const char *threshold) { MagickBooleanType status; status=WhiteThresholdImageChannel(image,DefaultChannels,threshold, &image->exception); return(status); } MagickExport MagickBooleanType WhiteThresholdImageChannel(Image *image, const ChannelType channel,const char *thresholds,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (thresholds == (const char *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); flags=ParseGeometry(thresholds,&geometry_info); GetMagickPixelPacket(image,&threshold); threshold.red=geometry_info.rho; threshold.green=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold.green=threshold.red; threshold.blue=geometry_info.xi; if ((flags & XiValue) == 0) threshold.blue=threshold.red; threshold.opacity=geometry_info.psi; if ((flags & PsiValue) == 0) threshold.opacity=threshold.red; threshold.index=geometry_info.chi; if ((flags & ChiValue) == 0) threshold.index=threshold.red; if ((flags & PercentValue) != 0) { threshold.red*=(MagickRealType) (QuantumRange/100.0); threshold.green*=(MagickRealType) (QuantumRange/100.0); threshold.blue*=(MagickRealType) (QuantumRange/100.0); threshold.opacity*=(MagickRealType) (QuantumRange/100.0); threshold.index*=(MagickRealType) (QuantumRange/100.0); } if ((IsMagickGray(&threshold) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace); /* White threshold image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if (((channel & RedChannel) != 0) && ((MagickRealType) GetPixelRed(q) > threshold.red)) SetPixelRed(q,QuantumRange); if (((channel & GreenChannel) != 0) && ((MagickRealType) GetPixelGreen(q) > threshold.green)) SetPixelGreen(q,QuantumRange); if (((channel & BlueChannel) != 0) && ((MagickRealType) GetPixelBlue(q) > threshold.blue)) SetPixelBlue(q,QuantumRange); if (((channel & OpacityChannel) != 0) && ((MagickRealType) GetPixelOpacity(q) > threshold.opacity)) SetPixelOpacity(q,QuantumRange); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && ((MagickRealType) GetPixelIndex(indexes+x)) > threshold.index) SetPixelIndex(indexes+x,QuantumRange); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_WhiteThresholdImageChannel) #endif proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
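/*
  Usage sketch (illustrative only, not part of MagickCore): a minimal
  program exercising the threshold APIs above, assuming the classic
  MagickCore 6 entry points.  It reads an image, applies the 8x8 ordered
  map at 4 intensity levels per channel via OrderedPosterizeImageChannel()
  (the "o8x8,4" threshold_map syntax documented above), and writes the
  result.  The filenames "input.png" and "output.png" are hypothetical;
  error handling is abbreviated.  Guarded with #if 0 so it never compiles
  into the library itself.
*/
#if 0
#include <magick/MagickCore.h>

int main(void)
{
  ExceptionInfo *exception;
  Image *image;
  ImageInfo *image_info;

  MagickCoreGenesis("example",MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename,"input.png",MaxTextExtent);
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      /* "o8x8" names the 8x8 ordered map from thresholds.xml; ",4"
         requests 4 color levels per channel (see threshold_map above). */
      (void) OrderedPosterizeImageChannel(image,DefaultChannels,"o8x8,4",
        exception);
      (void) CopyMagickString(image->filename,"output.png",MaxTextExtent);
      (void) WriteImage(image_info,image);
      image=DestroyImage(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}
#endif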
progress_counter.h
/* * Copyright (C) 2015, Nils Moehrle * TU Darmstadt - Graphics, Capture and Massively Parallel Computing * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD 3-Clause license. See the LICENSE.txt file for details. */ #ifndef TEX_PROGRESSCOUNTER_HEADER #define TEX_PROGRESSCOUNTER_HEADER #include <atomic> #include <fstream> #include <iostream> #include <sstream> #include "util/timer.h" #include <cmath> enum ProgressCounterStyle { ETA, SIMPLE }; static const std::string clear = "\r" + std::string(80,' ') + "\r"; class ProgressCounter { private: std::ofstream tty; util::WallTimer timer; std::string task; std::size_t max; std::atomic_size_t count; public: ProgressCounter(std::string const & _task, std::size_t max); template <ProgressCounterStyle T> void progress(void); void inc(void); void reset(std::string const & _task); }; inline ProgressCounter::ProgressCounter(std::string const & _task, std::size_t _max) : tty("/dev/tty", std::ios_base::out), timer(), task(_task), max(_max), count(0) {} inline void ProgressCounter::inc(void) { std::size_t tmp; tmp = ++count; if(tmp == max) { std::stringstream ss; ss << clear << task << " 100%... done. (Took " << timer.get_elapsed_sec() << "s)"; #pragma omp critical(progress_counter_inc) std::cout << ss.rdbuf() << std::endl; } } inline void ProgressCounter::reset(std::string const & _task) { timer.reset(); count = 0; task = _task; } template <ProgressCounterStyle T> void ProgressCounter::progress(void) { if ((max > 100 && count % (max / 100) == 0) || max <= 100) { float percent = static_cast<float>(count) / max; int ipercent = std::floor(percent * 100.0f + 0.5f); std::stringstream ss; ss << clear << task << " " << ipercent << "%..."; if (T == ETA && ipercent > 3){ std::size_t const elapsed = timer.get_elapsed(); std::size_t eta = (elapsed / percent - elapsed) / 1000; ss << " eta ~ " << eta << " s"; } #pragma omp critical(progress_counter_progress) tty << ss.rdbuf() << std::flush; } } #endif /* TEX_PROGRESSCOUNTER_HEADER */
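/*
 * Usage sketch (illustrative only, not part of this header): driving a
 * ProgressCounter from an OpenMP-parallel loop.  progress<ETA>() prints a
 * percentage with an estimated time remaining, and inc() emits the final
 * "100%... done" line once the count reaches max.  The work() function and
 * process_items() wrapper are hypothetical.  Guarded with #if 0 so it is
 * never compiled as part of the header.
 */
#if 0
#include <cstddef>
#include "progress_counter.h"

void work(std::size_t i); /* hypothetical per-item task */

void process_items(std::size_t num_items) {
    ProgressCounter counter("Processing items", num_items);
    #pragma omp parallel for
    for (std::size_t i = 0; i < num_items; ++i) {
        counter.progress<ETA>(); /* periodic "Processing items NN%... eta ~ S s" */
        work(i);
        counter.inc();           /* prints the completion line on the last item */
    }
}
#endif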
draw.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD RRRR AAA W W % % D D R R A A W W % % D D RRRR AAAAA W W W % % D D R R A A WW WW % % DDDD R R A A W W % % % % % % MagickCore Image Drawing Methods % % % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon % rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion", % Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent % (www.appligent.com) contributed the dash pattern, linecap stroking % algorithm, and minor rendering improvements. % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. */ #define BezierQuantum 200 #define DrawEpsilon (1.0e-10) /* Typedef declarations. */ typedef struct _EdgeInfo { SegmentInfo bounds; double scanline; PointInfo *points; size_t number_points; ssize_t direction; MagickBooleanType ghostline; size_t highwater; } EdgeInfo; typedef struct _ElementInfo { double cx, cy, major, minor, angle; } ElementInfo; typedef struct _PolygonInfo { EdgeInfo *edges; size_t number_edges; } PolygonInfo; typedef enum { MoveToCode, OpenCode, GhostlineCode, LineToCode, EndCode } PathInfoCode; typedef struct _PathInfo { PointInfo point; PathInfoCode code; } PathInfo; /* Forward declarations.
*/ static MagickBooleanType DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *, ExceptionInfo *); static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *); static size_t TracePath(PrimitiveInfo *,const char *); static void TraceArc(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo), TraceArcPath(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo, const double,const MagickBooleanType,const MagickBooleanType), TraceBezier(PrimitiveInfo *,const size_t), TraceCircle(PrimitiveInfo *,const PointInfo,const PointInfo), TraceEllipse(PrimitiveInfo *,const PointInfo,const PointInfo, const PointInfo), TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRoundRectangle(PrimitiveInfo *,const PointInfo,const PointInfo, PointInfo), TraceSquareLinecap(PrimitiveInfo *,const size_t,const double); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireDrawInfo() returns a DrawInfo structure properly initialized. % % The format of the AcquireDrawInfo method is: % % DrawInfo *AcquireDrawInfo(void) % */ MagickExport DrawInfo *AcquireDrawInfo(void) { DrawInfo *draw_info; draw_info=(DrawInfo *) AcquireMagickMemory(sizeof(*draw_info)); if (draw_info == (DrawInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetDrawInfo((ImageInfo *) NULL,draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneDrawInfo() makes a copy of the given draw_info structure. If NULL % is specified, a new DrawInfo structure is created initialized to default % values. % % The format of the CloneDrawInfo method is: % % DrawInfo *CloneDrawInfo(const ImageInfo *image_info, % const DrawInfo *draw_info) % % A description of each parameter follows: % % o image_info: the image info. % % o draw_info: the draw info. 
% */ MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info, const DrawInfo *draw_info) { DrawInfo *clone_info; ExceptionInfo *exception; clone_info=(DrawInfo *) AcquireMagickMemory(sizeof(*clone_info)); if (clone_info == (DrawInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetDrawInfo(image_info,clone_info); if (draw_info == (DrawInfo *) NULL) return(clone_info); exception=AcquireExceptionInfo(); if (draw_info->primitive != (char *) NULL) (void) CloneString(&clone_info->primitive,draw_info->primitive); if (draw_info->geometry != (char *) NULL) (void) CloneString(&clone_info->geometry,draw_info->geometry); clone_info->viewbox=draw_info->viewbox; clone_info->affine=draw_info->affine; clone_info->gravity=draw_info->gravity; clone_info->fill=draw_info->fill; clone_info->stroke=draw_info->stroke; clone_info->stroke_width=draw_info->stroke_width; if (draw_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue, exception); if (draw_info->stroke_pattern != (Image *) NULL) clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0, MagickTrue,exception); clone_info->stroke_antialias=draw_info->stroke_antialias; clone_info->text_antialias=draw_info->text_antialias; clone_info->fill_rule=draw_info->fill_rule; clone_info->linecap=draw_info->linecap; clone_info->linejoin=draw_info->linejoin; clone_info->miterlimit=draw_info->miterlimit; clone_info->dash_offset=draw_info->dash_offset; clone_info->decorate=draw_info->decorate; clone_info->compose=draw_info->compose; if (draw_info->text != (char *) NULL) (void) CloneString(&clone_info->text,draw_info->text); if (draw_info->font != (char *) NULL) (void) CloneString(&clone_info->font,draw_info->font); if (draw_info->metrics != (char *) NULL) (void) CloneString(&clone_info->metrics,draw_info->metrics); if (draw_info->family != (char *) NULL) (void) CloneString(&clone_info->family,draw_info->family); clone_info->style=draw_info->style; clone_info->stretch=draw_info->stretch; clone_info->weight=draw_info->weight; if (draw_info->encoding != (char *) NULL) (void) CloneString(&clone_info->encoding,draw_info->encoding); clone_info->pointsize=draw_info->pointsize; clone_info->kerning=draw_info->kerning; clone_info->interline_spacing=draw_info->interline_spacing; clone_info->interword_spacing=draw_info->interword_spacing; clone_info->direction=draw_info->direction; if (draw_info->density != (char *) NULL) (void) CloneString(&clone_info->density,draw_info->density); clone_info->align=draw_info->align; clone_info->undercolor=draw_info->undercolor; clone_info->border_color=draw_info->border_color; if (draw_info->server_name != (char *) NULL) (void) CloneString(&clone_info->server_name,draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) { register ssize_t x; for (x=0; fabs(draw_info->dash_pattern[x]) >= DrawEpsilon; x++) ; clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) x+1UL, sizeof(*clone_info->dash_pattern)); if (clone_info->dash_pattern == (double *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) CopyMagickMemory(clone_info->dash_pattern,draw_info->dash_pattern, (size_t) (x+1)*sizeof(*clone_info->dash_pattern)); } clone_info->gradient=draw_info->gradient; if (draw_info->gradient.stops != (StopInfo *) NULL) { size_t number_stops; number_stops=clone_info->gradient.number_stops; clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
number_stops,sizeof(*clone_info->gradient.stops)); if (clone_info->gradient.stops == (StopInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) CopyMagickMemory(clone_info->gradient.stops, draw_info->gradient.stops,(size_t) number_stops* sizeof(*clone_info->gradient.stops)); } if (draw_info->clip_mask != (char *) NULL) (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask); clone_info->bounds=draw_info->bounds; clone_info->clip_units=draw_info->clip_units; clone_info->render=draw_info->render; clone_info->fill_alpha=draw_info->fill_alpha; clone_info->stroke_alpha=draw_info->stroke_alpha; clone_info->element_reference=draw_info->element_reference; clone_info->debug=IsEventLogging(); exception=DestroyExceptionInfo(exception); return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n v e r t P a t h T o P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvertPathToPolygon() converts a path to the more efficient sorted % rendering form. % % The format of the ConvertPathToPolygon method is: % % PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info) % % A description of each parameter follows: % % o Method ConvertPathToPolygon returns the path in a more efficient sorted % rendering form of type PolygonInfo. % % o path_info: Specifies a pointer to a PathInfo structure. % % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int CompareEdges(const void *x,const void *y) { register const EdgeInfo *p, *q; /* Compare two edges. */ p=(const EdgeInfo *) x; q=(const EdgeInfo *) y; if ((p->points[0].y-DrawEpsilon) > q->points[0].y) return(1); if ((p->points[0].y+DrawEpsilon) < q->points[0].y) return(-1); if ((p->points[0].x-DrawEpsilon) > q->points[0].x) return(1); if ((p->points[0].x+DrawEpsilon) < q->points[0].x) return(-1); if (((p->points[1].x-p->points[0].x)*(q->points[1].y-q->points[0].y)- (p->points[1].y-p->points[0].y)*(q->points[1].x-q->points[0].x)) > 0.0) return(1); return(-1); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static void LogPolygonInfo(const PolygonInfo *polygon_info) { register EdgeInfo *p; register ssize_t i, j; (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge"); p=polygon_info->edges; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) { (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:", (double) i); (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s", p->direction != MagickFalse ? "down" : "up"); (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s", p->ghostline != MagickFalse ?
"transparent" : "opaque"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1, p->bounds.x2,p->bounds.y2); for (j=0; j < (ssize_t) p->number_points; j++) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g", p->points[j].x,p->points[j].y); p++; } (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge"); } static void ReversePoints(PointInfo *points,const size_t number_points) { PointInfo point; register ssize_t i; for (i=0; i < (ssize_t) (number_points >> 1); i++) { point=points[i]; points[i]=points[number_points-(i+1)]; points[number_points-(i+1)]=point; } } static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info) { long direction, next_direction; PointInfo point, *points; PolygonInfo *polygon_info; SegmentInfo bounds; register ssize_t i, n; MagickBooleanType ghostline; size_t edge, number_edges, number_points; /* Convert a path to the more efficient sorted rendering form. */ polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info)); if (polygon_info == (PolygonInfo *) NULL) return((PolygonInfo *) NULL); number_edges=16; polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); (void) ResetMagickMemory(polygon_info->edges,0,number_edges* sizeof(*polygon_info->edges)); direction=0; edge=0; ghostline=MagickFalse; n=0; number_points=0; points=(PointInfo *) NULL; (void) ResetMagickMemory(&point,0,sizeof(point)); (void) ResetMagickMemory(&bounds,0,sizeof(bounds)); for (i=0; path_info[i].code != EndCode; i++) { if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) || (path_info[i].code == GhostlineCode)) { /* Move to. */ if ((points != (PointInfo *) NULL) && (n >= 2)) { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; points=(PointInfo *) NULL; ghostline=MagickFalse; edge++; } if (points == (PointInfo *) NULL) { number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); } ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse; point=path_info[i].point; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; direction=0; n=1; continue; } /* Line to. */ next_direction=((path_info[i].point.y > point.y) || ((fabs(path_info[i].point.y-point.y) < DrawEpsilon) && (path_info[i].point.x > point.x))) ? 1 : -1; if ((points != (PointInfo *) NULL) && (direction != 0) && (direction != next_direction)) { /* New edge. 
*/ point=points[n-1]; if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); n=1; ghostline=MagickFalse; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; edge++; } direction=next_direction; if (points == (PointInfo *) NULL) continue; if (n == (ssize_t) number_points) { number_points<<=1; points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); } point=path_info[i].point; points[n]=point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.x > bounds.x2) bounds.x2=point.x; n++; } if (points != (PointInfo *) NULL) { if (n < 2) points=(PointInfo *) RelinquishMagickMemory(points); else { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; ghostline=MagickFalse; edge++; } } polygon_info->number_edges=edge; qsort(polygon_info->edges,(size_t) polygon_info->number_edges, sizeof(*polygon_info->edges),CompareEdges); if (IsEventLogging() != MagickFalse) LogPolygonInfo(polygon_info); return(polygon_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n v e r t P r i m i t i v e T o P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector % path structure. % % The format of the ConvertPrimitiveToPath method is: % % PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o Method ConvertPrimitiveToPath returns a vector path structure of type % PathInfo. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/

static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "      %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
      "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end vector-path");
}

static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,
    q;

  register ssize_t
    i,
    n;

  ssize_t
    coordinates,
    start;

  /*
    Convert a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (2UL*i+3UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
      }
    coordinates--;
    /*
      Eliminate duplicate points.
    */
    if ((i == 0) || (fabs(q.x-primitive_info[i].point.x) >= DrawEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= DrawEpsilon))
      {
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;
    if ((fabs(p.x-primitive_info[i].point.x) < DrawEpsilon) &&
        (fabs(p.y-primitive_info[i].point.y) < DrawEpsilon))
      continue;
    /*
      The sub-path's endpoint does not match its start point p: mark the
      start as open, then close the sub-path with an invisible ghostline
      back to p.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  return(path_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y   D r a w   I n f o                                        %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyDrawInfo() deallocates memory associated with a DrawInfo structure.
%
%  The format of the DestroyDrawInfo method is:
%
%      DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
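%
%  A typical lifetime pairing (an illustrative sketch, not code from this
%  file; it assumes an Image *image and ExceptionInfo *exception are at
%  hand):
%
%      DrawInfo
%        *draw_info;
%
%      draw_info=CloneDrawInfo((ImageInfo *) NULL,(DrawInfo *) NULL);
%      (void) CloneString(&draw_info->primitive,"circle 100,100 150,150");
%      (void) DrawImage(image,draw_info,exception);
%      draw_info=DestroyDrawInfo(draw_info);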
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *) RelinquishMagickMemory(
      draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y   E d g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyEdge() destroys the specified polygon edge.
%
%  The format of the DestroyEdge method is:
%
%      size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
%    o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
{
  assert(edge < polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  if (edge < polygon_info->number_edges)
    (void) CopyMagickMemory(polygon_info->edges+edge,
      polygon_info->edges+edge+1,(size_t) (polygon_info->number_edges-edge)*
      sizeof(*polygon_info->edges));
  return(polygon_info->number_edges);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y   P o l y g o n   I n f o                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
%  The format of the DestroyPolygonInfo method is:
%
%      PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
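%
%  It releases each edge's point list and then the edge array itself; it is
%  the required counterpart of ConvertPathToPolygon().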
% */ static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info) { register ssize_t i; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) polygon_info->edges[i].points=(PointInfo *) RelinquishMagickMemory(polygon_info->edges[i].points); polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges); return((PolygonInfo *) RelinquishMagickMemory(polygon_info)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w A f f i n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawAffineImage() composites the source over the destination image as % dictated by the affine transform. % % The format of the DrawAffineImage method is: % % MagickBooleanType DrawAffineImage(Image *image,const Image *source, % const AffineMatrix *affine,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o source: the source image. % % o affine: the affine transform. % % o exception: return any errors or warnings in this structure. % */ static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine, const double y,const SegmentInfo *edge) { double intercept, z; register double x; SegmentInfo inverse_edge; /* Determine left and right edges. */ inverse_edge.x1=edge->x1; inverse_edge.y1=edge->y1; inverse_edge.x2=edge->x2; inverse_edge.y2=edge->y2; z=affine->ry*y+affine->tx; if (affine->sx >= DrawEpsilon) { intercept=(-z/affine->sx); x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z+(double) image->columns)/affine->sx; x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if (affine->sx < -DrawEpsilon) { intercept=(-z+(double) image->columns)/affine->sx; x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z/affine->sx); x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns)) { inverse_edge.x2=edge->x1; return(inverse_edge); } /* Determine top and bottom edges. 
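    The clamps below bound x so that the source row coordinate
    affine->rx*x+z remains inside [0,image->rows), mirroring the column
    clamps computed above.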
*/ z=affine->sy*y+affine->ty; if (affine->rx >= DrawEpsilon) { intercept=(-z/affine->rx); x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z+(double) image->rows)/affine->rx; x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if (affine->rx < -DrawEpsilon) { intercept=(-z+(double) image->rows)/affine->rx; x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z/affine->rx); x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows)) { inverse_edge.x2=edge->x2; return(inverse_edge); } return(inverse_edge); } static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine) { AffineMatrix inverse_affine; double determinant; determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx* affine->ry); inverse_affine.sx=determinant*affine->sy; inverse_affine.rx=determinant*(-affine->rx); inverse_affine.ry=determinant*(-affine->ry); inverse_affine.sy=determinant*affine->sx; inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty* inverse_affine.ry; inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty* inverse_affine.sy; return(inverse_affine); } MagickExport MagickBooleanType DrawAffineImage(Image *image, const Image *source,const AffineMatrix *affine,ExceptionInfo *exception) { AffineMatrix inverse_affine; CacheView *image_view, *source_view; MagickBooleanType status; PixelInfo zero; PointInfo extent[4], min, max; register ssize_t i; SegmentInfo edge; ssize_t start, stop, y; /* Determine bounding box. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(source != (const Image *) NULL); assert(source->signature == MagickCoreSignature); assert(affine != (AffineMatrix *) NULL); extent[0].x=0.0; extent[0].y=0.0; extent[1].x=(double) source->columns-1.0; extent[1].y=0.0; extent[2].x=(double) source->columns-1.0; extent[2].y=(double) source->rows-1.0; extent[3].x=0.0; extent[3].y=(double) source->rows-1.0; for (i=0; i < 4; i++) { PointInfo point; point=extent[i]; extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx; extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty; } min=extent[0]; max=extent[0]; for (i=1; i < 4; i++) { if (min.x > extent[i].x) min.x=extent[i].x; if (min.y > extent[i].y) min.y=extent[i].y; if (max.x < extent[i].x) max.x=extent[i].x; if (max.y < extent[i].y) max.y=extent[i].y; } /* Affine transform image. 
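    Each destination scanline in the clipped bounding box is inverse mapped
    into the source: AffineEdge() yields the span of x that lands inside the
    source image, and every pixel in the span is interpolated from the
    source and composited over the destination.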
*/ if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; edge.x1=MagickMax(min.x,0.0); edge.y1=MagickMax(min.y,0.0); edge.x2=MagickMin(max.x,(double) image->columns-1.0); edge.y2=MagickMin(max.y,(double) image->rows-1.0); inverse_affine=InverseAffineMatrix(affine); GetPixelInfo(image,&zero); start=(ssize_t) ceil(edge.y1-0.5); stop=(ssize_t) floor(edge.y2+0.5); source_view=AcquireVirtualCacheView(source,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(source,image,1,1) #endif for (y=start; y <= stop; y++) { PixelInfo composite, pixel; PointInfo point; register ssize_t x; register Quantum *magick_restrict q; SegmentInfo inverse_edge; ssize_t x_offset; inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge); if (inverse_edge.x2 < inverse_edge.x1) continue; q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1- 0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1), 1,exception); if (q == (Quantum *) NULL) continue; pixel=zero; composite=zero; x_offset=0; for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++) { point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+ inverse_affine.tx; point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+ inverse_affine.ty; (void) InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel, point.x,point.y,&pixel,exception); GetPixelInfoPixel(image,q,&composite); CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha, &composite); SetPixelViaPixelInfo(image,&composite,q); x_offset++; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w B o u n d i n g R e c t a n g l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawBoundingRectangles() draws the bounding rectangles on the image. This % is only useful for developers debugging the rendering algorithm. % % The format of the DrawBoundingRectangles method is: % % void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info, % PolygonInfo *polygon_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o polygon_info: Specifies a pointer to a PolygonInfo structure. % % o exception: return any errors or warnings in this structure. 
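%
%  Edges whose direction is nonzero are outlined in red, the remaining edges
%  in green, and the overall bounding box in blue (see the color queries in
%  the body below).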
% */ static void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info, const PolygonInfo *polygon_info,ExceptionInfo *exception) { DrawInfo *clone_info; double mid; PointInfo end, resolution, start; PrimitiveInfo primitive_info[6]; register ssize_t i; SegmentInfo bounds; ssize_t coordinates; clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) QueryColorCompliance("#000F",AllCompliance,&clone_info->fill, exception); resolution.x=96.0; resolution.y=96.0; if (clone_info->density != (char *) NULL) { GeometryInfo geometry_info; MagickStatusType flags; flags=ParseGeometry(clone_info->density,&geometry_info); resolution.x=geometry_info.rho; resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == MagickFalse) resolution.y=resolution.x; } mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)* clone_info->stroke_width/2.0; bounds.x1=0.0; bounds.y1=0.0; bounds.x2=0.0; bounds.y2=0.0; if (polygon_info != (PolygonInfo *) NULL) { bounds=polygon_info->edges[0].bounds; for (i=1; i < (ssize_t) polygon_info->number_edges; i++) { if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1) bounds.x1=polygon_info->edges[i].bounds.x1; if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1) bounds.y1=polygon_info->edges[i].bounds.y1; if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2) bounds.x2=polygon_info->edges[i].bounds.x2; if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2) bounds.y2=polygon_info->edges[i].bounds.y2; } bounds.x1-=mid; bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns ? (double) image->columns-1 : bounds.x1; bounds.y1-=mid; bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows ? (double) image->rows-1 : bounds.y1; bounds.x2+=mid; bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns ? (double) image->columns-1 : bounds.x2; bounds.y2+=mid; bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows ? 
(double) image->rows-1 : bounds.y2; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) { if (polygon_info->edges[i].direction != 0) (void) QueryColorCompliance("red",AllCompliance,&clone_info->stroke, exception); else (void) QueryColorCompliance("green",AllCompliance,&clone_info->stroke, exception); start.x=(double) (polygon_info->edges[i].bounds.x1-mid); start.y=(double) (polygon_info->edges[i].bounds.y1-mid); end.x=(double) (polygon_info->edges[i].bounds.x2+mid); end.y=(double) (polygon_info->edges[i].bounds.y2+mid); primitive_info[0].primitive=RectanglePrimitive; TraceRectangle(primitive_info,start,end); primitive_info[0].method=ReplaceMethod; coordinates=(ssize_t) primitive_info[0].coordinates; primitive_info[coordinates].primitive=UndefinedPrimitive; (void) DrawPrimitive(image,clone_info,primitive_info,exception); } } (void) QueryColorCompliance("blue",AllCompliance,&clone_info->stroke, exception); start.x=(double) (bounds.x1-mid); start.y=(double) (bounds.y1-mid); end.x=(double) (bounds.x2+mid); end.y=(double) (bounds.y2+mid); primitive_info[0].primitive=RectanglePrimitive; TraceRectangle(primitive_info,start,end); primitive_info[0].method=ReplaceMethod; coordinates=(ssize_t) primitive_info[0].coordinates; primitive_info[coordinates].primitive=UndefinedPrimitive; (void) DrawPrimitive(image,clone_info,primitive_info,exception); clone_info=DestroyDrawInfo(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w C l i p P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawClipPath() draws the clip path on the image mask. % % The format of the DrawClipPath method is: % % MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info, % const char *name,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o name: the name of the clip path. % % o exception: return any errors or warnings in this structure. 
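%
%  The named clip path must previously have been stored as an image artifact,
%  e.g. by an MVG sequence such as (an illustrative sketch, not taken from
%  this file):
%
%      push clip-path wedge
%      polygon 0,0 100,0 100,100
%      pop clip-path
%      clip-path wedge
%
%  DrawClipPath() looks the path up by name and renders it into the image
%  read mask.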
% */ MagickExport MagickBooleanType DrawClipPath(Image *image, const DrawInfo *draw_info,const char *name,ExceptionInfo *exception) { char filename[MagickPathExtent]; Image *clip_mask; const char *value; DrawInfo *clone_info; MagickStatusType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); (void) FormatLocaleString(filename,MagickPathExtent,"%s",name); value=GetImageArtifact(image,filename); if (value == (const char *) NULL) return(MagickFalse); clip_mask=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (clip_mask == (Image *) NULL) return(MagickFalse); (void) QueryColorCompliance("#0000",AllCompliance, &clip_mask->background_color,exception); clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha; (void) SetImageBackgroundColor(clip_mask,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s", draw_info->clip_mask); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->primitive,value); (void) QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill, exception); clone_info->clip_mask=(char *) NULL; status=NegateImage(clip_mask,MagickFalse,exception); (void) SetImageMask(image,ReadPixelMask,clip_mask,exception); clip_mask=DestroyImage(clip_mask); status&=DrawImage(image,clone_info,exception); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w D a s h P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the % image while respecting the dash offset and dash pattern attributes. % % The format of the DrawDashPolygon method is: % % MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info, % const PrimitiveInfo *primitive_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
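%
%  The dash pattern is interpreted as alternating on/off lengths, each scaled
%  by the current affine transform; dash-offset consumes pattern length
%  before the first dash is emitted.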
% */ static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info, const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception) { DrawInfo *clone_info; double length, maximum_length, offset, scale, total_length; MagickStatusType status; PrimitiveInfo *dash_polygon; register ssize_t i; register double dx, dy; size_t number_vertices; ssize_t j, n; assert(draw_info != (const DrawInfo *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash"); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ; number_vertices=(size_t) i; dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (2UL*number_vertices+1UL),sizeof(*dash_polygon)); if (dash_polygon == (PrimitiveInfo *) NULL) return(MagickFalse); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->miterlimit=0; dash_polygon[0]=primitive_info[0]; scale=ExpandAffine(&draw_info->affine); length=scale*(draw_info->dash_pattern[0]-0.5); offset=fabs(draw_info->dash_offset) >= DrawEpsilon ? scale*draw_info->dash_offset : 0.0; j=1; for (n=0; offset > 0.0; j=0) { if (draw_info->dash_pattern[n] <= 0.0) break; length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5)); if (offset > length) { offset-=length; n++; length=scale*(draw_info->dash_pattern[n]+0.5); continue; } if (offset < length) { length-=offset; offset=0.0; break; } offset=0.0; n++; } status=MagickTrue; maximum_length=0.0; total_length=0.0; for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++) { dx=primitive_info[i].point.x-primitive_info[i-1].point.x; dy=primitive_info[i].point.y-primitive_info[i-1].point.y; maximum_length=hypot((double) dx,dy); if (fabs(length) < DrawEpsilon) { n++; if (fabs(draw_info->dash_pattern[n]) < DrawEpsilon) n=0; length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5)); } for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); ) { total_length+=length; if ((n & 0x01) != 0) { dash_polygon[0]=primitive_info[0]; dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx* total_length/maximum_length); dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy* total_length/maximum_length); j=1; } else { if ((j+1) > (ssize_t) (2*number_vertices)) break; dash_polygon[j]=primitive_info[i-1]; dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx* total_length/maximum_length); dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy* total_length/maximum_length); dash_polygon[j].coordinates=1; j++; dash_polygon[0].coordinates=(size_t) j; dash_polygon[j].primitive=UndefinedPrimitive; status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception); } n++; if (fabs(draw_info->dash_pattern[n]) < DrawEpsilon) n=0; length=scale*(draw_info->dash_pattern[n]+(n == 0 ? 
-0.5 : 0.5)); } length-=(maximum_length-total_length); if ((n & 0x01) != 0) continue; dash_polygon[j]=primitive_info[i]; dash_polygon[j].coordinates=1; j++; } if ((total_length <= maximum_length) && ((n & 0x01) == 0) && (j > 1)) { dash_polygon[j]=primitive_info[i-1]; dash_polygon[j].point.x+=DrawEpsilon; dash_polygon[j].point.y+=DrawEpsilon; dash_polygon[j].coordinates=1; j++; dash_polygon[0].coordinates=(size_t) j; dash_polygon[j].primitive=UndefinedPrimitive; status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception); } dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawImage() draws a graphic primitive on your image. The primitive % may be represented as a string or filename. Precede the filename with an % "at" sign (@) and the contents of the file are drawn on the image. You % can affect how text is drawn by setting one or more members of the draw % info structure. % % The format of the DrawImage method is: % % MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType IsPoint(const char *point) { char *p; double value; value=StringToDouble(point,&p); return((fabs(value) < DrawEpsilon) && (p == point) ? 
MagickFalse : MagickTrue); } static inline void TracePoint(PrimitiveInfo *primitive_info, const PointInfo point) { primitive_info->coordinates=1; primitive_info->point=point; } MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info, ExceptionInfo *exception) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char keyword[MagickPathExtent], geometry[MagickPathExtent], *next_token, pattern[MagickPathExtent], *primitive, *token; const char *q; double angle, factor, primitive_extent; DrawInfo **graphic_context; MagickBooleanType proceed; MagickSizeType length, number_points; MagickStatusType status; PointInfo point; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; register const char *p; register ssize_t i, x; SegmentInfo bounds; size_t extent, number_stops; ssize_t j, k, n; StopInfo *stops; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if ((draw_info->primitive == (char *) NULL) || (*draw_info->primitive == '\0')) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image"); if (*draw_info->primitive != '@') primitive=AcquireString(draw_info->primitive); else primitive=FileToString(draw_info->primitive+1,~0UL,exception); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"MVG",primitive); n=0; number_stops=0; stops=(StopInfo *) NULL; /* Allocate primitive info memory. */ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=6553; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. */ GetNextToken(q,&q,MagickPathExtent,keyword); if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. 
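          A keyword beginning with '#' starts an MVG comment; everything
          through the end of the line is ignored.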
*/ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.rx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ry=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("alpha",keyword) == 0) { primitive_type=AlphaPrimitive; break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) QueryColorCompliance(token,AllCompliance, &graphic_context[n]->border_color,exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("clip-path",keyword) == 0) { /* Create clip mask. 
*/ GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->clip_mask,token); (void) DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) status=MagickFalse; else graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) status=MagickFalse; else graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->fill,exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; if (status == MagickFalse) { ImageInfo *pattern_info; pattern_info=AcquireImageInfo(); (void) CopyMagickString(pattern_info->filename,token, MagickPathExtent); graphic_context[n]->fill_pattern=ReadImage(pattern_info, exception); CatchException(exception); pattern_info=DestroyImageInfo(pattern_info); } } break; } if (LocaleCompare("fill-opacity",keyword) == 0) { GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; graphic_context[n]->fill.alpha=QuantumRange-ClampToQuantum( (MagickRealType) QuantumRange*(1.0-factor*StringToDouble(token, &next_token))); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) status=MagickFalse; else graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) status=MagickFalse; else graphic_context[n]->stretch=(StretchType) stretch; break; } if (LocaleCompare("font-style",keyword) == 0) { ssize_t style; GetNextToken(q,&q,extent,token); style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) status=MagickFalse; else graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) status=MagickFalse; else graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) status=MagickFalse; else graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("interword-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->kerning=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("line",keyword) == 0) primitive_type=LinePrimitive; else status=MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) 
== 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; graphic_context[n]->alpha=QuantumRange*(1.0-(QuantumScale* graphic_context[n]->alpha*(1.0-factor*StringToDouble(token, &next_token)))); graphic_context[n]->fill_alpha=QuantumRange*(1.0-(QuantumScale* graphic_context[n]->fill_alpha*(1.0-factor*StringToDouble(token, &next_token)))); graphic_context[n]->stroke_alpha=QuantumRange*(1.0-(QuantumScale* graphic_context[n]->stroke_alpha*(1.0-factor*StringToDouble(token, &next_token)))); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) break; if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(exception,GetMagickModule(), DrawError,"UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if (graphic_context[n]->clip_mask != (char *) NULL) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) (void) SetImageMask(image,ReadPixelMask,(Image *) NULL, exception); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("pattern",token) == 0) break; status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("clip-path",token) == 0) { char name[MagickPathExtent]; GetNextToken(q,&q,extent,token); (void) FormatLocaleString(name,MagickPathExtent,"%s",token); for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) SetImageArtifact(image,name,token); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MagickPathExtent); GetNextToken(q,&q,extent,token); segment.x1=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y1=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.x2=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y2=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; if (LocaleCompare(type,"radial") == 0) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); } for 
(p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent]; RectangleInfo pattern_bounds; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); GetNextToken(q,&q,extent,token); pattern_bounds.x=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); pattern_bounds.y=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); pattern_bounds.width=(size_t) floor(StringToDouble(token, &next_token)+0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); pattern_bounds.height=(size_t) floor(StringToDouble(token, &next_token)+0.5); if (token == next_token) status=MagickFalse; for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double)pattern_bounds.width, (double)pattern_bounds.height,(double)pattern_bounds.x, (double)pattern_bounds.y); (void) SetImageArtifact(image,key,geometry); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); break; } if (LocaleCompare("defs",token) == 0) break; status=MagickFalse; break; } status=MagickFalse; break; } 
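      /*
        "push graphic-context" above clones the current drawing state onto
        the graphic_context stack; the matching "pop graphic-context"
        destroys it, and an unbalanced pop is reported as
        UnbalancedGraphicContextPushPop.
      */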
case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("skewX",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops)); else if (number_stops > 2) stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops, sizeof(*stops)); if (stops == (StopInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } GetNextToken(q,&q,extent,token); (void) QueryColorCompliance(token,AllCompliance,&stop_color, exception); stops[number_stops-1].color=stop_color; GetNextToken(q,&q,extent,token); stops[number_stops-1].offset=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("stroke",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->stroke,exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha= graphic_context[n]->stroke_alpha; if (status == MagickFalse) { ImageInfo *pattern_info; pattern_info=AcquireImageInfo(); (void) CopyMagickString(pattern_info->filename,token, MagickPathExtent); graphic_context[n]->stroke_pattern=ReadImage(pattern_info, exception); CatchException(exception); pattern_info=DestroyImageInfo(pattern_info); } } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias= StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r=q; GetNextToken(r,&r,extent,token); if (*token == ',') GetNextToken(r,&r,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { GetNextToken(r,&r,extent,token); if (*token == ',') GetNextToken(r,&r,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2UL*x+1UL), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); status=MagickFalse; break; } for (j=0; j < x; j++) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) status=MagickFalse; else graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) status=MagickFalse; else graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; graphic_context[n]->stroke.alpha=QuantumRange-ClampToQuantum( (MagickRealType) QuantumRange*(1.0-factor*StringToDouble(token, &next_token))); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("stroke-width",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_width=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) status=MagickFalse; else graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) status=MagickFalse; else graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) QueryColorCompliance(token,AllCompliance, &graphic_context[n]->undercolor,exception); break; } if (LocaleCompare("translate",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= DrawEpsilon) || (fabs(affine.rx) >= DrawEpsilon) || (fabs(affine.ry) >= DrawEpsilon) || (fabs(affine.sy-1.0) >= DrawEpsilon) || (fabs(affine.tx) >= DrawEpsilon) || (fabs(affine.ty) >= DrawEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if 
(*q == '\0') { if (number_stops > 1) { GradientType type; type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) GradientImage(image,type,PadSpread,stops,number_stops, exception); } if (number_stops > 0) stops=(StopInfo *) RelinquishMagickMemory(stops); } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p),p); continue; } /* Parse the primitive attributes. */ i=0; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; for (x=0; *q != '\0'; x++) { /* Define points. */ if (IsPoint(q) == MagickFalse) break; GetNextToken(q,&q,extent,token); point.x=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); point.y=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; i++; if (i < (ssize_t) number_points) continue; number_points<<=1; primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info, (size_t) number_points,sizeof(*primitive_info)); if ((primitive_info == (PrimitiveInfo *) NULL) || (number_points != (MagickSizeType) ((size_t) number_points))) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].text=(char *) NULL; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. */ length=primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { length*=5; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); length*=5; length+=2*((size_t) ceil((double) MagickPI*radius))+6*BezierQuantum+360; break; } case BezierPrimitive: { if (primitive_info[j].coordinates > 107) (void) ThrowMagickException(exception,GetMagickModule(),DrawError, "TooManyBezierCoordinates","`%s'",token); length=BezierQuantum*primitive_info[j].coordinates; break; } case PathPrimitive: { char *s, *t; GetNextToken(q,&q,extent,token); length=1; t=token; for (s=token; *s != '\0'; s=t) { double value; value=StringToDouble(s,&t); (void) value; if (s == t) { t++; continue; } length++; } length=length*BezierQuantum; break; } case CirclePrimitive: case ArcPrimitive: case EllipsePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); length=2*((size_t) ceil((double) MagickPI*radius))+6*BezierQuantum+360; break; } default: break; } if ((i+length) >= number_points) { /* Resize based on speculative points required by primitive. 
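          The switch above estimated how many points tracing this primitive
          may emit; one extra slot is reserved for the terminating
          UndefinedPrimitive entry.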
*/ number_points+=length+1; primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info, (size_t) number_points,sizeof(*primitive_info)); if ((primitive_info == (PrimitiveInfo *) NULL) || (number_points != (MagickSizeType) ((size_t) number_points))) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } } switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } TraceRoundRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { primitive_type=UndefinedPrimitive; break; } TraceArc(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } TraceEllipse(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } TraceCircle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: break; case PolygonPrimitive: { primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } TraceBezier(primitive_info+j,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { i=(ssize_t) (j+TracePath(primitive_info+j,token)); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) status=MagickFalse; else primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') GetNextToken(q,&q,extent,token); primitive_info[j].text=AcquireString(token); break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); primitive_info[j].text=AcquireString(token); break; } } if (primitive_info == (PrimitiveInfo *) NULL) break; if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p),p); if (status == MagickFalse) break; 
primitive_info[i].primitive=UndefinedPrimitive; if (i == 0) continue; /* Transform points. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); status&=DrawPrimitive(image,graphic_context[n],primitive_info, exception); } if (primitive_info->text != (char *) NULL) primitive_info->text=(char *) RelinquishMagickMemory( primitive_info->text); proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. */ token=DestroyString(token); if (primitive_info != (PrimitiveInfo *) NULL) primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info); primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); if (status == MagickFalse) ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition", keyword); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G r a d i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGradientImage() draws a linear gradient on the image. % % The format of the DrawGradientImage method is: % % MagickBooleanType DrawGradientImage(Image *image, % const DrawInfo *draw_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o exception: return any errors or warnings in this structure. 
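%
%  Note that, the summary above notwithstanding, the method renders both
%  linear and radial gradients; it dispatches on the gradient type and
%  spread method recorded in draw_info.
%
%  A minimal usage sketch (illustrative only; it assumes image and draw_info
%  were prepared elsewhere and that draw_info->gradient and its stops were
%  already populated, as DrawImage() does when it parses a gradient):
%
%      ExceptionInfo
%        *exception;
%
%      exception=AcquireExceptionInfo();
%      if (DrawGradientImage(image,draw_info,exception) == MagickFalse)
%        CatchException(exception);
%      exception=DestroyExceptionInfo(exception);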
% */ static inline double GetStopColorOffset(const GradientInfo *gradient, const ssize_t x,const ssize_t y) { switch (gradient->type) { case UndefinedGradient: case LinearGradient: { double gamma, length, offset, scale; PointInfo p, q; const SegmentInfo *gradient_vector; gradient_vector=(&gradient->gradient_vector); p.x=gradient_vector->x2-gradient_vector->x1; p.y=gradient_vector->y2-gradient_vector->y1; q.x=(double) x-gradient_vector->x1; q.y=(double) y-gradient_vector->y1; length=sqrt(q.x*q.x+q.y*q.y); gamma=sqrt(p.x*p.x+p.y*p.y)*length; gamma=PerceptibleReciprocal(gamma); scale=p.x*q.x+p.y*q.y; offset=gamma*scale*length; return(offset); } case RadialGradient: { PointInfo v; if (gradient->spread == RepeatSpread) { v.x=(double) x-gradient->center.x; v.y=(double) y-gradient->center.y; return(sqrt(v.x*v.x+v.y*v.y)); } v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians( gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians( gradient->angle))))/gradient->radii.x; v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians( gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians( gradient->angle))))/gradient->radii.y; return(sqrt(v.x*v.x+v.y*v.y)); } } return(0.0); } static int StopInfoCompare(const void *x,const void *y) { StopInfo *stop_1, *stop_2; stop_1=(StopInfo *) x; stop_2=(StopInfo *) y; if (stop_1->offset > stop_2->offset) return(1); if (fabs(stop_1->offset-stop_2->offset) <= DrawEpsilon) return(0); return(-1); } MagickExport MagickBooleanType DrawGradientImage(Image *image, const DrawInfo *draw_info,ExceptionInfo *exception) { CacheView *image_view; const GradientInfo *gradient; const SegmentInfo *gradient_vector; double length; MagickBooleanType status; PixelInfo zero; PointInfo point; RectangleInfo bounding_box; ssize_t y; /* Draw linear or radial gradient on image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); gradient=(&draw_info->gradient); qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo), StopInfoCompare); gradient_vector=(&gradient->gradient_vector); point.x=gradient_vector->x2-gradient_vector->x1; point.y=gradient_vector->y2-gradient_vector->y1; length=sqrt(point.x*point.x+point.y*point.y); bounding_box=gradient->bounding_box; status=MagickTrue; GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++) { PixelInfo composite, pixel; double alpha, offset; register Quantum *magick_restrict q; register ssize_t i, x; ssize_t j; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; composite=zero; offset=GetStopColorOffset(gradient,0,y); if (gradient->type != RadialGradient) offset/=length; for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++) { GetPixelInfoPixel(image,q,&pixel); switch (gradient->spread) { case UndefinedSpread: case PadSpread: { if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) || (y != (ssize_t) ceil(gradient_vector->y1-0.5))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type != RadialGradient) offset/=length; } for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if ((offset < 0.0) || (i == 0)) composite=gradient->stops[0].color; else if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops)) composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } case ReflectSpread: { if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) || (y != (ssize_t) ceil(gradient_vector->y1-0.5))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type != RadialGradient) offset/=length; } if (offset < 0.0) offset=(-offset); if ((ssize_t) fmod(offset,2.0) == 0) offset=fmod(offset,1.0); else offset=1.0-fmod(offset,1.0); for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite=gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } case RepeatSpread: { MagickBooleanType antialias; double repeat; antialias=MagickFalse; repeat=0.0; if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) || (y != (ssize_t) ceil(gradient_vector->y1-0.5))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type == LinearGradient) { repeat=fmod(offset,length); if (repeat < 0.0) repeat=length-fmod(-repeat,length); else repeat=fmod(offset,length); antialias=(repeat < length) && ((repeat+1.0) > length) ? 
                    MagickTrue : MagickFalse;
                  offset=repeat/length;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P a t t e r n P a t h                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPatternPath() draws a pattern.
%
%  The format of the DrawPatternPath method is:
%
%      MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
%        const char *name,Image **pattern,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o name: the pattern name.
%
%    o pattern: the rendered pattern image is returned in this argument.
%
%    o exception: return any errors or warnings in this structure.
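%
%  A usage sketch (illustrative only; it assumes DrawImage() has already
%  registered "hatch" and "hatch-geometry" artifacts on the image via a push
%  pattern definition, which is where this method finds the path and the
%  pattern canvas size).  The rendered tile is typically assigned to the
%  fill_pattern or stroke_pattern member of a CloneDrawInfo() copy:
%
%      Image
%        *pattern = (Image *) NULL;
%
%      status=DrawPatternPath(image,draw_info,"hatch",&pattern,exception);
%      if (status != MagickFalse)
%        clone_info->fill_pattern=pattern;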
% */ MagickExport MagickBooleanType DrawPatternPath(Image *image, const DrawInfo *draw_info,const char *name,Image **pattern, ExceptionInfo *exception) { char property[MagickPathExtent]; const char *geometry, *path, *type; DrawInfo *clone_info; ImageInfo *image_info; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); assert(name != (const char *) NULL); (void) FormatLocaleString(property,MagickPathExtent,"%s",name); path=GetImageArtifact(image,property); if (path == (const char *) NULL) return(MagickFalse); (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name); geometry=GetImageArtifact(image,property); if (geometry == (const char *) NULL) return(MagickFalse); if ((*pattern) != (Image *) NULL) *pattern=DestroyImage(*pattern); image_info=AcquireImageInfo(); image_info->size=AcquireString(geometry); *pattern=AcquireImage(image_info,exception); image_info=DestroyImageInfo(image_info); (void) QueryColorCompliance("#000000ff",AllCompliance, &(*pattern)->background_color,exception); (void) SetImageBackgroundColor(*pattern,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), "begin pattern-path %s %s",name,geometry); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->fill_pattern=NewImageList(); clone_info->stroke_pattern=NewImageList(); (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name); type=GetImageArtifact(image,property); if (type != (const char *) NULL) clone_info->gradient.type=(GradientType) ParseCommandOption( MagickGradientOptions,MagickFalse,type); (void) CloneString(&clone_info->primitive,path); status=DrawImage(*pattern,clone_info,exception); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w P o l y g o n P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPolygonPrimitive() draws a polygon on the image. % % The format of the DrawPolygonPrimitive method is: % % MagickBooleanType DrawPolygonPrimitive(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. 
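%
%  This method is private to this module; DrawPrimitive() is the public
%  entry point.  A sketch of the internal calling convention (the
%  UndefinedPrimitive terminator is required because rendering walks the
%  primitive array until it finds one):
%
%      primitive_info[0].primitive=RectanglePrimitive;
%      TraceRectangle(primitive_info,start,end);
%      primitive_info[primitive_info[0].coordinates].primitive=
%        UndefinedPrimitive;
%      status=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);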
% */ static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info) { register ssize_t i; assert(polygon_info != (PolygonInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (polygon_info[i] != (PolygonInfo *) NULL) polygon_info[i]=DestroyPolygonInfo(polygon_info[i]); polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info); return(polygon_info); } static PolygonInfo **AcquirePolygonThreadSet( const PrimitiveInfo *primitive_info) { PathInfo *magick_restrict path_info; PolygonInfo **polygon_info; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads, sizeof(*polygon_info)); if (polygon_info == (PolygonInfo **) NULL) return((PolygonInfo **) NULL); (void) ResetMagickMemory(polygon_info,0,number_threads*sizeof(*polygon_info)); path_info=ConvertPrimitiveToPath(primitive_info); if (path_info == (PathInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); for (i=0; i < (ssize_t) number_threads; i++) { polygon_info[i]=ConvertPathToPolygon(path_info); if (polygon_info[i] == (PolygonInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); } path_info=(PathInfo *) RelinquishMagickMemory(path_info); return(polygon_info); } static double GetFillAlpha(PolygonInfo *polygon_info,const double mid, const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x, const ssize_t y,double *stroke_alpha) { double alpha, beta, distance, subpath_alpha; PointInfo delta; register const PointInfo *q; register EdgeInfo *p; register ssize_t i; ssize_t j, winding_number; /* Compute fill & stroke opacity for this (x,y) point. */ *stroke_alpha=0.0; subpath_alpha=0.0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= (p->bounds.y1-mid-0.5)) break; if ((double) y > (p->bounds.y2+mid+0.5)) { (void) DestroyEdge(polygon_info,(size_t) j); continue; } if (((double) x <= (p->bounds.x1-mid-0.5)) || ((double) x > (p->bounds.x2+mid+0.5))) continue; i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) p->number_points; i++) { if ((double) y <= (p->points[i-1].y-mid-0.5)) break; if ((double) y > (p->points[i].y+mid+0.5)) continue; if (p->scanline != (double) y) { p->scanline=(double) y; p->highwater=(size_t) i; } /* Compute distance between a point and an edge. */ q=p->points+i-1; delta.x=(q+1)->x-q->x; delta.y=(q+1)->y-q->y; beta=delta.x*(x-q->x)+delta.y*(y-q->y); if (beta < 0.0) { delta.x=(double) x-q->x; delta.y=(double) y-q->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=delta.x*delta.x+delta.y*delta.y; if (beta > alpha) { delta.x=(double) x-(q+1)->x; delta.y=(double) y-(q+1)->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=1.0/alpha; beta=delta.x*(y-q->y)-delta.y*(x-q->x); distance=alpha*beta*beta; } } /* Compute stroke & subpath opacity. 
*/ beta=0.0; if (p->ghostline == MagickFalse) { alpha=mid+0.5; if ((*stroke_alpha < 1.0) && (distance <= ((alpha+0.25)*(alpha+0.25)))) { alpha=mid-0.5; if (distance <= ((alpha+0.25)*(alpha+0.25))) *stroke_alpha=1.0; else { beta=1.0; if (fabs(distance-1.0) >= DrawEpsilon) beta=sqrt((double) distance); alpha=beta-mid-0.5; if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25))) *stroke_alpha=(alpha-0.25)*(alpha-0.25); } } } if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0)) continue; if (distance <= 0.0) { subpath_alpha=1.0; continue; } if (distance > 1.0) continue; if (fabs(beta) < DrawEpsilon) { beta=1.0; if (fabs(distance-1.0) >= DrawEpsilon) beta=sqrt(distance); } alpha=beta-1.0; if (subpath_alpha < (alpha*alpha)) subpath_alpha=alpha*alpha; } } /* Compute fill opacity. */ if (fill == MagickFalse) return(0.0); if (subpath_alpha >= 1.0) return(1.0); /* Determine winding number. */ winding_number=0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= p->bounds.y1) break; if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1)) continue; if ((double) x > p->bounds.x2) { winding_number+=p->direction ? 1 : -1; continue; } i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) p->number_points; i++) if ((double) y <= p->points[i].y) break; q=p->points+i-1; if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x))) winding_number+=p->direction ? 1 : -1; } if (fill_rule != NonZeroRule) { if ((MagickAbsoluteValue(winding_number) & 0x01) != 0) return(1.0); } else if (MagickAbsoluteValue(winding_number) != 0) return(1.0); return(subpath_alpha); } static MagickBooleanType DrawPolygonPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType fill, status; double mid; PolygonInfo **magick_restrict polygon_info; register EdgeInfo *p; register ssize_t i; SegmentInfo bounds; ssize_t start_y, stop_y, y; /* Compute bounding box. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); assert(primitive_info != (PrimitiveInfo *) NULL); if (primitive_info->coordinates == 0) return(MagickTrue); polygon_info=AcquirePolygonThreadSet(primitive_info); if (polygon_info == (PolygonInfo **) NULL) return(MagickFalse); DisableMSCWarning(4127) if (0) DrawBoundingRectangles(image,draw_info,polygon_info[0],exception); RestoreMSCWarning if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon"); fill=(primitive_info->method == FillToBorderMethod) || (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse; mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; bounds=polygon_info[0]->edges[0].bounds; for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++) { p=polygon_info[0]->edges+i; if (p->bounds.x1 < bounds.x1) bounds.x1=p->bounds.x1; if (p->bounds.y1 < bounds.y1) bounds.y1=p->bounds.y1; if (p->bounds.x2 > bounds.x2) bounds.x2=p->bounds.x2; if (p->bounds.y2 > bounds.y2) bounds.y2=p->bounds.y2; } bounds.x1-=(mid+1.0); bounds.x1=bounds.x1 < 0.0 ? 0.0 : (size_t) ceil(bounds.x1-0.5) >= image->columns ? (double) image->columns-1 : bounds.x1; bounds.y1-=(mid+1.0); bounds.y1=bounds.y1 < 0.0 ? 
0.0 : (size_t) ceil(bounds.y1-0.5) >= image->rows ? (double) image->rows-1 : bounds.y1; bounds.x2+=(mid+1.0); bounds.x2=bounds.x2 < 0.0 ? 0.0 : (size_t) floor(bounds.x2+0.5) >= image->columns ? (double) image->columns-1 : bounds.x2; bounds.y2+=(mid+1.0); bounds.y2=bounds.y2 < 0.0 ? 0.0 : (size_t) floor(bounds.y2+0.5) >= image->rows ? (double) image->rows-1 : bounds.y2; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); if ((primitive_info->coordinates == 1) || (polygon_info[0]->number_edges == 0)) { /* Draw point. */ start_y=(ssize_t) ceil(bounds.y1-0.5); stop_y=(ssize_t) floor(bounds.y2+0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (y=start_y; y <= stop_y; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=(ssize_t) ceil(bounds.x1-0.5); stop_x=(ssize_t) floor(bounds.x2+0.5); x=start_x; q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for ( ; x <= stop_x; x++) { if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) && (y == (ssize_t) ceil(primitive_info->point.y-0.5))) { GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-polygon"); return(status); } /* Draw polygon or line. */ if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); start_y=(ssize_t) ceil(bounds.y1-0.5); stop_y=(ssize_t) floor(bounds.y2+0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (y=start_y; y <= stop_y; y++) { const int id = GetOpenMPThreadId(); double fill_alpha, stroke_alpha; PixelInfo fill_color, stroke_color; register Quantum *magick_restrict q; register ssize_t x; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=(ssize_t) ceil(bounds.x1-0.5); stop_x=(ssize_t) floor(bounds.x2+0.5); q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+ 1),1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=start_x; x <= stop_x; x++) { /* Fill and/or stroke. */ fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule, x,y,&stroke_alpha); if (draw_info->stroke_antialias == MagickFalse) { fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0; stroke_alpha=stroke_alpha > 0.25 ? 
1.0 : 0.0; } GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception); fill_alpha=fill_alpha*fill_color.alpha; CompositePixelOver(image,&fill_color,fill_alpha,q,(double) GetPixelAlpha(image,q),q); GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception); stroke_alpha=stroke_alpha*stroke_color.alpha; CompositePixelOver(image,&stroke_color,stroke_alpha,q,(double) GetPixelAlpha(image,q),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image. % % The format of the DrawPrimitive method is: % % MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info, % PrimitiveInfo *primitive_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. % */ static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info) { const char *methods[] = { "point", "replace", "floodfill", "filltoborder", "reset", "?" }; PointInfo p, q, point; register ssize_t i, x; ssize_t coordinates, y; x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); switch (primitive_info->primitive) { case AlphaPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ImagePrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ImagePrimitive %.20g,%.20g",(double) x,(double) y); return; } case PointPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "PointPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case TextPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "TextPrimitive %.20g,%.20g",(double) x,(double) y); return; } default: break; } coordinates=0; p=primitive_info[0].point; q.x=(-1.0); q.y=(-1.0); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; if (coordinates <= 0) { coordinates=(ssize_t) primitive_info[i].coordinates; (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin open (%.20g)",(double) coordinates); p=point; } point=primitive_info[i].point; if ((fabs(q.x-point.x) >= DrawEpsilon) || (fabs(q.y-point.y) >= DrawEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y); else (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y); q=point; coordinates--; if (coordinates > 0) continue; if ((fabs(p.x-point.x) >= DrawEpsilon) || (fabs(p.y-point.y) >= DrawEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)", (double) 
coordinates); else (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)", (double) coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; MagickStatusType status; register ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-primitive"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx, draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy, draw_info->affine.tx,draw_info->affine.ty); } if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) || (IsPixelInfoGray(&draw_info->stroke) == MagickFalse))) (void) SetImageColorspace(image,sRGBColorspace,exception); status=MagickTrue; x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); image_view=AcquireAuthenticCacheView(image,exception); switch (primitive_info->primitive) { case AlphaPrimitive: { if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); (void) SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelInfo pixel, target; (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { ChannelType channel_mask; PixelInfo target; (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } channel_mask=SetImageChannelMask(image,AlphaChannel); status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? 
MagickFalse : MagickTrue,exception); (void) SetImageChannelMask(image,channel_mask); break; } case ResetMethod: { MagickBooleanType sync; PixelInfo pixel; for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } } break; } case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetPixelInfo(image,&pixel); GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); (void) SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelInfo pixel, target; (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { PixelInfo target; (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? 
MagickFalse : MagickTrue,exception); break; } case ResetMethod: { MagickBooleanType sync; PixelInfo pixel; GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MagickPathExtent]; Image *composite_image; ImageInfo *clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *) NULL) break; clone_info=AcquireImageInfo(); if (LocaleNCompare(primitive_info->text,"data:",5) == 0) composite_image=ReadInlineImage(clone_info,primitive_info->text, exception); else { (void) CopyMagickString(clone_info->filename,primitive_info->text, MagickPathExtent); composite_image=ReadImage(clone_info,exception); } clone_info=DestroyImageInfo(clone_info); if (composite_image == (Image *) NULL) break; (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor) NULL,(void *) NULL); x1=(ssize_t) ceil(primitive_info[1].point.x-0.5); y1=(ssize_t) ceil(primitive_info[1].point.y-0.5); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { /* Resize image. */ (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y); composite_image->filter=image->filter; (void) TransformImage(&composite_image,(char *) NULL, composite_geometry,exception); } if (composite_image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel, exception); if (draw_info->alpha != OpaqueAlpha) (void) SetImageAlpha(composite_image,draw_info->alpha,exception); SetGeometry(image,&geometry); image->gravity=draw_info->gravity; geometry.x=x; geometry.y=y; (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double) composite_image->rows,(double) geometry.x,(double) geometry.y); (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception); affine=draw_info->affine; affine.tx=(double) geometry.x; affine.ty=(double) geometry.y; composite_image->interpolate=image->interpolate; if (draw_info->compose == OverCompositeOp) (void) DrawAffineImage(image,composite_image,&affine,exception); else (void) CompositeImage(image,composite_image,draw_info->compose, MagickTrue,geometry.x,geometry.y,exception); composite_image=DestroyImage(composite_image); break; } case PointPrimitive: { PixelInfo fill_color; register Quantum *q; if ((y < 0) || (y >= (ssize_t) image->rows)) break; if ((x < 0) || (x >= (ssize_t) image->columns)) break; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&fill_color,exception); CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q, (double) GetPixelAlpha(image,q),q); (void) SyncCacheViewAuthenticPixels(image_view,exception); break; } case TextPrimitive: { char geometry[MagickPathExtent]; DrawInfo *clone_info; if (primitive_info->text == (char *) NULL) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->text,primitive_info->text); 
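      /*
        Text is rendered by delegation: the primitive's anchor point is
        encoded as a +x+y geometry string on the cloned DrawInfo and handed
        to AnnotateImage(), which performs the actual typesetting.
      */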
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); (void) CloneString(&clone_info->geometry,geometry); status&=AnnotateImage(image,clone_info,exception); clone_info=DestroyDrawInfo(clone_info); break; } default: { double mid, scale; DrawInfo *clone_info; if (IsEventLogging() != MagickFalse) LogPrimitiveInfo(primitive_info); scale=ExpandAffine(&draw_info->affine); if ((draw_info->dash_pattern != (double *) NULL) && (fabs(draw_info->dash_pattern[0]) >= DrawEpsilon) && (fabs(scale*draw_info->stroke_width) >= DrawEpsilon) && (draw_info->stroke.alpha != (Quantum) TransparentAlpha)) { /* Draw dash polygon. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); (void) DrawDashPolygon(draw_info,primitive_info,image,exception); break; } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; if ((mid > 1.0) && ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) || (draw_info->stroke_pattern != (Image *) NULL))) { MagickBooleanType closed_path; /* Draw strokes while respecting line cap/join attributes. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ; closed_path= (fabs(primitive_info[i-1].point.x-primitive_info[0].point.x) < DrawEpsilon) && (fabs(primitive_info[i-1].point.y-primitive_info[0].point.y) < DrawEpsilon) ? MagickTrue : MagickFalse; i=(ssize_t) primitive_info[0].coordinates; if ((((draw_info->linecap == RoundCap) || (closed_path != MagickFalse)) && (draw_info->linejoin == RoundJoin)) || (primitive_info[i].primitive != UndefinedPrimitive)) { (void) DrawPolygonPrimitive(image,draw_info,primitive_info, exception); break; } clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); status&=DrawStrokePolygon(image,draw_info,primitive_info,exception); break; } status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception); break; } } image_view=DestroyCacheView(image_view); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w S t r o k e P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on % the image while respecting the line cap and join attributes. % % The format of the DrawStrokePolygon method is: % % MagickBooleanType DrawStrokePolygon(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. 
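%
%  Internally, DrawPrimitive() renders a wide stroke in two passes: it first
%  fills the shape with a clone whose stroke has been made transparent, then
%  calls this method to trace the stroke outline as polygons and fill those.
%  A sketch of that sequence:
%
%      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
%      clone_info->stroke_width=0.0;
%      clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
%      status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
%        exception);
%      clone_info=DestroyDrawInfo(clone_info);
%      status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);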
% % */ static void DrawRoundLinecap(Image *image,const DrawInfo *draw_info, const PrimitiveInfo *primitive_info,ExceptionInfo *exception) { PrimitiveInfo linecap[5]; register ssize_t i; for (i=0; i < 4; i++) linecap[i]=(*primitive_info); linecap[0].coordinates=4; linecap[1].point.x+=2.0*DrawEpsilon; linecap[2].point.x+=2.0*DrawEpsilon; linecap[2].point.y+=2.0*DrawEpsilon; linecap[3].point.y+=2.0*DrawEpsilon; linecap[4].primitive=UndefinedPrimitive; (void) DrawPolygonPrimitive(image,draw_info,linecap,exception); } static MagickBooleanType DrawStrokePolygon(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { DrawInfo *clone_info; MagickBooleanType closed_path; MagickStatusType status; PrimitiveInfo *stroke_polygon; register const PrimitiveInfo *p, *q; /* Draw stroked polygon. */ if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-stroke-polygon"); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->fill=draw_info->stroke; if (clone_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern); if (clone_info->stroke_pattern != (Image *) NULL) clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0, MagickTrue,exception); clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; clone_info->stroke_width=0.0; clone_info->fill_rule=NonZeroRule; status=MagickTrue; for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates) { stroke_polygon=TraceStrokePolygon(draw_info,p); status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception); if (status == 0) break; stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon); q=p+p->coordinates-1; closed_path=(fabs(q->point.x-p->point.x) < DrawEpsilon) && (fabs(q->point.y-p->point.y) < DrawEpsilon) ? MagickTrue : MagickFalse; if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse)) { DrawRoundLinecap(image,draw_info,p,exception); DrawRoundLinecap(image,draw_info,q,exception); } } clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-stroke-polygon"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A f f i n e M a t r i x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAffineMatrix() returns an AffineMatrix initialized to the identity % matrix. % % The format of the GetAffineMatrix method is: % % void GetAffineMatrix(AffineMatrix *affine_matrix) % % A description of each parameter follows: % % o affine_matrix: the affine matrix. % */ MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(affine_matrix != (AffineMatrix *) NULL); (void) ResetMagickMemory(affine_matrix,0,sizeof(*affine_matrix)); affine_matrix->sx=1.0; affine_matrix->sy=1.0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetDrawInfo() initializes draw_info to default values from image_info. % % The format of the GetDrawInfo method is: % % void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info) % % A description of each parameter follows: % % o image_info: the image info.. 
% % o draw_info: the draw info. % */ MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info) { char *next_token; const char *option; ExceptionInfo *exception; ImageInfo *clone_info; /* Initialize draw attributes. */ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(draw_info != (DrawInfo *) NULL); (void) ResetMagickMemory(draw_info,0,sizeof(*draw_info)); clone_info=CloneImageInfo(image_info); GetAffineMatrix(&draw_info->affine); exception=AcquireExceptionInfo(); (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill, exception); (void) QueryColorCompliance("#0000",AllCompliance,&draw_info->stroke, exception); draw_info->stroke_width=1.0; draw_info->fill_rule=EvenOddRule; draw_info->alpha=OpaqueAlpha; draw_info->fill_alpha=OpaqueAlpha; draw_info->stroke_alpha=OpaqueAlpha; draw_info->linecap=ButtCap; draw_info->linejoin=MiterJoin; draw_info->miterlimit=10; draw_info->decorate=NoDecoration; draw_info->pointsize=12.0; draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha; draw_info->compose=OverCompositeOp; draw_info->render=MagickTrue; draw_info->debug=IsEventLogging(); draw_info->stroke_antialias=clone_info->antialias; if (clone_info->font != (char *) NULL) draw_info->font=AcquireString(clone_info->font); if (clone_info->density != (char *) NULL) draw_info->density=AcquireString(clone_info->density); draw_info->text_antialias=clone_info->antialias; if (fabs(clone_info->pointsize) >= DrawEpsilon) draw_info->pointsize=clone_info->pointsize; draw_info->border_color=clone_info->border_color; if (clone_info->server_name != (char *) NULL) draw_info->server_name=AcquireString(clone_info->server_name); option=GetImageOption(clone_info,"direction"); if (option != (const char *) NULL) draw_info->direction=(DirectionType) ParseCommandOption( MagickDirectionOptions,MagickFalse,option); else draw_info->direction=UndefinedDirection; option=GetImageOption(clone_info,"encoding"); if (option != (const char *) NULL) (void) CloneString(&draw_info->encoding,option); option=GetImageOption(clone_info,"family"); if (option != (const char *) NULL) (void) CloneString(&draw_info->family,option); option=GetImageOption(clone_info,"fill"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill, exception); option=GetImageOption(clone_info,"gravity"); if (option != (const char *) NULL) draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,option); option=GetImageOption(clone_info,"interline-spacing"); if (option != (const char *) NULL) draw_info->interline_spacing=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"interword-spacing"); if (option != (const char *) NULL) draw_info->interword_spacing=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"kerning"); if (option != (const char *) NULL) draw_info->kerning=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"stroke"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke, exception); option=GetImageOption(clone_info,"strokewidth"); if (option != (const char *) NULL) draw_info->stroke_width=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"style"); if (option != (const char *) NULL) draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions, MagickFalse,option); option=GetImageOption(clone_info,"undercolor"); if (option != (const char *) NULL) (void) 
    QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r m u t a t e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the binomial coefficient C(n,k), i.e. the number of
%  ways k items can be chosen from a set of n; TraceBezier() uses these
%  values as the Bernstein polynomial coefficients of a Bezier curve.
%
%  The format of the Permutate method is:
%
%      double Permutate(ssize_t n,ssize_t k)
%
%  A description of each parameter follows:
%
%    o n: the size of the set.
%
%    o k: the number of items chosen from the set.
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    r;

  register ssize_t
    i;

  r=1.0;
  for (i=k+1; i <= n; i++)
    r*=i;
  for (i=1; i <= (n-k); i++)
    r/=i;
  return(r);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a c e P r i m i t i v e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TracePrimitive is a collection of methods for generating graphic
%  primitives such as arcs, ellipses, paths, etc.
%
*/

static void TraceArc(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radii;

  center.x=0.5*(end.x+start.x);
  center.y=0.5*(end.y+start.y);
  radii.x=fabs(center.x-start.x);
  radii.y=fabs(center.y-start.y);
  TraceEllipse(primitive_info,center,radii,degrees);
}

static void TraceArcPath(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  if ((fabs(start.x-end.x) < DrawEpsilon) &&
      (fabs(start.y-end.y) < DrawEpsilon))
    {
      TracePoint(primitive_info,end);
      return;
    }
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((fabs(radii.x) < DrawEpsilon) || (fabs(radii.y) < DrawEpsilon))
    {
      TraceLine(primitive_info,start,end);
      return;
    }
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < DrawEpsilon)
    {
      TraceLine(primitive_info,start,end);
      return;
    }
  if (delta > 1.0)
    {
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
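  /*
    The code above is the standard endpoint-to-center conversion for an
    SVG-style elliptical arc.  Below, the sweep is measured with atan2(),
    split into segments of at most a quarter turn, and each segment is
    approximated by a cubic Bezier whose control-point distance (gamma)
    follows from the segment's half-angle.
  */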
alpha=atan2(points[0].y-center.y,points[0].x-center.x); theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha; if ((theta < 0.0) && (sweep != MagickFalse)) theta+=2.0*MagickPI; else if ((theta > 0.0) && (sweep == MagickFalse)) theta-=2.0*MagickPI; arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+DrawEpsilon)))); p=primitive_info; for (i=0; i < (ssize_t) arc_segments; i++) { beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments)); gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))* sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/ sin(fmod((double) beta,DegreesToRadians(360.0))); points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x; p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y; (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y* points[0].y); (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y* points[0].y); (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y* points[1].y); (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y* points[1].y); (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y* points[2].y); (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y* points[2].y); if (i == (ssize_t) (arc_segments-1)) (p+3)->point=end; TraceBezier(p,4); p+=p->coordinates; } primitive_info->coordinates=(size_t) (p-primitive_info); for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } } static void TraceBezier(PrimitiveInfo *primitive_info, const size_t number_coordinates) { double alpha, *coefficients, weight; PointInfo end, point, *points; register PrimitiveInfo *p; register ssize_t i, j; size_t control_points, quantum; /* Allocate coeficients. */ quantum=number_coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { for (j=i+1; j < (ssize_t) number_coordinates; j++) { alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x); if (alpha > (double) quantum) quantum=(size_t) alpha; alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y); if (alpha > (double) quantum) quantum=(size_t) alpha; } } quantum=(size_t) MagickMin((double) quantum/number_coordinates, (double) BezierQuantum); control_points=quantum*number_coordinates; coefficients=(double *) AcquireQuantumMemory((size_t) number_coordinates,sizeof(*coefficients)); points=(PointInfo *) AcquireQuantumMemory((size_t) control_points, sizeof(*points)); if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL)) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); /* Compute bezier points. 
*/ end=primitive_info[number_coordinates-1].point; for (i=0; i < (ssize_t) number_coordinates; i++) coefficients[i]=Permutate((ssize_t) number_coordinates-1,i); weight=0.0; for (i=0; i < (ssize_t) control_points; i++) { p=primitive_info; point.x=0.0; point.y=0.0; alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0); for (j=0; j < (ssize_t) number_coordinates; j++) { point.x+=alpha*coefficients[j]*p->point.x; point.y+=alpha*coefficients[j]*p->point.y; alpha*=weight/(1.0-weight); p++; } points[i]=point; weight+=1.0/control_points; } /* Bezier curves are just short segmented polys. */ p=primitive_info; for (i=0; i < (ssize_t) control_points; i++) { TracePoint(p,points[i]); p+=p->coordinates; } TracePoint(p,end); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); } static void TraceCircle(PrimitiveInfo *primitive_info,const PointInfo start, const PointInfo end) { double alpha, beta, radius; PointInfo offset, degrees; alpha=end.x-start.x; beta=end.y-start.y; radius=hypot((double) alpha,(double) beta); offset.x=(double) radius; offset.y=(double) radius; degrees.x=0.0; degrees.y=360.0; TraceEllipse(primitive_info,start,offset,degrees); } static void TraceEllipse(PrimitiveInfo *primitive_info,const PointInfo start, const PointInfo stop,const PointInfo degrees) { double delta, step, y; PointInfo angle, point; register PrimitiveInfo *p; register ssize_t i; /* Ellipses are just short segmented polys. */ if ((fabs(stop.x) < DrawEpsilon) && (fabs(stop.y) < DrawEpsilon)) { TracePoint(primitive_info,start); return; } delta=2.0/MagickMax(stop.x,stop.y); step=MagickPI/8.0; if ((delta >= 0.0) && (delta < (MagickPI/8.0))) step=MagickPI/(4*(MagickPI/delta/2+0.5)); angle.x=DegreesToRadians(degrees.x); y=degrees.y; while (y < degrees.x) y+=360.0; angle.y=DegreesToRadians(y); for (p=primitive_info; angle.x < angle.y; angle.x+=step) { point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*stop.x+start.x; point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*stop.y+start.y; TracePoint(p,point); p+=p->coordinates; } point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*stop.x+start.x; point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*stop.y+start.y; TracePoint(p,point); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } } static void TraceLine(PrimitiveInfo *primitive_info,const PointInfo start, const PointInfo end) { TracePoint(primitive_info,start); if ((fabs(start.x-end.x) < DrawEpsilon) && (fabs(start.y-end.y) < DrawEpsilon)) { primitive_info->primitive=PointPrimitive; primitive_info->coordinates=1; return; } TracePoint(primitive_info+1,end); (primitive_info+1)->primitive=primitive_info->primitive; primitive_info->coordinates=2; } static size_t TracePath(PrimitiveInfo *primitive_info,const char *path) { char *next_token, token[MagickPathExtent]; const char *p; double x, y; int attribute, last_attribute; PointInfo end = {0.0, 0.0}, points[4] = { {0.0,0.0}, {0.0,0.0}, {0.0,0.0}, {0.0,0.0} }, point = {0.0, 0.0}, start = {0.0, 0.0}; PrimitiveType primitive_type; register PrimitiveInfo *q; register ssize_t i; size_t number_coordinates, z_count; attribute=0; number_coordinates=0; z_count=0; primitive_type=primitive_info->primitive; 
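  /*
    Walk the SVG-style path string: each alphabetic attribute selects a
    command (M/m moveto, L/l lineto, H/h and V/v axis-aligned lineto,
    C/c and S/s cubic Bezier, Q/q and T/t quadratic Bezier, A/a elliptical
    arc, Z/z closepath).  Upper-case commands take absolute coordinates;
    lower-case commands are relative to the current point.
  */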
q=primitive_info; for (p=path; *p != '\0'; ) { while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == '\0') break; last_attribute=attribute; attribute=(int) (*p++); switch (attribute) { case 'a': case 'A': { double angle; MagickBooleanType large_arc, sweep; PointInfo arc; /* Compute arc points. */ do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); arc.x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); arc.y=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); angle=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse; GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse; GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); end.x=(double) (attribute == (int) 'A' ? x : point.x+x); end.y=(double) (attribute == (int) 'A' ? y : point.y+y); TraceArcPath(q,point,end,arc,angle,large_arc,sweep); q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'c': case 'C': { /* Compute bezier points. */ do { points[0]=point; for (i=1; i < 4; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); end.x=(double) (attribute == (int) 'C' ? x : point.x+x); end.y=(double) (attribute == (int) 'C' ? y : point.y+y); points[i]=end; } for (i=0; i < 4; i++) (q+i)->point=points[i]; TraceBezier(q,4); q+=q->coordinates; point=end; } while (IsPoint(p) != MagickFalse); break; } case 'H': case 'h': { do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); point.x=(double) (attribute == (int) 'H' ? x: point.x+x); TracePoint(q,point); q+=q->coordinates; } while (IsPoint(p) != MagickFalse); break; } case 'l': case 'L': { do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); point.x=(double) (attribute == (int) 'L' ? x : point.x+x); point.y=(double) (attribute == (int) 'L' ? 
y : point.y+y); TracePoint(q,point); q+=q->coordinates; } while (IsPoint(p) != MagickFalse); break; } case 'M': case 'm': { if (q != primitive_info) { primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; primitive_info=q; } i=0; do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); point.x=(double) (attribute == (int) 'M' ? x : point.x+x); point.y=(double) (attribute == (int) 'M' ? y : point.y+y); if (i == 0) start=point; i++; TracePoint(q,point); q+=q->coordinates; if ((i != 0) && (attribute == (int) 'M')) { TracePoint(q,point); q+=q->coordinates; } } while (IsPoint(p) != MagickFalse); break; } case 'q': case 'Q': { /* Compute bezier points. */ do { points[0]=point; for (i=1; i < 3; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (*p == ',') p++; end.x=(double) (attribute == (int) 'Q' ? x : point.x+x); end.y=(double) (attribute == (int) 'Q' ? y : point.y+y); points[i]=end; } for (i=0; i < 3; i++) (q+i)->point=points[i]; TraceBezier(q,3); q+=q->coordinates; point=end; } while (IsPoint(p) != MagickFalse); break; } case 's': case 'S': { /* Compute bezier points. */ do { points[0]=points[3]; points[1].x=2.0*points[3].x-points[2].x; points[1].y=2.0*points[3].y-points[2].y; for (i=2; i < 4; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (*p == ',') p++; end.x=(double) (attribute == (int) 'S' ? x : point.x+x); end.y=(double) (attribute == (int) 'S' ? y : point.y+y); points[i]=end; } if (strchr("CcSs",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 4; i++) (q+i)->point=points[i]; TraceBezier(q,4); q+=q->coordinates; point=end; } while (IsPoint(p) != MagickFalse); break; } case 't': case 'T': { /* Compute bezier points. */ do { points[0]=points[2]; points[1].x=2.0*points[2].x-points[1].x; points[1].y=2.0*points[2].y-points[1].y; for (i=2; i < 3; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); end.x=(double) (attribute == (int) 'T' ? x : point.x+x); end.y=(double) (attribute == (int) 'T' ? y : point.y+y); points[i]=end; } if (strchr("QqTt",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 3; i++) (q+i)->point=points[i]; TraceBezier(q,3); q+=q->coordinates; point=end; } while (IsPoint(p) != MagickFalse); break; } case 'v': case 'V': { do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); point.y=(double) (attribute == (int) 'V' ? 
y : point.y+y); TracePoint(q,point); q+=q->coordinates; } while (IsPoint(p) != MagickFalse); break; } case 'z': case 'Z': { point=start; TracePoint(q,point); q+=q->coordinates; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; primitive_info=q; z_count++; break; } default: { if (isalpha((int) ((unsigned char) attribute)) != 0) (void) FormatLocaleFile(stderr,"attribute not recognized: %c\n", attribute); break; } } } primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { q--; q->primitive=primitive_type; if (z_count > 1) q->method=FillToBorderMethod; } q=primitive_info; return(number_coordinates); } static void TraceRectangle(PrimitiveInfo *primitive_info,const PointInfo start, const PointInfo end) { PointInfo point; register PrimitiveInfo *p; register ssize_t i; p=primitive_info; TracePoint(p,start); p+=p->coordinates; point.x=start.x; point.y=end.y; TracePoint(p,point); p+=p->coordinates; TracePoint(p,end); p+=p->coordinates; point.x=end.x; point.y=start.y; TracePoint(p,point); p+=p->coordinates; TracePoint(p,start); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } } static void TraceRoundRectangle(PrimitiveInfo *primitive_info, const PointInfo start,const PointInfo end,PointInfo arc) { PointInfo degrees, offset, point; register PrimitiveInfo *p; register ssize_t i; p=primitive_info; offset.x=fabs(end.x-start.x); offset.y=fabs(end.y-start.y); if (arc.x > (0.5*offset.x)) arc.x=0.5*offset.x; if (arc.y > (0.5*offset.y)) arc.y=0.5*offset.y; point.x=start.x+offset.x-arc.x; point.y=start.y+arc.y; degrees.x=270.0; degrees.y=360.0; TraceEllipse(p,point,arc,degrees); p+=p->coordinates; point.x=start.x+offset.x-arc.x; point.y=start.y+offset.y-arc.y; degrees.x=0.0; degrees.y=90.0; TraceEllipse(p,point,arc,degrees); p+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+offset.y-arc.y; degrees.x=90.0; degrees.y=180.0; TraceEllipse(p,point,arc,degrees); p+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+arc.y; degrees.x=180.0; degrees.y=270.0; TraceEllipse(p,point,arc,degrees); p+=p->coordinates; TracePoint(p,primitive_info->point); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } } static void TraceSquareLinecap(PrimitiveInfo *primitive_info, const size_t number_vertices,const double offset) { double distance; register double dx, dy; register ssize_t i; ssize_t j; dx=0.0; dy=0.0; for (i=1; i < (ssize_t) number_vertices; i++) { dx=primitive_info[0].point.x-primitive_info[i].point.x; dy=primitive_info[0].point.y-primitive_info[i].point.y; if ((fabs((double) dx) >= DrawEpsilon) || (fabs((double) dy) >= DrawEpsilon)) break; } if (i == (ssize_t) number_vertices) i=(ssize_t) number_vertices-1L; distance=hypot((double) dx,(double) dy); primitive_info[0].point.x=(double) (primitive_info[i].point.x+ dx*(distance+offset)/distance); primitive_info[0].point.y=(double) (primitive_info[i].point.y+ dy*(distance+offset)/distance); for (j=(ssize_t) number_vertices-2; j >= 0; j--) { dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x; dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y; if ((fabs((double) dx) >= DrawEpsilon) || (fabs((double) dy) >= 
DrawEpsilon)) break; } distance=hypot((double) dx,(double) dy); primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+ dx*(distance+offset)/distance); primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+ dy*(distance+offset)/distance); } static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info, const PrimitiveInfo *primitive_info) { typedef struct _LineSegment { double p, q; } LineSegment; double delta_theta, dot_product, mid, miterlimit; LineSegment dx, dy, inverse_slope, slope, theta; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *path_p, *path_q; PrimitiveInfo *polygon_primitive, *stroke_polygon; register ssize_t i; size_t arc_segments, max_strokes, number_vertices; ssize_t j, n, p, q; /* Allocate paths. */ number_vertices=primitive_info->coordinates; max_strokes=2*number_vertices+6*BezierQuantum+360; path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_p)); path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_q)); polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices+2UL,sizeof(*polygon_primitive)); if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL) || (polygon_primitive == (PrimitiveInfo *) NULL)) return((PrimitiveInfo *) NULL); (void) CopyMagickMemory(polygon_primitive,primitive_info,(size_t) number_vertices*sizeof(*polygon_primitive)); closed_path= (fabs(primitive_info[number_vertices-1].point.x-primitive_info[0].point.x) < DrawEpsilon) && (fabs(primitive_info[number_vertices-1].point.y-primitive_info[0].point.y) < DrawEpsilon) ? MagickTrue : MagickFalse; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices]=primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive=UndefinedPrimitive; /* Compute the slope for the first line segment, p. */ dx.p=0.0; dy.p=0.0; for (n=1; n < (ssize_t) number_vertices; n++) { dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x; dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y; if ((fabs(dx.p) >= DrawEpsilon) || (fabs(dy.p) >= DrawEpsilon)) break; } if (n == (ssize_t) number_vertices) n=(ssize_t) number_vertices-1L; slope.p=0.0; inverse_slope.p=0.0; if (fabs(dx.p) < DrawEpsilon) { if (dx.p >= 0.0) slope.p=dy.p < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon; else slope.p=dy.p < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon; } else if (fabs(dy.p) < DrawEpsilon) { if (dy.p >= 0.0) inverse_slope.p=dx.p < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon; else inverse_slope.p=dx.p < 0.0 ? 
1.0/DrawEpsilon : -1.0/DrawEpsilon; } else { slope.p=dy.p/dx.p; inverse_slope.p=(-1.0/slope.p); } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) TraceSquareLinecap(polygon_primitive,number_vertices,mid); offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0))); offset.y=(double) (offset.x*inverse_slope.p); if ((dy.p*offset.x-dx.p*offset.y) > 0.0) { box_p[0].x=polygon_primitive[0].point.x-offset.x; box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p; box_p[1].x=polygon_primitive[n].point.x-offset.x; box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p; box_q[0].x=polygon_primitive[0].point.x+offset.x; box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p; box_q[1].x=polygon_primitive[n].point.x+offset.x; box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p; } else { box_p[0].x=polygon_primitive[0].point.x+offset.x; box_p[0].y=polygon_primitive[0].point.y+offset.y; box_p[1].x=polygon_primitive[n].point.x+offset.x; box_p[1].y=polygon_primitive[n].point.y+offset.y; box_q[0].x=polygon_primitive[0].point.x-offset.x; box_q[0].y=polygon_primitive[0].point.y-offset.y; box_q[1].x=polygon_primitive[n].point.x-offset.x; box_q[1].y=polygon_primitive[n].point.y-offset.y; } /* Create strokes for the line join attribute: bevel, miter, round. */ p=0; q=0; path_q[p++]=box_q[0]; path_p[q++]=box_p[0]; for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++) { /* Compute the slope for this line segment, q. */ dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x; dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y; dot_product=dx.q*dx.q+dy.q*dy.q; if (dot_product < 0.25) continue; slope.q=0.0; inverse_slope.q=0.0; if (fabs(dx.q) < DrawEpsilon) { if (dx.q >= 0.0) slope.q=dy.q < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon; else slope.q=dy.q < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon; } else if (fabs(dy.q) < DrawEpsilon) { if (dy.q >= 0.0) inverse_slope.q=dx.q < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon; else inverse_slope.q=dx.q < 0.0 ? 
1.0/DrawEpsilon : -1.0/DrawEpsilon; } else { slope.q=dy.q/dx.q; inverse_slope.q=(-1.0/slope.q); } offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0))); offset.y=(double) (offset.x*inverse_slope.q); dot_product=dy.q*offset.x-dx.q*offset.y; if (dot_product > 0.0) { box_p[2].x=polygon_primitive[n].point.x-offset.x; box_p[2].y=polygon_primitive[n].point.y-offset.y; box_p[3].x=polygon_primitive[i].point.x-offset.x; box_p[3].y=polygon_primitive[i].point.y-offset.y; box_q[2].x=polygon_primitive[n].point.x+offset.x; box_q[2].y=polygon_primitive[n].point.y+offset.y; box_q[3].x=polygon_primitive[i].point.x+offset.x; box_q[3].y=polygon_primitive[i].point.y+offset.y; } else { box_p[2].x=polygon_primitive[n].point.x+offset.x; box_p[2].y=polygon_primitive[n].point.y+offset.y; box_p[3].x=polygon_primitive[i].point.x+offset.x; box_p[3].y=polygon_primitive[i].point.y+offset.y; box_q[2].x=polygon_primitive[n].point.x-offset.x; box_q[2].y=polygon_primitive[n].point.y-offset.y; box_q[3].x=polygon_primitive[i].point.x-offset.x; box_q[3].y=polygon_primitive[i].point.y-offset.y; } if (fabs((double) (slope.p-slope.q)) < DrawEpsilon) { box_p[4]=box_p[1]; box_q[4]=box_q[1]; } else { box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+ box_p[3].y)/(slope.p-slope.q)); box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y); box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+ box_q[3].y)/(slope.p-slope.q)); box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y); } if (q >= (ssize_t) (max_strokes-6*BezierQuantum-360)) { if (~max_strokes < (6*BezierQuantum+360)) { path_p=(PointInfo *) RelinquishMagickMemory(path_p); path_q=(PointInfo *) RelinquishMagickMemory(path_q); } else { max_strokes+=6*BezierQuantum+360; path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, sizeof(*path_p)); path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, sizeof(*path_q)); } if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) { if (path_p != (PointInfo *) NULL) path_p=(PointInfo *) RelinquishMagickMemory(path_p); if (path_q != (PointInfo *) NULL) path_q=(PointInfo *) RelinquishMagickMemory(path_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return((PrimitiveInfo *) NULL); } } dot_product=dx.q*dy.p-dx.p*dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_p[p++]=box_p[4]; else { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { path_q[q++]=box_q[4]; path_p[p++]=box_p[4]; } else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_p[p++]=box_p[4]; else { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x); theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x); if (theta.q < theta.p) theta.q+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/ (2.0*sqrt((double) (1.0/mid))))); path_q[q].x=box_q[1].x; 
path_q[q].y=box_q[1].y; q++; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); path_q[q].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); path_q[q].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); q++; } path_q[q++]=box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_q[q++]=box_q[4]; else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { path_q[q++]=box_q[4]; path_p[p++]=box_p[4]; } else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_q[q++]=box_q[4]; else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x); theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x); if (theta.p < theta.q) theta.p+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/ (2.0*sqrt((double) (1.0/mid))))); path_p[p++]=box_p[1]; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); path_p[p].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); path_p[p].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); p++; } path_p[p++]=box_p[2]; break; } default: break; } slope.p=slope.q; inverse_slope.p=inverse_slope.q; box_p[0]=box_p[2]; box_p[1]=box_p[3]; box_q[0]=box_q[2]; box_q[1]=box_q[3]; dx.p=dx.q; dy.p=dy.q; n=i; } path_p[p++]=box_p[1]; path_q[q++]=box_q[1]; /* Trace stroked polygon. */ stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon)); if (stroke_polygon != (PrimitiveInfo *) NULL) { for (i=0; i < (ssize_t) p; i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=path_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; } for ( ; i < (ssize_t) (p+q+closed_path); i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[p+closed_path].point; i++; } stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; stroke_polygon[i].primitive=UndefinedPrimitive; stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1); } path_p=(PointInfo *) RelinquishMagickMemory(path_p); path_q=(PointInfo *) RelinquishMagickMemory(path_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return(stroke_polygon); }
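/*
  Aside: TraceBezier above evaluates the curve directly in the Bernstein
  basis.  Permutate() supplies the binomial coefficient C(n-1,i), and the
  running `alpha` term accumulates weight^j*(1-weight)^(n-1-j) incrementally
  via alpha *= weight/(1.0-weight), which is why the loop samples weights
  strictly below 1 and appends the exact endpoint separately.  A minimal
  standalone sketch of the same evaluation (Point, binomial and bezier_point
  are illustrative names, not part of this library):
*/
#include <math.h>
#include <stddef.h>

typedef struct { double x, y; } Point;

/* C(n,k) by the multiplicative formula; exact for the small n used here. */
static double binomial(size_t n, size_t k)
{
  double c = 1.0;
  for (size_t i = 1; i <= k; i++)
    c = c*(double) (n-k+i)/(double) i;
  return c;
}

/* Evaluate a Bezier curve with n control points at parameter t in [0,1]:
   sum_j C(n-1,j) * t^j * (1-t)^(n-1-j) * ctrl[j]. */
static Point bezier_point(const Point *ctrl, size_t n, double t)
{
  Point r = { 0.0, 0.0 };
  for (size_t j = 0; j < n; j++)
    {
      double w = binomial(n-1,j)*pow(t,(double) j)*pow(1.0-t,(double) (n-1-j));
      r.x += w*ctrl[j].x;
      r.y += w*ctrl[j].y;
    }
  return r;
}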
bml_introspection_ellpack_typed.c
#include "../../macros.h" #include "../../typed.h" #include "../bml_introspection.h" #include "../bml_types.h" #include "bml_introspection_ellpack.h" #include "bml_types_ellpack.h" #include <stdio.h> #include <stdlib.h> #include <complex.h> #include <math.h> /** Return the sparsity of a matrix. * * Note that the the sparsity of a matrix is defined * as NumberOfZeroes/N*N where N is the matrix dimension. * To density of matrix A will be defined as 1-sparsity(A) * * \ingroup introspection_group_C * * \param A The bml matrix. * \param threshold The threshold used to compute the sparsity. * \return The sparsity of A. */ double TYPED_FUNC( bml_get_sparsity_ellpack) ( bml_matrix_ellpack_t * A, double threshold) { int nnzs = 0; int i; int j; double sparsity; REAL_T *A_value = (REAL_T *) A->value; int A_N = A->N; int A_M = A->M; int *A_nnz = A->nnz; #ifdef USE_OMP_OFFLOAD #pragma omp target update from(A_value[:A_N*A_M], A_nnz[:A_N]) #endif for (int i = 0; i < A->N; i++) { for (int j = 0; j < A_nnz[i]; j++) { if (ABS(A_value[ROWMAJOR(i, j, A_N, A_M)]) > threshold) { nnzs++; } } } sparsity = (1.0 - (double) nnzs / ((double) (A_N * A_N))); return sparsity; }
sptree.h
/* * * Copyright (c) 2014, Laurens van der Maaten (Delft University of Technology) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the Delft University of Technology. * 4. Neither the name of the Delft University of Technology nor the names of * its contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY LAURENS VAN DER MAATEN ''AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL LAURENS VAN DER MAATEN BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * */ /* * * Copyright (c) 2014, Nicola Pezzotti (Delft University of Technology) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the Delft University of Technology. * 4. Neither the name of the Delft University of Technology nor the names of * its contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY NICOLA PEZZOTTI ''AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. 
* */ #ifndef SPTREE_H #define SPTREE_H #include <iostream> #include <vector> #include <unordered_map> #include <map> #ifdef __APPLE__ #include <dispatch/dispatch.h> #endif namespace hdi{ namespace dr{ //! Sparse Partitioning Tree used for the Barnes Hut approximation /*! Sparse Partitioning Tree used for the Barnes Hut approximation. The original version was implemented by Laurens van der Maaten, \author Laurens van der Maaten \author Nicola Pezzotti */ template <typename scalar_type> class SPTree{ public: typedef double hp_scalar_type; private: class Cell { unsigned int _emb_dimension; hp_scalar_type* corner; hp_scalar_type* width; public: Cell(unsigned int inp__emb_dimension); Cell(unsigned int inp__emb_dimension, hp_scalar_type* inp_corner, hp_scalar_type* inp_width); ~Cell(); hp_scalar_type getCorner(unsigned int d); hp_scalar_type getWidth(unsigned int d); void setCorner(unsigned int d, hp_scalar_type val); void setWidth(unsigned int d, hp_scalar_type val); bool containsPoint(scalar_type point[]); }; // Fixed constants static const unsigned int QT_NODE_CAPACITY = 1; // A buffer we use when doing force computations //hp_scalar_type* buff; // Properties of this node in the tree SPTree* parent; unsigned int _emb_dimension; bool is_leaf; unsigned int size; unsigned int cum_size; // Axis-aligned bounding box stored as a center with half-_emb_dimensions to represent the boundaries of this quad tree Cell* boundary; // Indices in this space-partitioning tree node, corresponding center-of-mass, and list of all children scalar_type* _emb_positions; hp_scalar_type* _center_of_mass; unsigned int index[QT_NODE_CAPACITY]; // Children SPTree** children; unsigned int no_children; public: SPTree(unsigned int D, scalar_type* inp_data, unsigned int N); private: SPTree(unsigned int D, scalar_type* inp_data, hp_scalar_type* inp_corner, hp_scalar_type* inp_width); SPTree(unsigned int D, scalar_type* inp_data, unsigned int N, hp_scalar_type* inp_corner, hp_scalar_type* inp_width); SPTree(SPTree* inp_parent, unsigned int D, scalar_type* inp_data, unsigned int N, hp_scalar_type* inp_corner, hp_scalar_type* inp_width); SPTree(SPTree* inp_parent, unsigned int D, scalar_type* inp_data, hp_scalar_type* inp_corner, hp_scalar_type* inp_width); public: ~SPTree(); void setData(scalar_type* inp_data); SPTree* getParent(); void construct(Cell boundary); bool insert(unsigned int new_index); void subdivide(); bool isCorrect(); void rebuildTree(); void getAllIndices(unsigned int* indices); unsigned int getDepth(); void computeNonEdgeForcesOMP(unsigned int point_index, hp_scalar_type theta, hp_scalar_type neg_f[], hp_scalar_type& sum_Q)const; void computeNonEdgeForces(unsigned int point_index, hp_scalar_type theta, hp_scalar_type neg_f[], hp_scalar_type* sum_Q)const; void computeEdgeForces(unsigned int* row_P, unsigned int* col_P, hp_scalar_type* val_P, hp_scalar_type sum_P, int N, hp_scalar_type* pos_f)const; template <typename sparse_scalar_matrix> void computeEdgeForces(const sparse_scalar_matrix& matrix, hp_scalar_type multiplier, hp_scalar_type* pos_f)const; void print(); private: void init(SPTree* inp_parent, unsigned int D, scalar_type* inp_data, hp_scalar_type* inp_corner, hp_scalar_type* inp_width); void fill(unsigned int N); unsigned int getAllIndices(unsigned int* indices, unsigned int loc); bool isChild(unsigned int test_index, unsigned int start, unsigned int end); }; ///////////////////////////////////////////////////////////////////////// template <typename scalar_type> template <typename 
sparse_scalar_matrix> void SPTree<scalar_type>::computeEdgeForces(const sparse_scalar_matrix& sparse_matrix, hp_scalar_type multiplier, hp_scalar_type* pos_f)const{ const int n = sparse_matrix.size(); // Loop over all edges in the graph #ifdef __APPLE__ //std::cout << "GCD dispatch, sptree 180.\n"; dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) { #else #pragma omp parallel for for(int j = 0; j < n; ++j) { #endif //__APPLE__ std::vector<hp_scalar_type> buff(_emb_dimension,0); unsigned int ind1, ind2; hp_scalar_type q_ij_1; ind1 = j * _emb_dimension; for(auto elem: sparse_matrix[j]) { // Compute pairwise distance and Q-value q_ij_1 = 1.0; ind2 = elem.first * _emb_dimension; for(unsigned int d = 0; d < _emb_dimension; d++) buff[d] = _emb_positions[ind1 + d] - _emb_positions[ind2 + d]; //buff contains (yi-yj) per each _emb_dimension for(unsigned int d = 0; d < _emb_dimension; d++) q_ij_1 += buff[d] * buff[d]; hp_scalar_type p_ij = elem.second; hp_scalar_type res = hp_scalar_type(p_ij) * multiplier / q_ij_1 / n; // Sum positive force for(unsigned int d = 0; d < _emb_dimension; d++) pos_f[ind1 + d] += res * buff[d] * multiplier; //(p_ij*q_j*mult) * (yi-yj) } } #ifdef __APPLE__ ); #endif } } } #endif
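/*
  Aside: the edge-force kernel above computes, for each stored neighbour j
  of point i, the Student-t weight 1/(1 + ||y_i - y_j||^2) and accumulates
  the p_ij-weighted attraction along y_i - y_j.  A scalar C sketch of one
  such accumulation (add_edge_force is an illustrative name, not part of
  the SPTree class API):
*/
static void add_edge_force(const double *yi, const double *yj,
                           double p_ij, int dim, double *force)
{
  /* dist2 starts at 1.0: the "+1" of the Student-t kernel. */
  double dist2 = 1.0;
  for (int d = 0; d < dim; d++)
    {
      double diff = yi[d]-yj[d];
      dist2 += diff*diff;
    }
  /* Attraction toward the neighbour, scaled by affinity and kernel. */
  for (int d = 0; d < dim; d++)
    force[d] += p_ij*(yi[d]-yj[d])/dist2;
}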
DRACC_OMP_049_MxV_missing_free_other.c
/*
Matrix Vector multiplication without deallocating the matrix "b" after the
kernel finishes.
*/
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>

#define C 512

int *a;
int *b;
int *c;

int init(){
    for(int i=0; i<C; i++){
        for(int j=0; j<C; j++){
            b[j+i*C]=1;
        }
        a[i]=1;
        c[i]=0;
    }
    return 0;
}

int Mult(){
    #pragma omp target enter data map(to:a[0:C],b[0:C*C]) map(alloc:c[0:C]) device(0)
    #pragma omp target device(0)
    {
        #pragma omp teams distribute parallel for
        for(int i=0; i<C; i++){
            for(int j=0; j<C; j++){
                c[i]+=b[j+i*C]*a[j];
            }
        }
    }
    #pragma omp target exit data map(from:c[0:C]) map(release:a[0:C]) device(0)
    return 0;
}

int check(){
    bool test = false;
    for(int i=0; i<C; i++){
        if(c[i]!=C){
            test = true;
        }
    }
    printf("Memory Access Issue visible: %s\n",test ? "true" : "false");
    return 0;
}

int main(){
    a = malloc(C*sizeof(int));
    b = malloc(C*C*sizeof(int));
    c = malloc(C*sizeof(int));
    init();
    Mult();
    check();
    free(a);
    free(b);
    free(c);
    return 0;
}
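/*
  Aside: the defect this benchmark encodes is that b[0:C*C] is mapped `to`
  the device by the enter-data construct but never released by the
  exit-data construct.  A sketch of the corrected epilogue is below
  (Mult_fixed is a hypothetical variant reusing the benchmark's globals,
  not part of the benchmark, which deliberately keeps the leak so tools
  can flag it):
*/
int Mult_fixed(){
    #pragma omp target enter data map(to:a[0:C],b[0:C*C]) map(alloc:c[0:C]) device(0)
    #pragma omp target device(0)
    {
        #pragma omp teams distribute parallel for
        for(int i=0; i<C; i++){
            for(int j=0; j<C; j++){
                c[i]+=b[j+i*C]*a[j];
            }
        }
    }
    /* also release b, so no device copy of it outlives the kernel */
    #pragma omp target exit data map(from:c[0:C]) map(release:a[0:C],b[0:C*C]) device(0)
    return 0;
}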
lasso.c
/******************************************************************************
 * INCLUDES
 *****************************************************************************/

#include "../admm.h"


/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/

/**
* @brief The proximal update for an L1-regularized factorization (LASSO) via
*        soft thresholding with lambda/rho.
*
* @param[out] primal The row-major matrix to update.
* @param nrows The number of rows in primal.
* @param ncols The number of columns in primal.
* @param offset Not used.
* @param data The L1 penalty weight (lambda), stored as a val_t.
* @param rho The ADMM penalty parameter; the threshold is lambda/rho.
* @param should_parallelize If true, parallelize.
*/
void splatt_lasso_prox(
    val_t * primal,
    idx_t const nrows,
    idx_t const ncols,
    idx_t const offset,
    void * data,
    val_t const rho,
    bool const should_parallelize)
{
  val_t const lambda = *((val_t *) data);
  val_t const mult = lambda / rho;

  #pragma omp parallel for schedule(static) if(should_parallelize)
  for(idx_t x=0; x < nrows * ncols; ++x) {
    val_t const v = primal[x];

    /* TODO: could this be done faster? */
    if(v > mult) {
      primal[x] = v - mult;
    } else if(v < -mult) {
      primal[x] = v + mult;
    } else {
      primal[x] = 0.;
    }
  }
}


/**
* @brief Free the single val_t allocated for L1 regularization.
*
* @param data The data to free.
*/
void splatt_lasso_free(
    void * data)
{
  splatt_free(data);
}


/******************************************************************************
 * API FUNCTIONS
 *****************************************************************************/

splatt_error_type splatt_register_lasso(
    splatt_cpd_opts * opts,
    splatt_val_t const multiplier,
    splatt_idx_t const * const modes_included,
    splatt_idx_t const num_modes)
{
  splatt_cpd_constraint * lasso_con = NULL;

  for(idx_t m = 0; m < num_modes; ++m) {
    idx_t const mode = modes_included[m];

    lasso_con = splatt_alloc_constraint(SPLATT_CON_ADMM);

    lasso_con->prox_func = splatt_lasso_prox;
    lasso_con->free_func = splatt_lasso_free;

    /* important hints */
    lasso_con->hints.row_separable = true;
    lasso_con->hints.sparsity_inducing = true;

    sprintf(lasso_con->description, "L1-REG (%0.1e)", multiplier);

    /* store multiplier */
    val_t * mult = splatt_malloc(sizeof(*mult));
    *mult = multiplier;
    lasso_con->data = mult;

    /* store the constraint for use */
    splatt_register_constraint(opts, mode, lasso_con);
  }

  return SPLATT_SUCCESS;
}
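/*
  Aside: splatt_lasso_prox is elementwise soft thresholding, the proximal
  operator of t*|v| with t = lambda/rho.  A self-contained sketch of the
  scalar operation and its shrinkage behaviour (names are illustrative):
*/
#include <stdio.h>

/* shrink(v) = v - t if v > t, v + t if v < -t, else 0. */
static double soft_threshold(double v, double t)
{
  if(v > t)  return v - t;
  if(v < -t) return v + t;
  return 0.0;
}

int main(void)
{
  double const v[4] = { 1.5, -0.2, 0.05, -3.0 };
  double const t = 0.25;   /* plays the role of lambda/rho */
  for(int i = 0; i < 4; i++) {
    printf("%g -> %g\n", v[i], soft_threshold(v[i], t));
  }
  return 0;
}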
Helm3dkaczswp_mex.c
#include <stdlib.h>    /* for getenv */
#include <stddef.h>    /* for size_t type */
#include <string.h>    /* for memcpy */
#include <omp.h>
#include "math.h"
#include "mex.h"
#include "matrix.h"
#include <assert.h>
#include "Helm3d_27pt.h"

#ifdef DERIV
#undef DERIV
#endif

/*
 * 27pt stencil Helmholtz Kaczmarz sweep.
 *
 * Usage:
 *   y = Helm3dkaczswp_mex(wn,h,n,npml,omega,x,b);
 *
 * Input:
 *   wn    - wavenumber evaluated on pml grid
 *   h     - [hx,hy,hz] spacings in each direction
 *   n     - [nx,ny,nz] points in each dimension, including pml
 *   npml  - 2 x 3 matrix denoting the number of pml points in each dimension
 *   omega - relaxation parameter, in (0,2)
 *   x     - nx*ny*nz x 1 vector
 *   b     - right hand side, nx*ny*nz x 1 vector
 *
 * Output:
 *   y     - Kaczmarz sweep applied to x
 *
 * Curt Da Silva, 2015
 *
 * To compile, run:
 *   mex -O -largeArrayDims Helm3dkaczswp_mex.c -DDEFINEUNIX -lmwblas CFLAGS="\$CFLAGS -fopenmp -std=c99" LDFLAGS="\$LDFLAGS -fopenmp"
 */

#define WN prhs[0]
#define H prhs[1]
#define N prhs[2]
#define NPML prhs[3]
#define OMEGA prhs[4]
#define X prhs[5]
#define B prhs[6]
#define OUTPUT plhs[0]

void init_zero(double *x, int n){
    int i;
    for(i=0;i<n;i++){ x[i] = 0; }
}

void do_sweep( double * wnr, double * wni, double * h, double * n, double * npml,
               double *yr, double *yi, double *xr, double *xi,
               double * br, double * bi, const double omega, int P)
{
    int i,j,k,kout,t,s;
    int nx = (int)n[0];
    int ny = (int)n[1];
    int nz = (int)n[2];
    int npmlx_lo = (int)npml[0]; int npmlx_hi = (int)npml[1];
    int npmly_lo = (int)npml[2]; int npmly_hi = (int)npml[3];
    int npmlz_lo = (int)npml[4]; int npmlz_hi = (int)npml[5];
    coef_consts c = compute_coef_consts(h);
    int pmlz_alloc = 0; int pmly_alloc = 0; int pmlx_alloc = 0;
    double complex coef[27];
    double complex x[27];
    double complex d;
    double row_norm_sq;
    wn_type wn_window[27];
    pml_info p;
    pml_adj_info padj;
    double complex atx;

    p.x_hasL = 0; p.x_hasR = 1;
    p.y_hasL = 0; p.y_hasR = 1;
    p.z_hasL = 0; p.z_hasR = 1;

#pragma omp parallel for schedule(static) private(i,j,k,kout,t,wn_window,coef,atx,row_norm_sq,d,x) firstprivate(p,padj,pmlz_alloc,pmly_alloc,pmlx_alloc) num_threads(P)
    for(s=0; s<P; s++)
    {
        xyzloop_updown(
            // Cache a window of the wavenumber around the current point
            load_wn_nbrhood(wn_window,wnr,wni,i,j,k,nx,ny,nz,p);
            // Get coefficients
            get_coefs(coef,wn_window, c, p, padj);
            // Cache a window of the wavefield around the current point
            load_nbrhoodc(x,yr,yi,i,j,k,nx,ny,nz,s,p);
            kout = IDX1D4(i,j,k,s,nx,ny,nz);
            atx = 0.0 + 0.0*I;
            row_norm_sq = 0;
            for(t=0; t<27; t++){
                atx += coef[t] * x[t];
                row_norm_sq += creal(conj(coef[t])*coef[t]);
            }
            d = CMPLX(br[kout],bi[kout]);
            d = omega*(d - atx)/row_norm_sq;
            for(t=0; t<27; t++){ coef[t] = d*conj(coef[t]); }
            nbrhood_update(coef,yr,yi,i,j,k,nx,ny,nz,s,p);
        )
    }
}

void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    mwSize n_threads = 1;
    double * wnr, *wni, *h, *n, *npml, *yr, *yi, *br, *bi, *xr, *xi;
    double omega;
    char *n_threads_str = NULL;
    int bi_alloc = 0; int xi_alloc = 0;
    int P;

    wnr = mxGetPr(WN);
    if(mxIsComplex(WN)) { wni = mxGetPi(WN); }
    else { wni = NULL; }
    if(mxGetM(N) != 3 && mxGetN(N) != 3) {
        mexErrMsgIdAndTxt("SLIM_release_apps:tools:algorithms:ThreeDFreqModeling:Helm3dkaczswp_mex:Nsize",
                          "n must be a 3-length vector");
    }
    h = mxGetPr(H);
    n = mxGetPr(N);
    omega = mxGetScalar(OMEGA);
    npml = mxGetPr(NPML);
    int numel = (int)n[0]*n[1]*n[2];
    if(mxGetM(X) != numel) {
        mexErrMsgIdAndTxt("SLIM_release_apps:tools:algorithms:ThreeDFreqModeling:Helm3dkaczswp_mex:Xsize",
                          "x must have nx*ny*nz elements");
    }
    P = mxGetN(X);
    if(mxGetM(B) != numel) {
        mexErrMsgIdAndTxt("SLIM_release_apps:tools:algorithms:ThreeDFreqModeling:Helm3dkaczswp_mex:Bsize",
                          "b must have nx*ny*nz elements");
    }
    xr = mxGetPr(X);
    if(mxIsComplex(X)){ xi = mxGetPi(X); }
    else{ xi = mxCalloc(numel*P,sizeof(double)); xi_alloc = 1; }
    br = mxGetPr(B);
    if(mxIsComplex(B)){ bi = mxGetPi(B); }
    else { bi = mxCalloc(numel*P,sizeof(double)); bi_alloc = 1; }

    /* define output vector y and initialize with input vector x. */
    OUTPUT = mxCreateDoubleMatrix(numel, P, mxCOMPLEX);
    yr = mxGetPr(OUTPUT);
    yi = mxGetPi(OUTPUT);
    memcpy( yr, xr, sizeof(double)*numel*P );
    memcpy( yi, xi, sizeof(double)*numel*P );

    do_sweep( wnr, wni, h, n, npml, yr, yi, xr, xi, br, bi,omega,P );

    if (bi_alloc) { mxFree(bi); }
    if (xi_alloc) { mxFree(xi); }
    return;
}
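/*
  Aside: each stencil visit in do_sweep above is one damped Kaczmarz
  projection, x <- x + omega*(b_i - a_i.x)/||a_i||^2 * conj(a_i), applied
  to the 27 entries of row i (hence d*conj(coef[t])).  A real-valued
  sketch for a generic dense row (kaczmarz_row_update is an illustrative
  name, not part of this MEX file):
*/
static void kaczmarz_row_update(const double *a, double b_i, double omega,
                                double *x, int n)
{
  double ax = 0.0, norm2 = 0.0;
  for (int k = 0; k < n; k++)
    {
      ax    += a[k]*x[k];
      norm2 += a[k]*a[k];
    }
  if (norm2 == 0.0)
    return;   /* empty row: nothing to project onto */
  double d = omega*(b_i-ax)/norm2;
  for (int k = 0; k < n; k++)
    x[k] += d*a[k];
}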
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GENERAL_MATRIX_MATRIX_H #define EIGEN_GENERAL_MATRIX_MATRIX_H namespace Eigen { namespace internal { template<typename _LhsScalar, typename _RhsScalar> class level3_blocking; /* Specialization for a row-major destination matrix => simple transposition of the product */ template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs> struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor> { typedef gebp_traits<RhsScalar,LhsScalar> Traits; typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; static EIGEN_STRONG_INLINE void run( Index rows, Index cols, Index depth, const LhsScalar* lhs, Index lhsStride, const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha, level3_blocking<RhsScalar,LhsScalar>& blocking, GemmParallelInfo<Index>* info = 0) { // transpose the product such that the result is column major general_matrix_matrix_product<Index, RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs, LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs, ColMajor> ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info); } }; /* Specialization for a col-major destination matrix * => Blocking algorithm following Goto's paper */ template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs> struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor> { typedef gebp_traits<LhsScalar,RhsScalar> Traits; typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; static void run(Index rows, Index cols, Index depth, const LhsScalar* _lhs, Index lhsStride, const RhsScalar* _rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha, level3_blocking<LhsScalar,RhsScalar>& blocking, GemmParallelInfo<Index>* info = 0) { const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride); const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride); Index kc = blocking.kc(); // cache block size along the K direction Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs; gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs; gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp; #ifdef EIGEN_HAS_OPENMP if(info) { // this is the parallel version! Index tid = omp_get_thread_num(); Index threads = omp_get_num_threads(); LhsScalar* blockA = blocking.blockA(); eigen_internal_assert(blockA!=0); std::size_t sizeB = kc*nc; ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0); // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs... 
for(Index k=0; k<depth; k+=kc) { const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A' // In order to reduce the chance that a thread has to wait for the other, // let's start by packing B'. pack_rhs(blockB, &rhs(k,0), rhsStride, actual_kc, nc); // Pack A_k to A' in a parallel fashion: // each thread packs the sub block A_k,i to A'_i where i is the thread id. // However, before copying to A'_i, we have to make sure that no other thread is still using it, // i.e., we test that info[tid].users equals 0. // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it. while(info[tid].users!=0) {} info[tid].users += threads; pack_lhs(blockA+info[tid].lhs_start*actual_kc, &lhs(info[tid].lhs_start,k), lhsStride, actual_kc, info[tid].lhs_length); // Notify the other threads that the part A'_i is ready to go. info[tid].sync = k; // Computes C_i += A' * B' per A'_i for(Index shift=0; shift<threads; ++shift) { Index i = (tid+shift)%threads; // At this point we have to make sure that A'_i has been updated by the thread i, // we use testAndSetOrdered to mimic a volatile access. // However, no need to wait for the B' part which has been updated by the current thread! if(shift>0) while(info[i].sync!=k) {} gebp(res+info[i].lhs_start, resStride, blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha); } // Then keep going as usual with the remaining B' for(Index j=nc; j<cols; j+=nc) { const Index actual_nc = (std::min)(j+nc,cols)-j; // pack B_k,j to B' pack_rhs(blockB, &rhs(k,j), rhsStride, actual_kc, actual_nc); // C_j += A' * B' gebp(res+j*resStride, resStride, blockA, blockB, rows, actual_kc, actual_nc, alpha); } // Release all the sub blocks A'_i of A' for the current thread, // i.e., we simply decrement the number of users by 1 #pragma omp critical { for(Index i=0; i<threads; ++i) #pragma omp atomic --(info[i].users); } } } else #endif // EIGEN_HAS_OPENMP { EIGEN_UNUSED_VARIABLE(info); // this is the sequential version! std::size_t sizeA = kc*mc; std::size_t sizeB = kc*nc; ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA()); ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB()); // For each horizontal panel of the rhs, and corresponding panel of the lhs... for(Index k2=0; k2<depth; k2+=kc) { const Index actual_kc = (std::min)(k2+kc,depth)-k2; // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs. // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching) // Note that this panel will be read as many times as the number of blocks in the rhs's // horizontal panel which is, in practice, a very low number. pack_lhs(blockA, &lhs(0,k2), lhsStride, actual_kc, rows); // For each kc x nc block of the rhs's horizontal panel... for(Index j2=0; j2<cols; j2+=nc) { const Index actual_nc = (std::min)(j2+nc,cols)-j2; // We pack the rhs's block into a sequential chunk of memory (L2 caching) // Note that this block will be read a very high number of times, which is equal to the number of // micro horizontal panel of the large rhs's panel (e.g., rows/12 times). 
pack_rhs(blockB, &rhs(k2,j2), rhsStride, actual_kc, actual_nc); // Everything is packed, we can now call the panel * block kernel: gebp(res+j2*resStride, resStride, blockA, blockB, rows, actual_kc, actual_nc, alpha); } } } } }; /********************************************************************************* * Specialization of GeneralProduct<> for "large" GEMM, i.e., * implementation of the high level wrapper to general_matrix_matrix_product **********************************************************************************/ template<typename Lhs, typename Rhs> struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> > : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> > {}; template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType> struct gemm_functor { gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking) : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking) {} void initParallelSession() const { m_blocking.allocateA(); } void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const { if(cols==-1) cols = m_rhs.cols(); Gemm::run(rows, cols, m_lhs.cols(), /*(const Scalar*)*/&m_lhs.coeffRef(row,0), m_lhs.outerStride(), /*(const Scalar*)*/&m_rhs.coeffRef(0,col), m_rhs.outerStride(), (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(), m_actualAlpha, m_blocking, info); } typedef typename Gemm::Traits Traits; protected: const Lhs& m_lhs; const Rhs& m_rhs; Dest& m_dest; Scalar m_actualAlpha; BlockingType& m_blocking; }; template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1, bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space; template<typename _LhsScalar, typename _RhsScalar> class level3_blocking { typedef _LhsScalar LhsScalar; typedef _RhsScalar RhsScalar; protected: LhsScalar* m_blockA; RhsScalar* m_blockB; DenseIndex m_mc; DenseIndex m_nc; DenseIndex m_kc; public: level3_blocking() : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0) {} inline DenseIndex mc() const { return m_mc; } inline DenseIndex nc() const { return m_nc; } inline DenseIndex kc() const { return m_kc; } inline LhsScalar* blockA() { return m_blockA; } inline RhsScalar* blockB() { return m_blockB; } }; template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor> class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true> : public level3_blocking< typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type, typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type> { enum { Transpose = StorageOrder==RowMajor, ActualRows = Transpose ? MaxCols : MaxRows, ActualCols = Transpose ? 
MaxRows : MaxCols }; typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar; typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar; typedef gebp_traits<LhsScalar,RhsScalar> Traits; enum { SizeA = ActualRows * MaxDepth, SizeB = ActualCols * MaxDepth }; EIGEN_ALIGN_DEFAULT LhsScalar m_staticA[SizeA]; EIGEN_ALIGN_DEFAULT RhsScalar m_staticB[SizeB]; public: gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/, bool /*full_rows*/ = false) { this->m_mc = ActualRows; this->m_nc = ActualCols; this->m_kc = MaxDepth; this->m_blockA = m_staticA; this->m_blockB = m_staticB; } inline void allocateA() {} inline void allocateB() {} inline void allocateAll() {} }; template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor> class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false> : public level3_blocking< typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type, typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type> { enum { Transpose = StorageOrder==RowMajor }; typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar; typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar; typedef gebp_traits<LhsScalar,RhsScalar> Traits; DenseIndex m_sizeA; DenseIndex m_sizeB; public: gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth, bool full_rows = false) { this->m_mc = Transpose ? cols : rows; this->m_nc = Transpose ? rows : cols; this->m_kc = depth; if(full_rows) { DenseIndex m = this->m_mc; computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc); } else // full columns { DenseIndex n = this->m_nc; computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n); } m_sizeA = this->m_mc * this->m_kc; m_sizeB = this->m_kc * this->m_nc; } void allocateA() { if(this->m_blockA==0) this->m_blockA = aligned_new<LhsScalar>(m_sizeA); } void allocateB() { if(this->m_blockB==0) this->m_blockB = aligned_new<RhsScalar>(m_sizeB); } void allocateAll() { allocateA(); allocateB(); } ~gemm_blocking_space() { aligned_delete(this->m_blockA, m_sizeA); aligned_delete(this->m_blockB, m_sizeB); } }; } // end namespace internal template<typename Lhs, typename Rhs> class GeneralProduct<Lhs, Rhs, GemmProduct> : public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> { enum { MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime) }; public: EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct) typedef typename Lhs::Scalar LhsScalar; typedef typename Rhs::Scalar RhsScalar; typedef Scalar ResScalar; GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) { typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp; EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar); } template<typename Dest> inline void evalTo(Dest& dst) const { if((m_rhs.rows()+dst.rows()+dst.cols())<20 && m_rhs.rows()>0) dst.noalias() = m_lhs .lazyProduct( m_rhs ); else { dst.setZero(); scaleAndAddTo(dst,Scalar(1)); } } template<typename Dest> inline void addTo(Dest& dst) const { if((m_rhs.rows()+dst.rows()+dst.cols())<20 && m_rhs.rows()>0) dst.noalias() += m_lhs .lazyProduct( m_rhs ); else scaleAndAddTo(dst,Scalar(1)); } template<typename Dest> inline void subTo(Dest& dst) const { if((m_rhs.rows()+dst.rows()+dst.cols())<20 && m_rhs.rows()>0) dst.noalias() -= m_lhs 
.lazyProduct( m_rhs ); else scaleAndAddTo(dst,Scalar(-1)); } template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const { eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs); typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs); Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) * RhsBlasTraits::extractScalarFactor(m_rhs); typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar, Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType; typedef internal::gemm_functor< Scalar, Index, internal::general_matrix_matrix_product< Index, LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate), RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate), (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>, _ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor; BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), true); internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit); } }; } // end namespace Eigen #endif // EIGEN_GENERAL_MATRIX_MATRIX_H
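/*
  Aside: the sequential branch above is Goto-style blocking: slice the depth
  into kc panels, pack the m x kc panel of lhs once, then stream kc x nc
  blocks of rhs through it.  A scalar C sketch of the same loop nest, with
  no packing or SIMD and fixed stand-in block sizes (illustrative only, not
  Eigen's kernel):
*/
#include <stddef.h>

enum { KC = 256, NC = 128 };  /* stand-ins for the cache-derived kc, nc */

/* C += alpha*A*B for column-major double matrices: A is m x k (stride lda),
   B is k x n (stride ldb), C is m x n (stride ldc). */
static void gemm_blocked(int m, int n, int k,
                         const double *A, int lda,
                         const double *B, int ldb,
                         double *C, int ldc, double alpha)
{
  for (int k2 = 0; k2 < k; k2 += KC)
    {
      int kc = (k2+KC < k) ? KC : k-k2;
      for (int j2 = 0; j2 < n; j2 += NC)
        {
          int nc = (j2+NC < n) ? NC : n-j2;
          /* the "gebp" role: C(:, j2:j2+nc) += alpha * A(:, k2:k2+kc) * B-block */
          for (int j = 0; j < nc; j++)
            for (int p = 0; p < kc; p++)
              {
                double bpj = alpha*B[(k2+p) + (size_t) (j2+j)*ldb];
                for (int i = 0; i < m; i++)
                  C[i + (size_t) (j2+j)*ldc] += A[i + (size_t) (k2+p)*lda]*bpj;
              }
        }
    }
}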
convolution_3x3_pack8to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd42_transform_kernel_pack8to4_int8_neon(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt) { // winograd42 transform kernel Mat kernel_tm(6 * 6, inch, outch, (size_t)2u); const short ktm[6][3] = { {6, 0, 0}, {-4, -4, -4}, {-4, 4, -4}, {1, 2, 4}, {1, -2, 4}, {0, 0, 6} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9; short* kernel_tm0 = kernel_tm.channel(p).row<short>(q); // transform kernel const signed char* k0 = kernel0; const signed char* k1 = kernel0 + 3; const signed char* k2 = kernel0 + 6; // h short tmp[6][3]; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { short* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 36-inch-outch // dst = 4b-8a-inch/8a-36-outch/4b kernel_tm_pack8.create(inch / 8, 36, outch / 8 + (outch % 8) / 4, (size_t)2u * 64, 64); int q = 0; for (; q + 7 < outch; q += 8) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); const Mat k4 = kernel_tm.channel(q + 4); const Mat k5 = kernel_tm.channel(q + 5); const Mat k6 = kernel_tm.channel(q + 6); const Mat k7 = kernel_tm.channel(q + 7); Mat kernel_tm = kernel_tm_pack8.channel(q / 8); for (int k = 0; k < 36; k++) { short* g00 = kernel_tm.row<short>(k); for (int p = 0; p + 7 < inch; p += 8) { for (int i = 0; i < 8; i++) { const short* k00 = k0.row<const short>(p + i); const short* k10 = k1.row<const short>(p + i); const short* k20 = k2.row<const short>(p + i); const short* k30 = k3.row<const short>(p + i); const short* k40 = k4.row<const short>(p + i); const short* k50 = k5.row<const short>(p + i); const short* k60 = k6.row<const short>(p + i); const short* k70 = k7.row<const short>(p + i); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k40[k]; g00[5] = k50[k]; g00[6] = k60[k]; g00[7] = k70[k]; g00 += 8; } } } } for (; q + 3 < outch; q += 4) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); Mat kernel_tm = kernel_tm_pack8.channel(q / 8 + (q % 8) / 4); for (int k = 0; k < 36; k++) { short* g00 = kernel_tm.row<short>(k); for (int p = 0; p + 7 < inch; p += 8) { for (int i = 0; i < 8; i++) { const short* k00 = k0.row<const 
short>(p + i); const short* k10 = k1.row<const short>(p + i); const short* k20 = k2.row<const short>(p + i); const short* k30 = k3.row<const short>(p + i); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00 += 4; } } } } } static void conv3x3s1_winograd42_pack8to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; // size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 4n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); short tmp[6][6][8]; // tile for (int i = 0; i < h_tm / 6; i++) { for (int j = 0; j < w_tm / 6; j++) { const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8; for (int m = 0; m < 6; m++) { int8x8_t _r00 = vld1_s8(r0); int8x8_t _r01 = vld1_s8(r0 + 8); int8x8_t _r02 = vld1_s8(r0 + 16); int8x8_t _r03 = vld1_s8(r0 + 24); int8x8_t _r04 = vld1_s8(r0 + 32); int8x8_t _r05 = vld1_s8(r0 + 40); int8x8_t _v4s8 = vdup_n_s8(4); int8x8_t _v5s8 = vdup_n_s8(5); int16x8_t _v2 = vdupq_n_s16(2); int16x8_t _v4 = vdupq_n_s16(4); // int16x8_t _tmp0m = vfmsq_n_f16(vfmaq_n_f16(_r04, _r00, 4.f), _r02, 5.f); int16x8_t _tmp0m = vsubq_s16(vaddw_s8(vmull_s8(_r00, _v4s8), _r04), vmull_s8(_r02, _v5s8)); // int16x8_t _tmp1m = vfmsq_n_f16(vaddq_f16(_r04, _r03), vaddq_f16(_r01, _r02), 4.f); int16x8_t _tmp1m = vmlsq_s16(vaddl_s8(_r04, _r03), vaddl_s8(_r01, _r02), _v4); // int16x8_t _tmp2m = vfmaq_n_f16(vsubq_f16(_r04, _r03), vsubq_f16(_r01, _r02), 4.f); int16x8_t _tmp2m = vmlaq_s16(vsubl_s8(_r04, _r03), vsubl_s8(_r01, _r02), _v4); // int16x8_t _tmp3m = vfmsq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f); int16x8_t _tmp3m = vmlsq_s16(vsubl_s8(_r04, _r02), vsubl_s8(_r01, _r03), _v2); // int16x8_t _tmp4m = vfmaq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f); int16x8_t _tmp4m = vmlaq_s16(vsubl_s8(_r04, _r02), vsubl_s8(_r01, _r03), _v2); // int16x8_t _tmp5m = vfmsq_n_f16(vfmaq_n_f16(_r05, _r01, 4.f), _r03, 5.f); int16x8_t _tmp5m = vsubq_s16(vaddw_s8(vmull_s8(_r01, _v4s8), _r05), vmull_s8(_r03, _v5s8)); vst1q_s16(tmp[0][m], _tmp0m); vst1q_s16(tmp[1][m], _tmp1m); vst1q_s16(tmp[2][m], _tmp2m); vst1q_s16(tmp[3][m], _tmp3m); vst1q_s16(tmp[4][m], _tmp4m); vst1q_s16(tmp[5][m], _tmp5m); r0 += w * 8; } short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8; short* r0_tm_1 = 
r0_tm_0 + tiles * 8; short* r0_tm_2 = r0_tm_0 + tiles * 16; short* r0_tm_3 = r0_tm_0 + tiles * 24; short* r0_tm_4 = r0_tm_0 + tiles * 32; short* r0_tm_5 = r0_tm_0 + tiles * 40; for (int m = 0; m < 6; m++) { int16x8_t _tmp00 = vld1q_s16(tmp[m][0]); int16x8_t _tmp01 = vld1q_s16(tmp[m][1]); int16x8_t _tmp02 = vld1q_s16(tmp[m][2]); int16x8_t _tmp03 = vld1q_s16(tmp[m][3]); int16x8_t _tmp04 = vld1q_s16(tmp[m][4]); int16x8_t _tmp05 = vld1q_s16(tmp[m][5]); int16x8_t _v2 = vdupq_n_s16(2); int16x8_t _v4 = vdupq_n_s16(4); int16x8_t _v5 = vdupq_n_s16(5); int16x8_t _r0tm0 = vmlsq_s16(vmlaq_s16(_tmp04, _tmp00, _v4), _tmp02, _v5); int16x8_t _r0tm1 = vmlsq_s16(vaddq_s16(_tmp04, _tmp03), vaddq_s16(_tmp01, _tmp02), _v4); int16x8_t _r0tm2 = vmlaq_s16(vsubq_s16(_tmp04, _tmp03), vsubq_s16(_tmp01, _tmp02), _v4); int16x8_t _r0tm3 = vmlsq_s16(vsubq_s16(_tmp04, _tmp02), vsubq_s16(_tmp01, _tmp03), _v2); int16x8_t _r0tm4 = vmlaq_s16(vsubq_s16(_tmp04, _tmp02), vsubq_s16(_tmp01, _tmp03), _v2); int16x8_t _r0tm5 = vmlsq_s16(vmlaq_s16(_tmp05, _tmp01, _v4), _tmp03, _v5); vst1q_s16(r0_tm_0, _r0tm0); vst1q_s16(r0_tm_1, _r0tm1); vst1q_s16(r0_tm_2, _r0tm2); vst1q_s16(r0_tm_3, _r0tm3); vst1q_s16(r0_tm_4, _r0tm4); vst1q_s16(r0_tm_5, _r0tm5); r0_tm_0 += tiles * 48; r0_tm_1 += tiles * 48; r0_tm_2 += tiles * 48; r0_tm_3 += tiles * 48; r0_tm_4 += tiles * 48; r0_tm_5 += tiles * 48; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __aarch64__ if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { short* tm2p = tm2.row<short>(i / 12); const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 12x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0], #64 \n" "ld4 {v16.8h, v17.8h, v18.8h, v19.8h}, [%0] \n" "sub %0, %0, #128 \n" "uzp1 v20.8h, v0.8h, v4.8h \n" // 0 "uzp1 v21.8h, v16.8h, v1.8h \n" // 1 "uzp1 v22.8h, v5.8h, v17.8h \n" // 2 "uzp1 v23.8h, v2.8h, v6.8h 
\n" // 3 "uzp1 v24.8h, v18.8h, v3.8h \n" // 4 "uzp1 v25.8h, v7.8h, v19.8h \n" // 5 "uzp2 v26.8h, v0.8h, v4.8h \n" // 6 "uzp2 v27.8h, v16.8h, v1.8h \n" // 7 "uzp2 v28.8h, v5.8h, v17.8h \n" // 8 "uzp2 v29.8h, v2.8h, v6.8h \n" // 9 "uzp2 v30.8h, v18.8h, v3.8h \n" // 10 "uzp2 v31.8h, v7.8h, v19.8h \n" // 11 "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); r0 += bottom_blob_tm.cstep * 8; } } for (; i + 7 < tiles; i += 8) { short* tmpptr = tm2.row<short>(i / 12 + (i % 12) / 8); const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); r0 += bottom_blob_tm.cstep * 8; } } #endif // __aarch64__ for (; i + 3 < tiles; i += 4) { #if __aarch64__ short* tmpptr = tm2.row<short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else short* tmpptr = tm2.row<short>(i / 4); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" "vstm %1!, {d0-d7} \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "q0", "q1", "q2", "q3"); #endif r0 += bottom_blob_tm.cstep * 8; } } for (; i + 1 < tiles; i += 2) { #if __aarch64__ short* tmpptr = tm2.row<short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h}, [%0] \n" "st1 {v0.8h, v1.8h}, [%1], #32 \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "v0", "v1"); #else asm volatile( "pld [%0, #256] \n" "vld1.s16 {d0-d3}, [%0 :128] \n" "vst1.s16 {d0-d3}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "q0", "q1"); #endif r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { #if __aarch64__ short* tmpptr = tm2.row<short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2 + i % 2); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.s16 {d0-d1}, [%0 :128] \n" "vst1.s16 {d0-d1}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "q0"); #endif r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 4u * 4, 4, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel0_tm = kernel_tm.channel(p / 2); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const short* r0 = bb2.row<const short>(i / 12); const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 asm volatile( "ld1 {v0.8h, v1.8h}, [%3], #32 \n" // r01 "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "ld1 {v4.8h, v5.8h}, [%4], #32 \n" // w01 "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "prfm pldl1keep, [%3, #256] \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "prfm pldl1keep, [%4, #256] \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "smlal v8.4s, v4.4h, v0.h[0] \n" "smlal2 v20.4s, v4.8h, v0.h[0] \n" "smlal v9.4s, v4.4h, v0.h[1] \n" "smlal2 v21.4s, v4.8h, v0.h[1] \n" "smlal v10.4s, v4.4h, v0.h[2] \n" "smlal2 v22.4s, v4.8h, v0.h[2] \n" "smlal v11.4s, v4.4h, v0.h[3] \n" "smlal2 v23.4s, v4.8h, v0.h[3] \n" "smlal v12.4s, v4.4h, v0.h[4] \n" "smlal2 v24.4s, v4.8h, v0.h[4] \n" "smlal v13.4s, v4.4h, v0.h[5] \n" "smlal2 v25.4s, v4.8h, v0.h[5] \n" "smlal v14.4s, v4.4h, v0.h[6] \n" "smlal2 v26.4s, v4.8h, v0.h[6] \n" "smlal v15.4s, v4.4h, v0.h[7] \n" "smlal2 v27.4s, v4.8h, v0.h[7] \n" "ld1 {v2.8h, v3.8h}, [%3], #32 \n" // r23 "smlal v16.4s, v4.4h, v1.h[0] \n" "smlal2 v28.4s, v4.8h, v1.h[0] \n" "smlal v17.4s, v4.4h, v1.h[1] \n" "smlal2 v29.4s, v4.8h, v1.h[1] \n" "prfm pldl1keep, [%3, #256] \n" "smlal v18.4s, v4.4h, v1.h[2] \n" "smlal2 v30.4s, v4.8h, v1.h[2] \n" "smlal v19.4s, v4.4h, v1.h[3] \n" "smlal2 v31.4s, v4.8h, v1.h[3] \n" "ld1 {v6.8h, v7.8h}, [%4], #32 \n" // w23 "smlal v8.4s, v5.4h, v1.h[4] \n" "smlal2 
v20.4s, v5.8h, v1.h[4] \n" "smlal v9.4s, v5.4h, v1.h[5] \n" "smlal2 v21.4s, v5.8h, v1.h[5] \n" "prfm pldl1keep, [%4, #256] \n" "smlal v10.4s, v5.4h, v1.h[6] \n" "smlal2 v22.4s, v5.8h, v1.h[6] \n" "smlal v11.4s, v5.4h, v1.h[7] \n" "smlal2 v23.4s, v5.8h, v1.h[7] \n" "smlal v12.4s, v5.4h, v2.h[0] \n" "smlal2 v24.4s, v5.8h, v2.h[0] \n" "smlal v13.4s, v5.4h, v2.h[1] \n" "smlal2 v25.4s, v5.8h, v2.h[1] \n" "smlal v14.4s, v5.4h, v2.h[2] \n" "smlal2 v26.4s, v5.8h, v2.h[2] \n" "smlal v15.4s, v5.4h, v2.h[3] \n" "smlal2 v27.4s, v5.8h, v2.h[3] \n" "smlal v16.4s, v5.4h, v2.h[4] \n" "smlal2 v28.4s, v5.8h, v2.h[4] \n" "smlal v17.4s, v5.4h, v2.h[5] \n" "smlal2 v29.4s, v5.8h, v2.h[5] \n" "smlal v18.4s, v5.4h, v2.h[6] \n" "smlal2 v30.4s, v5.8h, v2.h[6] \n" "smlal v19.4s, v5.4h, v2.h[7] \n" "smlal2 v31.4s, v5.8h, v2.h[7] \n" "ld1 {v0.8h, v1.8h}, [%3], #32 \n" // r45 "smlal v8.4s, v6.4h, v3.h[0] \n" "smlal2 v20.4s, v6.8h, v3.h[0] \n" "smlal v9.4s, v6.4h, v3.h[1] \n" "smlal2 v21.4s, v6.8h, v3.h[1] \n" "prfm pldl1keep, [%3, #256] \n" "smlal v10.4s, v6.4h, v3.h[2] \n" "smlal2 v22.4s, v6.8h, v3.h[2] \n" "smlal v11.4s, v6.4h, v3.h[3] \n" "smlal2 v23.4s, v6.8h, v3.h[3] \n" "smlal v12.4s, v6.4h, v3.h[4] \n" "smlal2 v24.4s, v6.8h, v3.h[4] \n" "smlal v13.4s, v6.4h, v3.h[5] \n" "smlal2 v25.4s, v6.8h, v3.h[5] \n" "smlal v14.4s, v6.4h, v3.h[6] \n" "smlal2 v26.4s, v6.8h, v3.h[6] \n" "smlal v15.4s, v6.4h, v3.h[7] \n" "smlal2 v27.4s, v6.8h, v3.h[7] \n" "smlal v16.4s, v6.4h, v0.h[0] \n" "smlal2 v28.4s, v6.8h, v0.h[0] \n" "smlal v17.4s, v6.4h, v0.h[1] \n" "smlal2 v29.4s, v6.8h, v0.h[1] \n" "smlal v18.4s, v6.4h, v0.h[2] \n" "smlal2 v30.4s, v6.8h, v0.h[2] \n" "smlal v19.4s, v6.4h, v0.h[3] \n" "smlal2 v31.4s, v6.8h, v0.h[3] \n" "ld1 {v4.8h, v5.8h}, [%4], #32 \n" // w45 "smlal v8.4s, v7.4h, v0.h[4] \n" "smlal2 v20.4s, v7.8h, v0.h[4] \n" "smlal v9.4s, v7.4h, v0.h[5] \n" "smlal2 v21.4s, v7.8h, v0.h[5] \n" "prfm pldl1keep, [%4, #256] \n" "smlal v10.4s, v7.4h, v0.h[6] \n" "smlal2 v22.4s, v7.8h, v0.h[6] \n" "smlal v11.4s, v7.4h, v0.h[7] \n" "smlal2 v23.4s, v7.8h, v0.h[7] \n" "ld1 {v2.8h, v3.8h}, [%3], #32 \n" // r67 "smlal v12.4s, v7.4h, v1.h[0] \n" "smlal2 v24.4s, v7.8h, v1.h[0] \n" "smlal v13.4s, v7.4h, v1.h[1] \n" "smlal2 v25.4s, v7.8h, v1.h[1] \n" "prfm pldl1keep, [%3, #256] \n" "smlal v14.4s, v7.4h, v1.h[2] \n" "smlal2 v26.4s, v7.8h, v1.h[2] \n" "smlal v15.4s, v7.4h, v1.h[3] \n" "smlal2 v27.4s, v7.8h, v1.h[3] \n" "smlal v16.4s, v7.4h, v1.h[4] \n" "smlal2 v28.4s, v7.8h, v1.h[4] \n" "smlal v17.4s, v7.4h, v1.h[5] \n" "smlal2 v29.4s, v7.8h, v1.h[5] \n" "smlal v18.4s, v7.4h, v1.h[6] \n" "smlal2 v30.4s, v7.8h, v1.h[6] \n" "smlal v19.4s, v7.4h, v1.h[7] \n" "smlal2 v31.4s, v7.8h, v1.h[7] \n" "smlal v8.4s, v4.4h, v2.h[0] \n" "smlal2 v20.4s, v4.8h, v2.h[0] \n" "smlal v9.4s, v4.4h, v2.h[1] \n" "smlal2 v21.4s, v4.8h, v2.h[1] \n" "smlal v10.4s, v4.4h, v2.h[2] \n" "smlal2 v22.4s, v4.8h, v2.h[2] \n" "smlal v11.4s, v4.4h, v2.h[3] \n" "smlal2 v23.4s, v4.8h, v2.h[3] \n" "smlal v12.4s, v4.4h, v2.h[4] \n" "smlal2 v24.4s, v4.8h, v2.h[4] \n" "smlal v13.4s, v4.4h, v2.h[5] \n" "smlal2 v25.4s, v4.8h, v2.h[5] \n" "smlal v14.4s, v4.4h, v2.h[6] \n" "smlal2 v26.4s, v4.8h, v2.h[6] \n" "smlal v15.4s, v4.4h, v2.h[7] \n" "smlal2 v27.4s, v4.8h, v2.h[7] \n" "ld1 {v0.8h, v1.8h}, [%3], #32 \n" // r89 "smlal v16.4s, v4.4h, v3.h[0] \n" "smlal2 v28.4s, v4.8h, v3.h[0] \n" "smlal v17.4s, v4.4h, v3.h[1] \n" "smlal2 v29.4s, v4.8h, v3.h[1] \n" "prfm pldl1keep, [%3, #256] \n" "smlal v18.4s, v4.4h, v3.h[2] \n" "smlal2 v30.4s, v4.8h, v3.h[2] \n" "smlal v19.4s, v4.4h, v3.h[3] 
\n" "smlal2 v31.4s, v4.8h, v3.h[3] \n" "ld1 {v6.8h, v7.8h}, [%4], #32 \n" // w67 "smlal v8.4s, v5.4h, v3.h[4] \n" "smlal2 v20.4s, v5.8h, v3.h[4] \n" "smlal v9.4s, v5.4h, v3.h[5] \n" "smlal2 v21.4s, v5.8h, v3.h[5] \n" "prfm pldl1keep, [%4, #256] \n" "smlal v10.4s, v5.4h, v3.h[6] \n" "smlal2 v22.4s, v5.8h, v3.h[6] \n" "smlal v11.4s, v5.4h, v3.h[7] \n" "smlal2 v23.4s, v5.8h, v3.h[7] \n" "smlal v12.4s, v5.4h, v0.h[0] \n" "smlal2 v24.4s, v5.8h, v0.h[0] \n" "smlal v13.4s, v5.4h, v0.h[1] \n" "smlal2 v25.4s, v5.8h, v0.h[1] \n" "smlal v14.4s, v5.4h, v0.h[2] \n" "smlal2 v26.4s, v5.8h, v0.h[2] \n" "smlal v15.4s, v5.4h, v0.h[3] \n" "smlal2 v27.4s, v5.8h, v0.h[3] \n" "smlal v16.4s, v5.4h, v0.h[4] \n" "smlal2 v28.4s, v5.8h, v0.h[4] \n" "smlal v17.4s, v5.4h, v0.h[5] \n" "smlal2 v29.4s, v5.8h, v0.h[5] \n" "smlal v18.4s, v5.4h, v0.h[6] \n" "smlal2 v30.4s, v5.8h, v0.h[6] \n" "smlal v19.4s, v5.4h, v0.h[7] \n" "smlal2 v31.4s, v5.8h, v0.h[7] \n" "ld1 {v2.8h, v3.8h}, [%3], #32 \n" // r1011 "smlal v8.4s, v6.4h, v1.h[0] \n" "smlal2 v20.4s, v6.8h, v1.h[0] \n" "smlal v9.4s, v6.4h, v1.h[1] \n" "smlal2 v21.4s, v6.8h, v1.h[1] \n" "prfm pldl1keep, [%3, #256] \n" "smlal v10.4s, v6.4h, v1.h[2] \n" "smlal2 v22.4s, v6.8h, v1.h[2] \n" "smlal v11.4s, v6.4h, v1.h[3] \n" "smlal2 v23.4s, v6.8h, v1.h[3] \n" "smlal v12.4s, v6.4h, v1.h[4] \n" "smlal2 v24.4s, v6.8h, v1.h[4] \n" "smlal v13.4s, v6.4h, v1.h[5] \n" "smlal2 v25.4s, v6.8h, v1.h[5] \n" "smlal v14.4s, v6.4h, v1.h[6] \n" "smlal2 v26.4s, v6.8h, v1.h[6] \n" "smlal v15.4s, v6.4h, v1.h[7] \n" "smlal2 v27.4s, v6.8h, v1.h[7] \n" "smlal v16.4s, v6.4h, v2.h[0] \n" "smlal2 v28.4s, v6.8h, v2.h[0] \n" "smlal v17.4s, v6.4h, v2.h[1] \n" "smlal2 v29.4s, v6.8h, v2.h[1] \n" "smlal v18.4s, v6.4h, v2.h[2] \n" "smlal2 v30.4s, v6.8h, v2.h[2] \n" "smlal v19.4s, v6.4h, v2.h[3] \n" "smlal2 v31.4s, v6.8h, v2.h[3] \n" "ld1 {v4.8h, v5.8h}, [%4], #32 \n" // w01 "smlal v8.4s, v7.4h, v2.h[4] \n" "smlal2 v20.4s, v7.8h, v2.h[4] \n" "smlal v9.4s, v7.4h, v2.h[5] \n" "smlal2 v21.4s, v7.8h, v2.h[5] \n" "prfm pldl1keep, [%4, #256] \n" "smlal v10.4s, v7.4h, v2.h[6] \n" "smlal2 v22.4s, v7.8h, v2.h[6] \n" "smlal v11.4s, v7.4h, v2.h[7] \n" "smlal2 v23.4s, v7.8h, v2.h[7] \n" "ld1 {v0.8h, v1.8h}, [%3], #32 \n" // r01 "smlal v12.4s, v7.4h, v3.h[0] \n" "smlal2 v24.4s, v7.8h, v3.h[0] \n" "smlal v13.4s, v7.4h, v3.h[1] \n" "smlal2 v25.4s, v7.8h, v3.h[1] \n" "prfm pldl1keep, [%3, #256] \n" "smlal v14.4s, v7.4h, v3.h[2] \n" "smlal2 v26.4s, v7.8h, v3.h[2] \n" "smlal v15.4s, v7.4h, v3.h[3] \n" "smlal2 v27.4s, v7.8h, v3.h[3] \n" "smlal v16.4s, v7.4h, v3.h[4] \n" "smlal2 v28.4s, v7.8h, v3.h[4] \n" "smlal v17.4s, v7.4h, v3.h[5] \n" "smlal2 v29.4s, v7.8h, v3.h[5] \n" "subs %w0, %w0, #1 \n" "smlal v18.4s, v7.4h, v3.h[6] \n" "smlal2 v30.4s, v7.8h, v3.h[6] \n" "smlal v19.4s, v7.4h, v3.h[7] \n" "smlal2 v31.4s, v7.8h, v3.h[7] \n" "bne 0b \n" "sub %3, %3, #32 \n" "sub %4, %4, #32 \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k0) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", 
"v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < tiles; i += 8) { const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8); const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); int32x4_t _sum8 = vdupq_n_s32(0); int32x4_t _sum9 = vdupq_n_s32(0); int32x4_t _suma = vdupq_n_s32(0); int32x4_t _sumb = vdupq_n_s32(0); int32x4_t _sumc = vdupq_n_s32(0); int32x4_t _sumd = vdupq_n_s32(0); int32x4_t _sume = vdupq_n_s32(0); int32x4_t _sumf = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _val1 = vld1q_s16(r0 + 8); int16x8_t _val2 = vld1q_s16(r0 + 16); int16x8_t _val3 = vld1q_s16(r0 + 24); int16x8_t _val4 = vld1q_s16(r0 + 32); int16x8_t _val5 = vld1q_s16(r0 + 40); int16x8_t _val6 = vld1q_s16(r0 + 48); int16x8_t _val7 = vld1q_s16(r0 + 56); int16x8_t _w0 = vld1q_s16(k0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w0), vget_low_s16(_val0), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w0), vget_low_s16(_val0), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w0), vget_low_s16(_val0), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w0), vget_low_s16(_val0), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w0), vget_low_s16(_val0), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w0), vget_low_s16(_val0), 3); _sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w0), vget_high_s16(_val0), 0); _sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w0), vget_high_s16(_val0), 0); _suma = vmlal_lane_s16(_suma, vget_low_s16(_w0), vget_high_s16(_val0), 1); _sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w0), vget_high_s16(_val0), 1); _sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w0), vget_high_s16(_val0), 2); _sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w0), vget_high_s16(_val0), 2); _sume = vmlal_lane_s16(_sume, vget_low_s16(_w0), vget_high_s16(_val0), 3); _sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w0), vget_high_s16(_val0), 3); int16x8_t _w1 = vld1q_s16(k0 + 8); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val1), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val1), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w1), vget_low_s16(_val1), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w1), vget_low_s16(_val1), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w1), vget_low_s16(_val1), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w1), vget_low_s16(_val1), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w1), vget_low_s16(_val1), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w1), vget_low_s16(_val1), 3); _sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w1), vget_high_s16(_val1), 0); _sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w1), vget_high_s16(_val1), 0); _suma = vmlal_lane_s16(_suma, vget_low_s16(_w1), vget_high_s16(_val1), 1); _sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w1), vget_high_s16(_val1), 1); _sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w1), vget_high_s16(_val1), 2); _sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w1), vget_high_s16(_val1), 2); _sume = vmlal_lane_s16(_sume, vget_low_s16(_w1), vget_high_s16(_val1), 3); _sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w1), 
vget_high_s16(_val1), 3); int16x8_t _w2 = vld1q_s16(k0 + 16); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val2), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_low_s16(_val2), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w2), vget_low_s16(_val2), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w2), vget_low_s16(_val2), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w2), vget_low_s16(_val2), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w2), vget_low_s16(_val2), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w2), vget_low_s16(_val2), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w2), vget_low_s16(_val2), 3); _sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w2), vget_high_s16(_val2), 0); _sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w2), vget_high_s16(_val2), 0); _suma = vmlal_lane_s16(_suma, vget_low_s16(_w2), vget_high_s16(_val2), 1); _sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w2), vget_high_s16(_val2), 1); _sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w2), vget_high_s16(_val2), 2); _sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w2), vget_high_s16(_val2), 2); _sume = vmlal_lane_s16(_sume, vget_low_s16(_w2), vget_high_s16(_val2), 3); _sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w2), vget_high_s16(_val2), 3); int16x8_t _w3 = vld1q_s16(k0 + 24); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val3), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val3), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w3), vget_low_s16(_val3), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w3), vget_low_s16(_val3), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w3), vget_low_s16(_val3), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w3), vget_low_s16(_val3), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w3), vget_low_s16(_val3), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w3), vget_low_s16(_val3), 3); _sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w3), vget_high_s16(_val3), 0); _sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w3), vget_high_s16(_val3), 0); _suma = vmlal_lane_s16(_suma, vget_low_s16(_w3), vget_high_s16(_val3), 1); _sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w3), vget_high_s16(_val3), 1); _sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w3), vget_high_s16(_val3), 2); _sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w3), vget_high_s16(_val3), 2); _sume = vmlal_lane_s16(_sume, vget_low_s16(_w3), vget_high_s16(_val3), 3); _sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w3), vget_high_s16(_val3), 3); int16x8_t _w4 = vld1q_s16(k0 + 32); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w4), vget_low_s16(_val4), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w4), vget_low_s16(_val4), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w4), vget_low_s16(_val4), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w4), vget_low_s16(_val4), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w4), vget_low_s16(_val4), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w4), vget_low_s16(_val4), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w4), vget_low_s16(_val4), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w4), vget_low_s16(_val4), 3); _sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w4), vget_high_s16(_val4), 0); _sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w4), vget_high_s16(_val4), 0); _suma = vmlal_lane_s16(_suma, vget_low_s16(_w4), vget_high_s16(_val4), 1); _sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w4), vget_high_s16(_val4), 1); _sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w4), vget_high_s16(_val4), 2); _sumd = vmlal_lane_s16(_sumd, 
vget_high_s16(_w4), vget_high_s16(_val4), 2); _sume = vmlal_lane_s16(_sume, vget_low_s16(_w4), vget_high_s16(_val4), 3); _sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w4), vget_high_s16(_val4), 3); int16x8_t _w5 = vld1q_s16(k0 + 40); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w5), vget_low_s16(_val5), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w5), vget_low_s16(_val5), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w5), vget_low_s16(_val5), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w5), vget_low_s16(_val5), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w5), vget_low_s16(_val5), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w5), vget_low_s16(_val5), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w5), vget_low_s16(_val5), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w5), vget_low_s16(_val5), 3); _sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w5), vget_high_s16(_val5), 0); _sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w5), vget_high_s16(_val5), 0); _suma = vmlal_lane_s16(_suma, vget_low_s16(_w5), vget_high_s16(_val5), 1); _sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w5), vget_high_s16(_val5), 1); _sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w5), vget_high_s16(_val5), 2); _sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w5), vget_high_s16(_val5), 2); _sume = vmlal_lane_s16(_sume, vget_low_s16(_w5), vget_high_s16(_val5), 3); _sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w5), vget_high_s16(_val5), 3); int16x8_t _w6 = vld1q_s16(k0 + 48); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w6), vget_low_s16(_val6), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w6), vget_low_s16(_val6), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w6), vget_low_s16(_val6), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w6), vget_low_s16(_val6), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w6), vget_low_s16(_val6), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w6), vget_low_s16(_val6), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w6), vget_low_s16(_val6), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w6), vget_low_s16(_val6), 3); _sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w6), vget_high_s16(_val6), 0); _sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w6), vget_high_s16(_val6), 0); _suma = vmlal_lane_s16(_suma, vget_low_s16(_w6), vget_high_s16(_val6), 1); _sumb = vmlal_lane_s16(_sumb, vget_high_s16(_w6), vget_high_s16(_val6), 1); _sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w6), vget_high_s16(_val6), 2); _sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w6), vget_high_s16(_val6), 2); _sume = vmlal_lane_s16(_sume, vget_low_s16(_w6), vget_high_s16(_val6), 3); _sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w6), vget_high_s16(_val6), 3); int16x8_t _w7 = vld1q_s16(k0 + 56); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w7), vget_low_s16(_val7), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w7), vget_low_s16(_val7), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w7), vget_low_s16(_val7), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w7), vget_low_s16(_val7), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w7), vget_low_s16(_val7), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w7), vget_low_s16(_val7), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w7), vget_low_s16(_val7), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w7), vget_low_s16(_val7), 3); _sum8 = vmlal_lane_s16(_sum8, vget_low_s16(_w7), vget_high_s16(_val7), 0); _sum9 = vmlal_lane_s16(_sum9, vget_high_s16(_w7), vget_high_s16(_val7), 0); _suma = vmlal_lane_s16(_suma, vget_low_s16(_w7), vget_high_s16(_val7), 1); _sumb = 
vmlal_lane_s16(_sumb, vget_high_s16(_w7), vget_high_s16(_val7), 1); _sumc = vmlal_lane_s16(_sumc, vget_low_s16(_w7), vget_high_s16(_val7), 2); _sumd = vmlal_lane_s16(_sumd, vget_high_s16(_w7), vget_high_s16(_val7), 2); _sume = vmlal_lane_s16(_sume, vget_low_s16(_w7), vget_high_s16(_val7), 3); _sumf = vmlal_lane_s16(_sumf, vget_high_s16(_w7), vget_high_s16(_val7), 3); r0 += 64; k0 += 64; } vst1q_s32(output0_tm, _sum0); vst1q_s32(output1_tm, _sum1); vst1q_s32(output0_tm + 4, _sum2); vst1q_s32(output1_tm + 4, _sum3); vst1q_s32(output0_tm + 8, _sum4); vst1q_s32(output1_tm + 8, _sum5); vst1q_s32(output0_tm + 12, _sum6); vst1q_s32(output1_tm + 12, _sum7); vst1q_s32(output0_tm + 16, _sum8); vst1q_s32(output1_tm + 16, _sum9); vst1q_s32(output0_tm + 20, _suma); vst1q_s32(output1_tm + 20, _sumb); vst1q_s32(output0_tm + 24, _sumc); vst1q_s32(output1_tm + 24, _sumd); vst1q_s32(output0_tm + 28, _sume); vst1q_s32(output1_tm + 28, _sumf); output0_tm += 32; output1_tm += 32; } #endif // __aarch64__ for (; i + 3 < tiles; i += 4) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const short* r0 = bb2.row<const short>(i / 4); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 #if __aarch64__ int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _val1 = vld1q_s16(r0 + 8); int16x8_t _val2 = vld1q_s16(r0 + 16); int16x8_t _val3 = vld1q_s16(r0 + 24); int16x8_t _w0 = vld1q_s16(k0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w0), vget_low_s16(_val1), 0); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w0), vget_low_s16(_val1), 0); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w0), vget_low_s16(_val2), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w0), vget_low_s16(_val2), 0); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w0), vget_low_s16(_val3), 0); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w0), vget_low_s16(_val3), 0); int16x8_t _w1 = vld1q_s16(k0 + 8); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 1); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w1), vget_low_s16(_val1), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w1), vget_low_s16(_val1), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w1), vget_low_s16(_val2), 1); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w1), vget_low_s16(_val2), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w1), vget_low_s16(_val3), 1); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w1), vget_low_s16(_val3), 1); int16x8_t _w2 = vld1q_s16(k0 + 16); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_low_s16(_val0), 2); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w2), vget_low_s16(_val1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w2), vget_low_s16(_val1), 2); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w2), vget_low_s16(_val2), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w2), vget_low_s16(_val2), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w2), vget_low_s16(_val3), 2); 
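                        // Layout reminder for the lane arithmetic in this 4-tile block: _valN
                        // holds the 8 packed input channels of tile N, and _wK holds the 8
                        // output-channel weights of input channel K (low half -> outch 0-3,
                        // high half -> outch 4-7). Each vmlal_lane_s16 broadcasts one int16
                        // input lane and widens the products into the int32 accumulators.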
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w2), vget_low_s16(_val3), 2); int16x8_t _w3 = vld1q_s16(k0 + 24); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val0), 3); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val0), 3); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w3), vget_low_s16(_val1), 3); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w3), vget_low_s16(_val1), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w3), vget_low_s16(_val2), 3); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w3), vget_low_s16(_val2), 3); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w3), vget_low_s16(_val3), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w3), vget_low_s16(_val3), 3); int16x8_t _w4 = vld1q_s16(k0 + 32); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w4), vget_high_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w4), vget_high_s16(_val0), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w4), vget_high_s16(_val1), 0); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w4), vget_high_s16(_val1), 0); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w4), vget_high_s16(_val2), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w4), vget_high_s16(_val2), 0); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w4), vget_high_s16(_val3), 0); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w4), vget_high_s16(_val3), 0); int16x8_t _w5 = vld1q_s16(k0 + 40); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w5), vget_high_s16(_val0), 1); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w5), vget_high_s16(_val0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w5), vget_high_s16(_val1), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w5), vget_high_s16(_val1), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w5), vget_high_s16(_val2), 1); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w5), vget_high_s16(_val2), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w5), vget_high_s16(_val3), 1); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w5), vget_high_s16(_val3), 1); int16x8_t _w6 = vld1q_s16(k0 + 48); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w6), vget_high_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w6), vget_high_s16(_val0), 2); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w6), vget_high_s16(_val1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w6), vget_high_s16(_val1), 2); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w6), vget_high_s16(_val2), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w6), vget_high_s16(_val2), 2); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w6), vget_high_s16(_val3), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w6), vget_high_s16(_val3), 2); int16x8_t _w7 = vld1q_s16(k0 + 56); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w7), vget_high_s16(_val0), 3); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w7), vget_high_s16(_val0), 3); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w7), vget_high_s16(_val1), 3); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w7), vget_high_s16(_val1), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w7), vget_high_s16(_val2), 3); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w7), vget_high_s16(_val2), 3); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w7), vget_high_s16(_val3), 3); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w7), vget_high_s16(_val3), 3); r0 += 32; k0 += 64; } vst1q_s32(output0_tm, _sum0); vst1q_s32(output1_tm, _sum1); vst1q_s32(output0_tm + 4, _sum2); vst1q_s32(output1_tm + 4, _sum3); vst1q_s32(output0_tm + 8, _sum4); vst1q_s32(output1_tm + 8, _sum5); vst1q_s32(output0_tm + 12, _sum6); vst1q_s32(output1_tm + 12, _sum7); 
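                    // For reference, one j-iteration of the 4-tile kernel above is the NEON
                    // form of this scalar sketch (tile/oc/ic/sum are illustrative names, not
                    // variables in this file; layouts are the packed ones built in the
                    // permute step, with r0/k0 as they stand at the top of the iteration):
                    //
                    //   for (int tile = 0; tile < 4; tile++)
                    //       for (int oc = 0; oc < 8; oc++)
                    //           for (int ic = 0; ic < 8; ic++)
                    //               sum[tile][oc] += (int)r0[tile * 8 + ic] * (int)k0[ic * 8 + oc];
                    //
                    // The stores just above split the 32 int32 results between output0_tm
                    // (outch 0-3 of the pair) and output1_tm (outch 4-7).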
output0_tm += 16; output1_tm += 16; #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%3, #256] \n" "pld [%3, #512] \n" "vldm %3!, {d0-d7} \n" "pld [%4, #256] \n" "vld1.s16 {d8-d11}, [%4 :128]! \n" "vmlal.s16 q8, d8, d0[0] \n" "vmlal.s16 q12, d9, d0[0] \n" "vmlal.s16 q9, d8, d2[0] \n" "vmlal.s16 q13, d9, d2[0] \n" "vmlal.s16 q10, d8, d4[0] \n" "vmlal.s16 q14, d9, d4[0] \n" "vmlal.s16 q11, d8, d6[0] \n" "vmlal.s16 q15, d9, d6[0] \n" "pld [%4, #128] \n" "vld1.s16 {d8-d9}, [%4 :128]! \n" "vmlal.s16 q8, d10, d0[1] \n" "vmlal.s16 q12, d11, d0[1] \n" "vmlal.s16 q9, d10, d2[1] \n" "vmlal.s16 q13, d11, d2[1] \n" "vmlal.s16 q10, d10, d4[1] \n" "vmlal.s16 q14, d11, d4[1] \n" "vmlal.s16 q11, d10, d6[1] \n" "vmlal.s16 q15, d11, d6[1] \n" "pld [%4, #128] \n" "vld1.s16 {d10-d11}, [%4 :128]! \n" "vmlal.s16 q8, d8, d0[2] \n" "vmlal.s16 q12, d9, d0[2] \n" "vmlal.s16 q9, d8, d2[2] \n" "vmlal.s16 q13, d9, d2[2] \n" "vmlal.s16 q10, d8, d4[2] \n" "vmlal.s16 q14, d9, d4[2] \n" "vmlal.s16 q11, d8, d6[2] \n" "vmlal.s16 q15, d9, d6[2] \n" "pld [%4, #128] \n" "vld1.s16 {d8-d9}, [%4 :128]! \n" "vmlal.s16 q8, d10, d0[3] \n" "vmlal.s16 q12, d11, d0[3] \n" "vmlal.s16 q9, d10, d2[3] \n" "vmlal.s16 q13, d11, d2[3] \n" "vmlal.s16 q10, d10, d4[3] \n" "vmlal.s16 q14, d11, d4[3] \n" "vmlal.s16 q11, d10, d6[3] \n" "vmlal.s16 q15, d11, d6[3] \n" "pld [%4, #128] \n" "vld1.s16 {d10-d11}, [%4 :128]! \n" "vmlal.s16 q8, d8, d1[0] \n" "vmlal.s16 q12, d9, d1[0] \n" "vmlal.s16 q9, d8, d3[0] \n" "vmlal.s16 q13, d9, d3[0] \n" "vmlal.s16 q10, d8, d5[0] \n" "vmlal.s16 q14, d9, d5[0] \n" "vmlal.s16 q11, d8, d7[0] \n" "vmlal.s16 q15, d9, d7[0] \n" "pld [%4, #128] \n" "vld1.s16 {d8-d9}, [%4 :128]! \n" "vmlal.s16 q8, d10, d1[1] \n" "vmlal.s16 q12, d11, d1[1] \n" "vmlal.s16 q9, d10, d3[1] \n" "vmlal.s16 q13, d11, d3[1] \n" "vmlal.s16 q10, d10, d5[1] \n" "vmlal.s16 q14, d11, d5[1] \n" "vmlal.s16 q11, d10, d7[1] \n" "vmlal.s16 q15, d11, d7[1] \n" "pld [%4, #128] \n" "vld1.s16 {d10-d11}, [%4 :128]! 
\n" "vmlal.s16 q8, d8, d1[2] \n" "vmlal.s16 q12, d9, d1[2] \n" "vmlal.s16 q9, d8, d3[2] \n" "vmlal.s16 q13, d9, d3[2] \n" "vmlal.s16 q10, d8, d5[2] \n" "vmlal.s16 q14, d9, d5[2] \n" "vmlal.s16 q11, d8, d7[2] \n" "vmlal.s16 q15, d9, d7[2] \n" "subs %0, %0, #1 \n" "vmlal.s16 q8, d10, d1[3] \n" "vmlal.s16 q12, d11, d1[3] \n" "vmlal.s16 q9, d10, d3[3] \n" "vmlal.s16 q13, d11, d3[3] \n" "vmlal.s16 q10, d10, d5[3] \n" "vmlal.s16 q14, d11, d5[3] \n" "vmlal.s16 q11, d10, d7[3] \n" "vmlal.s16 q15, d11, d7[3] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" "vstm %2!, {d24-d31} \n" : "=r"(nn), "=r"(output0_tm), "=r"(output1_tm), "=r"(r0), "=r"(k0) : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; i + 1 < tiles; i += 2) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _val1 = vld1q_s16(r0 + 8); int16x8_t _w0 = vld1q_s16(k0); int16x8_t _w1 = vld1q_s16(k0 + 8); int16x8_t _w2 = vld1q_s16(k0 + 16); int16x8_t _w3 = vld1q_s16(k0 + 24); int16x8_t _w4 = vld1q_s16(k0 + 32); int16x8_t _w5 = vld1q_s16(k0 + 40); int16x8_t _w6 = vld1q_s16(k0 + 48); int16x8_t _w7 = vld1q_s16(k0 + 56); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w0), vget_low_s16(_val1), 0); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w0), vget_low_s16(_val1), 0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 1); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w1), vget_low_s16(_val1), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w1), vget_low_s16(_val1), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_low_s16(_val0), 2); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w2), vget_low_s16(_val1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w2), vget_low_s16(_val1), 2); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val0), 3); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val0), 3); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w3), vget_low_s16(_val1), 3); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w3), vget_low_s16(_val1), 3); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w4), vget_high_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w4), vget_high_s16(_val0), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w4), vget_high_s16(_val1), 0); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w4), vget_high_s16(_val1), 0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w5), vget_high_s16(_val0), 1); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w5), vget_high_s16(_val0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w5), vget_high_s16(_val1), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w5), vget_high_s16(_val1), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w6), vget_high_s16(_val0), 2); _sum1 = 
vmlal_lane_s16(_sum1, vget_high_s16(_w6), vget_high_s16(_val0), 2); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w6), vget_high_s16(_val1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w6), vget_high_s16(_val1), 2); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w7), vget_high_s16(_val0), 3); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w7), vget_high_s16(_val0), 3); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w7), vget_high_s16(_val1), 3); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w7), vget_high_s16(_val1), 3); r0 += 16; k0 += 64; } vst1q_s32(output0_tm, _sum0); vst1q_s32(output1_tm, _sum1); vst1q_s32(output0_tm + 4, _sum2); vst1q_s32(output1_tm + 4, _sum3); output0_tm += 8; output1_tm += 8; } for (; i < tiles; i++) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _w0 = vld1q_s16(k0); int16x8_t _w1 = vld1q_s16(k0 + 8); int16x8_t _w2 = vld1q_s16(k0 + 16); int16x8_t _w3 = vld1q_s16(k0 + 24); int16x8_t _w4 = vld1q_s16(k0 + 32); int16x8_t _w5 = vld1q_s16(k0 + 40); int16x8_t _w6 = vld1q_s16(k0 + 48); int16x8_t _w7 = vld1q_s16(k0 + 56); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 1); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_low_s16(_val0), 2); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val0), 3); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val0), 3); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w4), vget_high_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w4), vget_high_s16(_val0), 0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w5), vget_high_s16(_val0), 1); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w5), vget_high_s16(_val0), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w6), vget_high_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w6), vget_high_s16(_val0), 2); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w7), vget_high_s16(_val0), 3); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w7), vget_high_s16(_val0), 3); r0 += 8; k0 += 64; } vst1q_s32(output0_tm, _sum0); vst1q_s32(output1_tm, _sum1); output0_tm += 4; output1_tm += 4; } } } remain_outch_start += nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { int* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const short* r0 = bb2.row<const short>(i / 12); const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 asm volatile( "ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r01 "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "ld1 {v4.8h, v5.8h}, [%3], #32 \n" // w01 "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "prfm pldl1keep, [%2, #256] \n" "eor v12.16b, 
v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "prfm pldl1keep, [%3, #256] \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "smlal v8.4s, v4.4h, v0.h[0] \n" "smlal v9.4s, v4.4h, v0.h[1] \n" "smlal v10.4s, v4.4h, v0.h[2] \n" "smlal v11.4s, v4.4h, v0.h[3] \n" "smlal v12.4s, v4.4h, v0.h[4] \n" "smlal v13.4s, v4.4h, v0.h[5] \n" "smlal v14.4s, v4.4h, v0.h[6] \n" "smlal v15.4s, v4.4h, v0.h[7] \n" "ld1 {v2.8h, v3.8h}, [%2], #32 \n" // r23 "smlal v16.4s, v4.4h, v1.h[0] \n" "smlal v17.4s, v4.4h, v1.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "smlal v18.4s, v4.4h, v1.h[2] \n" "smlal v19.4s, v4.4h, v1.h[3] \n" "smlal2 v8.4s, v4.8h, v1.h[4] \n" "smlal2 v9.4s, v4.8h, v1.h[5] \n" "smlal2 v10.4s, v4.8h, v1.h[6] \n" "smlal2 v11.4s, v4.8h, v1.h[7] \n" "smlal2 v12.4s, v4.8h, v2.h[0] \n" "smlal2 v13.4s, v4.8h, v2.h[1] \n" "smlal2 v14.4s, v4.8h, v2.h[2] \n" "smlal2 v15.4s, v4.8h, v2.h[3] \n" "smlal2 v16.4s, v4.8h, v2.h[4] \n" "smlal2 v17.4s, v4.8h, v2.h[5] \n" "smlal2 v18.4s, v4.8h, v2.h[6] \n" "smlal2 v19.4s, v4.8h, v2.h[7] \n" "ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r45 "smlal v8.4s, v5.4h, v3.h[0] \n" "smlal v9.4s, v5.4h, v3.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "smlal v10.4s, v5.4h, v3.h[2] \n" "smlal v11.4s, v5.4h, v3.h[3] \n" "smlal v12.4s, v5.4h, v3.h[4] \n" "smlal v13.4s, v5.4h, v3.h[5] \n" "smlal v14.4s, v5.4h, v3.h[6] \n" "smlal v15.4s, v5.4h, v3.h[7] \n" "smlal v16.4s, v5.4h, v0.h[0] \n" "smlal v17.4s, v5.4h, v0.h[1] \n" "smlal v18.4s, v5.4h, v0.h[2] \n" "smlal v19.4s, v5.4h, v0.h[3] \n" "ld1 {v6.8h, v7.8h}, [%3], #32 \n" // w23 "smlal2 v8.4s, v5.8h, v0.h[4] \n" "smlal2 v9.4s, v5.8h, v0.h[5] \n" "prfm pldl1keep, [%3, #256] \n" "smlal2 v10.4s, v5.8h, v0.h[6] \n" "smlal2 v11.4s, v5.8h, v0.h[7] \n" "ld1 {v2.8h, v3.8h}, [%2], #32 \n" // r67 "smlal2 v12.4s, v5.8h, v1.h[0] \n" "smlal2 v13.4s, v5.8h, v1.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "smlal2 v14.4s, v5.8h, v1.h[2] \n" "smlal2 v15.4s, v5.8h, v1.h[3] \n" "smlal2 v16.4s, v5.8h, v1.h[4] \n" "smlal2 v17.4s, v5.8h, v1.h[5] \n" "smlal2 v18.4s, v5.8h, v1.h[6] \n" "smlal2 v19.4s, v5.8h, v1.h[7] \n" "smlal v8.4s, v6.4h, v2.h[0] \n" "smlal v9.4s, v6.4h, v2.h[1] \n" "smlal v10.4s, v6.4h, v2.h[2] \n" "smlal v11.4s, v6.4h, v2.h[3] \n" "smlal v12.4s, v6.4h, v2.h[4] \n" "smlal v13.4s, v6.4h, v2.h[5] \n" "smlal v14.4s, v6.4h, v2.h[6] \n" "smlal v15.4s, v6.4h, v2.h[7] \n" "ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r89 "smlal v16.4s, v6.4h, v3.h[0] \n" "smlal v17.4s, v6.4h, v3.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "smlal v18.4s, v6.4h, v3.h[2] \n" "smlal v19.4s, v6.4h, v3.h[3] \n" "smlal2 v8.4s, v6.8h, v3.h[4] \n" "smlal2 v9.4s, v6.8h, v3.h[5] \n" "smlal2 v10.4s, v6.8h, v3.h[6] \n" "smlal2 v11.4s, v6.8h, v3.h[7] \n" "smlal2 v12.4s, v6.8h, v0.h[0] \n" "smlal2 v13.4s, v6.8h, v0.h[1] \n" "smlal2 v14.4s, v6.8h, v0.h[2] \n" "smlal2 v15.4s, v6.8h, v0.h[3] \n" "smlal2 v16.4s, v6.8h, v0.h[4] \n" "smlal2 v17.4s, v6.8h, v0.h[5] \n" "smlal2 v18.4s, v6.8h, v0.h[6] \n" "smlal2 v19.4s, v6.8h, v0.h[7] \n" "ld1 {v2.8h, v3.8h}, [%2], #32 \n" // r1011 "smlal v8.4s, v7.4h, v1.h[0] \n" "smlal v9.4s, v7.4h, v1.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "smlal v10.4s, v7.4h, v1.h[2] \n" "smlal v11.4s, v7.4h, v1.h[3] \n" "smlal v12.4s, v7.4h, v1.h[4] \n" "smlal v13.4s, v7.4h, v1.h[5] \n" "smlal v14.4s, v7.4h, v1.h[6] \n" "smlal v15.4s, v7.4h, v1.h[7] \n" "smlal v16.4s, v7.4h, v2.h[0] \n" "smlal v17.4s, v7.4h, 
v2.h[1] \n" "smlal v18.4s, v7.4h, v2.h[2] \n" "smlal v19.4s, v7.4h, v2.h[3] \n" "ld1 {v4.8h, v5.8h}, [%3], #32 \n" // w01 "smlal2 v8.4s, v7.8h, v2.h[4] \n" "smlal2 v9.4s, v7.8h, v2.h[5] \n" "prfm pldl1keep, [%3, #256] \n" "smlal2 v10.4s, v7.8h, v2.h[6] \n" "smlal2 v11.4s, v7.8h, v2.h[7] \n" "ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r01 "smlal2 v12.4s, v7.8h, v3.h[0] \n" "smlal2 v13.4s, v7.8h, v3.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "smlal2 v14.4s, v7.8h, v3.h[2] \n" "smlal2 v15.4s, v7.8h, v3.h[3] \n" "smlal2 v16.4s, v7.8h, v3.h[4] \n" "smlal2 v17.4s, v7.8h, v3.h[5] \n" "subs %w0, %w0, #1 \n" "smlal2 v18.4s, v7.8h, v3.h[6] \n" "smlal2 v19.4s, v7.8h, v3.h[7] \n" "bne 0b \n" "sub %2, %2, #32 \n" "sub %3, %3, #32 \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i + 7 < tiles; i += 8) { const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8); const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _val1 = vld1q_s16(r0 + 8); int16x8_t _val2 = vld1q_s16(r0 + 16); int16x8_t _val3 = vld1q_s16(r0 + 24); int16x8_t _val4 = vld1q_s16(r0 + 32); int16x8_t _val5 = vld1q_s16(r0 + 40); int16x8_t _val6 = vld1q_s16(r0 + 48); int16x8_t _val7 = vld1q_s16(r0 + 56); int16x8_t _w0 = vld1q_s16(k0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_w0), vget_low_s16(_val0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w0), vget_low_s16(_val0), 2); _sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_w0), vget_low_s16(_val0), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w0), vget_high_s16(_val0), 0); _sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_w0), vget_high_s16(_val0), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w0), vget_high_s16(_val0), 2); _sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_w0), vget_high_s16(_val0), 3); _sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_w0), vget_low_s16(_val1), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val1), 1); _sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_w0), vget_low_s16(_val1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w0), vget_low_s16(_val1), 3); _sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_w0), vget_high_s16(_val1), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w0), vget_high_s16(_val1), 1); _sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_w0), vget_high_s16(_val1), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w0), vget_high_s16(_val1), 3); int16x8_t _w1 = vld1q_s16(k0 + 8); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val2), 0); _sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_w1), vget_low_s16(_val2), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w1), vget_low_s16(_val2), 2); _sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_w1), vget_low_s16(_val2), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w1), 
vget_high_s16(_val2), 0); _sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_w1), vget_high_s16(_val2), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w1), vget_high_s16(_val2), 2); _sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_w1), vget_high_s16(_val2), 3); _sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_w1), vget_low_s16(_val3), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val3), 1); _sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_w1), vget_low_s16(_val3), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w1), vget_low_s16(_val3), 3); _sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_w1), vget_high_s16(_val3), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w1), vget_high_s16(_val3), 1); _sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_w1), vget_high_s16(_val3), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w1), vget_high_s16(_val3), 3); int16x8_t _w2 = vld1q_s16(k0 + 16); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val4), 0); _sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_w2), vget_low_s16(_val4), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w2), vget_low_s16(_val4), 2); _sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_w2), vget_low_s16(_val4), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w2), vget_high_s16(_val4), 0); _sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_w2), vget_high_s16(_val4), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w2), vget_high_s16(_val4), 2); _sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_w2), vget_high_s16(_val4), 3); _sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_w2), vget_low_s16(_val5), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_low_s16(_val5), 1); _sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_w2), vget_low_s16(_val5), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w2), vget_low_s16(_val5), 3); _sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_w2), vget_high_s16(_val5), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w2), vget_high_s16(_val5), 1); _sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_w2), vget_high_s16(_val5), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w2), vget_high_s16(_val5), 3); int16x8_t _w3 = vld1q_s16(k0 + 24); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val6), 0); _sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_w3), vget_low_s16(_val6), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w3), vget_low_s16(_val6), 2); _sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_w3), vget_low_s16(_val6), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w3), vget_high_s16(_val6), 0); _sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_w3), vget_high_s16(_val6), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w3), vget_high_s16(_val6), 2); _sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_w3), vget_high_s16(_val6), 3); _sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_w3), vget_low_s16(_val7), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val7), 1); _sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_w3), vget_low_s16(_val7), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w3), vget_low_s16(_val7), 3); _sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_w3), vget_high_s16(_val7), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w3), vget_high_s16(_val7), 1); _sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_w3), vget_high_s16(_val7), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w3), vget_high_s16(_val7), 3); r0 += 64; k0 += 32; } vst1q_s32(output0_tm, _sum0); vst1q_s32(output0_tm + 4, _sum1); vst1q_s32(output0_tm + 8, _sum2); vst1q_s32(output0_tm + 12, _sum3); vst1q_s32(output0_tm + 16, _sum4); vst1q_s32(output0_tm + 20, _sum5); 
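                    // Remaining-outch (pack4) variant of the 8-tile kernel: each _wK packs
                    // two input channels x 4 output channels, so k0 advances by 32 shorts
                    // per pack8 input group instead of 64, and each _sumN ends up holding
                    // the 4 int32 outputs of tile N, written out by this run of stores.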
vst1q_s32(output0_tm + 24, _sum6); vst1q_s32(output0_tm + 28, _sum7); output0_tm += 32; } #endif // __aarch64__ for (; i + 3 < tiles; i += 4) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const short* r0 = bb2.row<const short>(i / 4); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 #if __aarch64__ int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _val1 = vld1q_s16(r0 + 8); int16x8_t _val2 = vld1q_s16(r0 + 16); int16x8_t _val3 = vld1q_s16(r0 + 24); int16x8_t _w0 = vld1q_s16(k0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w0), vget_low_s16(_val1), 0); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w0), vget_low_s16(_val1), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w0), vget_low_s16(_val2), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w0), vget_low_s16(_val2), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w0), vget_low_s16(_val3), 0); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w0), vget_low_s16(_val3), 1); int16x8_t _w1 = vld1q_s16(k0 + 8); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 3); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w1), vget_low_s16(_val1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w1), vget_low_s16(_val1), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w1), vget_low_s16(_val2), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w1), vget_low_s16(_val2), 3); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w1), vget_low_s16(_val3), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w1), vget_low_s16(_val3), 3); int16x8_t _w2 = vld1q_s16(k0 + 16); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_high_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_high_s16(_val0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w2), vget_high_s16(_val1), 0); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w2), vget_high_s16(_val1), 1); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w2), vget_high_s16(_val2), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w2), vget_high_s16(_val2), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w2), vget_high_s16(_val3), 0); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w2), vget_high_s16(_val3), 1); int16x8_t _w3 = vld1q_s16(k0 + 24); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_high_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_high_s16(_val0), 3); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w3), vget_high_s16(_val1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w3), vget_high_s16(_val1), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_w3), vget_high_s16(_val2), 2); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_w3), vget_high_s16(_val2), 3); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_w3), vget_high_s16(_val3), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_w3), vget_high_s16(_val3), 3); r0 += 32; k0 += 32; } _sum0 = vaddq_s32(_sum0, _sum1); _sum2 = vaddq_s32(_sum2, _sum3); _sum4 = vaddq_s32(_sum4, _sum5); _sum6 = vaddq_s32(_sum6, _sum7); 
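                    // Note the paired accumulators in the loop above: even input channels
                    // collect into _sum0/_sum2/_sum4/_sum6 and odd ones into
                    // _sum1/_sum3/_sum5/_sum7, keeping consecutive vmlal_lane_s16 ops off a
                    // single dependency chain; the pairs are merged only here, right before
                    // the int32 results are stored.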
vst1q_s32(output0_tm, _sum0); vst1q_s32(output0_tm + 4, _sum2); vst1q_s32(output0_tm + 8, _sum4); vst1q_s32(output0_tm + 12, _sum6); output0_tm += 16; #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%2, #256] \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #256] \n" "vld1.s16 {d8-d11}, [%3 :128]! \n" "vmlal.s16 q8, d8, d0[0] \n" "vmlal.s16 q12, d9, d0[1] \n" "vmlal.s16 q9, d8, d2[0] \n" "vmlal.s16 q13, d9, d2[1] \n" "vmlal.s16 q10, d8, d4[0] \n" "vmlal.s16 q14, d9, d4[1] \n" "vmlal.s16 q11, d8, d6[0] \n" "vmlal.s16 q15, d9, d6[1] \n" "pld [%3, #128] \n" "vld1.s16 {d8-d9}, [%3 :128]! \n" "vmlal.s16 q8, d10, d0[2] \n" "vmlal.s16 q12, d11, d0[3] \n" "vmlal.s16 q9, d10, d2[2] \n" "vmlal.s16 q13, d11, d2[3] \n" "vmlal.s16 q10, d10, d4[2] \n" "vmlal.s16 q14, d11, d4[3] \n" "vmlal.s16 q11, d10, d6[2] \n" "vmlal.s16 q15, d11, d6[3] \n" "pld [%3, #128] \n" "vld1.s16 {d10-d11}, [%3 :128]! \n" "vmlal.s16 q8, d8, d1[0] \n" "vmlal.s16 q12, d9, d1[1] \n" "vmlal.s16 q9, d8, d3[0] \n" "vmlal.s16 q13, d9, d3[1] \n" "vmlal.s16 q10, d8, d5[0] \n" "vmlal.s16 q14, d9, d5[1] \n" "vmlal.s16 q11, d8, d7[0] \n" "vmlal.s16 q15, d9, d7[1] \n" "subs %0, %0, #1 \n" "vmlal.s16 q8, d10, d1[2] \n" "vmlal.s16 q12, d11, d1[3] \n" "vmlal.s16 q9, d10, d3[2] \n" "vmlal.s16 q13, d11, d3[3] \n" "vmlal.s16 q10, d10, d5[2] \n" "vmlal.s16 q14, d11, d5[3] \n" "vmlal.s16 q11, d10, d7[2] \n" "vmlal.s16 q15, d11, d7[3] \n" "bne 0b \n" "vadd.s32 q8, q8, q12 \n" "vadd.s32 q9, q9, q13 \n" "vadd.s32 q10, q10, q14 \n" "vadd.s32 q11, q11, q15 \n" "vstm %1!, {d16-d23} \n" : "=r"(nn), "=r"(output0_tm), "=r"(r0), "=r"(k0) : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; i + 1 < tiles; i += 2) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _val1 = vld1q_s16(r0 + 8); int16x8_t _w0 = vld1q_s16(k0); int16x8_t _w1 = vld1q_s16(k0 + 8); int16x8_t _w2 = vld1q_s16(k0 + 16); int16x8_t _w3 = vld1q_s16(k0 + 24); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w0), vget_low_s16(_val1), 0); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w0), vget_low_s16(_val1), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 3); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w1), vget_low_s16(_val1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w1), vget_low_s16(_val1), 3); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_high_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_high_s16(_val0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w2), vget_high_s16(_val1), 0); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w2), vget_high_s16(_val1), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), 
vget_high_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_high_s16(_val0), 3); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_w3), vget_high_s16(_val1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_w3), vget_high_s16(_val1), 3); r0 += 16; k0 += 32; } _sum0 = vaddq_s32(_sum0, _sum1); _sum2 = vaddq_s32(_sum2, _sum3); vst1q_s32(output0_tm, _sum0); vst1q_s32(output0_tm + 4, _sum2); output0_tm += 8; } for (; i < tiles; i++) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _w0 = vld1q_s16(k0); int16x8_t _w1 = vld1q_s16(k0 + 8); int16x8_t _w2 = vld1q_s16(k0 + 16); int16x8_t _w3 = vld1q_s16(k0 + 24); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 3); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_high_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_high_s16(_val0), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_high_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_high_s16(_val0), 3); r0 += 8; k0 += 32; } _sum0 = vaddq_s32(_sum0, _sum1); vst1q_s32(output0_tm, _sum0); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 4u * 4, 4, opt.workspace_allocator); } { // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); int tmp[4][6][4]; // tile for (int i = 0; i < outh / 4; i++) { for (int j = 0; j < outw / 4; j++) { // top_blob_tm.create(tiles, 36, outch, elemsize, elempack); const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 4; const int* output0_tm_1 = output0_tm_0 + tiles * 4; const int* output0_tm_2 = output0_tm_0 + tiles * 8; const int* output0_tm_3 = output0_tm_0 + tiles * 12; const int* output0_tm_4 = output0_tm_0 + tiles * 16; const int* output0_tm_5 = output0_tm_0 + tiles * 20; int* output0 = out0.row<int>(i * 4) + (j * 4) * 4; // TODO neon optimize for (int m = 0; m < 5; m++) { int32x4_t _out0tm0 = vld1q_s32(output0_tm_0); int32x4_t _out0tm1 = vld1q_s32(output0_tm_1); int32x4_t _out0tm2 = vld1q_s32(output0_tm_2); int32x4_t _out0tm3 = vld1q_s32(output0_tm_3); int32x4_t _out0tm4 = vld1q_s32(output0_tm_4); int32x4_t _out0tm5 = vld1q_s32(output0_tm_5); int32x4_t _tmp02a = vaddq_s32(_out0tm1, _out0tm2); int32x4_t _tmp13a = 
vsubq_s32(_out0tm1, _out0tm2); int32x4_t _tmp02b = vaddq_s32(_out0tm3, _out0tm4); int32x4_t _tmp13b = vsubq_s32(_out0tm3, _out0tm4); int32x4_t _v2 = vdupq_n_s32(2); int32x4_t _v4 = vdupq_n_s32(4); int32x4_t _v8 = vdupq_n_s32(8); int32x4_t _tmp0m = vaddq_s32(vaddq_s32(_out0tm0, _tmp02a), _tmp02b); int32x4_t _tmp1m = vmlaq_s32(_tmp13a, _tmp13b, _v2); int32x4_t _tmp2m = vmlaq_s32(_tmp02a, _tmp02b, _v4); int32x4_t _tmp3m = vmlaq_s32(vmlaq_s32(_tmp13a, _out0tm5, _v4), _tmp13b, _v8); vst1q_s32(tmp[0][m], _tmp0m); vst1q_s32(tmp[1][m], _tmp1m); vst1q_s32(tmp[2][m], _tmp2m); vst1q_s32(tmp[3][m], _tmp3m); output0_tm_0 += tiles * 24; output0_tm_1 += tiles * 24; output0_tm_2 += tiles * 24; output0_tm_3 += tiles * 24; output0_tm_4 += tiles * 24; output0_tm_5 += tiles * 24; } for (int m = 5; m < 6; m++) { int32x4_t _out0tm0 = vld1q_s32(output0_tm_0); int32x4_t _out0tm1 = vld1q_s32(output0_tm_1); int32x4_t _out0tm2 = vld1q_s32(output0_tm_2); int32x4_t _out0tm3 = vld1q_s32(output0_tm_3); int32x4_t _out0tm4 = vld1q_s32(output0_tm_4); int32x4_t _out0tm5 = vld1q_s32(output0_tm_5); int32x4_t _tmp02a = vaddq_s32(_out0tm1, _out0tm2); int32x4_t _tmp13a = vsubq_s32(_out0tm1, _out0tm2); int32x4_t _tmp02b = vaddq_s32(_out0tm3, _out0tm4); int32x4_t _tmp13b = vsubq_s32(_out0tm3, _out0tm4); int32x4_t _v2 = vdupq_n_s32(2); int32x4_t _v4 = vdupq_n_s32(4); int32x4_t _v8 = vdupq_n_s32(8); int32x4_t _tmp0m = vaddq_s32(vaddq_s32(_out0tm0, _tmp02a), _tmp02b); int32x4_t _tmp1m = vmlaq_s32(_tmp13a, _tmp13b, _v2); int32x4_t _tmp2m = vmlaq_s32(_tmp02a, _tmp02b, _v4); int32x4_t _tmp3m = vmlaq_s32(vmlaq_s32(_tmp13a, _out0tm5, _v4), _tmp13b, _v8); _tmp0m = vmulq_s32(_tmp0m, _v4); _tmp1m = vmulq_s32(_tmp1m, _v4); _tmp2m = vmulq_s32(_tmp2m, _v4); _tmp3m = vmulq_s32(_tmp3m, _v4); vst1q_s32(tmp[0][m], _tmp0m); vst1q_s32(tmp[1][m], _tmp1m); vst1q_s32(tmp[2][m], _tmp2m); vst1q_s32(tmp[3][m], _tmp3m); output0_tm_0 += tiles * 24; output0_tm_1 += tiles * 24; output0_tm_2 += tiles * 24; output0_tm_3 += tiles * 24; output0_tm_4 += tiles * 24; output0_tm_5 += tiles * 24; } for (int m = 0; m < 4; m++) { int32x4_t _tmp00 = vld1q_s32(tmp[m][0]); int32x4_t _tmp01 = vld1q_s32(tmp[m][1]); int32x4_t _tmp02 = vld1q_s32(tmp[m][2]); int32x4_t _tmp03 = vld1q_s32(tmp[m][3]); int32x4_t _tmp04 = vld1q_s32(tmp[m][4]); int32x4_t _tmp05 = vld1q_s32(tmp[m][5]); int32x4_t _tmp02a = vaddq_s32(_tmp01, _tmp02); int32x4_t _tmp13a = vsubq_s32(_tmp01, _tmp02); int32x4_t _tmp02b = vaddq_s32(_tmp03, _tmp04); int32x4_t _tmp13b = vsubq_s32(_tmp03, _tmp04); int32x4_t _v2 = vdupq_n_s32(2); int32x4_t _v4 = vdupq_n_s32(4); int32x4_t _v8 = vdupq_n_s32(8); int32x4_t _out00 = vaddq_s32(vaddq_s32(_tmp00, _tmp02a), _tmp02b); int32x4_t _out01 = vmlaq_s32(_tmp13a, _tmp13b, _v2); int32x4_t _out02 = vmlaq_s32(_tmp02a, _tmp02b, _v4); int32x4_t _out03 = vmlaq_s32(vaddq_s32(_tmp05, _tmp13a), _tmp13b, _v8); // TODO use integer trick for division by 576 float32x4_t _v576 = vdupq_n_f32(1.0 / 576); _out00 = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(_out00), _v576)); _out01 = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(_out01), _v576)); _out02 = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(_out02), _v576)); _out03 = vcvtq_s32_f32(vmulq_f32(vcvtq_f32_s32(_out03), _v576)); vst1q_s32(output0, _out00); vst1q_s32(output0 + 4, _out01); vst1q_s32(output0 + 8, _out02); vst1q_s32(output0 + 12, _out03); output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
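// ---------------------------------------------------------------------------
// Illustration only (not part of the original file): a scalar sketch of the
// Winograd F(4x4,3x3) inverse/output transform that the NEON code above
// applies along rows and then columns. Only the coefficients come from the
// commented otm matrix in the source; the function and main() below are
// hypothetical. The real kernel additionally scales row 5 by 4 and divides
// the final result by 576 to undo the scaling of the int8 kernel transform.
#include <cstdio>

// One 1-D pass: 6 transform-domain values -> 4 spatial outputs.
static void winograd43_output_transform_1d(const int r[6], int out[4])
{
    int a = r[1] + r[2]; // (r01 + r02)
    int b = r[1] - r[2]; // (r01 - r02)
    int c = r[3] + r[4]; // (r03 + r04)
    int d = r[3] - r[4]; // (r03 - r04)
    out[0] = r[0] + a + c;     // 0 = r00 + (r01 + r02) + (r03 + r04)
    out[1] = b + d * 2;        // 1 = (r01 - r02) + (r03 - r04) * 2
    out[2] = a + c * 4;        // 2 = (r01 + r02) + (r03 + r04) * 4
    out[3] = r[5] + b + d * 8; // 3 = r05 + (r01 - r02) + (r03 - r04) * 8
}

int main()
{
    int r[6] = {1, 2, 3, 4, 5, 6};
    int out[4];
    winograd43_output_transform_1d(r, out);
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); // prints: 15 -3 41 -3
    return 0;
}
// ---------------------------------------------------------------------------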
GB_extract_vector_list.c
//------------------------------------------------------------------------------ // GB_extract_vector_list: extract vector indices for all entries in a matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Constructs a list of vector indices for each entry in a matrix. Creates // the output J for GB_extractTuples, and I for GB_transpose when the qsort // method is used. // TODO: use #include "GB_positional_op_ijp.c" here #include "GB_ek_slice.h" #define GB_FREE_ALL \ { \ GB_WERK_POP (A_ek_slicing, int64_t) ; \ } GrB_Info GB_extract_vector_list // extract vector list from a matrix ( // output: int64_t *restrict J, // size nnz(A) or more // input: const GrB_Matrix A, GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (J != NULL) ; ASSERT (A != NULL) ; ASSERT (GB_JUMBLED_OK (A)) ; // pattern not accessed ASSERT (GB_ZOMBIES_OK (A)) ; // pattern not accessed ASSERT (!GB_IS_BITMAP (A)) ; //-------------------------------------------------------------------------- // get A //-------------------------------------------------------------------------- const int64_t *restrict Ap = A->p ; const int64_t *restrict Ah = A->h ; const int64_t avlen = A->vlen ; //-------------------------------------------------------------------------- // determine the max number of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; //-------------------------------------------------------------------------- // slice the entries for each task //-------------------------------------------------------------------------- GB_WERK_DECLARE (A_ek_slicing, int64_t) ; int A_ntasks, A_nthreads ; GB_SLICE_MATRIX (A, 2, chunk) ; //-------------------------------------------------------------------------- // extract the vector index for each entry //-------------------------------------------------------------------------- int tid ; #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1) for (tid = 0 ; tid < A_ntasks ; tid++) { // if kfirst > klast then task tid does no work at all int64_t kfirst = kfirst_Aslice [tid] ; int64_t klast = klast_Aslice [tid] ; for (int64_t k = kfirst ; k <= klast ; k++) { //------------------------------------------------------------------ // find the part of A(:,k) to be operated on by this task //------------------------------------------------------------------ int64_t j = GBH (Ah, k) ; int64_t pA_start, pA_end ; GB_get_pA (&pA_start, &pA_end, tid, k, kfirst, klast, pstart_Aslice, Ap, avlen) ; //------------------------------------------------------------------ // extract vector indices of A(:,j) //------------------------------------------------------------------ for (int64_t p = pA_start ; p < pA_end ; p++) { J [p] = j ; } } } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_ALL ; return (GrB_SUCCESS) ; }
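/* --------------------------------------------------------------------------
   Illustration only (not part of the original file): a serial sketch of what
   the parallel loop above computes, on plain CSC arrays instead of a
   GrB_Matrix (the data below is hypothetical). For each stored entry p that
   belongs to vector j, i.e. Ap [j] <= p < Ap [j+1], the output is J [p] = j.
   The real code additionally maps the slice index k through GBH (Ah, k) for
   hypersparse matrices and splits the entries across tasks via GB_ek_slice. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main (void)
{
    /* a sparse matrix with 3 vectors holding 1, 3, and 2 entries (nnz = 6) */
    int64_t Ap [ ] = { 0, 1, 4, 6 } ;      /* vector pointers */
    int64_t nvec = 3 ;
    int64_t J [6] ;
    for (int64_t j = 0 ; j < nvec ; j++)
    {
        for (int64_t p = Ap [j] ; p < Ap [j+1] ; p++)
        {
            J [p] = j ;
        }
    }
    for (int64_t p = 0 ; p < 6 ; p++) printf ("%" PRId64 " ", J [p]) ;
    printf ("\n") ;                        /* prints: 0 1 1 1 2 2 */
    return (0) ;
}
/* ------------------------------------------------------------------------ */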
GB_unop__identity_fp64_uint64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_fp64_uint64) // op(A') function: GB (_unop_tran__identity_fp64_uint64) // C type: double // A type: uint64_t // cast: double cij = (double) aij // unaryop: cij = aij #define GB_ATYPE \ uint64_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ double z = (double) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (double) aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_fp64_uint64) ( double *Cx, // Cx and Ax may be aliased const uint64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint64_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint64_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fp64_uint64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
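/* --------------------------------------------------------------------------
   Illustration only (not part of the original file): a serial sketch of the
   generated apply kernel above, Cx [p] = (double) Ax [p], including the
   bitmap case where Ab marks which entries exist. The data is hypothetical;
   the real kernel adds OpenMP scheduling and the GB_DISABLE plumbing. */
#include <stdio.h>
#include <stdint.h>

int main (void)
{
    uint64_t Ax [ ] = { 1, 2, 3, 4 } ;
    int8_t   Ab [ ] = { 1, 0, 1, 1 } ;    /* bitmap: entry 1 is not present */
    double   Cx [4] = { 0 } ;
    for (int64_t p = 0 ; p < 4 ; p++)
    {
        if (!Ab [p]) continue ;           /* skip non-entries */
        uint64_t aij = Ax [p] ;           /* aij = Ax [pA] */
        double z = (double) aij ;         /* cast */
        Cx [p] = z ;                      /* cij = aij (identity op) */
    }
    printf ("%g %g %g %g\n", Cx [0], Cx [1], Cx [2], Cx [3]) ;  /* 1 0 3 4 */
    return (0) ;
}
/* ------------------------------------------------------------------------ */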
edgelist.h
/****************************************************************************** * ** Copyright (c) 2016, Intel Corporation ** * ** All rights reserved. ** * ** ** * ** Redistribution and use in source and binary forms, with or without ** * ** modification, are permitted provided that the following conditions ** * ** are met: ** * ** 1. Redistributions of source code must retain the above copyright ** * ** notice, this list of conditions and the following disclaimer. ** * ** 2. Redistributions in binary form must reproduce the above copyright ** * ** notice, this list of conditions and the following disclaimer in the ** * ** documentation and/or other materials provided with the distribution. ** * ** 3. Neither the name of the copyright holder nor the names of its ** * ** contributors may be used to endorse or promote products derived ** * ** from this software without specific prior written permission. ** * ** ** * ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** * ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** * ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** * ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** * ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** * ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** * ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** * ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** * ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** * ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** * ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * ******************************************************************************/ /* Michael Anderson (Intel Corp.), Narayanan Sundaram (Intel Corp.) 
* * ******************************************************************************/ #ifndef SRC_EDGELIST_H_ #define SRC_EDGELIST_H_ #include <string> /* headers this file actually uses (previously pulled in transitively) */ #include <algorithm> #include <cassert> #include <cstdio> #include <cstring> #include <iostream> #include <sstream> #include <type_traits> #include <immintrin.h> /* _mm_malloc / _mm_free */ #include <mpi.h> template <typename T> struct edge_t { edge_t() {} edge_t(int _src, int _dst, T _val) { src = _src; dst = _dst; val = _val; } int src; int dst; T val; }; template <typename T> struct edgelist_t { edge_t<T>* edges; int m; int n; int nnz; edgelist_t() : edges(nullptr), m(0), n(0), nnz(0) {} edgelist_t(int _m, int _n, int _nnz) { m = _m; n = _n; nnz = _nnz; edges = nullptr; /* keep the pointer defined even when nnz == 0 */ if(nnz > 0) { edges = reinterpret_cast<edge_t<T>*>(_mm_malloc((size_t)nnz * sizeof(edge_t<T>), 64)); } } edgelist_t(edge_t<T>* edges, int m, int n, int nnz) : edges(edges), m(m), n(n), nnz(nnz) {} void clear() { if (nnz > 0) { _mm_free(edges); } edges = nullptr; nnz = 0; m = 0; n = 0; } }; template <typename T> struct tedge_t { int src; int dst; int tile_id; T val; }; template<typename T> bool readLine (FILE * ifile, int * src, int * dst, T * val, bool binaryformat=true, bool edgeweights=true) { if(binaryformat) { auto fread_bytes = fread(src, sizeof(int), 1, ifile); if (feof(ifile)) return false; assert(fread_bytes == 1); fread_bytes = fread(dst, sizeof(int), 1, ifile); if (feof(ifile)) return false; assert(fread_bytes == 1); if (edgeweights) { fread_bytes = fread(val, sizeof(T), 1, ifile); if (feof(ifile)) return false; assert(fread_bytes == 1); } else { *val = (T)(1); } } else { if (edgeweights) { int ret; if (std::is_same<T, float>::value) { ret = fscanf(ifile, "%d %d %f", src, dst, val); if (ret != 3) return false; } else if (std::is_same<T, double>::value) { ret = fscanf(ifile, "%d %d %lf", src, dst, val); if (ret != 3) return false; } else if (std::is_same<T, int>::value) { ret = fscanf(ifile, "%d %d %d", src, dst, val); if (ret != 3) return false; } else if (std::is_same<T, unsigned int>::value) { ret = fscanf(ifile, "%d %d %u", src, dst, val); if (ret != 3) return false; } else { std::cout << "Data type not supported (read)" << std::endl; } } else { int ret = fscanf(ifile, "%d %d", src, dst); if (ret == 2) { *val = (T)(1); } else return false; } if (feof(ifile)) return false; } return true; } template<typename T> void get_maxid_and_nnz(FILE* fp, int* m, int* n, unsigned long int* nnz, bool* symmetric, bool *pattern, bool binaryformat=true, bool header=true, bool edgeweights=true) { *symmetric = false; *pattern = false; /* defaults: only the MatrixMarket header path below detects these, so initialize them for the binary and no-header paths */ if (header) { int tmp_[3]; if (binaryformat) { auto fread_bytes = fread(tmp_, sizeof(int), 3, fp); assert(fread_bytes == 3); *m = tmp_[0]; *n = tmp_[1]; *nnz = tmp_[2]; } else { unsigned long position; char line[256]; // read matrixmarket header fflush(fp); position = ftell(fp); fgets(line, 256, fp); *symmetric = strstr(line, "symmetric") != NULL; *pattern = strstr(line, "pattern") != NULL; edgeweights = !(*pattern); /* pattern files carry no edge weights */ // skip all the comments line[0] = '%'; fseek(fp, position, SEEK_SET); while (line[0] == '%') { fflush(fp); position = ftell(fp); fgets(line, 256, fp); } fseek(fp, position, SEEK_SET); int ret = fscanf(fp, "%d %d %d", &(tmp_[0]), &(tmp_[1]), &(tmp_[2])); assert(ret == 3); *m = tmp_[0]; *n = tmp_[1]; *nnz = tmp_[2]; } return; } else { //no header unsigned long nnz_ = 0; int tempsrc, tempdst; int maxm = 0; int maxn = 0; T tempval; while(true) { if(feof(fp)) { break; } if (!readLine<T>(fp, &tempsrc, &tempdst, &tempval, binaryformat, edgeweights)) { break; } maxm = (maxm > tempsrc)?(maxm):(tempsrc); maxn = (maxn > tempdst)?(maxn):(tempdst); nnz_++; } *m = maxm; *n = maxn; *nnz = nnz_; } } template<typename T> void writeLine (FILE* ofile, int src, int dst, T val, bool binaryformat=true, bool
edgeweights=true) { if (binaryformat) { auto fwrite_bytes = fwrite(&src, sizeof(int), 1, ofile); assert(fwrite_bytes == 1); fwrite_bytes = fwrite(&dst, sizeof(int), 1, ofile); assert(fwrite_bytes == 1); if (edgeweights) { fwrite_bytes = fwrite(&val, sizeof(T), 1, ofile); assert(fwrite_bytes == 1); } } else { if (edgeweights) { if (std::is_same<T, float>::value) { fprintf(ofile, "%d %d %.8f\n", src, dst, val); } else if (std::is_same<T, double>::value) { fprintf(ofile, "%d %d %.15lf\n", src, dst, val); } else if (std::is_same<T, int>::value) { fprintf(ofile, "%d %d %d\n", src, dst, val); } else if (std::is_same<T, unsigned int>::value) { fprintf(ofile, "%d %d %u\n", src, dst, val); } else { std::cout << "Data type not supported (write)\n"; } } else { fprintf(ofile, "%d %d\n", src, dst); } } } template <typename T> void write_edgelist(const char* dir, const edgelist_t<T> & edgelist, bool binaryformat=true, bool header=true, bool edgeweights=true) { int global_nrank = get_global_nrank(); int global_myrank = get_global_myrank(); std::stringstream fname_ss; fname_ss << dir << global_myrank; printf("Writing file: %s\n", fname_ss.str().c_str()); FILE * fp; if (binaryformat) { fp = fopen(fname_ss.str().c_str(), "wb"); if (header) { auto fwrite_bytes = fwrite(&(edgelist.m), sizeof(int), 1, fp); assert(fwrite_bytes == 1); fwrite_bytes = fwrite(&(edgelist.n), sizeof(int), 1, fp); assert(fwrite_bytes == 1); fwrite_bytes = fwrite(&(edgelist.nnz), sizeof(int), 1, fp); assert(fwrite_bytes == 1); } } else { fp = fopen(fname_ss.str().c_str(), "w"); if (header) { fprintf(fp, "%d %d %u\n", edgelist.m, edgelist.n, edgelist.nnz); } } for(auto i = 0 ; i < edgelist.nnz ; i++) { writeLine<T>(fp, edgelist.edges[i].src, edgelist.edges[i].dst, edgelist.edges[i].val, binaryformat, edgeweights); } fclose(fp); } template <typename T> void load_edgelist(const char* dir, edgelist_t<T>* edgelist, bool single=true, bool binaryformat=true, bool header=true, bool edgeweights=true) { int global_nrank = get_global_nrank(); int global_myrank = get_global_myrank(); edgelist->m = 0; edgelist->n = 0; edgelist->nnz = 0; bool symmetric, pattern; for(int i = global_myrank ; ; i += global_nrank) { std::stringstream fname_ss; if (single) fname_ss << dir; else fname_ss << dir << i; FILE* fp; if (binaryformat) { fp = fopen(fname_ss.str().c_str(), "rb"); } else { fp = fopen(fname_ss.str().c_str(), "r"); } if(!fp) { printf("Could not open file: %s\n", fname_ss.str().c_str()); break; } else { printf("Reading file: %s\n", fname_ss.str().c_str()); } int m_, n_; unsigned long nnz_; get_maxid_and_nnz<T>(fp, &m_, &n_, &nnz_, &symmetric, &pattern, binaryformat, header, edgeweights); edgelist->m = std::max(m_, edgelist->m); edgelist->n = std::max(n_, edgelist->n); edgelist->nnz += nnz_; if (symmetric) edgelist->nnz += nnz_; fclose(fp); if (single) break; } int local_max_m = edgelist->m; int max_m = edgelist->m; int local_max_n = edgelist->n; int max_n = edgelist->n; MPI_Allreduce(&local_max_m, &max_m, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); MPI_Allreduce(&local_max_n, &max_n, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); edgelist->m = max_m; edgelist->n = max_n; std::cout << (symmetric ? "Undirected " : "Directed ") << (pattern ? 
"Unweighted" : "Weighted") << std::endl; std::cout << "Got: " << edgelist->m << " by " << edgelist->n << " vertices" << std::endl; std::cout << "Got: " << edgelist->nnz << " edges" << std::endl; edgelist->edges = reinterpret_cast<edge_t<T>*>( _mm_malloc((uint64_t)edgelist->nnz * (uint64_t)sizeof(edge_t<T>), 64)); unsigned long int nnzcnt = 0; for(int i = global_myrank ; ; i += global_nrank) { std::stringstream fname_ss; if (single) fname_ss << dir; else fname_ss << dir << i; //printf("Opening file: %s\n", fname_ss.str().c_str()); FILE* fp; if (binaryformat) { fp = fopen(fname_ss.str().c_str(), "rb"); } else { fp = fopen(fname_ss.str().c_str(), "r"); } if(!fp) break; if (header) { //remove header int m_, n_; unsigned long nnz_; get_maxid_and_nnz<T>(fp, &m_, &n_, &nnz_, &symmetric, &pattern, binaryformat, header, edgeweights); } int j = 0; while(true) { if (feof(fp)) { break; } if (!readLine<T>(fp, &(edgelist->edges[nnzcnt].src), &(edgelist->edges[nnzcnt].dst), &(edgelist->edges[nnzcnt].val), binaryformat, !pattern && edgeweights)) { break; } #ifdef __DEBUG //std::cout <<(edgelist->edges[nnzcnt].src) << " " << (edgelist->edges[nnzcnt].dst) << std::endl; if(edgelist->edges[nnzcnt].src <= 0 || edgelist->edges[nnzcnt].dst <= 0 || edgelist->edges[nnzcnt].src > edgelist->m || edgelist->edges[nnzcnt].dst > edgelist->n) { std::cout << "Invalid edge, i, j, nnz: " << i << " , " << j << " , " << nnzcnt << std::endl; exit(0); } j++; #endif nnzcnt++; if (symmetric) { edgelist->edges[nnzcnt].src = edgelist->edges[nnzcnt-1].dst; edgelist->edges[nnzcnt].dst = edgelist->edges[nnzcnt-1].src; edgelist->edges[nnzcnt].val = edgelist->edges[nnzcnt-1].val; nnzcnt++; } } fclose(fp); if (single) break; } } template <typename T> void randomize_edgelist_square(edgelist_t<T>* edgelist) { unsigned int* mapping = new unsigned int[edgelist->m]; unsigned int* rval = new unsigned int[edgelist->m]; int global_myrank = get_global_myrank(); if (global_myrank == 0) { srand(5); // #pragma omp parallel for for (int i = 0; i < edgelist->m; i++) { mapping[i] = i; rval[i] = rand() % edgelist->m; } for (int i = 0; i < edgelist->m; i++) { unsigned int tmp = mapping[i]; mapping[i] = mapping[rval[i]]; mapping[rval[i]] = tmp; } } delete[] rval; MPI_Bcast(mapping, edgelist->m, MPI_INT, 0, MPI_COMM_WORLD); #pragma omp parallel for for (int i = 0; i < edgelist->nnz; i++) { edgelist->edges[i].src = mapping[edgelist->edges[i].src - 1] + 1; edgelist->edges[i].dst = mapping[edgelist->edges[i].dst - 1] + 1; } delete[] mapping; } template<typename T> void remove_empty_columns(edgelist_t<T> * edges, int ** remaining_indices) { // Remove empty columns bool * colexists = new bool[edges->n]; memset(colexists, 0, edges->n * sizeof(bool)); int * new_colids = new int[edges->n+1]; memset(new_colids, 0, (edges->n + 1) * sizeof(int)); int new_ncols = 0; for(int i = 0 ; i < edges->nnz ; i++) { if(!colexists[edges->edges[i].dst-1]) { new_ncols++; } colexists[edges->edges[i].dst-1] = true; } std::cout << "New ncols: " << new_ncols << std::endl; *(remaining_indices) = (int*) _mm_malloc(new_ncols * sizeof(int), 64); int new_colcnt = 0; for(int i = 0 ; i < edges->n; i++) { new_colids[i+1] = (colexists[i] ? 
1 : 0) + new_colids[i]; if(colexists[i]) { assert(new_colcnt < new_ncols); (*(remaining_indices))[new_colcnt] = i+1; new_colcnt++; } } assert(new_colcnt == new_ncols); #pragma omp parallel for for(int i = 0 ; i < edges->nnz ; i++) { edges->edges[i].dst = new_colids[edges->edges[i].dst-1] + 1; assert(edges->edges[i].dst - 1 >= 0); assert(edges->edges[i].dst - 1 < new_ncols); } edges->n = new_ncols; delete [] colexists; delete [] new_colids; } template<typename T> void filter_edges_by_row(edgelist_t<T> * edges, int start_row, int end_row) { int valid_edgecnt = 0; for(int i = 0 ; i < edges->nnz ; i++) { if(edges->edges[i].src-1 < end_row && edges->edges[i].src-1 >= start_row) { edges->edges[valid_edgecnt] = edges->edges[i]; edges->edges[valid_edgecnt].src -= start_row; valid_edgecnt++; } } edges->nnz = valid_edgecnt; edges->m = (end_row-start_row); std::cout << "New edges->m: " << edges->m << std::endl; } template<typename T> void get_dimensions(edge_t<T> * edges, int nnz, int &max_m, int &max_n) { int local_max_m = 0; int local_max_n = 0; #pragma omp parallel for reduction(max:local_max_m, local_max_n) for(int i = 0 ; i < nnz ; i++) { local_max_m = std::max(local_max_m, edges[i].src); local_max_n = std::max(local_max_n, edges[i].dst); } MPI_Allreduce(&local_max_m, &max_m, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); MPI_Allreduce(&local_max_n, &max_n, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); } template <typename T> void ReadEdges(edgelist_t<T>* edgelist, const char* fname_in, bool binaryformat=true, bool header=true, bool edgeweights=true, bool randomize=false) { /* pass 'single' explicitly: the previous call shifted binaryformat/header/edgeweights one parameter to the left */ load_edgelist(fname_in, edgelist, true, binaryformat, header, edgeweights); if (randomize) { randomize_edgelist_square<T>(edgelist); } } template <typename T> void WriteEdges(const edgelist_t<T>& edgelist, const char* fname_in, bool binaryformat=true, bool header=true, bool edgeweights=true) { write_edgelist(fname_in, edgelist, binaryformat, header, edgeweights); } #endif // SRC_EDGELIST_H_
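// ----------------------------------------------------------------------------
// Illustration only (not part of the original file): a self-contained sketch
// of the binary record layout that readLine/writeLine above use -- one int
// src, one int dst, then optionally one T-typed weight per edge, with 1-based
// vertex ids. Plain stdio stands in for the header (which also needs MPI and
// the project's get_global_* helpers); the file name is hypothetical.
#include <cstdio>

int main()
{
    FILE* f = fopen("edges.bin", "wb");
    if (!f) return 1;
    int src = 1, dst = 2;
    float w = 0.5f;                        // one weighted edge: 1 -> 2 (0.5)
    fwrite(&src, sizeof(int), 1, f);
    fwrite(&dst, sizeof(int), 1, f);
    fwrite(&w, sizeof(float), 1, f);
    fclose(f);

    f = fopen("edges.bin", "rb");
    if (!f) return 1;
    int s, d;
    float v;
    while (fread(&s, sizeof(int), 1, f) == 1 &&
           fread(&d, sizeof(int), 1, f) == 1 &&
           fread(&v, sizeof(float), 1, f) == 1)
    {
        printf("%d -> %d (%.2f)\n", s, d, v);
    }
    fclose(f);
    return 0;
}
// ----------------------------------------------------------------------------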
vlisa_onesample.c
/* ** Implementation of LISA algorithm ** for statistical inference of fMRI images ** ** 2nd level inference (one-sample test) ** ** G.Lohmann, 2017 */ #include <viaio/Vlib.h> #include <viaio/file.h> #include <viaio/mu.h> #include <viaio/option.h> #include <viaio/os.h> #include <viaio/VImage.h> #include <gsl/gsl_cdf.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_math.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include <gsl/gsl_histogram.h> #include <math.h> #include <stdio.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif /*_OPENMP*/ #define ABS(x) ((x) > 0 ? (x) : -(x)) extern void VIsolatedVoxels(VImage src,float threshold); extern void VHistogram(gsl_histogram *histogram,VString filename); extern void VCheckImage(VImage src); extern void FDR(VImage src,VImage dest,gsl_histogram *nullhist,gsl_histogram *realhist,double); extern double ttest1(double *data1,int n); extern void ImageStats(VImage src,double *,double *,double *hmin,double *hmax); extern void VBilateralFilter(VImage src,VImage dest,int radius,double var1,double var2,int); extern double VImageVar(VImage src); extern void VImageCount(VImage src); extern void VGetHistRange(VImage src,double *hmin,double *hmax); extern void VZScale(VImage src,float mode,float stddev); extern float VGetMode(VImage src); extern double t2z(double,double); extern void HistoUpdate(VImage,gsl_histogram *); /* generate permutation table */ int **genperm(gsl_rng *rx,int n,int numperm) { int i,j; int **table = (int **) VCalloc(numperm,sizeof(int *)); for (i = 0; i < numperm; i++) { table[i] = (int *) VCalloc(n,sizeof(int)); for (j=0; j<n; j++) { table[i][j] = 0; if (gsl_ran_bernoulli (rx,(double)0.5) == 1) table[i][j] = 1; } } return table; } /* onesample t-test */ void OnesampleTest(VImage *src1,VImage dest,int *permtable,int n) { int i,k,b,r,c,nslices,nrows,ncols; double nx,ave,var,u,t,z,df; double tiny=1.0e-8; extern void avevar(double *data,int n,double *a,double *v); nslices = VImageNBands(src1[0]); nrows = VImageNRows(src1[0]); ncols = VImageNColumns(src1[0]); VFillImage(dest,VAllBands,0); double *data = (double *) VCalloc(n,sizeof(double)); for (b=0; b<nslices; b++) { for (r=0; r<nrows; r++) { for (c=0; c<ncols; c++) { k = 0; for (i=0; i<n; i++) { u = (double)VPixel(src1[i],b,r,c,VFloat); if (ABS(u) > tiny) { if (permtable[i] > 0) u = -u; data[k] = u; k++; } } if (k < n-2) continue; avevar(data,k,&ave,&var); if (var < tiny) continue; nx = (double)k; t = sqrt(nx) * ave/sqrt(var); df = nx - 1.0; z = t2z(t,df); if (t < 0) z = -z; VPixel(dest,b,r,c,VFloat) = z; } } } VFree(data); } int main (int argc, char *argv[]) { static VArgVector in_files1; static VString out_filename=""; static VString mask_filename=""; static VFloat alpha = 0.05; static VShort radius = 2; static VFloat rvar = 2.0; static VFloat svar = 2.0; static VShort numiter = 2; static VShort numperm = 5000; static VLong seed = 99402622; static VBoolean centering = FALSE; static VBoolean cleanup = TRUE; static VShort nproc = 0; static VOptionDescRec options[] = { {"in", VStringRepn, 0, & in_files1, VRequiredOpt, NULL,"Input files" }, {"out", VStringRepn, 1, & out_filename, VRequiredOpt, NULL,"Output file" }, {"alpha",VFloatRepn,1,(VPointer) &alpha,VOptionalOpt,NULL,"FDR significance level"}, {"perm",VShortRepn,1,(VPointer) &numperm,VOptionalOpt,NULL,"Number of permutations"}, {"mask", VStringRepn, 1, (VPointer) &mask_filename, VRequiredOpt, NULL, "Mask"}, {"seed",VLongRepn,1,(VPointer) &seed,VOptionalOpt,NULL,"Seed for random number generation"}, 
{"radius",VShortRepn,1,(VPointer) &radius,VOptionalOpt,NULL,"Bilateral parameter (radius in voxels)"}, {"rvar",VFloatRepn,1,(VPointer) &rvar,VOptionalOpt,NULL,"Bilateral parameter (radiometric)"}, {"svar",VFloatRepn,1,(VPointer) &svar,VOptionalOpt,NULL,"Bilateral parameter (spatial)"}, {"filteriterations",VShortRepn,1,(VPointer) &numiter,VOptionalOpt,NULL,"Bilateral parameter (number of iterations)"}, {"cleanup",VBooleanRepn,1,(VPointer) &cleanup,VOptionalOpt,NULL,"Whether to remove isloated voxels"}, {"j",VShortRepn,1,(VPointer) &nproc,VOptionalOpt,NULL,"Number of processors to use, '0' to use all"}, }; FILE *fp=NULL; VString in_filename; VAttrList list1=NULL,out_list=NULL,geolist=NULL; VImage *src1; int i,nimages,npix=0; char *prg_name=GetLipsiaName("vlisa_onesample"); fprintf (stderr, "%s\n", prg_name); /* parse command line */ if (! VParseCommand (VNumber (options), options, & argc, argv)) { VReportUsage (argv[0], VNumber (options), options, NULL); exit (EXIT_FAILURE); } if (argc > 1) { VReportBadArgs (argc, argv); exit (EXIT_FAILURE); } /* omp-stuff */ #ifdef _OPENMP int num_procs=omp_get_num_procs(); if (nproc > 0 && nproc < num_procs) num_procs = nproc; fprintf(stderr," using %d cores\n",(int)num_procs); omp_set_num_threads(num_procs); #endif /* _OPENMP */ /* images */ VImage mask = VReadImageFile(mask_filename); if (mask==NULL) VError("Error reading mask file %s",mask_filename); nimages = in_files1.number; src1 = (VImage *) VCalloc(nimages,sizeof(VImage)); for (i = 0; i < nimages; i++) { in_filename = ((VString *) in_files1.vector)[i]; list1 = VReadAttrList(in_filename,0L,TRUE,FALSE); VMaskMinval(list1,mask,0.0); src1[i] = VReadImage(list1); if (src1[i] == NULL) VError(" no input image found"); if (VPixelRepn(src1[i]) != VFloatRepn) VError(" input pixel repn must be float"); if (i == 0) npix = VImageNPixels(src1[i]); else if (npix != VImageNPixels(src1[i])) VError(" inconsistent image dimensions"); /* use geometry info from 1st file */ if (geolist == NULL) { geolist = VGetGeoInfo(list1); double *D = VGetGeoPixdim(geolist,NULL); if (fabs(D[0]-4.0) < TINY) VError(" The input must be a list of separate 3D images, not one 4D file"); } } /* ini random permutations */ size_t n = nimages; gsl_rng_env_setup(); const gsl_rng_type *T = gsl_rng_default; gsl_rng *rx = gsl_rng_alloc(T); gsl_rng_set(rx,(unsigned long int)seed); int nperm=0; int **permtable = genperm(rx,(int)n,(int)numperm); /* estimate null variance based on first 30 permutations */ double hmin=0,hmax=0; float stddev=1.0; if (numperm > 0) { int tstperm = 30; if (tstperm > numperm) tstperm = numperm; double varsum=0,nx=0; #pragma omp parallel for shared(src1,permtable) schedule(dynamic) for (nperm = 0; nperm < tstperm; nperm++) { VImage zmap = VCreateImageLike(src1[0]); OnesampleTest(src1,zmap,permtable[nperm],nimages); #pragma omp critical { varsum += VImageVar(zmap); nx++; } VDestroyImage(zmap); } double meanvar = varsum/nx; stddev = sqrt(meanvar); } /* no permutation */ int *nopermtable = (int *) VCalloc(n,sizeof(int)); VImage dst1 = VCreateImageLike (src1[0]); VImage zmap1 = VCreateImageLike(src1[0]); OnesampleTest(src1,zmap1,nopermtable,nimages); if (numperm == 0) { double z = VImageVar(zmap1); stddev = (float)(sqrt(z)); /* update stddev */ } float mode=0; if (centering) mode = VGetMode(zmap1); if (numperm > 0) VZScale(zmap1,mode,stddev); VBilateralFilter(zmap1,dst1,(int)radius,(double)(rvar),(double)svar,(int)numiter); /* ini histograms */ VGetHistRange(dst1,&hmin,&hmax); size_t nbins = 20000; gsl_histogram *hist0 = 
gsl_histogram_alloc (nbins); gsl_histogram_set_ranges_uniform (hist0,hmin,hmax); gsl_histogram *histz = gsl_histogram_alloc (nbins); gsl_histogram_set_ranges_uniform (histz,hmin,hmax); HistoUpdate(dst1,histz); /* random permutations */ #pragma omp parallel for shared(src1,permtable) schedule(dynamic) for (nperm = 0; nperm < numperm; nperm++) { if (nperm%20 == 0) fprintf(stderr," perm %4d of %d\r",nperm,(int)numperm); VImage zmap = VCreateImageLike(src1[0]); VImage dst = VCreateImageLike (zmap); OnesampleTest(src1,zmap,permtable[nperm],nimages); float mode=0; if (centering) mode = VGetMode(zmap); VZScale(zmap,mode,stddev); VBilateralFilter(zmap,dst,(int)radius,(double)(rvar),(double)svar,(int)numiter); #pragma omp critical { HistoUpdate(dst,hist0); } VDestroyImage(dst); VDestroyImage(zmap); } /* apply fdr */ VImage fdrimage = VCopyImage (dst1,NULL,VAllBands); if (numperm > 0) { FDR(dst1,fdrimage,hist0,histz,(double)alpha); if (cleanup && alpha < 1.0) { VIsolatedVoxels(fdrimage,(float)(1.0-alpha)); } if (alpha < 1.0) VImageCount(fdrimage); } /* write output to disk */ out_list = VCreateAttrList (); VHistory(VNumber(options),options,prg_name,&list1,&out_list); VSetGeoInfo(geolist,out_list); VAppendAttr (out_list,"image",NULL,VImageRepn,fdrimage); fp = VOpenOutputFile (out_filename, TRUE); if (! VWriteFile (fp, out_list)) exit (1); fclose(fp); fprintf (stderr, "\n%s: done.\n", argv[0]); exit(0); }
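/* --------------------------------------------------------------------------
** Illustration only (not part of the original file): a scalar sketch of the
** sign-flipping permutation scheme used above. Under the null hypothesis
** (mean zero), each subject's value may have its sign flipped at random and
** the t statistic is recomputed per permutation. The data is hypothetical,
** rand() stands in for the GSL Bernoulli generator, and the p-value formula
** at the end is a textbook addition, not taken from this program.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

static double tstat(const double *x, int n)
{
  double ave = 0, var = 0;
  int i;
  for (i = 0; i < n; i++) ave += x[i];
  ave /= n;
  for (i = 0; i < n; i++) var += (x[i] - ave) * (x[i] - ave);
  var /= (n - 1);
  return sqrt((double)n) * ave / sqrt(var);
}

int main(void)
{
  double data[] = {1.2, 0.8, 1.5, 0.9, 1.1, 1.3};
  int n = 6, numperm = 1000, count = 0, p, i;
  double t0 = tstat(data, n);
  srand(99402622);  /* same spirit as the program's -seed option */
  for (p = 0; p < numperm; p++) {
    double flipped[6];
    for (i = 0; i < n; i++) flipped[i] = (rand() & 1) ? -data[i] : data[i];
    if (fabs(tstat(flipped, n)) >= fabs(t0)) count++;
  }
  printf("t = %.3f, permutation p ~ %.4f\n", t0, (double)(count + 1) / (numperm + 1));
  return 0;
}
/* ------------------------------------------------------------------------ */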
MsnhCVMatOp.h
#ifndef MSNHCVOP_H #define MSNHCVOP_H #include <Msnhnet/cv/MsnhCVMat.h> #include <Msnhnet/utils/MsnhTimeUtil.h> namespace Msnhnet { class MsnhNet_API MatOp { public: static void getROI(Mat &src, Mat &dst, const Vec2I32 &p1, const Vec2I32 &p2); static void setROI(Mat &srcDst, Mat &roi, const Vec2I32 &pos); static void cvtColor(Mat &src, Mat &dst, const CvtColorType& cvtType); static void resize(Mat& src, Mat &dst, const Vec2I32 &outSize, const ResizeType& resizeType=RESIZE_BILINEAR); template<typename T> static void copyMakeBorder(Mat& src, Mat &dst, const int &top ,const int &down, const int &left, const int &right, const T & val) { int array = DataType<T>::array; int fmt = DataType<T>::fmt; src.checkPixelType(array, fmt); int srcWidth = src.getWidth() ; int srcHeight = src.getHeight(); int finalWidth = srcWidth + left + right; int finalHeight = srcHeight + top + down; Mat tmpMat(finalWidth, finalHeight, src.getMatType()); tmpMat.fillPixel<T>(val); #ifdef USE_OMP #pragma omp parallel for num_threads(OMP_THREAD) #endif for (int i = 0; i < srcHeight; ++i) { if(fmt == 'b') { memcpy(tmpMat.getData().u8+(finalWidth*(i+top)+left)*array, src.getData().u8+(srcWidth*i)*array, array*srcWidth); } else if(fmt == 'f') { memcpy(tmpMat.getData().f32+(finalWidth*(i+top)+left)*array, src.getData().f32+(srcWidth*i)*array, array*srcWidth*4); } else if(fmt == 'd') { memcpy(tmpMat.getData().f64+(finalWidth*(i+top)+left)*array, src.getData().f64+(srcWidth*i)*array, array*srcWidth*8); } } dst = tmpMat; } static void flip(Mat &mat, const FlipMode &flipMode=FLIP_V); static double norm(Mat &mat1, Mat &mat2, const NormType& normType = NORM_L2); static double norm(Mat &mat, const NormType& normType = NORM_L2); template<typename T> static void _split(Mat &src, std::vector<Mat> &dst) { dst.clear(); if(src.getChannel()==1) { dst.push_back(src); } else if(src.getChannel()==3) { Mat R; Mat G; Mat B; Mat::createMat<T>(src.getWidth(),src.getHeight(),1,R); Mat::createMat<T>(src.getWidth(),src.getHeight(),1,G); Mat::createMat<T>(src.getWidth(),src.getHeight(),1,B); #ifdef USE_OMP uint64_t dataLen = src.getHeight()*src.getWidth(); uint16_t threadNum = dataLen>MIN_OMP_DATA?OMP_THREAD:1; #pragma omp parallel for num_threads(threadNum) #endif for (int i = 0; i < src.getHeight(); ++i) { for (int j = 0; j < src.getWidth(); ++j) { reinterpret_cast<T*>(R.getBytes())[i*src.getWidth() + j] = reinterpret_cast<T*>(src.getBytes())[(i*src.getWidth() + j)*3 + 0]; reinterpret_cast<T*>(G.getBytes())[i*src.getWidth() + j] = reinterpret_cast<T*>(src.getBytes())[(i*src.getWidth() + j)*3 + 1]; reinterpret_cast<T*>(B.getBytes())[i*src.getWidth() + j] = reinterpret_cast<T*>(src.getBytes())[(i*src.getWidth() + j)*3 + 2]; } } dst.push_back(R); dst.push_back(G); dst.push_back(B); } else if(src.getChannel()==4) { Mat R; Mat G; Mat B; Mat A; Mat::createMat<T>(src.getWidth(),src.getHeight(),1,R); Mat::createMat<T>(src.getWidth(),src.getHeight(),1,G); Mat::createMat<T>(src.getWidth(),src.getHeight(),1,B); Mat::createMat<T>(src.getWidth(),src.getHeight(),1,A); #ifdef USE_OMP uint64_t dataLen = src.getHeight()*src.getWidth(); uint16_t threadNum = dataLen>MIN_OMP_DATA?OMP_THREAD:1; #pragma omp parallel for num_threads(threadNum) #endif for (int i = 0; i < src.getHeight(); ++i) { for (int j = 0; j < src.getWidth(); ++j) { reinterpret_cast<T*>(R.getBytes())[i*src.getWidth() + j] = reinterpret_cast<T*>(src.getBytes())[(i*src.getWidth() + j)*4 + 0]; reinterpret_cast<T*>(G.getBytes())[i*src.getWidth() + j] = reinterpret_cast<T*>(src.getBytes())[(i*src.getWidth() 
+ j)*4 + 1]; reinterpret_cast<T*>(B.getBytes())[i*src.getWidth() + j] = reinterpret_cast<T*>(src.getBytes())[(i*src.getWidth() + j)*4 + 2]; reinterpret_cast<T*>(A.getBytes())[i*src.getWidth() + j] = reinterpret_cast<T*>(src.getBytes())[(i*src.getWidth() + j)*4 + 3]; } } dst.push_back(R); dst.push_back(G); dst.push_back(B); dst.push_back(A); } } static void split(Mat &src, std::vector<Mat> &dst); template<typename T> static void _merge(std::vector<Mat> &src, Mat &dst) { int width = src[0].getWidth(); int height = src[0].getHeight(); /* was src[1]: out of bounds when merging a single plane */ if(src.size()==1) { dst = src[0]; return; } else if(src.size()==3) { Mat::createMat<T>(width,height,3,dst); #ifdef USE_OMP uint64_t dataLen = width*height; uint16_t threadNum = dataLen>MIN_OMP_DATA?OMP_THREAD:1; #pragma omp parallel for num_threads(threadNum) #endif for (int i = 0; i < height; ++i) { for (int j = 0; j < width; ++j) { reinterpret_cast<T*>(dst.getBytes())[(i*width + j)*3 + 0] = reinterpret_cast<T*>(src[0].getBytes())[i*width + j]; reinterpret_cast<T*>(dst.getBytes())[(i*width + j)*3 + 1] = reinterpret_cast<T*>(src[1].getBytes())[i*width + j]; reinterpret_cast<T*>(dst.getBytes())[(i*width + j)*3 + 2] = reinterpret_cast<T*>(src[2].getBytes())[i*width + j]; } } } else if(src.size()==4) { Mat::createMat<T>(width,height,4,dst); #ifdef USE_OMP uint64_t dataLen = width*height; uint16_t threadNum = dataLen>MIN_OMP_DATA?OMP_THREAD:1; #pragma omp parallel for num_threads(threadNum) #endif for (int i = 0; i < height; ++i) { for (int j = 0; j < width; ++j) { reinterpret_cast<T*>(dst.getBytes())[(i*width + j)*4 + 0] = reinterpret_cast<T*>(src[0].getBytes())[i*width + j]; reinterpret_cast<T*>(dst.getBytes())[(i*width + j)*4 + 1] = reinterpret_cast<T*>(src[1].getBytes())[i*width + j]; reinterpret_cast<T*>(dst.getBytes())[(i*width + j)*4 + 2] = reinterpret_cast<T*>(src[2].getBytes())[i*width + j]; reinterpret_cast<T*>(dst.getBytes())[(i*width + j)*4 + 3] = reinterpret_cast<T*>(src[3].getBytes())[i*width + j]; } } } } static void merge(std::vector<Mat> &src, Mat &dst); static bool checkMatsProps(Mat &mat1, Mat &mat2); static void threshold(Mat &src, Mat &dst, const double& threshold, const double& maxVal, const int &thresholdType); static Mat hContact(const Mat &A, const Mat &B); static Mat vContact(const Mat &A, const Mat &B); static std::vector<int> histogram(Mat &src); static uint8_t getOtsu(Mat &src); private: static void RGB2BGR(const Mat &src, Mat &dst); static void RGB2GRAY(Mat &src, Mat &dst); static void RGBA2GRAY(Mat &src, Mat &dst); static void GRAY2RGB(Mat &src,Mat &dst); static void GRAY2RGBA(Mat &src, Mat &dst); static void RGB2RGBA(Mat &src, Mat &dst); static void RGBA2RGB(Mat &src, Mat &dst); static void flipV(Mat &mat); static void flipH(Mat &mat); }; } #endif
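// ----------------------------------------------------------------------------
// Illustration only (not part of the original file): a minimal standalone
// sketch of the interleaved-to-planar copy that _split performs for a
// 3-channel image. Pixel (i,j), channel c lives at (i*width + j)*3 + c in the
// interleaved buffer and at i*width + j in plane c. std::vector stands in for
// Msnhnet::Mat here, and the pixel values are hypothetical.
#include <cstdio>
#include <vector>

int main()
{
    const int w = 2, h = 2;
    std::vector<unsigned char> rgb = {  // 2x2 interleaved RGB
        10, 20, 30,  11, 21, 31,
        12, 22, 32,  13, 23, 33};
    std::vector<unsigned char> plane[3];
    for (int c = 0; c < 3; c++) plane[c].resize(w * h);

    for (int i = 0; i < h; i++)
        for (int j = 0; j < w; j++)
            for (int c = 0; c < 3; c++)
                plane[c][i * w + j] = rgb[(i * w + j) * 3 + c];

    // prints the R plane: 10 11 12 13
    printf("R plane: %d %d %d %d\n", plane[0][0], plane[0][1], plane[0][2], plane[0][3]);
    return 0;
}
// ----------------------------------------------------------------------------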
log_helper.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <sys/time.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <assert.h> /** * This flag is necessary when we want to re-write the * log filename if the time is incorrect */ #define USE_DUPLICATE_LOG_FILENAME 1 //Buff for ECC check #define BUFSIZE 128 #include "log_helper.h" #ifdef MIC_NATIVE char timestamp_watchdog[200] = "/micNfs/carol/logs/timestamp.txt"; #define QUERY_GPU "echo Enabled" #define ENABLED_CONFIRMATION "Enabled" #else // Location of timestamp file for software watchdog //char timestamp_watchdog[200] = "/home/carol/watchdog/timestamp.txt"; char *timestamp_watchdog; char timestamp_file[] = "timestamp.txt"; char vardir_key[] = "vardir"; //Terminal query which tells if ECC is enabled or not; it could vary depending on the platform #define QUERY_GPU "/usr/bin/nvidia-smi --query-gpu=gpu_name,ecc.mode.current --format=csv,noheader 2>/tmp/trash" #define ENABLED_CONFIRMATION "Enabled" #endif // Max errors that can be found for a single iteration // If more than max errors are found, exit the program unsigned long int max_errors_per_iter = 500; unsigned long int max_infos_per_iter = 500; //Double error kill flag unsigned char kill_after_double_error = 1; // Absolute path for log file, if needed #ifdef MIC_NATIVE char absolute_path[200] = "/micNfs/carol/logs/"; #else //char absolute_path[200] = "/home/carol/logs/"; char *absolute_path; #endif char logdir_key[] = "logdir"; char signalcmd_key[] = "signalcmd"; #ifdef MIC_NATIVE char config_file[] = "/micNfs/radiation-benchmarks.conf"; #else char config_file[] = "/etc/radiation-benchmarks.conf"; #endif // Used to print the log only for some iterations, equal 1 means print every iteration int iter_interval_print = 1; // Used to log max_error_per_iter details each iteration int log_error_detail_count = 0; int log_info_detail_count = 0; char log_file_name[200] = ""; char full_log_file_name[300] = ""; // Saves the last amount of error found for a specific iteration unsigned long int last_iter_errors = 0; // Saves the last iteration index that had an error unsigned long int last_iter_with_errors = 0; unsigned long int kernels_total_errors = 0; unsigned long int kernels_total_infos = 0; unsigned long int iteration_number = 0; double kernel_time_acc = 0; double kernel_time = 0; long long it_time_start; // ~ =========================================================================== // Functions to check ECC /** * String contains * check if word is contained in sent string * return 1 if it is * return 0 otherwise */ int contains(const char *sent, char *word) { //call popen on terminal--------------- const char *temp = strstr(sent, word); if (temp) { return 1; } return 0; } // ~ =========================================================================== /** * popen_call * call popen and check if check_line is in output string * if check_line is in popen output the matching line is written to output_line * return 1 if the procedure executed * return 0 otherwise */ int popen_call(char *cmd, char *check_line, char *output_line) { FILE *fp; char buf[BUFSIZE]; int ret = 0; if ((fp = popen(cmd, "r")) == NULL) { //printf("Error opening pipe!\n"); return 0; } while (fgets(buf, BUFSIZE, fp) != NULL) { if (contains(buf, check_line)) { strcpy(output_line, buf); ret = 1; } } fflush(fp); if (pclose(fp)) { //printf("Command not found or exited with error status\n"); return 0; } return ret; } // ~
=========================================================================== /** * This function checks whether ECC is enabled or disabled for NVIDIA GPUs * 0 if ECC is disabled * 1 if ECC is enabled */ int check_ecc_status() { char output_line[BUFSIZE]; memset(output_line, 0, BUFSIZE); //check for enabled ECC return (popen_call(QUERY_GPU, ENABLED_CONFIRMATION, output_line)); } // ~ =========================================================================== long long get_time() { struct timeval tv; gettimeofday(&tv, NULL); return ((long long) tv.tv_sec * 1000000) + tv.tv_usec; /* cast avoids overflow where time_t is 32-bit */ } // ~ =========================================================================== unsigned long int set_max_errors_iter(unsigned long int max_errors) { max_errors_per_iter = max_errors; return max_errors_per_iter; } // ~ =========================================================================== unsigned long int set_max_infos_iter(unsigned long int max_infos) { max_infos_per_iter = max_infos; return max_infos_per_iter; } // ~ =========================================================================== // Set the interval the program must print log details, default is 1 (each iteration) int set_iter_interval_print(int interval) { if (interval < 1) { iter_interval_print = 1; } else { iter_interval_print = interval; } return iter_interval_print; } // ~ =========================================================================== // Read config file to get the value of a 'key = value' pair char * getValueConfig(char * key) { FILE * fp; char * line = NULL; size_t len = 0; ssize_t read; char value[200]; int i, j; int key_not_match; fp = fopen(config_file, "r"); if (fp == NULL) return NULL; while ((read = getline(&line, &len, fp)) != -1) { // ignore comments and sections in config file if (line[0] == '#' || line[0] == '[') continue; // remove white spaces for (i = 0; line[i] == ' '; i++) ; // check if key of this line is the key we are looking for j = 0; key_not_match = 0; for (; line[i] != ' ' && line[i] != '=' && key[j] != '\0'; i++) { if (key[j] != line[i]) { key_not_match = 1; break; } j++; } // Key not matched if (key_not_match) continue; // key of line is a substring of the key we are looking for if (key[j] != '\0') continue; // key matched but is a substring of current key if (line[i] != ' ' && line[i] != '=') continue; // ignore spaces and '=' to go to the first character of value for (; line[i] == ' ' || line[i] == '='; i++) ; j = 0; // copy value to buffer until end of line or '#' is found for (; line[i] != '\0' && line[i] != '#' && line[i] != '\n'; i++) { value[j] = line[i]; j++; } value[j] = '\0'; char *v = (char *) malloc(sizeof(char) * strlen(value) + 2); strcpy(v, value); fclose(fp); if (line) free(line); return v; } fclose(fp); if (line) free(line); return NULL; } // ~ =========================================================================== // Update with current timestamp the file where the software watchdog watches void update_timestamp() { char *signalcmd = getValueConfig(signalcmd_key); if (signalcmd) { system(signalcmd); free(signalcmd); /* getValueConfig returns malloc'd memory that was previously leaked here */ } time_t timestamp = time(NULL); FILE *fp = fopen(timestamp_watchdog, "w"); if (fp) { fprintf(fp, "%d", (int) timestamp); fclose(fp); } } // ~ =========================================================================== // Return the name of the log file generated char * get_log_file_name() { return full_log_file_name; } // ~ =========================================================================== // Generate the log file name, log info from user about the test to be executed and reset log variables int
start_log_file(char *benchmark_name, char *test_info) { #ifndef MIC_NATIVE char *var_dir = getValueConfig(vardir_key); if (!var_dir) { fprintf(stderr, "[ERROR] Could not read var dir in config file '%s'\n", config_file); return 1; //exit(1); } timestamp_watchdog = (char *) malloc( sizeof(char) * (strlen(var_dir) + strlen(timestamp_file) + 4)); strcpy(timestamp_watchdog, var_dir); if (strlen(timestamp_watchdog) > 0 && timestamp_watchdog[strlen(timestamp_watchdog) - 1] != '/') strcat(timestamp_watchdog, "/"); strcat(timestamp_watchdog, timestamp_file); #endif update_timestamp(); time_t file_time; struct tm *ptm; char day[10], month[10], year[15], hour[10], second[10], minute[10]; char log_file_name[190] = ""; file_time = time(NULL); //Local time is the correct one ptm = localtime(&file_time); snprintf(day, sizeof(day), "%02d", ptm->tm_mday); snprintf(month, sizeof(month), "%02d", ptm->tm_mon + 1); snprintf(year, sizeof(year), "%04d", ptm->tm_year + 1900); snprintf(hour, sizeof(hour), "%02d", ptm->tm_hour); snprintf(minute, sizeof(minute), "%02d", ptm->tm_min); snprintf(second, sizeof(second), "%02d", ptm->tm_sec); // ~ Get the host name to add inside the log name. char host[35] = "Host"; int host_error = 0; host_error = gethostname(host, 35); if (host_error != 0) { fprintf(stderr, "[ERROR in gethostname(char *, int)] Could not access the host name\n"); return 1; } strcpy(log_file_name, year); strcat(log_file_name, "_"); strcat(log_file_name, month); strcat(log_file_name, "_"); strcat(log_file_name, day); strcat(log_file_name, "_"); strcat(log_file_name, hour); strcat(log_file_name, "_"); strcat(log_file_name, minute); strcat(log_file_name, "_"); strcat(log_file_name, second); strcat(log_file_name, "_"); strcat(log_file_name, benchmark_name); strcat(log_file_name, "_"); //check ECC if (check_ecc_status()) { strcat(log_file_name, "ECC_ON_"); } else { strcat(log_file_name, "ECC_OFF_"); } //-------- strcat(log_file_name, host); strcat(log_file_name, ".log"); #ifndef MIC_NATIVE absolute_path = getValueConfig(logdir_key); if (!absolute_path) { fprintf(stderr, "[ERROR] Could not read log dir in config file '%s'\n", config_file); return 1; //exit(1); } #endif strcpy(full_log_file_name, absolute_path); if (strlen(absolute_path) > 0 && absolute_path[strlen(absolute_path) - 1] != '/') strcat(full_log_file_name, "/"); strcat(full_log_file_name, log_file_name); // ~ printf("%s\n", full_log_file_name); #ifndef USE_DUPLICATE_LOG_FILENAME struct stat buf; if (stat(full_log_file_name, &buf) == 0) { fprintf(stderr, "[ERROR in create_log_file(char *)] File already exists %s\n", full_log_file_name); return 1; } #endif FILE *file = fopen(full_log_file_name, "a"); if (file == NULL) { fprintf(stderr, "[ERROR in create_log_file(char *)] Unable to open file %s\n", full_log_file_name); return 1; } else if (test_info != NULL) { fprintf(file, "#HEADER %s\n", test_info); } else { fprintf(file, "#HEADER\n"); } fprintf(file, "#BEGIN Y:%s M:%s D:%s Time:%s:%s:%s\n", year, month, day, hour, minute, second); fflush(file); fclose(file); kernels_total_errors = 0; iteration_number = 0; kernel_time_acc = 0; return 0; } // ~ =========================================================================== // Log the string "#END" and reset global variables int end_log_file() { FILE *file = NULL; file = fopen(full_log_file_name, "a"); if (file == NULL) { fprintf(stderr, "[ERROR in log_string(char *)] Unable to open file %s\n",
full_log_file_name); return 1; } fprintf(file, "#END"); fflush(file); fclose(file); kernels_total_errors = 0; iteration_number = 0; kernel_time_acc = 0; strcpy(log_file_name, ""); strcpy(absolute_path, ""); strcpy(full_log_file_name, ""); return 0; } // ~ =========================================================================== // Start the kernel time measurement; iteration_number is now updated in end_iteration() int start_iteration() { update_timestamp(); /* FILE *file = fopen(full_log_file_name, "a"); if (file == NULL){ fprintf(stderr, "[ERROR in log_string(char *)] Unable to open file %s\n",full_log_file_name); return 1; } fprintf(file, "#ITER it:%lu\n", iteration_number); fflush(file); fclose(file); iteration_number++; */ log_error_detail_count = 0; log_info_detail_count = 0; it_time_start = get_time(); return 0; } // ~ =========================================================================== // Finish the kernel time measurement and log both times (kernel time and accumulated time) int end_iteration() { update_timestamp(); kernel_time = (double) (get_time() - it_time_start) / 1000000; kernel_time_acc += kernel_time; log_error_detail_count = 0; log_info_detail_count = 0; if (iteration_number % iter_interval_print == 0) { FILE *file = fopen(full_log_file_name, "a"); if (file == NULL) { fprintf(stderr, "[ERROR in log_string(char *)] Unable to open file %s\n", full_log_file_name); return 1; } fprintf(file, "#IT Ite:%lu KerTime:%f AccTime:%f\n", iteration_number, kernel_time, kernel_time_acc); //fprintf(file, "#TIME kernel_time:%f\n", kernel_time); //fprintf(file, "#ACC_TIME total_time:%f\n", kernel_time_acc); fflush(file); fclose(file); } iteration_number++; return 0; } // ~ =========================================================================== // Update the total errors variable and log both error counts (kernel errors and total errors) int log_error_count(unsigned long int kernel_errors) { update_timestamp(); if (kernel_errors < 1) { return 0; } kernels_total_errors += kernel_errors; FILE *file = NULL; file = fopen(full_log_file_name, "a"); if (file == NULL) { fprintf(stderr, "[ERROR in log_string(char *)] Unable to open file %s\n", full_log_file_name); return 1; } // (iteration_number-1) because this function is called after end_iteration() that increments iteration_number fprintf(file, "#SDC Ite:%lu KerTime:%f AccTime:%f KerErr:%lu AccErr:%lu\n", iteration_number - 1, kernel_time, kernel_time_acc, kernel_errors, kernels_total_errors); //fprintf(file, "#SDC kernel_errors:%lu\n", kernel_errors); //fprintf(file, "#TOTAL_SDC total_errors:%lu\n", kernels_total_errors); fflush(file); if (kernel_errors > max_errors_per_iter) { #ifdef ERR_INJ fprintf(file, "#ERR_INJ not aborting, we would abort otherwise\n"); #else fprintf(file, "#ABORT too many errors per iteration\n"); fflush(file); fclose(file); end_log_file(); exit(1); #endif } if (kernel_errors == last_iter_errors && (last_iter_with_errors + 1) == iteration_number && kernel_errors != 0 && kill_after_double_error == 1) { fprintf(file, "#ABORT amount of errors equals of the last iteration\n"); fflush(file); fclose(file); end_log_file(); exit(1); } fclose(file); last_iter_errors = kernel_errors; last_iter_with_errors = iteration_number; return 0; } // ~ =========================================================================== // Update the total infos variable and log both info counts (iteration infos and total infos) int log_info_count(unsigned long int info_count) { update_timestamp(); if (info_count < 1) { return 0; } kernels_total_infos += info_count; FILE *file =
NULL; file = fopen(full_log_file_name, "a"); if (file == NULL) { fprintf(stderr, "[ERROR in log_string(char *)] Unable to open file %s\n", full_log_file_name); return 1; } // (iteration_number-1) because this function is called after end_iteration() that increments iteration_number fprintf(file, "#CINF Ite:%lu KerTime:%f AccTime:%f KerInfo:%lu AccInfo:%lu\n", iteration_number - 1, kernel_time, kernel_time_acc, info_count, kernels_total_infos); //fprintf(file, "#SDC kernel_errors:%lu\n", kernel_errors); //fprintf(file, "#TOTAL_SDC total_errors:%lu\n", kernels_total_errors); fflush(file); // if (info_count > max_infos_per_iter) { // #ifdef ERR_INJ // fprintf(file, "#ERR_INJ not aborting, we would abort otherwise\n"); // #else // fprintf(file, "#ABORT too many infos per iteration\n"); // fflush(file); // fclose(file); // end_log_file(); // exit(1); // #endif // } // if (kernel_errors == last_iter_errors // && (last_iter_with_errors + 1) == iteration_number // && kernel_errors != 0) { // fprintf(file, "#ABORT amount of errors equals of the last iteration\n"); // fflush(file); // fclose(file); // end_log_file(); // exit(1); // } fclose(file); // last_iter_errors = kernel_errors; // last_iter_with_errors = iteration_number; return 0; } // ~ =========================================================================== // Print some string with the detail of an error to log file int log_error_detail(char *string) { FILE *file = NULL; #pragma omp parallel shared(log_error_detail_count) { #pragma omp critical log_error_detail_count++; } // Limits the number of lines written to logfile so that // HD space will not explode if ((unsigned long) log_error_detail_count > max_errors_per_iter) return 0; file = fopen(full_log_file_name, "a"); if (file == NULL) { fprintf(stderr, "[ERROR in log_string(char *)] Unable to open file %s\n", full_log_file_name); return 1; } fputs("#ERR ", file); fputs(string, file); fprintf(file, "\n"); fflush(file); fclose(file); return 0; } // ~ =========================================================================== // Print some string with the detail of an error/information to log file int log_info_detail(char *string) { FILE *file = NULL; #pragma omp parallel shared(log_info_detail_count) { #pragma omp critical log_info_detail_count++; } // Limits the number of lines written to logfile so that // HD space will not explode if ((unsigned long) log_info_detail_count > max_infos_per_iter) return 0; file = fopen(full_log_file_name, "a"); if (file == NULL) { fprintf(stderr, "[ERROR in log_string(char *)] Unable to open file %s\n", full_log_file_name); return 1; } fputs("#INF ", file); fputs(string, file); fprintf(file, "\n"); fflush(file); fclose(file); return 0; } // ~ =========================================================================== // Get current iteration number unsigned long int get_iteration_number() { return iteration_number; } // ~ =========================================================================== //Disable double error kill //this will disable double error kill if //two errors happened sequentially void disable_double_error_kill(){ kill_after_double_error = 0; }
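
/*
 * A minimal usage sketch (not part of the original file) showing the call
 * sequence the API above expects: time each iteration, then report errors
 * found by an output check. run_kernel() and compare_with_gold() are
 * hypothetical placeholders for the benchmark's own kernel and checker.
 * Compile with -DLOG_HELPER_USAGE_EXAMPLE to include it.
 */
#ifdef LOG_HELPER_USAGE_EXAMPLE
static void example_test_loop(unsigned long int n_iterations) {
    for (unsigned long int i = 0; i < n_iterations; i++) {
        start_iteration();               /* resets detail counters, starts timer */
        /* run_kernel(); */              /* hypothetical kernel invocation */
        end_iteration();                 /* stops timer, logs "#IT" every iter_interval_print */

        unsigned long int errors = 0;
        /* errors = compare_with_gold(); */ /* hypothetical output check */
        if (errors > 0) {
            /* one "#ERR" line per mismatch, capped at max_errors_per_iter */
            log_error_detail((char *) "p:[0] e:0.000000 r:1.000000");
            log_error_count(errors);     /* logs "#SDC"; may abort on thresholds */
        }
    }
    end_log_file();                      /* writes "#END" and resets state */
}
#endif /* LOG_HELPER_USAGE_EXAMPLE */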
surfmorph_codegen.h
//////////////////////////////////////////////////////////////////////////////// // This code has been automatically generated with SymPy /////////////////////// //////////////////////////////////////////////////////////////////////////////// #ifndef SURFMORPH_CODEGEN_H #define SURFMORPH_CODEGEN_H namespace surfmorph { template<typename ScalarT, typename IdxT, typename PointT, typename VectorT, typename MatrixT, typename CoeffsT, typename IndepsT> void updateInterpolationSystem ( const PointT &, const PointT &, const PointT &, const PointT &p3, const IdxT p0Idx, const IdxT p1Idx, const IdxT p2Idx, const IdxT p3Idx, const MatrixT &uInv, const MatrixT &m, const VectorT &t, const ScalarT Ai, const ScalarT alpha, CoeffsT &coeffs, IndepsT &indeps ) { // Make sure coeff and indeps data is allocated #ifdef SURFMORPH_PARALLELIZE_INTERPOLATION #pragma omp single #endif { coeffs[{3 * p0Idx + 0, 3 * p0Idx + 0}] = coeffs[{3 * p0Idx + 0, 3 * p0Idx + 0}]; coeffs[{3 * p0Idx + 0, 3 * p1Idx + 0}] = coeffs[{3 * p0Idx + 0, 3 * p1Idx + 0}]; coeffs[{3 * p0Idx + 0, 3 * p2Idx + 0}] = coeffs[{3 * p0Idx + 0, 3 * p2Idx + 0}]; coeffs[{3 * p0Idx + 0, 3 * p3Idx + 0}] = coeffs[{3 * p0Idx + 0, 3 * p3Idx + 0}]; indeps[3 * p0Idx + 0] = indeps[3 * p0Idx + 0]; coeffs[{3 * p0Idx + 1, 3 * p0Idx + 1}] = coeffs[{3 * p0Idx + 1, 3 * p0Idx + 1}]; coeffs[{3 * p0Idx + 1, 3 * p1Idx + 1}] = coeffs[{3 * p0Idx + 1, 3 * p1Idx + 1}]; coeffs[{3 * p0Idx + 1, 3 * p2Idx + 1}] = coeffs[{3 * p0Idx + 1, 3 * p2Idx + 1}]; coeffs[{3 * p0Idx + 1, 3 * p3Idx + 1}] = coeffs[{3 * p0Idx + 1, 3 * p3Idx + 1}]; indeps[3 * p0Idx + 1] = indeps[3 * p0Idx + 1]; coeffs[{3 * p0Idx + 2, 3 * p0Idx + 2}] = coeffs[{3 * p0Idx + 2, 3 * p0Idx + 2}]; coeffs[{3 * p0Idx + 2, 3 * p1Idx + 2}] = coeffs[{3 * p0Idx + 2, 3 * p1Idx + 2}]; coeffs[{3 * p0Idx + 2, 3 * p2Idx + 2}] = coeffs[{3 * p0Idx + 2, 3 * p2Idx + 2}]; coeffs[{3 * p0Idx + 2, 3 * p3Idx + 2}] = coeffs[{3 * p0Idx + 2, 3 * p3Idx + 2}]; indeps[3 * p0Idx + 2] = indeps[3 * p0Idx + 2]; coeffs[{3 * p1Idx + 0, 3 * p0Idx + 0}] = coeffs[{3 * p1Idx + 0, 3 * p0Idx + 0}]; coeffs[{3 * p1Idx + 0, 3 * p1Idx + 0}] = coeffs[{3 * p1Idx + 0, 3 * p1Idx + 0}]; coeffs[{3 * p1Idx + 0, 3 * p2Idx + 0}] = coeffs[{3 * p1Idx + 0, 3 * p2Idx + 0}]; coeffs[{3 * p1Idx + 0, 3 * p3Idx + 0}] = coeffs[{3 * p1Idx + 0, 3 * p3Idx + 0}]; indeps[3 * p1Idx + 0] = indeps[3 * p1Idx + 0]; coeffs[{3 * p1Idx + 1, 3 * p0Idx + 1}] = coeffs[{3 * p1Idx + 1, 3 * p0Idx + 1}]; coeffs[{3 * p1Idx + 1, 3 * p1Idx + 1}] = coeffs[{3 * p1Idx + 1, 3 * p1Idx + 1}]; coeffs[{3 * p1Idx + 1, 3 * p2Idx + 1}] = coeffs[{3 * p1Idx + 1, 3 * p2Idx + 1}]; coeffs[{3 * p1Idx + 1, 3 * p3Idx + 1}] = coeffs[{3 * p1Idx + 1, 3 * p3Idx + 1}]; indeps[3 * p1Idx + 1] = indeps[3 * p1Idx + 1]; coeffs[{3 * p1Idx + 2, 3 * p0Idx + 2}] = coeffs[{3 * p1Idx + 2, 3 * p0Idx + 2}]; coeffs[{3 * p1Idx + 2, 3 * p1Idx + 2}] = coeffs[{3 * p1Idx + 2, 3 * p1Idx + 2}]; coeffs[{3 * p1Idx + 2, 3 * p2Idx + 2}] = coeffs[{3 * p1Idx + 2, 3 * p2Idx + 2}]; coeffs[{3 * p1Idx + 2, 3 * p3Idx + 2}] = coeffs[{3 * p1Idx + 2, 3 * p3Idx + 2}]; indeps[3 * p1Idx + 2] = indeps[3 * p1Idx + 2]; coeffs[{3 * p2Idx + 0, 3 * p0Idx + 0}] = coeffs[{3 * p2Idx + 0, 3 * p0Idx + 0}]; coeffs[{3 * p2Idx + 0, 3 * p1Idx + 0}] = coeffs[{3 * p2Idx + 0, 3 * p1Idx + 0}]; coeffs[{3 * p2Idx + 0, 3 * p2Idx + 0}] = coeffs[{3 * p2Idx + 0, 3 * p2Idx + 0}]; coeffs[{3 * p2Idx + 0, 3 * p3Idx + 0}] = coeffs[{3 * p2Idx + 0, 3 * p3Idx + 0}]; indeps[3 * p2Idx + 0] = indeps[3 * p2Idx + 0]; coeffs[{3 * p2Idx + 1, 3 * p0Idx + 1}] = coeffs[{3 * p2Idx + 1, 3 * p0Idx + 1}]; 
coeffs[{3 * p2Idx + 1, 3 * p1Idx + 1}] = coeffs[{3 * p2Idx + 1, 3 * p1Idx + 1}]; coeffs[{3 * p2Idx + 1, 3 * p2Idx + 1}] = coeffs[{3 * p2Idx + 1, 3 * p2Idx + 1}]; coeffs[{3 * p2Idx + 1, 3 * p3Idx + 1}] = coeffs[{3 * p2Idx + 1, 3 * p3Idx + 1}]; indeps[3 * p2Idx + 1] = indeps[3 * p2Idx + 1]; coeffs[{3 * p2Idx + 2, 3 * p0Idx + 2}] = coeffs[{3 * p2Idx + 2, 3 * p0Idx + 2}]; coeffs[{3 * p2Idx + 2, 3 * p1Idx + 2}] = coeffs[{3 * p2Idx + 2, 3 * p1Idx + 2}]; coeffs[{3 * p2Idx + 2, 3 * p2Idx + 2}] = coeffs[{3 * p2Idx + 2, 3 * p2Idx + 2}]; coeffs[{3 * p2Idx + 2, 3 * p3Idx + 2}] = coeffs[{3 * p2Idx + 2, 3 * p3Idx + 2}]; indeps[3 * p2Idx + 2] = indeps[3 * p2Idx + 2]; coeffs[{3 * p3Idx + 0, 3 * p0Idx + 0}] = coeffs[{3 * p3Idx + 0, 3 * p0Idx + 0}]; coeffs[{3 * p3Idx + 0, 3 * p1Idx + 0}] = coeffs[{3 * p3Idx + 0, 3 * p1Idx + 0}]; coeffs[{3 * p3Idx + 0, 3 * p2Idx + 0}] = coeffs[{3 * p3Idx + 0, 3 * p2Idx + 0}]; coeffs[{3 * p3Idx + 0, 3 * p3Idx + 0}] = coeffs[{3 * p3Idx + 0, 3 * p3Idx + 0}]; indeps[3 * p3Idx + 0] = indeps[3 * p3Idx + 0]; coeffs[{3 * p3Idx + 1, 3 * p0Idx + 1}] = coeffs[{3 * p3Idx + 1, 3 * p0Idx + 1}]; coeffs[{3 * p3Idx + 1, 3 * p1Idx + 1}] = coeffs[{3 * p3Idx + 1, 3 * p1Idx + 1}]; coeffs[{3 * p3Idx + 1, 3 * p2Idx + 1}] = coeffs[{3 * p3Idx + 1, 3 * p2Idx + 1}]; coeffs[{3 * p3Idx + 1, 3 * p3Idx + 1}] = coeffs[{3 * p3Idx + 1, 3 * p3Idx + 1}]; indeps[3 * p3Idx + 1] = indeps[3 * p3Idx + 1]; coeffs[{3 * p3Idx + 2, 3 * p0Idx + 2}] = coeffs[{3 * p3Idx + 2, 3 * p0Idx + 2}]; coeffs[{3 * p3Idx + 2, 3 * p1Idx + 2}] = coeffs[{3 * p3Idx + 2, 3 * p1Idx + 2}]; coeffs[{3 * p3Idx + 2, 3 * p2Idx + 2}] = coeffs[{3 * p3Idx + 2, 3 * p2Idx + 2}]; coeffs[{3 * p3Idx + 2, 3 * p3Idx + 2}] = coeffs[{3 * p3Idx + 2, 3 * p3Idx + 2}]; indeps[3 * p3Idx + 2] = indeps[3 * p3Idx + 2]; } #ifdef SURFMORPH_PARALLELIZE_INTERPOLATION #pragma omp sections #endif { #ifdef SURFMORPH_PARALLELIZE_INTERPOLATION #pragma omp section #endif { // Equation 1 coeffs[{3 * p0Idx + 0, 3 * p0Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + (uInv(0, 0)*uInv(0, 0)) + (uInv(0, 1)*uInv(0, 1)) + (uInv(0, 2)*uInv(0, 2))); coeffs[{3 * p0Idx + 0, 3 * p1Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2)); coeffs[{3 * p0Idx + 0, 3 * p2Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2)); coeffs[{3 * p0Idx + 0, 3 * p3Idx + 0}] += -2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(0, 
0)*uInv(0, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*uInv(0, 0) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*uInv(0, 1) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*p3(2)*uInv(0, 2) + (uInv(0, 0)*uInv(0, 0)) + uInv(0, 0)*uInv(1, 0) + uInv(0, 0)*uInv(2, 0) + (uInv(0, 1)*uInv(0, 1)) + uInv(0, 1)*uInv(1, 1) + uInv(0, 1)*uInv(2, 1) + (uInv(0, 2)*uInv(0, 2)) + uInv(0, 2)*uInv(1, 2) + uInv(0, 2)*uInv(2, 2)); indeps[3 * p0Idx + 0] += -2*Ai*(alpha*p3(0)*t(0)*uInv(0, 0) + alpha*p3(1)*t(0)*uInv(0, 1) + alpha*p3(2)*t(0)*uInv(0, 2) - m(0, 0)*uInv(0, 0) - m(0, 1)*uInv(0, 1) - m(0, 2)*uInv(0, 2)); } #ifdef SURFMORPH_PARALLELIZE_INTERPOLATION #pragma omp section #endif { // Equation 2 coeffs[{3 * p0Idx + 1, 3 * p0Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + (uInv(0, 0)*uInv(0, 0)) + (uInv(0, 1)*uInv(0, 1)) + (uInv(0, 2)*uInv(0, 2))); coeffs[{3 * p0Idx + 1, 3 * p1Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2)); coeffs[{3 * p0Idx + 1, 3 * p2Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2)); coeffs[{3 * p0Idx + 1, 3 * p3Idx + 1}] += -2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*uInv(0, 0) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + 
alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*uInv(0, 1) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*p3(2)*uInv(0, 2) + (uInv(0, 0)*uInv(0, 0)) + uInv(0, 0)*uInv(1, 0) + uInv(0, 0)*uInv(2, 0) + (uInv(0, 1)*uInv(0, 1)) + uInv(0, 1)*uInv(1, 1) + uInv(0, 1)*uInv(2, 1) + (uInv(0, 2)*uInv(0, 2)) + uInv(0, 2)*uInv(1, 2) + uInv(0, 2)*uInv(2, 2)); indeps[3 * p0Idx + 1] += -2*Ai*(alpha*p3(0)*t(1)*uInv(0, 0) + alpha*p3(1)*t(1)*uInv(0, 1) + alpha*p3(2)*t(1)*uInv(0, 2) - m(1, 0)*uInv(0, 0) - m(1, 1)*uInv(0, 1) - m(1, 2)*uInv(0, 2)); } #ifdef SURFMORPH_PARALLELIZE_INTERPOLATION #pragma omp section #endif { // Equation 3 coeffs[{3 * p0Idx + 2, 3 * p0Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + (uInv(0, 0)*uInv(0, 0)) + (uInv(0, 1)*uInv(0, 1)) + (uInv(0, 2)*uInv(0, 2))); coeffs[{3 * p0Idx + 2, 3 * p1Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2)); coeffs[{3 * p0Idx + 2, 3 * p2Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2)); coeffs[{3 * p0Idx + 2, 3 * p3Idx + 2}] += -2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*uInv(0, 0) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*uInv(0, 1) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*p3(2)*uInv(0, 2) + (uInv(0, 
0)*uInv(0, 0)) + uInv(0, 0)*uInv(1, 0) + uInv(0, 0)*uInv(2, 0) + (uInv(0, 1)*uInv(0, 1)) + uInv(0, 1)*uInv(1, 1) + uInv(0, 1)*uInv(2, 1) + (uInv(0, 2)*uInv(0, 2)) + uInv(0, 2)*uInv(1, 2) + uInv(0, 2)*uInv(2, 2)); indeps[3 * p0Idx + 2] += -2*Ai*(alpha*p3(0)*t(2)*uInv(0, 0) + alpha*p3(1)*t(2)*uInv(0, 1) + alpha*p3(2)*t(2)*uInv(0, 2) - m(2, 0)*uInv(0, 0) - m(2, 1)*uInv(0, 1) - m(2, 2)*uInv(0, 2)); } #ifdef SURFMORPH_PARALLELIZE_INTERPOLATION #pragma omp section #endif { // Equation 4 coeffs[{3 * p1Idx + 0, 3 * p0Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2)); coeffs[{3 * p1Idx + 0, 3 * p1Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + (uInv(1, 0)*uInv(1, 0)) + (uInv(1, 1)*uInv(1, 1)) + (uInv(1, 2)*uInv(1, 2))); coeffs[{3 * p1Idx + 0, 3 * p2Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2)); coeffs[{3 * p1Idx + 0, 3 * p3Idx + 0}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*p3(0)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*p3(1)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*p3(2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2) + (uInv(1, 0)*uInv(1, 0)) + uInv(1, 0)*uInv(2, 0) + (uInv(1, 1)*uInv(1, 1)) + uInv(1, 1)*uInv(2, 1) + (uInv(1, 2)*uInv(1, 2)) + uInv(1, 2)*uInv(2, 2)); indeps[3 * p1Idx + 0] += -2*Ai*(alpha*p3(0)*t(0)*uInv(1, 0) + alpha*p3(1)*t(0)*uInv(1, 1) + alpha*p3(2)*t(0)*uInv(1, 2) - m(0, 0)*uInv(1, 0) - m(0, 1)*uInv(1, 1) - m(0, 2)*uInv(1, 2)); } #ifdef SURFMORPH_PARALLELIZE_INTERPOLATION #pragma omp section #endif { // Equation 5 
coeffs[{3 * p1Idx + 1, 3 * p0Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2)); coeffs[{3 * p1Idx + 1, 3 * p1Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + (uInv(1, 0)*uInv(1, 0)) + (uInv(1, 1)*uInv(1, 1)) + (uInv(1, 2)*uInv(1, 2))); coeffs[{3 * p1Idx + 1, 3 * p2Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2)); coeffs[{3 * p1Idx + 1, 3 * p3Idx + 1}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*p3(0)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*p3(1)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*p3(2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2) + (uInv(1, 0)*uInv(1, 0)) + uInv(1, 0)*uInv(2, 0) + (uInv(1, 1)*uInv(1, 1)) + uInv(1, 1)*uInv(2, 1) + (uInv(1, 2)*uInv(1, 2)) + uInv(1, 2)*uInv(2, 2)); indeps[3 * p1Idx + 1] += -2*Ai*(alpha*p3(0)*t(1)*uInv(1, 0) + alpha*p3(1)*t(1)*uInv(1, 1) + alpha*p3(2)*t(1)*uInv(1, 2) - m(1, 0)*uInv(1, 0) - m(1, 1)*uInv(1, 1) - m(1, 2)*uInv(1, 2)); } #ifdef SURFMORPH_PARALLELIZE_INTERPOLATION #pragma omp section #endif { // Equation 6 coeffs[{3 * p1Idx + 2, 3 * p0Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 
2)*uInv(1, 2)); coeffs[{3 * p1Idx + 2, 3 * p1Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + (uInv(1, 0)*uInv(1, 0)) + (uInv(1, 1)*uInv(1, 1)) + (uInv(1, 2)*uInv(1, 2))); coeffs[{3 * p1Idx + 2, 3 * p2Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2)); coeffs[{3 * p1Idx + 2, 3 * p3Idx + 2}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*p3(0)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*p3(1)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*p3(2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2) + (uInv(1, 0)*uInv(1, 0)) + uInv(1, 0)*uInv(2, 0) + (uInv(1, 1)*uInv(1, 1)) + uInv(1, 1)*uInv(2, 1) + (uInv(1, 2)*uInv(1, 2)) + uInv(1, 2)*uInv(2, 2)); indeps[3 * p1Idx + 2] += -2*Ai*(alpha*p3(0)*t(2)*uInv(1, 0) + alpha*p3(1)*t(2)*uInv(1, 1) + alpha*p3(2)*t(2)*uInv(1, 2) - m(2, 0)*uInv(1, 0) - m(2, 1)*uInv(1, 1) - m(2, 2)*uInv(1, 2)); } #ifdef SURFMORPH_PARALLELIZE_INTERPOLATION #pragma omp section #endif { // Equation 7 coeffs[{3 * p2Idx + 0, 3 * p0Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2)); coeffs[{3 * p2Idx + 0, 3 * p1Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) 
+ uInv(1, 2)*uInv(2, 2)); coeffs[{3 * p2Idx + 0, 3 * p2Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2))); coeffs[{3 * p2Idx + 0, 3 * p3Idx + 0}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + alpha*p3(0)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + alpha*p3(1)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + alpha*p3(2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2))); indeps[3 * p2Idx + 0] += -2*Ai*(alpha*p3(0)*t(0)*uInv(2, 0) + alpha*p3(1)*t(0)*uInv(2, 1) + alpha*p3(2)*t(0)*uInv(2, 2) - m(0, 0)*uInv(2, 0) - m(0, 1)*uInv(2, 1) - m(0, 2)*uInv(2, 2)); } #ifdef SURFMORPH_PARALLELIZE_INTERPOLATION #pragma omp section #endif { // Equation 8 coeffs[{3 * p2Idx + 1, 3 * p0Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2)); coeffs[{3 * p2Idx + 1, 3 * p1Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2)); coeffs[{3 * p2Idx + 1, 3 * p2Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2))); coeffs[{3 * p2Idx + 1, 3 * p3Idx + 1}] += 
-2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + alpha*p3(0)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + alpha*p3(1)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + alpha*p3(2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2))); indeps[3 * p2Idx + 1] += -2*Ai*(alpha*p3(0)*t(1)*uInv(2, 0) + alpha*p3(1)*t(1)*uInv(2, 1) + alpha*p3(2)*t(1)*uInv(2, 2) - m(1, 0)*uInv(2, 0) - m(1, 1)*uInv(2, 1) - m(1, 2)*uInv(2, 2)); } #ifdef SURFMORPH_PARALLELIZE_INTERPOLATION #pragma omp section #endif { // Equation 9 coeffs[{3 * p2Idx + 2, 3 * p0Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2)); coeffs[{3 * p2Idx + 2, 3 * p1Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2)); coeffs[{3 * p2Idx + 2, 3 * p2Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2))); coeffs[{3 * p2Idx + 2, 3 * p3Idx + 2}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + 
alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + alpha*p3(0)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + alpha*p3(1)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + alpha*p3(2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2))); indeps[3 * p2Idx + 2] += -2*Ai*(alpha*p3(0)*t(2)*uInv(2, 0) + alpha*p3(1)*t(2)*uInv(2, 1) + alpha*p3(2)*t(2)*uInv(2, 2) - m(2, 0)*uInv(2, 0) - m(2, 1)*uInv(2, 1) - m(2, 2)*uInv(2, 2)); } #ifdef SURFMORPH_PARALLELIZE_INTERPOLATION #pragma omp section #endif { // Equation 10 coeffs[{3 * p3Idx + 0, 3 * p0Idx + 0}] += -2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*uInv(0, 0) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*uInv(0, 1) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*p3(2)*uInv(0, 2) + (uInv(0, 0)*uInv(0, 0)) + uInv(0, 0)*uInv(1, 0) + uInv(0, 0)*uInv(2, 0) + (uInv(0, 1)*uInv(0, 1)) + uInv(0, 1)*uInv(1, 1) + uInv(0, 1)*uInv(2, 1) + (uInv(0, 2)*uInv(0, 2)) + uInv(0, 2)*uInv(1, 2) + uInv(0, 2)*uInv(2, 2)); coeffs[{3 * p3Idx + 0, 3 * p1Idx + 0}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*p3(0)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*p3(1)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 
2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*p3(2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2) + (uInv(1, 0)*uInv(1, 0)) + uInv(1, 0)*uInv(2, 0) + (uInv(1, 1)*uInv(1, 1)) + uInv(1, 1)*uInv(2, 1) + (uInv(1, 2)*uInv(1, 2)) + uInv(1, 2)*uInv(2, 2)); coeffs[{3 * p3Idx + 0, 3 * p2Idx + 0}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + alpha*p3(0)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + alpha*p3(1)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + alpha*p3(2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2))); coeffs[{3 * p3Idx + 0, 3 * p3Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + 2*alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + 2*alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + 2*alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + 2*alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + 2*alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + 2*alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + 2*alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + 2*alpha*p3(0)*uInv(0, 0) + 2*alpha*p3(0)*uInv(1, 0) + 2*alpha*p3(0)*uInv(2, 0) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + 2*alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + 2*alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + 2*alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + 2*alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + 2*alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + 2*alpha*p3(1)*p3(2)*uInv(1, 
2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + 2*alpha*p3(1)*uInv(0, 1) + 2*alpha*p3(1)*uInv(1, 1) + 2*alpha*p3(1)*uInv(2, 1) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + 2*alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + 2*alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + 2*alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + 2*alpha*p3(2)*uInv(0, 2) + 2*alpha*p3(2)*uInv(1, 2) + 2*alpha*p3(2)*uInv(2, 2) + alpha + (uInv(0, 0)*uInv(0, 0)) + 2*uInv(0, 0)*uInv(1, 0) + 2*uInv(0, 0)*uInv(2, 0) + (uInv(0, 1)*uInv(0, 1)) + 2*uInv(0, 1)*uInv(1, 1) + 2*uInv(0, 1)*uInv(2, 1) + (uInv(0, 2)*uInv(0, 2)) + 2*uInv(0, 2)*uInv(1, 2) + 2*uInv(0, 2)*uInv(2, 2) + (uInv(1, 0)*uInv(1, 0)) + 2*uInv(1, 0)*uInv(2, 0) + (uInv(1, 1)*uInv(1, 1)) + 2*uInv(1, 1)*uInv(2, 1) + (uInv(1, 2)*uInv(1, 2)) + 2*uInv(1, 2)*uInv(2, 2) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2))); indeps[3 * p3Idx + 0] += 2*Ai*(alpha*p3(0)*t(0)*uInv(0, 0) + alpha*p3(0)*t(0)*uInv(1, 0) + alpha*p3(0)*t(0)*uInv(2, 0) + alpha*p3(1)*t(0)*uInv(0, 1) + alpha*p3(1)*t(0)*uInv(1, 1) + alpha*p3(1)*t(0)*uInv(2, 1) + alpha*p3(2)*t(0)*uInv(0, 2) + alpha*p3(2)*t(0)*uInv(1, 2) + alpha*p3(2)*t(0)*uInv(2, 2) + alpha*t(0) - m(0, 0)*uInv(0, 0) - m(0, 0)*uInv(1, 0) - m(0, 0)*uInv(2, 0) - m(0, 1)*uInv(0, 1) - m(0, 1)*uInv(1, 1) - m(0, 1)*uInv(2, 1) - m(0, 2)*uInv(0, 2) - m(0, 2)*uInv(1, 2) - m(0, 2)*uInv(2, 2)); } #ifdef SURFMORPH_PARALLELIZE_INTERPOLATION #pragma omp section #endif { // Equation 11 coeffs[{3 * p3Idx + 1, 3 * p0Idx + 1}] += -2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*uInv(0, 0) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*uInv(0, 1) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*p3(2)*uInv(0, 2) + (uInv(0, 0)*uInv(0, 0)) + uInv(0, 0)*uInv(1, 0) + uInv(0, 0)*uInv(2, 0) + (uInv(0, 1)*uInv(0, 1)) + uInv(0, 1)*uInv(1, 1) + uInv(0, 1)*uInv(2, 1) + (uInv(0, 2)*uInv(0, 2)) + uInv(0, 2)*uInv(1, 2) + uInv(0, 2)*uInv(2, 2)); coeffs[{3 * p3Idx + 1, 3 * p1Idx + 1}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) 
+ alpha*p3(0)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*p3(1)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*p3(2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2) + (uInv(1, 0)*uInv(1, 0)) + uInv(1, 0)*uInv(2, 0) + (uInv(1, 1)*uInv(1, 1)) + uInv(1, 1)*uInv(2, 1) + (uInv(1, 2)*uInv(1, 2)) + uInv(1, 2)*uInv(2, 2)); coeffs[{3 * p3Idx + 1, 3 * p2Idx + 1}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + alpha*p3(0)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + alpha*p3(1)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + alpha*p3(2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2))); coeffs[{3 * p3Idx + 1, 3 * p3Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + 2*alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + 2*alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + 2*alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + 2*alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + 2*alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + 2*alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + 2*alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + 2*alpha*p3(0)*uInv(0, 0) + 2*alpha*p3(0)*uInv(1, 0) + 2*alpha*p3(0)*uInv(2, 0) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + 2*alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + 2*alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + 
2*alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + 2*alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + 2*alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + 2*alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + 2*alpha*p3(1)*uInv(0, 1) + 2*alpha*p3(1)*uInv(1, 1) + 2*alpha*p3(1)*uInv(2, 1) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + 2*alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + 2*alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + 2*alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + 2*alpha*p3(2)*uInv(0, 2) + 2*alpha*p3(2)*uInv(1, 2) + 2*alpha*p3(2)*uInv(2, 2) + alpha + (uInv(0, 0)*uInv(0, 0)) + 2*uInv(0, 0)*uInv(1, 0) + 2*uInv(0, 0)*uInv(2, 0) + (uInv(0, 1)*uInv(0, 1)) + 2*uInv(0, 1)*uInv(1, 1) + 2*uInv(0, 1)*uInv(2, 1) + (uInv(0, 2)*uInv(0, 2)) + 2*uInv(0, 2)*uInv(1, 2) + 2*uInv(0, 2)*uInv(2, 2) + (uInv(1, 0)*uInv(1, 0)) + 2*uInv(1, 0)*uInv(2, 0) + (uInv(1, 1)*uInv(1, 1)) + 2*uInv(1, 1)*uInv(2, 1) + (uInv(1, 2)*uInv(1, 2)) + 2*uInv(1, 2)*uInv(2, 2) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2))); indeps[3 * p3Idx + 1] += 2*Ai*(alpha*p3(0)*t(1)*uInv(0, 0) + alpha*p3(0)*t(1)*uInv(1, 0) + alpha*p3(0)*t(1)*uInv(2, 0) + alpha*p3(1)*t(1)*uInv(0, 1) + alpha*p3(1)*t(1)*uInv(1, 1) + alpha*p3(1)*t(1)*uInv(2, 1) + alpha*p3(2)*t(1)*uInv(0, 2) + alpha*p3(2)*t(1)*uInv(1, 2) + alpha*p3(2)*t(1)*uInv(2, 2) + alpha*t(1) - m(1, 0)*uInv(0, 0) - m(1, 0)*uInv(1, 0) - m(1, 0)*uInv(2, 0) - m(1, 1)*uInv(0, 1) - m(1, 1)*uInv(1, 1) - m(1, 1)*uInv(2, 1) - m(1, 2)*uInv(0, 2) - m(1, 2)*uInv(1, 2) - m(1, 2)*uInv(2, 2)); } #ifdef SURFMORPH_PARALLELIZE_INTERPOLATION #pragma omp section #endif { // Equation 12 coeffs[{3 * p3Idx + 2, 3 * p0Idx + 2}] += -2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*uInv(0, 0) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*uInv(0, 1) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*p3(2)*uInv(0, 2) + (uInv(0, 0)*uInv(0, 0)) + uInv(0, 0)*uInv(1, 0) + uInv(0, 0)*uInv(2, 0) + (uInv(0, 1)*uInv(0, 1)) + uInv(0, 1)*uInv(1, 1) + uInv(0, 1)*uInv(2, 1) + (uInv(0, 2)*uInv(0, 2)) + uInv(0, 2)*uInv(1, 2) + uInv(0, 2)*uInv(2, 2)); coeffs[{3 * p3Idx + 2, 3 * p1Idx + 2}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + alpha*(p3(0)*p3(0))*uInv(1, 
0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*p3(0)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*p3(1)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*p3(2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2) + (uInv(1, 0)*uInv(1, 0)) + uInv(1, 0)*uInv(2, 0) + (uInv(1, 1)*uInv(1, 1)) + uInv(1, 1)*uInv(2, 1) + (uInv(1, 2)*uInv(1, 2)) + uInv(1, 2)*uInv(2, 2)); coeffs[{3 * p3Idx + 2, 3 * p2Idx + 2}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + alpha*p3(0)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + alpha*p3(1)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + alpha*p3(2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2))); coeffs[{3 * p3Idx + 2, 3 * p3Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + 2*alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + 2*alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + 2*alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + 2*alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + 2*alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + 2*alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + 
2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + 2*alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + 2*alpha*p3(0)*uInv(0, 0) + 2*alpha*p3(0)*uInv(1, 0) + 2*alpha*p3(0)*uInv(2, 0) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + 2*alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + 2*alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + 2*alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + 2*alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + 2*alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + 2*alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + 2*alpha*p3(1)*uInv(0, 1) + 2*alpha*p3(1)*uInv(1, 1) + 2*alpha*p3(1)*uInv(2, 1) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + 2*alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + 2*alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + 2*alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + 2*alpha*p3(2)*uInv(0, 2) + 2*alpha*p3(2)*uInv(1, 2) + 2*alpha*p3(2)*uInv(2, 2) + alpha + (uInv(0, 0)*uInv(0, 0)) + 2*uInv(0, 0)*uInv(1, 0) + 2*uInv(0, 0)*uInv(2, 0) + (uInv(0, 1)*uInv(0, 1)) + 2*uInv(0, 1)*uInv(1, 1) + 2*uInv(0, 1)*uInv(2, 1) + (uInv(0, 2)*uInv(0, 2)) + 2*uInv(0, 2)*uInv(1, 2) + 2*uInv(0, 2)*uInv(2, 2) + (uInv(1, 0)*uInv(1, 0)) + 2*uInv(1, 0)*uInv(2, 0) + (uInv(1, 1)*uInv(1, 1)) + 2*uInv(1, 1)*uInv(2, 1) + (uInv(1, 2)*uInv(1, 2)) + 2*uInv(1, 2)*uInv(2, 2) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2))); indeps[3 * p3Idx + 2] += 2*Ai*(alpha*p3(0)*t(2)*uInv(0, 0) + alpha*p3(0)*t(2)*uInv(1, 0) + alpha*p3(0)*t(2)*uInv(2, 0) + alpha*p3(1)*t(2)*uInv(0, 1) + alpha*p3(1)*t(2)*uInv(1, 1) + alpha*p3(1)*t(2)*uInv(2, 1) + alpha*p3(2)*t(2)*uInv(0, 2) + alpha*p3(2)*t(2)*uInv(1, 2) + alpha*p3(2)*t(2)*uInv(2, 2) + alpha*t(2) - m(2, 0)*uInv(0, 0) - m(2, 0)*uInv(1, 0) - m(2, 0)*uInv(2, 0) - m(2, 1)*uInv(0, 1) - m(2, 1)*uInv(1, 1) - m(2, 1)*uInv(2, 1) - m(2, 2)*uInv(0, 2) - m(2, 2)*uInv(1, 2) - m(2, 2)*uInv(2, 2)); } } } } #endif ////////////////////////////////////////////////////////////////////////////////
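////////////////////////////////////////////////////////////////////////////////
// A minimal sketch (not part of the generated header) of container types that
// satisfy the access pattern used above: CoeffsT must provide operator[] with
// a {row, col} key that value-initializes missing entries, which is exactly
// what the self-assignment "allocation" block at the top of
// updateInterpolationSystem relies on, and IndepsT must provide indexed access
// to zero-initialized entries. The type names and the guard macro are
// illustrative assumptions, not part of surfmorph.
#ifdef SURFMORPH_CONTAINER_EXAMPLE
#include <cstddef>
#include <map>
#include <utility>
#include <vector>
namespace surfmorph {
// coeffs[{row, col}] inserts a zero-valued entry on first access, so the
// "touch" block above doubles as allocation for the sparse system matrix.
using ExampleCoeffs = std::map<std::pair<std::size_t, std::size_t>, double>;
// Must be pre-sized to 3 * numPoints and zero-filled before the first call.
using ExampleIndeps = std::vector<double>;
}  // namespace surfmorph
#endif  // SURFMORPH_CONTAINER_EXAMPLE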
comm.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/**
 */
#ifndef MXNET_KVSTORE_COMM_H_
#define MXNET_KVSTORE_COMM_H_
#include <dmlc/omp.h>
#include <string>
#include <algorithm>
#include <utility>
#include <limits>
#include <vector>
#include <tuple>
#include <thread>
#include "mxnet/ndarray.h"
#include "../ndarray/ndarray_function.h"
#include "../operator/tensor/sparse_retain-inl.h"
namespace mxnet {
namespace kvstore {
/**
 * \brief multiple device communication
 */
class Comm {
 public:
  Comm() {
    pinned_ctx_ = Context::CPUPinned(0);
  }
  virtual ~Comm() { }
  /**
   * \brief init key with the data shape and storage shape
   */
  virtual void Init(int key, const NDArrayStorageType stype,
                    const TShape& shape, int dtype = mshadow::kFloat32) = 0;
  /**
   * \brief returns src[0] + .. + src[src.size()-1]
   */
  virtual const NDArray& Reduce(
      int key, const std::vector<NDArray>& src, int priority) = 0;
  /**
   * \brief copy from src to dst[i] for every i
   */
  virtual void Broadcast(
      int key, const NDArray& src,
      const std::vector<NDArray*> dst, int priority) = 0;
  /**
   * \brief broadcast src to dst[i] with target row_ids for every i
   * \param dst a list of destination row_sparse NDArrays and their target row_ids
   *        to broadcast, where the row_ids are expected to be unique and sorted
   * \param use_copy if set to true, directly copy src to dst[i] without looking up
   *        the provided row_ids
   */
  virtual void BroadcastRowSparse(int key, const NDArray& src,
                                  const std::vector<std::pair<NDArray*, NDArray>>& dst,
                                  const bool use_copy,
                                  const int priority) = 0;
  /**
   * \brief return a pinned context
   */
  Context pinned_ctx() const {
    return pinned_ctx_;
  }

 protected:
  Context pinned_ctx_;
};

/**
 * \brief an implementation of Comm that first copies data to CPU memory, and
 * then reduces there
 */
class CommCPU : public Comm {
 public:
  CommCPU() {
    nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4);
    bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000);
    // TODO(junwu) delete the following data member, now for benchmark only
    is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0);
  }
  virtual ~CommCPU() { }

  void Init(int key, const NDArrayStorageType stype, const TShape& shape,
            int type = mshadow::kFloat32) override {
    if (stype == kDefaultStorage) {
      merge_buf_[key].merged = NDArray(shape, pinned_ctx_, false, type);
    } else {
      merge_buf_[key].merged = NDArray(stype, shape, pinned_ctx_, true, type);
    }
  }

  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    auto& buf = merge_buf_[key];
    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      if (src[0].storage_type() == kDefaultStorage) {
        return src[0];
      } else {  // if sparse and only one GPU, always update weight on CPU
        CopyFromTo(src[0], &buf.merged, priority);
        return buf.merged;
      }
    }

    if (buf.merged.storage_type() == kDefaultStorage) {
      std::vector<Engine::VarHandle> const_vars(src.size() - 1);
      std::vector<NDArray> reduce(src.size());
      CopyFromTo(src[0], &buf.merged, priority);
      reduce[0] = buf.merged;

      if (buf.copy_buf.empty()) {
        buf.copy_buf.resize(src.size()-1);
        for (size_t j = 0; j < src.size() - 1; ++j) {
          // allocate NDArray based on storage type
          buf.copy_buf[j] = NDArray(
            src[0].shape(), pinned_ctx_, false, src[0].dtype());
        }
      }
      for (size_t i = 1; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority);
        reduce[i] = buf.copy_buf[i-1];
        const_vars[i-1] = reduce[i].var();
      }

      Engine::Get()->PushAsync(
        [reduce, this](RunContext rctx, Engine::CallbackOnComplete on_complete) {
          ReduceSumCPU(reduce);
          on_complete();
        }, Context::CPU(), const_vars, {reduce[0].var()},
        FnProperty::kCPUPrioritized, priority, PROFILER_MESSAGE("KVStoreReduce"));
    } else {
      // buf.merged is a sparse ndarray.
      std::vector<Engine::VarHandle> const_vars(src.size());
      std::vector<NDArray> reduce(src.size());

      if (buf.copy_buf.empty()) {
        buf.copy_buf.resize(src.size());
        for (size_t j = 0; j < src.size(); ++j) {
          buf.copy_buf[j] = NDArray(
            src[0].storage_type(), src[0].shape(), pinned_ctx_, true, src[0].dtype());
        }
      }
      for (size_t i = 0; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
        reduce[i] = buf.copy_buf[i];
        const_vars[i] = reduce[i].var();
      }
      auto result = buf.merged;
      Engine::Get()->PushAsync(
        [reduce, result, this](RunContext rctx, Engine::CallbackOnComplete on_complete) {
          NDArray out = result;
          Resource rsc = ResourceManager::Get()->Request(rctx.ctx,
              ResourceRequest(ResourceRequest::kTempSpace));
          is_serial_push_?
            ReduceSumCPUExSerial(reduce, &out)
            : mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out);
          on_complete();
        }, Context::CPU(), const_vars, {result.var()},
        FnProperty::kCPUPrioritized, priority, PROFILER_MESSAGE("KVStoreReduce"));
    }
    return buf.merged;
  }

  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    int mask = src.ctx().dev_mask();
    if (mask == Context::kCPU) {
      for (auto d : dst) CopyFromTo(src, d, priority);
    } else {
      // first copy data to cpu, then broadcast
      auto& buf = merge_buf_[key];
      CopyFromTo(src, &buf.merged, priority);
      for (auto d : dst) CopyFromTo(buf.merged, d, priority);
    }
  }

  void BroadcastRowSparse(int key, const NDArray& src,
                          const std::vector<std::pair<NDArray*, NDArray>>& dst,
                          const bool use_copy,
                          const int priority) override {
    using namespace mshadow;
    CHECK_EQ(src.storage_type(), kRowSparseStorage)
      << "BroadcastRowSparse expects row-sparse src NDArray";
    CHECK_EQ(src.ctx().dev_mask(), Context::kCPU)
      << "BroadcastRowSparse with src on gpu context not supported";
    for (size_t i = 0; i < dst.size(); ++i) {
      NDArray* out = dst[i].first;
      NDArray row_id = dst[i].second;
      if (use_copy) {
        CopyFromTo(src, out, priority);
      } else {
        CHECK_EQ(out->storage_type(), kRowSparseStorage)
          << "BroadcastRowSparse expects row_sparse dst NDArray";
        CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU)
          << "BroadcastRowSparse with row_indices on gpu context not supported";
        // retain according to unique indices
        const bool use_sparse_retain = (src.shape()[0] != src.storage_shape()[0])
          || (row_id.dtype() != out->aux_type(rowsparse::kIdx))
          || (out->ctx().dev_mask() != Context::kGPU);
        if (use_sparse_retain) {  // use sparse_retain op
          const bool is_to_gpu = out->ctx().dev_mask() == Context::kGPU;
          NDArray out_cpu = is_to_gpu?
              NDArray(kRowSparseStorage, src.shape(), src.ctx(), true,
                      src.dtype(), src.aux_types()) : *out;
          Engine::Get()->PushAsync(
            [=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
              const TBlob& indices = row_id.data();
              NDArray temp = out_cpu;  // get rid of const qualifier
              op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(),
                                                    src, indices, kWriteTo,
                                                    &temp);
              on_complete();
            }, Context::CPU(), {src.var(), row_id.var()}, {out_cpu.var()},
            FnProperty::kNormal, priority, PROFILER_MESSAGE("KVStoreSparseRetain"));
          if (is_to_gpu) {
            CopyFromTo(out_cpu, out, priority);
          }
        } else {  // direct copy rows
          Engine::Get()->PushAsync(
            [=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
              CopyRetainedRowsToGPU(rctx.get_stream<cpu>(), rctx.get_stream<gpu>(),
                                    src, row_id, out);
              on_complete();
            }, out->ctx(), {src.var(), row_id.var()}, {out->var()},
            FnProperty::kCopyToGPU, priority,
            PROFILER_MESSAGE("KVStoreCopyRetainedRowsToGPU"));
        }
      }
    }
  }

 private:
  /*!
   * \brief When src is a rsp with full rows,
   * simply copy retained rows directly from cpu to gpu
   * without invoking sparse_retain op.
   */
  void CopyRetainedRowsToGPU(mshadow::Stream<cpu>* cpu_stream,
                             mshadow::Stream<gpu>* gpu_stream,
                             const NDArray& src,
                             const NDArray& indices,
                             NDArray* dst) {
#if MXNET_USE_CUDA == 1
    CHECK_EQ(src.storage_type(), kRowSparseStorage)
      << "CopyRetainedRowsToGPU expects row-sparse src NDArray";
    CHECK_EQ(src.ctx().dev_mask(), Context::kCPU)
      << "CopyRetainedRowsToGPU with src on gpu context not supported";
    CHECK_EQ(src.storage_shape()[0], src.shape()[0])
      << "CopyRetainedRowsToGPU only supports src rsp with full rows";
    CHECK_EQ(indices.storage_type(), kDefaultStorage);
    CHECK_EQ(indices.ctx().dev_mask(), Context::kCPU);
    CHECK_EQ(dst->storage_type(), kRowSparseStorage);
    CHECK_EQ(dst->ctx().dev_mask(), Context::kGPU);
    CHECK_EQ(indices.dtype(), dst->aux_type(rowsparse::kIdx))
      << "CopyRetainedRowsToGPU only supports same data type for idx array and dst aux_data(0)";
    if (!src.storage_initialized() || indices.data().Size() == 0U) {
      op::FillZerosRspImpl(gpu_stream, *dst);
      return;
    }
    using namespace mshadow;
    const TBlob& src_data = src.data();
    const TBlob& idx_data = indices.data();
    const size_t row_length = src.shape().ProdShape(1, src.shape().ndim());
    const size_t num_rows_retained = idx_data.Size();
    dst->CheckAndAlloc({Shape1(num_rows_retained)});
    TBlob dst_data = dst->data();
    TBlob dst_idx_data = dst->aux_data(rowsparse::kIdx);
    MSHADOW_TYPE_SWITCH(src.dtype(), DType, {
      MSHADOW_IDX_TYPE_SWITCH(indices.dtype(), IType, {
        // copy idx array
        Tensor<gpu, 1, IType> dst_idx_tensor = dst_idx_data.FlatTo1D<gpu, IType>(gpu_stream);
        const Tensor<cpu, 1, IType> idx_tensor = idx_data.FlatTo1D<cpu, IType>(cpu_stream);
        Copy(dst_idx_tensor, idx_tensor, gpu_stream);
        // copy src data
        const Tensor<cpu, 2, DType> src_data_tensor = src_data.get_with_shape<cpu, 2, DType>(
            Shape2(src_data.shape_[0], row_length), cpu_stream);
        Tensor<gpu, 2, DType> dst_data_tensor = dst_data.get_with_shape<gpu, 2, DType>(
            Shape2(dst_data.shape_[0], row_length), gpu_stream);
        for (size_t i = 0; i < num_rows_retained; ++i) {
          Copy(dst_data_tensor[i], src_data_tensor[idx_tensor[i]], gpu_stream);
        }
      })
    })
#else
    LOG(FATAL) << "GPU not enabled";
#endif
  }

  // reduce sum into val[0]
  inline void ReduceSumCPU(const std::vector<NDArray> &in_data) {
    MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, {
      std::vector<DType*> dptr(in_data.size());
      for (size_t i = 0; i < in_data.size(); ++i) {
        TBlob data = in_data[i].data();
        CHECK(data.CheckContiguous());
        dptr[i] = data.FlatTo2D<cpu, DType>().dptr_;
      }
      size_t total = in_data[0].shape().Size();
      ReduceSumCPUImpl(dptr, total);
    });
  }

  // serial implementation of reduce sum for row sparse NDArray.
  inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) {
    using namespace rowsparse;
    using namespace mshadow;
    auto stype = out->storage_type();
    CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype;
    size_t total_num_rows = 0;
    size_t num_in = in.size();
    // skip the ones with empty indices and values
    std::vector<bool> skip(num_in, false);
    // the values tensor of the inputs
    MSHADOW_TYPE_SWITCH(out->dtype(), DType, {
      MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, {
        std::vector<Tensor<cpu, 2, DType>> in_vals(num_in);
        std::vector<Tensor<cpu, 1, IType>> in_indices(num_in);
        // offset to the values tensor of all inputs
        std::vector<size_t> offsets(num_in, 0);
        std::vector<size_t> num_rows(num_in, 0);
        for (size_t i = 0; i < num_in; i++) {
          if (!in[i].storage_initialized()) {
            skip[i] = true;
            continue;
          }
          auto size = in[i].aux_shape(kIdx).Size();
          num_rows[i] = size;
          total_num_rows += size;
          in_vals[i] = in[i].data().FlatTo2D<cpu, DType>();
          in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>();
        }
        std::vector<IType> indices;
        indices.reserve(total_num_rows);
        // gather indices from all inputs
        for (size_t i = 0; i < num_in; i++) {
          for (size_t j = 0; j < num_rows[i]; j++) {
            indices.emplace_back(in_indices[i][j]);
          }
        }
        CHECK_EQ(indices.size(), total_num_rows);
        // dedup indices
        std::sort(indices.begin(), indices.end());
        indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin());
        // the ones left are the unique non-zero rows
        size_t nnr = indices.size();
        // allocate memory for output
        out->CheckAndAlloc({Shape1(nnr)});
        auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>();
        auto val_data = out->data().FlatTo2D<cpu, DType>();
        for (size_t i = 0; i < nnr; i++) {
          // copy indices back
          idx_data[i] = indices[i];
          bool zeros = true;
          for (size_t j = 0; j < num_in; j++) {
            if (skip[j]) continue;
            size_t offset = offsets[j];
            if (offset < num_rows[j]) {
              if (indices[i] == in_indices[j][offset]) {
                if (zeros) {
                  Copy(val_data[i], in_vals[j][offset], nullptr);
                  zeros = false;
                } else {
                  val_data[i] += in_vals[j][offset];
                }
                offsets[j] += 1;
              }
            }
          }
        }
      });
    });
  }

  template<typename DType>
  inline static void ReduceSumCPU(const std::vector<DType*> &dptr, size_t offset,
                                  index_t size) {
    using namespace mshadow;  // NOLINT(*)
    Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size));
    for (size_t i = 1; i < dptr.size(); i+=4) {
      switch (dptr.size() - i) {
        case 1: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          in_0 += in_1;
          break;
        }
        case 2: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
          in_0 += in_1 + in_2;
          break;
        }
        case 3: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
          in_0 += in_1 + in_2 + in_3;
          break;
        }
        default: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size));
          in_0 += in_1 + in_2 + in_3 + in_4;
          break;
        }
      }
    }
  }

  template<typename DType>
  inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) {
    const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10));
    long ntask = (total + step - 1) / step;  // NOLINT(*)
    if (total < bigarray_bound_ || nthread_reduction_ <= 1) {
      ReduceSumCPU(dptr, 0, total);
    } else {
      #pragma omp parallel for schedule(static) num_threads(nthread_reduction_)
      for (long j = 0; j < ntask; ++j) {  // NOLINT(*)
        size_t k = static_cast<size_t>(j);
        size_t begin = std::min(k * step, total);
        size_t end = std::min((k + 1) * step, total);
        if (j == ntask - 1) CHECK_EQ(end, total);
        ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin));
      }
    }
  }

  /// \brief temporary space for pushing and pulling
  struct BufferEntry {
    /// \brief the merged value
    NDArray merged;
    /// \brief the cpu buffer for gpu data
    std::vector<NDArray> copy_buf;
  };
  std::unordered_map<int, BufferEntry> merge_buf_;
  size_t bigarray_bound_;
  int nthread_reduction_;
  bool is_serial_push_;
};

/**
 * \brief an implementation of Comm that performs reduction on device
 * directly.
 *
 * It is faster if the total device-to-device bandwidth is larger than
 * device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device
 * memory.
 */
class CommDevice : public Comm {
 public:
  CommDevice() {
    inited_ = false;
  }

  virtual ~CommDevice() { }

  void Init(int key, const NDArrayStorageType stype, const TShape& shape,
            int dtype = mshadow::kFloat32) override {
    if (stype == kDefaultStorage) {
      sorted_key_attrs_.push_back(std::make_tuple(key, shape, dtype));
    } else {
      LOG(FATAL) << "storage type " << stype << " not implemented for device yet";
    }
  }

  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      return src[0];
    }
    if (!inited_) {
      std::vector<Context> devs;
      for (const auto& a : src) {
        devs.push_back(a.ctx());
      }
      InitMergeBuffer(devs);
      if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) {
        EnableP2P(devs);
      }
    }

    auto& buf = merge_buf_[key];
    std::vector<NDArray> reduce(src.size());
    CopyFromTo(src[0], &(buf.merged), priority);
    reduce[0] = buf.merged;

    if (buf.copy_buf.empty()) {
      // TODO(mli) this results in large device memory usage for huge ndarray,
      // such as the largest fullc in VGG. consider to do segment reduce with
      // NDArray.Slice or gpu direct memory access. for the latter, we need to
      // remove some ctx check, and also it reduces 20% perf
      buf.copy_buf.resize(src.size()-1);
      for (size_t i = 0; i < src.size()-1; ++i) {
        buf.copy_buf[i] = NDArray(
          buf.merged.shape(), buf.merged.ctx(), false, buf.merged.dtype());
      }
    }
    for (size_t i = 0; i < src.size()-1; ++i) {
      CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority);
      reduce[i+1] = buf.copy_buf[i];
    }

    ElementwiseSum(reduce, &buf.merged);
    return buf.merged;
  }

  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    if (!inited_) {
      // copy to a random device first
      int dev_id = key % dst.size();
      CopyFromTo(src, dst[dev_id], priority);
      for (size_t i = 0; i < dst.size(); ++i) {
        if (i != static_cast<size_t>(dev_id)) {
          CopyFromTo(*dst[dev_id], dst[i], priority);
        }
      }
    } else {
      auto& buf = merge_buf_[key];
      CopyFromTo(src, &buf.merged, priority);
      for (auto d : dst) {
        CopyFromTo(buf.merged, d, priority);
      }
    }
  }

  void BroadcastRowSparse(int key, const NDArray& src,
                          const std::vector<std::pair<NDArray*, NDArray>>& dst,
                          const bool use_copy,
                          const int priority) override {
    LOG(FATAL) << "Not implemented yet";
  }

 private:
  void EnableP2P(const std::vector<Context>& devs) {
#if MXNET_USE_CUDA
    std::vector<int> gpus;
    for (const auto& d : devs) {
      if (d.dev_mask() == gpu::kDevMask) {
        gpus.push_back(d.dev_id);
      }
    }
    int n = static_cast<int>(gpus.size());
    int enabled = 0;
    std::vector<int> p2p(n*n);
    for (int i = 0; i < n; ++i) {
      cudaSetDevice(gpus[i]);
      for (int j = 0; j < n; j++) {
        int access;
        cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]);
        if (access) {
          cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0);
          if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) {
            ++enabled;
            p2p[i*n+j] = 1;
          }
        }
      }
    }
    if (enabled != n*(n-1)) {
      // print warning info if not fully enabled
      LOG(WARNING) << "only " << enabled << " out of "
                   << n*(n-1) << " GPU pairs are enabled direct access. "
                   << "It may affect the performance. "
                   << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off";
      std::string access(n, '.');
      for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
          access[j] = p2p[i*n+j] ? 'v' : '.';
        }
        LOG(WARNING) << access;
      }
    }
#endif
  }

  using KeyAttrs = std::tuple<int, TShape, int>;
  // try to allocate buffers across devices evenly
  void InitMergeBuffer(const std::vector<Context>& devs) {
    std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), [](
              const KeyAttrs& a, const KeyAttrs& b) {
      return std::get<1>(a).Size() > std::get<1>(b).Size();
    });

    std::unordered_map<int, std::pair<Context, size_t>> ctx_info;
    for (auto d : devs) {
      ctx_info[d.dev_id] = std::make_pair(d, 0);
    }
    for (size_t i = 0; i < sorted_key_attrs_.size(); ++i) {
      int key = std::get<0>(sorted_key_attrs_[i]);
      TShape s = std::get<1>(sorted_key_attrs_[i]);
      int type = std::get<2>(sorted_key_attrs_[i]);
      auto& buf = merge_buf_[key];
      Context ctx;
      size_t min_size = std::numeric_limits<size_t>::max();
      for (auto it = ctx_info.begin(); it != ctx_info.end(); ++it) {
        size_t size = it->second.second;
        if (size <= min_size) {
          ctx = it->second.first;
          min_size = size;
        }
      }
      buf.merged = NDArray(s, ctx, false, type);
      ctx_info[ctx.dev_id].second += s.Size();
    }
    inited_ = true;
  }

  std::vector<KeyAttrs> sorted_key_attrs_;
  /// \brief temporary space for pushing and pulling
  struct BufferEntry {
    /// \brief the merged value
    NDArray merged;
    /// \brief the gpu buffer
    std::vector<NDArray> copy_buf;
  };
  std::unordered_map<int, BufferEntry> merge_buf_;
  bool inited_;
};

}  // namespace kvstore
}  // namespace mxnet
#endif  // MXNET_KVSTORE_COMM_H_
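//////////////////////////////////////////////////////////////////////////////
// A minimal usage sketch for the interface above (not part of comm.h). It
// assumes a dense float32 key, CPU-only contexts, and that TShape accepts an
// initializer list; the real callers are MXNet's kvstore implementations.
#include <vector>
#include "mxnet/ndarray.h"

inline void CommCpuSketch() {
  using namespace mxnet;
  kvstore::CommCPU comm;
  const int key = 0;
  TShape shape{2, 2};                               // assumed initializer-list ctor
  comm.Init(key, kDefaultStorage, shape);           // allocate the merge buffer
  std::vector<NDArray> grads;
  for (int i = 0; i < 2; ++i) {                     // one gradient per "device"
    grads.emplace_back(shape, Context::CPU(), false, mshadow::kFloat32);
    grads.back() = 1.0f;                            // scalar fill
  }
  const NDArray& sum = comm.Reduce(key, grads, 0);  // src[0] + src[1]
  NDArray weight(shape, Context::CPU(), false, mshadow::kFloat32);
  std::vector<NDArray*> dst = {&weight};
  comm.Broadcast(key, sum, dst, 0);                 // copy the reduced value out
  weight.WaitToRead();                              // engine ops are asynchronous
}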
udr-3.c
/* { dg-do run } */

extern void abort ();
struct S;
void foo (struct S *, struct S *);
#pragma omp declare reduction (+:struct S:foo (&omp_out, &omp_in))
struct S { int s; };

void
foo (struct S *x, struct S *y)
{
  x->s += y->s;
}

int
main ()
{
  struct S s;
  int i = 0;
  s.s = 0;
  #pragma omp parallel reduction (+:s, i)
  {
    if (s.s != 0)
      abort ();
    s.s = 2;
    i = 1;
  }
  if (s.s != 2 * i)
    abort ();
  return 0;
}
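/* A companion sketch (not part of the test above): the same kind of
   user-defined "+" reduction, extended with an initializer clause so each
   thread's private copy is set up by an explicit init function instead of
   default initialization.  The names (T, addT, initT, sum_T) are
   illustrative.  */

struct T { int v; };

void addT (struct T *x, struct T *y) { x->v += y->v; }
void initT (struct T *x) { x->v = 0; }

#pragma omp declare reduction (+:struct T:addT (&omp_out, &omp_in)) \
  initializer (initT (&omp_priv))

int
sum_T (int n)
{
  struct T acc;
  int i;
  acc.v = 0;
  #pragma omp parallel for reduction (+:acc)
  for (i = 0; i < n; i++)
    acc.v += 1;
  return acc.v;	/* n */
}

int
main ()
{
  if (sum_T (100) != 100)
    abort ();
  return 0;
}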
md2_fmt_plug.c
/* MD2 cracker patch for JtR. Hacked together during May of 2013 by Dhiru
 * Kholia <dhiru at openwall.com>.
 *
 * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
 * it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted. */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_md2_;
#elif FMT_REGISTERS_H
john_register_one(&fmt_md2_);
#else

#include <string.h>
#include "arch.h"
#include "sph_md2.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
// OMP_SCALE tuned on core i7 quad core HT
//   1   - 153k
//   64  - 433k
//   128 - 572k
//   256 - 612k
//   512 - 543k
//   1k  - 680k ** chosen
//   2k  - 660k
//   4k  - 670k
//   8k  - 680k
//   16k - 650k
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 32
#else
#define OMP_SCALE (1024)
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP
#include "memdbg.h"

#define FORMAT_LABEL            "MD2"
#define FORMAT_NAME             ""
#define FORMAT_TAG              "$md2$"
#define TAG_LENGTH              5
#define ALGORITHM_NAME          "MD2 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        125
#define BINARY_SIZE             16
#define SALT_SIZE               0
#define BINARY_ALIGN            4
#define SALT_ALIGN              1
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1

static struct fmt_tests md2__tests[] = {
	{"$md2$ab4f496bfb2a530b219ff33031fe06b0", "message digest"},
	{"ab4f496bfb2a530b219ff33031fe06b0", "message digest"},
	{"921adc047dad311394d2b8553002042d","len=125_____________________________________________________________________________________________________________________x"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];

static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;
	int extra;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	if (hexlenl(p, &extra) != 32 || extra)
		return 0;
	return 1;
}

static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + BINARY_SIZE * 2 + 1];

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;
	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	strnzcpy(out + TAG_LENGTH, ciphertext, BINARY_SIZE*2 + 1);
	return out;
}

static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[32];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = ciphertext + TAG_LENGTH;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		sph_md2_context ctx;

		sph_md2_init(&ctx);
		sph_md2(&ctx, saved_key[index], strlen(saved_key[index]));
		sph_md2_close(&ctx, (unsigned char*)crypt_out[index]);
	}
	return count;
}

static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

static void md2_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

struct fmt_main fmt_md2_ = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		md2__tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		md2_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
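/* A standalone sketch (not part of the format plugin above) showing the
 * sph_md2 calls the plugin relies on, compiled against JtR's sph_md2.c.
 * The function name md2_selftest is illustrative; the expected digest is the
 * RFC 1319 test vector already listed in md2__tests above. */
#include <stdio.h>
#include <string.h>
#include "sph_md2.h"

int md2_selftest(void)
{
	sph_md2_context ctx;
	unsigned char digest[16];
	int i;

	sph_md2_init(&ctx);
	sph_md2(&ctx, "message digest", strlen("message digest"));
	sph_md2_close(&ctx, digest);

	/* Should print ab4f496bfb2a530b219ff33031fe06b0. */
	for (i = 0; i < 16; i++)
		printf("%02x", digest[i]);
	printf("\n");
	return 0;
}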
3d25pt_var.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 8;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
        ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(max(0,ceild(16*t2-Nz+5,8)),t1),2*t1-2*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(8*t1+Ny+7,8)),floord(16*t2+Ny+3,8)),floord(16*t1-16*t2+Nz+Ny+5,8));t3++) {
            for (t4=max(max(max(0,ceild(t1-7,8)),ceild(16*t2-Nz-51,64)),ceild(8*t3-Ny-51,64));t4<=min(min(min(min(floord(4*Nt+Nx-9,64),floord(8*t1+Nx+7,64)),floord(16*t2+Nx+3,64)),floord(8*t3+Nx-5,64)),floord(16*t1-16*t2+Nz+Nx+5,64));t4++) {
              for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(64*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),2*t3),Nt-1),2*t1+3),4*t2+2),16*t4+14);t5++) {
                for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
                    lbv=max(64*t4,4*t5+4);
                    ubv=min(64*t4+63,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[(t5+1)%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] =
                          coef[0][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)]
                        + coef[1][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)-1][(-4*t5+t7)][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)+1][(-4*t5+t7)][(-4*t5+t8)])
                        + coef[2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)][(-4*t5+t7)-1][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)][(-4*t5+t7)+1][(-4*t5+t8)])
                        + coef[3][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)-1] + A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)+1])
                        + coef[4][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)-2][(-4*t5+t7)][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)+2][(-4*t5+t7)][(-4*t5+t8)])
                        + coef[5][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)][(-4*t5+t7)-2][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)][(-4*t5+t7)+2][(-4*t5+t8)])
                        + coef[6][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)-2] + A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)+2])
                        + coef[7][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)-3][(-4*t5+t7)][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)+3][(-4*t5+t7)][(-4*t5+t8)])
                        + coef[8][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)][(-4*t5+t7)-3][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)][(-4*t5+t7)+3][(-4*t5+t8)])
                        + coef[9][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)-3] + A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)+3])
                        + coef[10][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)-4][(-4*t5+t7)][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)+4][(-4*t5+t7)][(-4*t5+t8)])
                        + coef[11][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)][(-4*t5+t7)-4][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)][(-4*t5+t7)+4][(-4*t5+t8)])
                        + coef[12][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] * (A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)-4] + A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)+4]);
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
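/* A minimal usage sketch for the timeval_subtract helper defined above; the
 * timed region (busy_work) and function names are illustrative stand-ins. */
#include <stdio.h>
#include <sys/time.h>

int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y);

static void busy_work(void)
{
  volatile long s = 0;
  long i;
  for (i = 0; i < 10000000L; i++)
    s += i;
}

int time_one_region(void)
{
  struct timeval start, end, result;
  gettimeofday(&start, 0);
  busy_work();
  gettimeofday(&end, 0);
  /* Note the argument order used in main() above: (result, end, start). */
  int negative = timeval_subtract(&result, &end, &start);
  double seconds = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
  printf("elapsed: %f s (negative=%d)\n", seconds, negative);
  return 0;
}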
ast-dump-openmp-declare-variant-extensions.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s
// expected-no-diagnostics

int picked1() { return 0; }
int picked2() { return 0; }
int picked3();
int picked4();
int picked5() { return 0; }
int picked6() { return 0; }
int picked7() { return 0; }
int not_picked1() { return 1; }
int not_picked2() { return 2; }
int not_picked3();
int not_picked4();
int not_picked5();
int not_picked6();

#pragma omp declare variant(picked1) match(implementation={extension(match_any)}, device={kind(cpu, gpu)})
int base1() { return 3; }

#pragma omp declare variant(picked2) match(implementation={extension(match_none)}, device={kind(gpu, fpga)})
int base2() { return 4; }

#pragma omp declare variant(picked3) match(implementation={vendor(pgi), extension(match_any)}, device={kind(cpu, gpu)})
int base3() { return 5; }

#pragma omp declare variant(picked4) match(user={condition(0)}, implementation={extension(match_none)}, device={kind(gpu, fpga)})
int base4() { return 6; }

#pragma omp declare variant(picked5) match(user={condition(1)}, implementation={extension(match_all)}, device={kind(cpu)})
int base5() { return 7; }

#pragma omp declare variant(not_picked1) match(implementation={extension(match_any)}, device={kind(gpu, fpga)})
int base6() { return 0; }

#pragma omp declare variant(not_picked2) match(implementation={extension(match_none)}, device={kind(gpu, cpu)})
int base7() { return 0; }

#pragma omp declare variant(not_picked3) match(implementation={vendor(llvm), extension(match_any)}, device={kind(fpga, gpu)})
int base8() { return 0; }

#pragma omp declare variant(not_picked4) match(user={condition(1)}, implementation={extension(match_none)}, device={kind(gpu, fpga)})
int base9() { return 0; }

#pragma omp declare variant(not_picked5) match(user={condition(1)}, implementation={extension(match_all)}, device={kind(cpu, gpu)})
int base10() { return 0; }

#pragma omp declare variant(not_picked6) match(implementation={extension(match_any)})
int base11() { return 0; }

#pragma omp declare variant(picked6) match(implementation={extension(match_all)})
int base12() { return 8; }

#pragma omp declare variant(picked7) match(implementation={extension(match_none)})
int base13() { return 9; }

#pragma omp begin declare variant match(implementation={extension(match_any)}, device={kind(cpu, gpu)})
int overloaded1() { return 0; }
#pragma omp end declare variant

int overloaded2() { return 1; }
#pragma omp begin declare variant match(implementation={extension(match_none)}, device={kind(fpga, gpu)})
int overloaded2() { return 0; }
#pragma omp end declare variant

#pragma omp begin declare variant match(implementation={extension(match_none)}, device={kind(cpu)})
NOT PARSED
#pragma omp end declare variant


int picked3() { return 0; }
int picked4() { return 0; }
int not_picked3() { return 10; }
int not_picked4() { return 11; }
int not_picked5() { return 12; }
int not_picked6() { return 13; }

int test() {
  // Should return 0.
  return base1() + base2() + base3() + base4() + base5() + base6() + base7() +
         base8() + base9() + base10() + base11() + base12() + base13() +
         overloaded1() + overloaded2();
}

// 1) All "picked" versions are called but none of the "non_picked" ones is.
// 2) The overloaded functions that return 0 are called.
// CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, col:27> col:5 referenced picked1 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:15, col:27> // CHECK-NEXT: | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <col:17, col:24> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:24> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_4:0x[a-z0-9]*]] <line:6:1, col:27> col:5 referenced picked2 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_5:0x[a-z0-9]*]] <col:15, col:27> // CHECK-NEXT: | `-ReturnStmt [[ADDR_6:0x[a-z0-9]*]] <col:17, col:24> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_7:0x[a-z0-9]*]] <col:24> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_8:0x[a-z0-9]*]] <line:7:1, col:13> col:5 referenced picked3 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_9:0x[a-z0-9]*]] <line:8:1, col:13> col:5 referenced picked4 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_10:0x[a-z0-9]*]] <line:9:1, col:27> col:5 referenced picked5 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:15, col:27> // CHECK-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <col:17, col:24> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:24> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_14:0x[a-z0-9]*]] <line:10:1, col:27> col:5 referenced picked6 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_15:0x[a-z0-9]*]] <col:15, col:27> // CHECK-NEXT: | `-ReturnStmt [[ADDR_16:0x[a-z0-9]*]] <col:17, col:24> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_17:0x[a-z0-9]*]] <col:24> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_18:0x[a-z0-9]*]] <line:11:1, col:27> col:5 referenced picked7 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_19:0x[a-z0-9]*]] <col:15, col:27> // CHECK-NEXT: | `-ReturnStmt [[ADDR_20:0x[a-z0-9]*]] <col:17, col:24> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_21:0x[a-z0-9]*]] <col:24> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:12:1, col:31> col:5 referenced not_picked1 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_23:0x[a-z0-9]*]] <col:19, col:31> // CHECK-NEXT: | `-ReturnStmt [[ADDR_24:0x[a-z0-9]*]] <col:21, col:28> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_25:0x[a-z0-9]*]] <col:28> 'int' 1 // CHECK-NEXT: |-FunctionDecl [[ADDR_26:0x[a-z0-9]*]] <line:13:1, col:31> col:5 referenced not_picked2 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_27:0x[a-z0-9]*]] <col:19, col:31> // CHECK-NEXT: | `-ReturnStmt [[ADDR_28:0x[a-z0-9]*]] <col:21, col:28> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_29:0x[a-z0-9]*]] <col:28> 'int' 2 // CHECK-NEXT: |-FunctionDecl [[ADDR_30:0x[a-z0-9]*]] <line:14:1, col:17> col:5 referenced not_picked3 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_31:0x[a-z0-9]*]] <line:15:1, col:17> col:5 referenced not_picked4 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_32:0x[a-z0-9]*]] <line:16:1, col:17> col:5 referenced not_picked5 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_33:0x[a-z0-9]*]] <line:17:1, col:17> col:5 referenced not_picked6 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_34:0x[a-z0-9]*]] <line:20:1, col:25> col:5 used base1 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_35:0x[a-z0-9]*]] <col:13, col:25> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_36:0x[a-z0-9]*]] <col:15, col:22> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_37:0x[a-z0-9]*]] <col:22> 'int' 3 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_38:0x[a-z0-9]*]] <line:19:1, col:107> Implicit implementation={extension(match_any)}, device={kind(cpu, gpu)} // CHECK-NEXT: | `-DeclRefExpr 
[[ADDR_39:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'picked1' 'int ({{.*}})' non_odr_use_unevaluated // CHECK-NEXT: |-FunctionDecl [[ADDR_40:0x[a-z0-9]*]] <line:23:1, col:25> col:5 used base2 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_41:0x[a-z0-9]*]] <col:13, col:25> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_42:0x[a-z0-9]*]] <col:15, col:22> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_43:0x[a-z0-9]*]] <col:22> 'int' 4 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_44:0x[a-z0-9]*]] <line:22:1, col:109> Implicit implementation={extension(match_none)}, device={kind(gpu, fpga)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_45:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_4]] 'picked2' 'int ({{.*}})' non_odr_use_unevaluated // CHECK-NEXT: |-FunctionDecl [[ADDR_46:0x[a-z0-9]*]] <line:26:1, col:25> col:5 used base3 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_47:0x[a-z0-9]*]] <col:13, col:25> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_48:0x[a-z0-9]*]] <col:15, col:22> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_49:0x[a-z0-9]*]] <col:22> 'int' 5 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_50:0x[a-z0-9]*]] <line:25:1, col:120> Implicit implementation={vendor(pgi), extension(match_any)}, device={kind(cpu, gpu)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_51:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_8]] 'picked3' 'int ({{.*}})' non_odr_use_unevaluated // CHECK-NEXT: |-FunctionDecl [[ADDR_52:0x[a-z0-9]*]] <line:29:1, col:25> col:5 used base4 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_53:0x[a-z0-9]*]] <col:13, col:25> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_54:0x[a-z0-9]*]] <col:15, col:22> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_55:0x[a-z0-9]*]] <col:22> 'int' 6 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_56:0x[a-z0-9]*]] <line:28:1, col:130> Implicit user={condition(0)}, implementation={extension(match_none)}, device={kind(gpu, fpga)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_57:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_9]] 'picked4' 'int ({{.*}})' non_odr_use_unevaluated // CHECK-NEXT: |-FunctionDecl [[ADDR_58:0x[a-z0-9]*]] <line:32:1, col:25> col:5 used base5 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_59:0x[a-z0-9]*]] <col:13, col:25> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_60:0x[a-z0-9]*]] <col:15, col:22> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_61:0x[a-z0-9]*]] <col:22> 'int' 7 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_62:0x[a-z0-9]*]] <line:31:1, col:123> Implicit user={condition(1)}, implementation={extension(match_all)}, device={kind(cpu)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_63:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_10]] 'picked5' 'int ({{.*}})' non_odr_use_unevaluated // CHECK-NEXT: |-FunctionDecl [[ADDR_64:0x[a-z0-9]*]] <line:35:1, col:25> col:5 used base6 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_65:0x[a-z0-9]*]] <col:13, col:25> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_66:0x[a-z0-9]*]] <col:15, col:22> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_67:0x[a-z0-9]*]] <col:22> 'int' 0 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_68:0x[a-z0-9]*]] <line:34:1, col:112> Implicit implementation={extension(match_any)}, device={kind(gpu, fpga)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_69:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_22]] 'not_picked1' 'int ({{.*}})' non_odr_use_unevaluated // CHECK-NEXT: |-FunctionDecl [[ADDR_70:0x[a-z0-9]*]] <line:38:1, col:25> col:5 used base7 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_71:0x[a-z0-9]*]] 
<col:13, col:25> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_72:0x[a-z0-9]*]] <col:15, col:22> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_73:0x[a-z0-9]*]] <col:22> 'int' 0 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_74:0x[a-z0-9]*]] <line:37:1, col:112> Implicit implementation={extension(match_none)}, device={kind(gpu, cpu)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_75:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_26]] 'not_picked2' 'int ({{.*}})' non_odr_use_unevaluated // CHECK-NEXT: |-FunctionDecl [[ADDR_76:0x[a-z0-9]*]] <line:41:1, col:25> col:5 used base8 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_77:0x[a-z0-9]*]] <col:13, col:25> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_78:0x[a-z0-9]*]] <col:15, col:22> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_79:0x[a-z0-9]*]] <col:22> 'int' 0 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_80:0x[a-z0-9]*]] <line:40:1, col:126> Implicit implementation={vendor(llvm), extension(match_any)}, device={kind(fpga, gpu)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_81:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_30]] 'not_picked3' 'int ({{.*}})' non_odr_use_unevaluated // CHECK-NEXT: |-FunctionDecl [[ADDR_82:0x[a-z0-9]*]] <line:44:1, col:25> col:5 used base9 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_83:0x[a-z0-9]*]] <col:13, col:25> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_84:0x[a-z0-9]*]] <col:15, col:22> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_85:0x[a-z0-9]*]] <col:22> 'int' 0 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_86:0x[a-z0-9]*]] <line:43:1, col:134> Implicit user={condition(1)}, implementation={extension(match_none)}, device={kind(gpu, fpga)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_87:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_31]] 'not_picked4' 'int ({{.*}})' non_odr_use_unevaluated // CHECK-NEXT: |-FunctionDecl [[ADDR_88:0x[a-z0-9]*]] <line:47:1, col:26> col:5 used base10 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_89:0x[a-z0-9]*]] <col:14, col:26> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_90:0x[a-z0-9]*]] <col:16, col:23> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_91:0x[a-z0-9]*]] <col:23> 'int' 0 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_92:0x[a-z0-9]*]] <line:46:1, col:132> Implicit user={condition(1)}, implementation={extension(match_all)}, device={kind(cpu, gpu)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_93:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_32]] 'not_picked5' 'int ({{.*}})' non_odr_use_unevaluated // CHECK-NEXT: |-FunctionDecl [[ADDR_94:0x[a-z0-9]*]] <line:50:1, col:26> col:5 used base11 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_95:0x[a-z0-9]*]] <col:14, col:26> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_96:0x[a-z0-9]*]] <col:16, col:23> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_97:0x[a-z0-9]*]] <col:23> 'int' 0 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_98:0x[a-z0-9]*]] <line:49:1, col:86> Implicit implementation={extension(match_any)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_99:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_33]] 'not_picked6' 'int ({{.*}})' non_odr_use_unevaluated // CHECK-NEXT: |-FunctionDecl [[ADDR_100:0x[a-z0-9]*]] <line:53:1, col:26> col:5 used base12 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_101:0x[a-z0-9]*]] <col:14, col:26> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_102:0x[a-z0-9]*]] <col:16, col:23> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_103:0x[a-z0-9]*]] <col:23> 'int' 8 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_104:0x[a-z0-9]*]] <line:52:1, col:82> Implicit 
implementation={extension(match_all)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_105:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_14]] 'picked6' 'int ({{.*}})' non_odr_use_unevaluated // CHECK-NEXT: |-FunctionDecl [[ADDR_106:0x[a-z0-9]*]] <line:56:1, col:26> col:5 used base13 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_107:0x[a-z0-9]*]] <col:14, col:26> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_108:0x[a-z0-9]*]] <col:16, col:23> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_109:0x[a-z0-9]*]] <col:23> 'int' 9 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_110:0x[a-z0-9]*]] <line:55:1, col:83> Implicit implementation={extension(match_none)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_111:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_18]] 'picked7' 'int ({{.*}})' non_odr_use_unevaluated // CHECK-NEXT: |-FunctionDecl [[ADDR_112:0x[a-z0-9]*]] <line:59:1, col:17> col:5 implicit used overloaded1 'int ({{.*}})' // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_113:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={extension(match_any)}, device={kind(cpu, gpu)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_114:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_115:0x[a-z0-9]*]] 'overloaded1[implementation={extension(match_any)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_115]] <col:1, col:31> col:1 overloaded1[implementation={extension(match_any)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_116:0x[a-z0-9]*]] <col:19, col:31> // CHECK-NEXT: | `-ReturnStmt [[ADDR_117:0x[a-z0-9]*]] <col:21, col:28> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_118:0x[a-z0-9]*]] <col:28> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_119:0x[a-z0-9]*]] <line:62:1, col:31> col:5 used overloaded2 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_120:0x[a-z0-9]*]] <col:19, col:31> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_121:0x[a-z0-9]*]] <col:21, col:28> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_122:0x[a-z0-9]*]] <col:28> 'int' 1 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_123:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={extension(match_none)}, device={kind(fpga, gpu)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_124:0x[a-z0-9]*]] <line:64:1> 'int ({{.*}})' Function [[ADDR_125:0x[a-z0-9]*]] 'overloaded2[implementation={extension(match_none)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_125]] <col:1, col:31> col:1 overloaded2[implementation={extension(match_none)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_126:0x[a-z0-9]*]] <col:19, col:31> // CHECK-NEXT: | `-ReturnStmt [[ADDR_127:0x[a-z0-9]*]] <col:21, col:28> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_128:0x[a-z0-9]*]] <col:28> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_129:0x[a-z0-9]*]] prev [[ADDR_8]] <line:72:1, col:27> col:5 picked3 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_130:0x[a-z0-9]*]] <col:15, col:27> // CHECK-NEXT: | `-ReturnStmt [[ADDR_131:0x[a-z0-9]*]] <col:17, col:24> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_132:0x[a-z0-9]*]] <col:24> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_133:0x[a-z0-9]*]] prev [[ADDR_9]] <line:73:1, col:27> col:5 picked4 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_134:0x[a-z0-9]*]] <col:15, col:27> // CHECK-NEXT: | `-ReturnStmt [[ADDR_135:0x[a-z0-9]*]] <col:17, col:24> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_136:0x[a-z0-9]*]] <col:24> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_137:0x[a-z0-9]*]] prev [[ADDR_30]] <line:74:1, col:32> col:5 not_picked3 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_138:0x[a-z0-9]*]] <col:19, col:32> // 
CHECK-NEXT: | `-ReturnStmt [[ADDR_139:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_140:0x[a-z0-9]*]] <col:28> 'int' 10
// CHECK-NEXT: |-FunctionDecl [[ADDR_141:0x[a-z0-9]*]] prev [[ADDR_31]] <line:75:1, col:32> col:5 not_picked4 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_142:0x[a-z0-9]*]] <col:19, col:32>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_143:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_144:0x[a-z0-9]*]] <col:28> 'int' 11
// CHECK-NEXT: |-FunctionDecl [[ADDR_145:0x[a-z0-9]*]] prev [[ADDR_32]] <line:76:1, col:32> col:5 not_picked5 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_146:0x[a-z0-9]*]] <col:19, col:32>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_147:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_148:0x[a-z0-9]*]] <col:28> 'int' 12
// CHECK-NEXT: |-FunctionDecl [[ADDR_149:0x[a-z0-9]*]] prev [[ADDR_33]] <line:77:1, col:32> col:5 not_picked6 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_150:0x[a-z0-9]*]] <col:19, col:32>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_151:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_152:0x[a-z0-9]*]] <col:28> 'int' 13
// CHECK-NEXT: `-FunctionDecl [[ADDR_153:0x[a-z0-9]*]] <line:79:1, line:84:1> line:79:5 test 'int ({{.*}})'
// CHECK-NEXT: `-CompoundStmt [[ADDR_154:0x[a-z0-9]*]] <col:12, line:84:1>
// CHECK-NEXT: `-ReturnStmt [[ADDR_155:0x[a-z0-9]*]] <line:81:3, line:83:38>
// CHECK-NEXT: `-BinaryOperator [[ADDR_156:0x[a-z0-9]*]] <line:81:10, line:83:38> 'int' '+'
// CHECK-NEXT: |-BinaryOperator [[ADDR_157:0x[a-z0-9]*]] <line:81:10, line:83:22> 'int' '+'
// CHECK-NEXT: | |-BinaryOperator [[ADDR_158:0x[a-z0-9]*]] <line:81:10, line:82:70> 'int' '+'
// CHECK-NEXT: | | |-BinaryOperator [[ADDR_159:0x[a-z0-9]*]] <line:81:10, line:82:59> 'int' '+'
// CHECK-NEXT: | | | |-BinaryOperator [[ADDR_160:0x[a-z0-9]*]] <line:81:10, line:82:48> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator [[ADDR_161:0x[a-z0-9]*]] <line:81:10, line:82:37> 'int' '+'
// CHECK-NEXT: | | | | | |-BinaryOperator [[ADDR_162:0x[a-z0-9]*]] <line:81:10, line:82:26> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator [[ADDR_163:0x[a-z0-9]*]] <line:81:10, line:82:16> 'int' '+'
// CHECK-NEXT: | | | | | | | |-BinaryOperator [[ADDR_164:0x[a-z0-9]*]] <line:81:10, col:76> 'int' '+'
// CHECK-NEXT: | | | | | | | | |-BinaryOperator [[ADDR_165:0x[a-z0-9]*]] <col:10, col:66> 'int' '+'
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator [[ADDR_166:0x[a-z0-9]*]] <col:10, col:56> 'int' '+'
// CHECK-NEXT: | | | | | | | | | | |-BinaryOperator [[ADDR_167:0x[a-z0-9]*]] <col:10, col:46> 'int' '+'
// CHECK-NEXT: | | | | | | | | | | | |-BinaryOperator [[ADDR_168:0x[a-z0-9]*]] <col:10, col:36> 'int' '+'
// CHECK-NEXT: | | | | | | | | | | | | |-BinaryOperator [[ADDR_169:0x[a-z0-9]*]] <col:10, col:26> 'int' '+'
// CHECK-NEXT: | | | | | | | | | | | | | |-PseudoObjectExpr [[ADDR_170:0x[a-z0-9]*]] <col:10, col:16> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | | |-CallExpr [[ADDR_171:0x[a-z0-9]*]] <col:10, col:16> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_172:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | | | | `-DeclRefExpr [[ADDR_173:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_34]] 'base1' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | | | | | | | | `-CallExpr [[ADDR_174:0x[a-z0-9]*]] <line:19:29, line:81:16> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_175:0x[a-z0-9]*]] <line:19:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | | | `-DeclRefExpr [[ADDR_39]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'picked1' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | | | | | | | | | | | | `-PseudoObjectExpr [[ADDR_176:0x[a-z0-9]*]] <line:81:20, col:26> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | |-CallExpr [[ADDR_177:0x[a-z0-9]*]] <col:20, col:26> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_178:0x[a-z0-9]*]] <col:20> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | | | `-DeclRefExpr [[ADDR_179:0x[a-z0-9]*]] <col:20> 'int ({{.*}})' {{.*}}Function [[ADDR_40]] 'base2' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | | | | | | | `-CallExpr [[ADDR_180:0x[a-z0-9]*]] <line:22:29, line:81:26> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_181:0x[a-z0-9]*]] <line:22:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | | `-DeclRefExpr [[ADDR_45]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_4]] 'picked2' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | | | | | | | | | | | `-PseudoObjectExpr [[ADDR_182:0x[a-z0-9]*]] <line:81:30, col:36> 'int'
// CHECK-NEXT: | | | | | | | | | | | | |-CallExpr [[ADDR_183:0x[a-z0-9]*]] <col:30, col:36> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_184:0x[a-z0-9]*]] <col:30> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | | `-DeclRefExpr [[ADDR_185:0x[a-z0-9]*]] <col:30> 'int ({{.*}})' {{.*}}Function [[ADDR_46]] 'base3' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | | | | | | `-CallExpr [[ADDR_186:0x[a-z0-9]*]] <line:25:29, line:81:36> 'int'
// CHECK-NEXT: | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_187:0x[a-z0-9]*]] <line:25:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | `-DeclRefExpr [[ADDR_51]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_8]] 'picked3' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | | | | | | | | | | `-PseudoObjectExpr [[ADDR_188:0x[a-z0-9]*]] <line:81:40, col:46> 'int'
// CHECK-NEXT: | | | | | | | | | | | |-CallExpr [[ADDR_189:0x[a-z0-9]*]] <col:40, col:46> 'int'
// CHECK-NEXT: | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_190:0x[a-z0-9]*]] <col:40> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | `-DeclRefExpr [[ADDR_191:0x[a-z0-9]*]] <col:40> 'int ({{.*}})' {{.*}}Function [[ADDR_52]] 'base4' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | | | | | `-CallExpr [[ADDR_192:0x[a-z0-9]*]] <line:28:29, line:81:46> 'int'
// CHECK-NEXT: | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_193:0x[a-z0-9]*]] <line:28:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr [[ADDR_57]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_9]] 'picked4' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | | | | | | | | | `-PseudoObjectExpr [[ADDR_194:0x[a-z0-9]*]] <line:81:50, col:56> 'int'
// CHECK-NEXT: | | | | | | | | | | |-CallExpr [[ADDR_195:0x[a-z0-9]*]] <col:50, col:56> 'int'
// CHECK-NEXT: | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_196:0x[a-z0-9]*]] <col:50> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr [[ADDR_197:0x[a-z0-9]*]] <col:50> 'int ({{.*}})' {{.*}}Function [[ADDR_58]] 'base5' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | | | | `-CallExpr [[ADDR_198:0x[a-z0-9]*]] <line:31:29, line:81:56> 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr [[ADDR_199:0x[a-z0-9]*]] <line:31:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr [[ADDR_63]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_10]] 'picked5' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | | | | | | | | `-CallExpr [[ADDR_200:0x[a-z0-9]*]] <line:81:60, col:66> 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr [[ADDR_201:0x[a-z0-9]*]] <col:60> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr [[ADDR_202:0x[a-z0-9]*]] <col:60> 'int ({{.*}})' {{.*}}Function [[ADDR_64]] 'base6' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | | `-CallExpr [[ADDR_203:0x[a-z0-9]*]] <col:70, col:76> 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr [[ADDR_204:0x[a-z0-9]*]] <col:70> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr [[ADDR_205:0x[a-z0-9]*]] <col:70> 'int ({{.*}})' {{.*}}Function [[ADDR_70]] 'base7' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | `-PseudoObjectExpr [[ADDR_206:0x[a-z0-9]*]] <line:82:10, col:16> 'int'
// CHECK-NEXT: | | | | | | | |-CallExpr [[ADDR_207:0x[a-z0-9]*]] <col:10, col:16> 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr [[ADDR_208:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr [[ADDR_209:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_76]] 'base8' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | `-CallExpr [[ADDR_210:0x[a-z0-9]*]] <line:40:29, line:82:16> 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr [[ADDR_211:0x[a-z0-9]*]] <line:40:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr [[ADDR_81]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_30]] 'not_picked3' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | | | | | `-CallExpr [[ADDR_212:0x[a-z0-9]*]] <line:82:20, col:26> 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr [[ADDR_213:0x[a-z0-9]*]] <col:20> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | `-DeclRefExpr [[ADDR_214:0x[a-z0-9]*]] <col:20> 'int ({{.*}})' {{.*}}Function [[ADDR_82]] 'base9' 'int ({{.*}})'
// CHECK-NEXT: | | | | | `-CallExpr [[ADDR_215:0x[a-z0-9]*]] <col:30, col:37> 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr [[ADDR_216:0x[a-z0-9]*]] <col:30> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | `-DeclRefExpr [[ADDR_217:0x[a-z0-9]*]] <col:30> 'int ({{.*}})' {{.*}}Function [[ADDR_88]] 'base10' 'int ({{.*}})'
// CHECK-NEXT: | | | | `-CallExpr [[ADDR_218:0x[a-z0-9]*]] <col:41, col:48> 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_219:0x[a-z0-9]*]] <col:41> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_220:0x[a-z0-9]*]] <col:41> 'int ({{.*}})' {{.*}}Function [[ADDR_94]] 'base11' 'int ({{.*}})'
// CHECK-NEXT: | | | `-PseudoObjectExpr [[ADDR_221:0x[a-z0-9]*]] <col:52, col:59> 'int'
// CHECK-NEXT: | | | |-CallExpr [[ADDR_222:0x[a-z0-9]*]] <col:52, col:59> 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_223:0x[a-z0-9]*]] <col:52> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_224:0x[a-z0-9]*]] <col:52> 'int ({{.*}})' {{.*}}Function [[ADDR_100]] 'base12' 'int ({{.*}})'
// CHECK-NEXT: | | | `-CallExpr [[ADDR_225:0x[a-z0-9]*]] <line:52:29, line:82:59> 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr [[ADDR_226:0x[a-z0-9]*]] <line:52:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_105]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_14]] 'picked6' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | `-PseudoObjectExpr [[ADDR_227:0x[a-z0-9]*]] <line:82:63, col:70> 'int'
// CHECK-NEXT: | | |-CallExpr [[ADDR_228:0x[a-z0-9]*]] <col:63, col:70> 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr [[ADDR_229:0x[a-z0-9]*]] <col:63> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_230:0x[a-z0-9]*]] <col:63> 'int ({{.*}})' {{.*}}Function [[ADDR_106]] 'base13' 'int ({{.*}})'
// CHECK-NEXT: | | `-CallExpr [[ADDR_231:0x[a-z0-9]*]] <line:55:29, line:82:70> 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_232:0x[a-z0-9]*]] <line:55:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr [[ADDR_111]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_18]] 'picked7' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | `-PseudoObjectExpr [[ADDR_233:0x[a-z0-9]*]] <line:83:10, col:22> 'int'
// CHECK-NEXT: | |-CallExpr [[ADDR_234:0x[a-z0-9]*]] <col:10, col:22> 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_235:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr [[ADDR_236:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_112]] 'overloaded1' 'int ({{.*}})'
// CHECK-NEXT: | `-CallExpr [[ADDR_237:0x[a-z0-9]*]] <line:59:1, line:83:22> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_238:0x[a-z0-9]*]] <line:59:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_114]] <col:1> 'int ({{.*}})' Function [[ADDR_115]] 'overloaded1[implementation={extension(match_any)}]' 'int ({{.*}})'
// CHECK-NEXT: `-PseudoObjectExpr [[ADDR_239:0x[a-z0-9]*]] <line:83:26, col:38> 'int'
// CHECK-NEXT: |-CallExpr [[ADDR_240:0x[a-z0-9]*]] <col:26, col:38> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_241:0x[a-z0-9]*]] <col:26> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_242:0x[a-z0-9]*]] <col:26> 'int ({{.*}})' {{.*}}Function [[ADDR_119]] 'overloaded2' 'int ({{.*}})'
// CHECK-NEXT: `-CallExpr [[ADDR_243:0x[a-z0-9]*]] <line:64:1, line:83:38> 'int'
// CHECK-NEXT: `-ImplicitCastExpr [[ADDR_244:0x[a-z0-9]*]] <line:64:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: `-DeclRefExpr [[ADDR_124]] <col:1> 'int ({{.*}})' Function [[ADDR_125]] 'overloaded2[implementation={extension(match_none)}]' 'int ({{.*}})'
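The dump above belongs to a clang `-ast-dump` FileCheck test for OpenMP `begin declare variant`. As a hedged illustration (reconstructed from the names appearing in the dump, not the verbatim test source), the construct being verified looks roughly like this; a call to the base function is rewritten into a PseudoObjectExpr whose selected callee is the variant, which is mangled with the selector set in brackets:

/* Illustrative sketch only: the real test file is not reproduced here. */
int overloaded1(void) { return 0; }

/* Everything in this range is a variant under the match_any extension; its
   mangled name, 'overloaded1[implementation={extension(match_any)}]', is
   exactly the DeclRefExpr name the CHECK lines above look for. */
#pragma omp begin declare variant match(implementation = {extension(match_any)})
int overloaded1(void) { return 1; }
#pragma omp end declare variant

int use(void) { return overloaded1(); } /* resolves to the variant */

The `match_none` case checked at the end ('overloaded2[implementation={extension(match_none)}]') is the analogous construct with the opposite selector.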
task-taskwait.c
/*
 * task-taskwait.c -- Archer testcase
 */
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// RUN: %libarcher-compile-and-run | FileCheck %s
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
#include "ompt/ompt-signal.h"

int main(int argc, char *argv[]) {
  int var = 0, a = 0;

#pragma omp parallel num_threads(2) shared(var, a)
#pragma omp master
  {
#pragma omp task shared(var, a)
    {
      OMPT_SIGNAL(a);
      OMPT_WAIT(a, 2);
      delay(100);
      var++;
    }

    // Give other thread time to steal the task.
    OMPT_WAIT(a, 1);
    OMPT_SIGNAL(a);

#pragma omp taskwait
    var++;
  }

  fprintf(stderr, "DONE\n");
  int error = (var != 2);
  return error;
}

// CHECK-NOT: ThreadSanitizer: data race
// CHECK-NOT: ThreadSanitizer: reported
// CHECK: DONE
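The ordering this test relies on comes from the OMPT_SIGNAL/OMPT_WAIT pair in "ompt/ompt-signal.h". A minimal sketch of the usual shape of these helpers (an assumption; the real header may differ in detail) treats the shared int as a monotonic event counter:

/* Sketch of the ompt-signal.h handshake; not the verbatim header. */
static void ompt_signal(int *s) {
#pragma omp atomic
  (*s)++;                      /* OMPT_SIGNAL(s): publish one event */
}

static void ompt_wait(int *s, int v) {
  int seen;
  do {                         /* OMPT_WAIT(s, v): spin until *s >= v */
    usleep(10);
#pragma omp atomic read
    seen = *s;
  } while (seen < v);
}

Under that reading, the master thread blocks at OMPT_WAIT(a, 1) until the task has started on the other thread, then its OMPT_SIGNAL(a) releases the task before the taskwait, so both var++ operations are properly ordered.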
morphology.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M   M    OOO    RRRR   PPPP   H   H   OOO   L       OOO    GGGG  Y   Y    %
%   MM MM   O   O   R   R  P   P  H   H  O   O  L      O   O  G      Y   Y    %
%   M M M   O   O   RRRR   PPPP   HHHHH  O   O  L      O   O  G GGG   Y Y     %
%   M   M   O   O   R R    P      H   H  O   O  L      O   O  G   G    Y      %
%   M   M    OOO    R  R   P      H   H   OOO   LLLLL   OOO    GGG     Y      %
%                                                                             %
%                       MagickCore Morphology Methods                         %
%                                                                             %
%                              Software Design                                %
%                              Anthony Thyssen                                %
%                               January 2010                                  %
%                                                                             %
%                                                                             %
%  Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    http://www.imagemagick.org/script/license.php                            %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology.  Just one that is very common for image blurring and sharpening
% effects.  Not only 2D Gaussian blurring, but also 2-pass 1D blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments.  Perhaps even the generation of a kernel from a small image.
*/

/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/prepress.h"
#include "MagickCore/quantize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/registry.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"

/*
  Other global definitions used by module.
*/
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)

/* Integer Factorial Function - for a Binomial kernel */
#if 1
static inline size_t fact(size_t n)
{
  size_t
    f,
    l;

  for (f=1, l=2; l <= n; f=f*l, l++);
  return(f);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif

/* Currently these are only internal to this module */
static void
  CalcKernelMetaData(KernelInfo *),
  ExpandMirrorKernelInfo(KernelInfo *),
  ExpandRotateKernelInfo(KernelInfo *, const double),
  RotateKernelInfo(KernelInfo *, double);

/* Quick function to find last kernel in a kernel list */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  while (kernel->next != (KernelInfo *) NULL)
    kernel=kernel->next;
  return(kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A c q u i r e   K e r n e l   I n f o                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireKernelInfo() takes the given string (generally supplied by the
%  user) and converts it into a Morphology/Convolution Kernel.  This allows
%  users to specify a kernel from a number of pre-defined kernels, or to fully
%  specify their own kernel for a specific Convolution or Morphology
%  Operation.
%
%  The kernel so generated can be any rectangular array of floating point
%  values (doubles) with the 'control point' or 'pixel being affected'
%  anywhere within that array of values.
%
%  Previously IM was restricted to a square of odd size using the exact
%  center as origin.  This is no longer the case: any rectangular kernel,
%  with any element declared as the origin, may be used.  This in turn allows
%  the use of highly asymmetrical kernels.
%
%  The floating point values in the kernel can also include a special value
%  known as 'nan' or 'not a number' to indicate that this value is not part
%  of the kernel array.  This allows you to shape the kernel within its
%  rectangular area.  That is, 'nan' values provide a 'mask' for the kernel
%  shape.  However at least one non-nan value must be provided for correct
%  working of a kernel.
%
%  The returned kernel should be freed using DestroyKernelInfo() when you
%  are finished with it.  Do not free this memory yourself.
%
%  Input kernel definition strings can consist of any of three types.
%
%    "name:args[[@><]"
%         Select from one of the built in kernels, using the name and
%         geometry arguments supplied.  See AcquireKernelBuiltIn()
%
%    "WxH[+X+Y][@><]:num, num, num ..."
%         a kernel of size W by H, with W*H floating point numbers following.
%         the 'center' can optionally be defined at +X+Y (such that +0+0
%         is top left corner).  If not defined the pixel in the center, for
%         odd sizes, or to the immediate top or left of center for even sizes
%         is automatically selected.
%
%    "num, num, num, num, ..."
%         list of floating point numbers defining an 'old style' odd sized
%         square kernel.  At least 9 values should be provided for a 3x3
%         square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
%         Values can be space or comma separated.  This is not recommended.
%
%  You can define a 'list of kernels' which can be used by some morphology
%  operators.  A list is defined as a semicolon-separated list of kernels.
%
%     " kernel ; kernel ; kernel ; "
%
%  Any extra ';' characters, at start, end or between kernel definitions are
%  simply ignored.
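%
%  For illustration only (these sample strings are not part of the original
%  documentation, but follow the three formats described above):
%
%     "Octagon:3"                           a built-in kernel, by name
%     "3x3+1+1: 0,-1,0  -1,5,-1  0,-1,0"    a 3x3 user kernel, origin at +1+1
%     "Diamond ; Square"                    a two-kernel list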
% % The special flags will expand a single kernel, into a list of rotated % kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree % cyclic rotations, while a '>' will generate a list of 90-degree rotations. % The '<' also exands using 90-degree rotates, but giving a 180-degree % reflected kernel before the +/- 90-degree rotations, which can be important % for Thinning operations. % % Note that 'name' kernels will start with an alphabetic character while the % new kernel specification has a ':' character in its specification string. % If neither is the case, it is assumed an old style of a simple list of % numbers generating a odd-sized square kernel has been given. % % The format of the AcquireKernal method is: % % KernelInfo *AcquireKernelInfo(const char *kernel_string) % % A description of each parameter follows: % % o kernel_string: the Morphology/Convolution kernel wanted. % */ /* This was separated so that it could be used as a separate ** array input handling function, such as for -color-matrix */ static KernelInfo *ParseKernelArray(const char *kernel_string) { KernelInfo *kernel; char token[MagickPathExtent]; const char *p, *end; register ssize_t i; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ MagickStatusType flags; GeometryInfo args; kernel=(KernelInfo *) AcquireQuantumMemory(1,sizeof(*kernel)); if (kernel == (KernelInfo *) NULL) return(kernel); (void) ResetMagickMemory(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = kernel->positive_range = 0.0; kernel->type = UserDefinedKernel; kernel->next = (KernelInfo *) NULL; kernel->signature=MagickCoreSignature; if (kernel_string == (const char *) NULL) return(kernel); /* find end of this specific kernel definition string */ end = strchr(kernel_string, ';'); if ( end == (char *) NULL ) end = strchr(kernel_string, '\0'); /* clear flags - for Expanding kernel lists thorugh rotations */ flags = NoValue; /* Has a ':' in argument - New user kernel specification FUTURE: this split on ':' could be done by StringToken() */ p = strchr(kernel_string, ':'); if ( p != (char *) NULL && p < end) { /* ParseGeometry() needs the geometry separated! -- Arrgghh */ memcpy(token, kernel_string, (size_t) (p-kernel_string)); token[p-kernel_string] = '\0'; SetGeometryInfo(&args); flags = ParseGeometry(token, &args); /* Size handling and checks of geometry settings */ if ( (flags & WidthValue) == 0 ) /* if no width then */ args.rho = args.sigma; /* then width = height */ if ( args.rho < 1.0 ) /* if width too small */ args.rho = 1.0; /* then width = 1 */ if ( args.sigma < 1.0 ) /* if height too small */ args.sigma = args.rho; /* then height = width */ kernel->width = (size_t)args.rho; kernel->height = (size_t)args.sigma; /* Offset Handling and Checks */ if ( args.xi < 0.0 || args.psi < 0.0 ) return(DestroyKernelInfo(kernel)); kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi : (ssize_t) (kernel->width-1)/2; kernel->y = ((flags & YValue)!=0) ? 
(ssize_t)args.psi : (ssize_t) (kernel->height-1)/2; if ( kernel->x >= (ssize_t) kernel->width || kernel->y >= (ssize_t) kernel->height ) return(DestroyKernelInfo(kernel)); p++; /* advance beyond the ':' */ } else { /* ELSE - Old old specification, forming odd-square kernel */ /* count up number of values given */ p=(const char *) kernel_string; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\'')) p++; /* ignore "'" chars for convolve filter usage - Cristy */ for (i=0; p < end; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); } /* set the size of the kernel - old sized square */ kernel->width = kernel->height= (size_t) sqrt((double) i+1.0); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; p=(const char *) kernel_string; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\'')) p++; /* ignore "'" chars for convolve filter usage - Cristy */ } /* Read in the kernel values from rest of input string argument */ kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory( kernel->width,kernel->height*sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); kernel->minimum=MagickMaximumValue; kernel->maximum=(-MagickMaximumValue); kernel->negative_range = kernel->positive_range = 0.0; for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); if ( LocaleCompare("nan",token) == 0 || LocaleCompare("-",token) == 0 ) { kernel->values[i] = nan; /* this value is not part of neighbourhood */ } else { kernel->values[i] = StringToDouble(token,(char **) NULL); ( kernel->values[i] < 0) ? ( kernel->negative_range += kernel->values[i] ) : ( kernel->positive_range += kernel->values[i] ); Minimize(kernel->minimum, kernel->values[i]); Maximize(kernel->maximum, kernel->values[i]); } } /* sanity check -- no more values in kernel definition */ GetNextToken(p,&p,MagickPathExtent,token); if ( *token != '\0' && *token != ';' && *token != '\'' ) return(DestroyKernelInfo(kernel)); #if 0 /* this was the old method of handling a incomplete kernel */ if ( i < (ssize_t) (kernel->width*kernel->height) ) { Minimize(kernel->minimum, kernel->values[i]); Maximize(kernel->maximum, kernel->values[i]); for ( ; i < (ssize_t) (kernel->width*kernel->height); i++) kernel->values[i]=0.0; } #else /* Number of values for kernel was not enough - Report Error */ if ( i < (ssize_t) (kernel->width*kernel->height) ) return(DestroyKernelInfo(kernel)); #endif /* check that we recieved at least one real (non-nan) value! 
*/ if (kernel->minimum == MagickMaximumValue) return(DestroyKernelInfo(kernel)); if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */ ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */ else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */ else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */ ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */ return(kernel); } static KernelInfo *ParseKernelName(const char *kernel_string, ExceptionInfo *exception) { char token[MagickPathExtent]; const char *p, *end; GeometryInfo args; KernelInfo *kernel; MagickStatusType flags; ssize_t type; /* Parse special 'named' kernel */ GetNextToken(kernel_string,&p,MagickPathExtent,token); type=ParseCommandOption(MagickKernelOptions,MagickFalse,token); if ( type < 0 || type == UserDefinedKernel ) return((KernelInfo *) NULL); /* not a valid named kernel */ while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';')) p++; end = strchr(p, ';'); /* end of this kernel defintion */ if ( end == (char *) NULL ) end = strchr(p, '\0'); /* ParseGeometry() needs the geometry separated! -- Arrgghh */ memcpy(token, p, (size_t) (end-p)); token[end-p] = '\0'; SetGeometryInfo(&args); flags = ParseGeometry(token, &args); #if 0 /* For Debugging Geometry Input */ (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n", flags, args.rho, args.sigma, args.xi, args.psi ); #endif /* special handling of missing values in input string */ switch( type ) { /* Shape Kernel Defaults */ case UnityKernel: if ( (flags & WidthValue) == 0 ) args.rho = 1.0; /* Default scale = 1.0, zero is valid */ break; case SquareKernel: case DiamondKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: if ( (flags & HeightValue) == 0 ) args.sigma = 1.0; /* Default scale = 1.0, zero is valid */ break; case RingKernel: if ( (flags & XValue) == 0 ) args.xi = 1.0; /* Default scale = 1.0, zero is valid */ break; case RectangleKernel: /* Rectangle - set size defaults */ if ( (flags & WidthValue) == 0 ) /* if no width then */ args.rho = args.sigma; /* then width = height */ if ( args.rho < 1.0 ) /* if width too small */ args.rho = 3; /* then width = 3 */ if ( args.sigma < 1.0 ) /* if height too small */ args.sigma = args.rho; /* then height = width */ if ( (flags & XValue) == 0 ) /* center offset if not defined */ args.xi = (double)(((ssize_t)args.rho-1)/2); if ( (flags & YValue) == 0 ) args.psi = (double)(((ssize_t)args.sigma-1)/2); break; /* Distance Kernel Defaults */ case ChebyshevKernel: case ManhattanKernel: case OctagonalKernel: case EuclideanKernel: if ( (flags & HeightValue) == 0 ) /* no distance scale */ args.sigma = 100.0; /* default distance scaling */ else if ( (flags & AspectValue ) != 0 ) /* '!' 
flag */ args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */ else if ( (flags & PercentValue ) != 0 ) /* '%' flag */ args.sigma *= QuantumRange/100.0; /* percentage of color range */ break; default: break; } kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception); if ( kernel == (KernelInfo *) NULL ) return(kernel); /* global expand to rotated kernel list - only for single kernels */ if ( kernel->next == (KernelInfo *) NULL ) { if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 45.0); else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 90.0); else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */ ExpandMirrorKernelInfo(kernel); } return(kernel); } MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string, ExceptionInfo *exception) { KernelInfo *kernel, *new_kernel; char *kernel_cache, token[MagickPathExtent]; const char *p; if (kernel_string == (const char *) NULL) return(ParseKernelArray(kernel_string)); p=kernel_string; kernel_cache=(char *) NULL; if (*kernel_string == '@') { kernel_cache=FileToString(kernel_string+1,~0UL,exception); if (kernel_cache == (char *) NULL) return((KernelInfo *) NULL); p=(const char *) kernel_cache; } kernel=NULL; while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token), *token != '\0') { /* ignore extra or multiple ';' kernel separators */ if (*token != ';') { /* tokens starting with alpha is a Named kernel */ if (isalpha((int) ((unsigned char) *token)) != 0) new_kernel=ParseKernelName(p,exception); else /* otherwise a user defined kernel array */ new_kernel=ParseKernelArray(p); /* Error handling -- this is not proper error handling! */ if (new_kernel == (KernelInfo *) NULL) { if (kernel != (KernelInfo *) NULL) kernel=DestroyKernelInfo(kernel); return((KernelInfo *) NULL); } /* initialise or append the kernel list */ if (kernel == (KernelInfo *) NULL) kernel=new_kernel; else LastKernelInfo(kernel)->next=new_kernel; } /* look for the next kernel in list */ p=strchr(p,';'); if (p == (char *) NULL) break; p++; } if (kernel_cache != (char *) NULL) kernel_cache=DestroyString(kernel_cache); return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e K e r n e l B u i l t I n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireKernelBuiltIn() returned one of the 'named' built-in types of % kernels used for special purposes such as gaussian blurring, skeleton % pruning, and edge distance determination. % % They take a KernelType, and a set of geometry style arguments, which were % typically decoded from a user supplied string, or from a more complex % Morphology Method that was requested. % % The format of the AcquireKernalBuiltIn method is: % % KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type, % const GeometryInfo args) % % A description of each parameter follows: % % o type: the pre-defined type of kernel wanted % % o args: arguments defining or modifying the kernel % % Convolution Kernels % % Unity % The a No-Op or Scaling single element kernel. % % Gaussian:{radius},{sigma} % Generate a two-dimensional gaussian kernel, as used by -gaussian. % The sigma for the curve is required. The resulting kernel is % normalized, % % If 'sigma' is zero, you get a single pixel on a field of zeros. 
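%
%       (Editorial aside, derived from the kernel-generation code later in
%       this file rather than from the original text: before normalization
%       the sampled values are
%
%           G(u,v) = \frac{1}{2\pi\sigma^2}
%                    \exp\!\left(-\frac{u^2+v^2}{2\sigma^2}\right)
%
%       with (u,v) measured as offsets from the kernel origin.)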
% % NOTE: that the 'radius' is optional, but if provided can limit (clip) % the final size of the resulting kernel to a square 2*radius+1 in size. % The radius should be at least 2 times that of the sigma value, or % sever clipping and aliasing may result. If not given or set to 0 the % radius will be determined so as to produce the best minimal error % result, which is usally much larger than is normally needed. % % LoG:{radius},{sigma} % "Laplacian of a Gaussian" or "Mexician Hat" Kernel. % The supposed ideal edge detection, zero-summing kernel. % % An alturnative to this kernel is to use a "DoG" with a sigma ratio of % approx 1.6 (according to wikipedia). % % DoG:{radius},{sigma1},{sigma2} % "Difference of Gaussians" Kernel. % As "Gaussian" but with a gaussian produced by 'sigma2' subtracted % from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1. % The result is a zero-summing kernel. % % Blur:{radius},{sigma}[,{angle}] % Generates a 1 dimensional or linear gaussian blur, at the angle given % (current restricted to orthogonal angles). If a 'radius' is given the % kernel is clipped to a width of 2*radius+1. Kernel can be rotated % by a 90 degree angle. % % If 'sigma' is zero, you get a single pixel on a field of zeros. % % Note that two convolutions with two "Blur" kernels perpendicular to % each other, is equivalent to a far larger "Gaussian" kernel with the % same sigma value, However it is much faster to apply. This is how the % "-blur" operator actually works. % % Comet:{width},{sigma},{angle} % Blur in one direction only, much like how a bright object leaves % a comet like trail. The Kernel is actually half a gaussian curve, % Adding two such blurs in opposite directions produces a Blur Kernel. % Angle can be rotated in multiples of 90 degrees. % % Note that the first argument is the width of the kernel and not the % radius of the kernel. % % Binomial:[{radius}] % Generate a discrete kernel using a 2 dimentional Pascel's Triangle % of values. Used for special forma of image filters. % % # Still to be implemented... % # % # Filter2D % # Filter1D % # Set kernel values using a resize filter, and given scale (sigma) % # Cylindrical or Linear. Is this possible with an image? % # % % Named Constant Convolution Kernels % % All these are unscaled, zero-summing kernels by default. As such for % non-HDRI version of ImageMagick some form of normalization, user scaling, % and biasing the results is recommended, to prevent the resulting image % being 'clipped'. % % The 3x3 kernels (most of these) can be circularly rotated in multiples of % 45 degrees to generate the 8 angled varients of each of the kernels. 
% % Laplacian:{type} % Discrete Lapacian Kernels, (without normalization) % Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood) % Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood) % Type 2 : 3x3 with center:4 edge:1 corner:-2 % Type 3 : 3x3 with center:4 edge:-2 corner:1 % Type 5 : 5x5 laplacian % Type 7 : 7x7 laplacian % Type 15 : 5x5 LoG (sigma approx 1.4) % Type 19 : 9x9 LoG (sigma approx 1.4) % % Sobel:{angle} % Sobel 'Edge' convolution kernel (3x3) % | -1, 0, 1 | % | -2, 0,-2 | % | -1, 0, 1 | % % Roberts:{angle} % Roberts convolution kernel (3x3) % | 0, 0, 0 | % | -1, 1, 0 | % | 0, 0, 0 | % % Prewitt:{angle} % Prewitt Edge convolution kernel (3x3) % | -1, 0, 1 | % | -1, 0, 1 | % | -1, 0, 1 | % % Compass:{angle} % Prewitt's "Compass" convolution kernel (3x3) % | -1, 1, 1 | % | -1,-2, 1 | % | -1, 1, 1 | % % Kirsch:{angle} % Kirsch's "Compass" convolution kernel (3x3) % | -3,-3, 5 | % | -3, 0, 5 | % | -3,-3, 5 | % % FreiChen:{angle} % Frei-Chen Edge Detector is based on a kernel that is similar to % the Sobel Kernel, but is designed to be isotropic. That is it takes % into account the distance of the diagonal in the kernel. % % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | % | 1, 0, -1 | % % FreiChen:{type},{angle} % % Frei-Chen Pre-weighted kernels... % % Type 0: default un-nomalized version shown above. % % Type 1: Orthogonal Kernel (same as type 11 below) % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 2: Diagonal form of Kernel... % | 1, sqrt(2), 0 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 0, -sqrt(2) -1 | % % However this kernel is als at the heart of the FreiChen Edge Detection % Process which uses a set of 9 specially weighted kernel. These 9 % kernels not be normalized, but directly applied to the image. The % results is then added together, to produce the intensity of an edge in % a specific direction. The square root of the pixel value can then be % taken as the cosine of the edge, and at least 2 such runs at 90 degrees % from each other, both the direction and the strength of the edge can be % determined. % % Type 10: All 9 of the following pre-weighted kernels... % % Type 11: | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 12: | 1, sqrt(2), 1 | % | 0, 0, 0 | / 2*sqrt(2) % | 1, sqrt(2), 1 | % % Type 13: | sqrt(2), -1, 0 | % | -1, 0, 1 | / 2*sqrt(2) % | 0, 1, -sqrt(2) | % % Type 14: | 0, 1, -sqrt(2) | % | -1, 0, 1 | / 2*sqrt(2) % | sqrt(2), -1, 0 | % % Type 15: | 0, -1, 0 | % | 1, 0, 1 | / 2 % | 0, -1, 0 | % % Type 16: | 1, 0, -1 | % | 0, 0, 0 | / 2 % | -1, 0, 1 | % % Type 17: | 1, -2, 1 | % | -2, 4, -2 | / 6 % | -1, -2, 1 | % % Type 18: | -2, 1, -2 | % | 1, 4, 1 | / 6 % | -2, 1, -2 | % % Type 19: | 1, 1, 1 | % | 1, 1, 1 | / 3 % | 1, 1, 1 | % % The first 4 are for edge detection, the next 4 are for line detection % and the last is to add a average component to the results. % % Using a special type of '-1' will return all 9 pre-weighted kernels % as a multi-kernel list, so that you can use them directly (without % normalization) with the special "-set option:morphology:compose Plus" % setting to apply the full FreiChen Edge Detection Technique. % % If 'type' is large it will be taken to be an actual rotation angle for % the default FreiChen (type 0) kernel. As such FreiChen:45 will look % like a Sobel:45 but with 'sqrt(2)' instead of '2' values. 
% % WARNING: The above was layed out as per % http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf % But rotated 90 degrees so direction is from left rather than the top. % I have yet to find any secondary confirmation of the above. The only % other source found was actual source code at % http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf % Neigher paper defineds the kernels in a way that looks locical or % correct when taken as a whole. % % Boolean Kernels % % Diamond:[{radius}[,{scale}]] % Generate a diamond shaped kernel with given radius to the points. % Kernel size will again be radius*2+1 square and defaults to radius 1, % generating a 3x3 kernel that is slightly larger than a square. % % Square:[{radius}[,{scale}]] % Generate a square shaped kernel of size radius*2+1, and defaulting % to a 3x3 (radius 1). % % Octagon:[{radius}[,{scale}]] % Generate octagonal shaped kernel of given radius and constant scale. % Default radius is 3 producing a 7x7 kernel. A radius of 1 will result % in "Diamond" kernel. % % Disk:[{radius}[,{scale}]] % Generate a binary disk, thresholded at the radius given, the radius % may be a float-point value. Final Kernel size is floor(radius)*2+1 % square. A radius of 5.3 is the default. % % NOTE: That a low radii Disk kernels produce the same results as % many of the previously defined kernels, but differ greatly at larger % radii. Here is a table of equivalences... % "Disk:1" => "Diamond", "Octagon:1", or "Cross:1" % "Disk:1.5" => "Square" % "Disk:2" => "Diamond:2" % "Disk:2.5" => "Octagon" % "Disk:2.9" => "Square:2" % "Disk:3.5" => "Octagon:3" % "Disk:4.5" => "Octagon:4" % "Disk:5.4" => "Octagon:5" % "Disk:6.4" => "Octagon:6" % All other Disk shapes are unique to this kernel, but because a "Disk" % is more circular when using a larger radius, using a larger radius is % preferred over iterating the morphological operation. % % Rectangle:{geometry} % Simply generate a rectangle of 1's with the size given. You can also % specify the location of the 'control point', otherwise the closest % pixel to the center of the rectangle is selected. % % Properly centered and odd sized rectangles work the best. % % Symbol Dilation Kernels % % These kernel is not a good general morphological kernel, but is used % more for highlighting and marking any single pixels in an image using, % a "Dilate" method as appropriate. % % For the same reasons iterating these kernels does not produce the % same result as using a larger radius for the symbol. % % Plus:[{radius}[,{scale}]] % Cross:[{radius}[,{scale}]] % Generate a kernel in the shape of a 'plus' or a 'cross' with % a each arm the length of the given radius (default 2). % % NOTE: "plus:1" is equivalent to a "Diamond" kernel. % % Ring:{radius1},{radius2}[,{scale}] % A ring of the values given that falls between the two radii. % Defaults to a ring of approximataly 3 radius in a 7x7 kernel. % This is the 'edge' pixels of the default "Disk" kernel, % More specifically, "Ring" -> "Ring:2.5,3.5,1.0" % % Hit and Miss Kernels % % Peak:radius1,radius2 % Find any peak larger than the pixels the fall between the two radii. % The default ring of pixels is as per "Ring". 
% Edges % Find flat orthogonal edges of a binary shape % Corners % Find 90 degree corners of a binary shape % Diagonals:type % A special kernel to thin the 'outside' of diagonals % LineEnds:type % Find end points of lines (for pruning a skeletion) % Two types of lines ends (default to both) can be searched for % Type 0: All line ends % Type 1: single kernel for 4-conneected line ends % Type 2: single kernel for simple line ends % LineJunctions % Find three line junctions (within a skeletion) % Type 0: all line junctions % Type 1: Y Junction kernel % Type 2: Diagonal T Junction kernel % Type 3: Orthogonal T Junction kernel % Type 4: Diagonal X Junction kernel % Type 5: Orthogonal + Junction kernel % Ridges:type % Find single pixel ridges or thin lines % Type 1: Fine single pixel thick lines and ridges % Type 2: Find two pixel thick lines and ridges % ConvexHull % Octagonal Thickening Kernel, to generate convex hulls of 45 degrees % Skeleton:type % Traditional skeleton generating kernels. % Type 1: Tradional Skeleton kernel (4 connected skeleton) % Type 2: HIPR2 Skeleton kernel (8 connected skeleton) % Type 3: Thinning skeleton based on a ressearch paper by % Dan S. Bloomberg (Default Type) % ThinSE:type % A huge variety of Thinning Kernels designed to preserve conectivity. % many other kernel sets use these kernels as source definitions. % Type numbers are 41-49, 81-89, 481, and 482 which are based on % the super and sub notations used in the source research paper. % % Distance Measuring Kernels % % Different types of distance measuring methods, which are used with the % a 'Distance' morphology method for generating a gradient based on % distance from an edge of a binary shape, though there is a technique % for handling a anti-aliased shape. % % See the 'Distance' Morphological Method, for information of how it is % applied. % % Chebyshev:[{radius}][x{scale}[%!]] % Chebyshev Distance (also known as Tchebychev or Chessboard distance) % is a value of one to any neighbour, orthogonal or diagonal. One why % of thinking of it is the number of squares a 'King' or 'Queen' in % chess needs to traverse reach any other position on a chess board. % It results in a 'square' like distance function, but one where % diagonals are given a value that is closer than expected. % % Manhattan:[{radius}][x{scale}[%!]] % Manhattan Distance (also known as Rectilinear, City Block, or the Taxi % Cab distance metric), it is the distance needed when you can only % travel in horizontal or vertical directions only. It is the % distance a 'Rook' in chess would have to travel, and results in a % diamond like distances, where diagonals are further than expected. % % Octagonal:[{radius}][x{scale}[%!]] % An interleving of Manhatten and Chebyshev metrics producing an % increasing octagonally shaped distance. Distances matches those of % the "Octagon" shaped kernel of the same radius. The minimum radius % and default is 2, producing a 5x5 kernel. % % Euclidean:[{radius}][x{scale}[%!]] % Euclidean distance is the 'direct' or 'as the crow flys' distance. % However by default the kernel size only has a radius of 1, which % limits the distance to 'Knight' like moves, with only orthogonal and % diagonal measurements being correct. As such for the default kernel % you will get octagonal like distance function. % % However using a larger radius such as "Euclidean:4" you will get a % much smoother distance gradient from the edge of the shape. Especially % if the image is pre-processed to include any anti-aliasing pixels. 
% Of course a larger kernel is slower to use, and not always needed. % % The first three Distance Measuring Kernels will only generate distances % of exact multiples of {scale} in binary images. As such you can use a % scale of 1 without loosing any information. However you also need some % scaling when handling non-binary anti-aliased shapes. % % The "Euclidean" Distance Kernel however does generate a non-integer % fractional results, and as such scaling is vital even for binary shapes. % */ MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type, const GeometryInfo *args,ExceptionInfo *exception) { KernelInfo *kernel; register ssize_t i; register ssize_t u, v; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ /* Generate a new empty kernel if needed */ kernel=(KernelInfo *) NULL; switch(type) { case UndefinedKernel: /* These should not call this function */ case UserDefinedKernel: assert("Should not call this function" != (char *) NULL); break; case LaplacianKernel: /* Named Descrete Convolution Kernels */ case SobelKernel: /* these are defined using other kernels */ case RobertsKernel: case PrewittKernel: case CompassKernel: case KirschKernel: case FreiChenKernel: case EdgesKernel: /* Hit and Miss kernels */ case CornersKernel: case DiagonalsKernel: case LineEndsKernel: case LineJunctionsKernel: case RidgesKernel: case ConvexHullKernel: case SkeletonKernel: case ThinSEKernel: break; /* A pre-generated kernel is not needed */ #if 0 /* set to 1 to do a compile-time check that we haven't missed anything */ case UnityKernel: case GaussianKernel: case DoGKernel: case LoGKernel: case BlurKernel: case CometKernel: case BinomialKernel: case DiamondKernel: case SquareKernel: case RectangleKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: case RingKernel: case PeaksKernel: case ChebyshevKernel: case ManhattanKernel: case OctangonalKernel: case EuclideanKernel: #else default: #endif /* Generate the base Kernel Structure */ kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel)); if (kernel == (KernelInfo *) NULL) return(kernel); (void) ResetMagickMemory(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = kernel->positive_range = 0.0; kernel->type = type; kernel->next = (KernelInfo *) NULL; kernel->signature=MagickCoreSignature; break; } switch(type) { /* Convolution Kernels */ case UnityKernel: { kernel->height = kernel->width = (size_t) 1; kernel->x = kernel->y = (ssize_t) 0; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(1,sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); kernel->maximum = kernel->values[0] = args->rho; break; } break; case GaussianKernel: case DoGKernel: case LoGKernel: { double sigma = fabs(args->sigma), sigma2 = fabs(args->xi), A, B, R; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else if ( (type != DoGKernel) || (sigma >= sigma2) ) kernel->width = GetOptimalKernelWidth2D(args->rho,sigma); else kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2); kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* WARNING: The following generates a 'sampled gaussian' kernel. 
* What we really want is a 'discrete gaussian' kernel. * * How to do this is I don't know, but appears to be basied on the * Error Function 'erf()' (intergral of a gaussian) */ if ( type == GaussianKernel || type == DoGKernel ) { /* Calculate a Gaussian, OR positive half of a DoG */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ { (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } if ( type == DoGKernel ) { /* Subtract a Negative Gaussian for "Difference of Gaussian" */ if ( sigma2 > MagickEpsilon ) { sigma = sigma2; /* simplify loop expressions */ A = 1.0/(2.0*sigma*sigma); B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0; } if ( type == LoGKernel ) { /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { R = ((double)(u*u+v*v))*A; kernel->values[i] = (1-R)*exp(-R)*B; } } else /* special case - generate a unity kernel */ { (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } /* Note the above kernels may have been 'clipped' by a user defined ** radius, producing a smaller (darker) kernel. Also for very small ** sigma's (> 0.1) the central value becomes larger than one, and thus ** producing a very bright kernel. ** ** Normalization will still be needed. */ /* Normalize the 2D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. */ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); break; } case BlurKernel: { double sigma = fabs(args->sigma), alpha, beta; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else kernel->width = GetOptimalKernelWidth1D(args->rho,sigma); kernel->height = 1; kernel->x = (ssize_t) (kernel->width-1)/2; kernel->y = 0; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); #if 1 #define KernelRank 3 /* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix). ** It generates a gaussian 3 times the width, and compresses it into ** the expected range. This produces a closer normalization of the ** resulting kernel, especially for very low sigma values. ** As such while wierd it is prefered. ** ** I am told this method originally came from Photoshop. 
** ** A properly normalized curve is generated (apart from edge clipping) ** even though we later normalize the result (for edge clipping) ** to allow the correct generation of a "Difference of Blurs". */ /* initialize */ v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */ (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); /* Calculate a Positive 1D Gaussian */ if ( sigma > MagickEpsilon ) { sigma *= KernelRank; /* simplify loop expressions */ alpha = 1.0/(2.0*sigma*sigma); beta= (double) (1.0/(MagickSQ2PI*sigma )); for ( u=-v; u <= v; u++) { kernel->values[(u+v)/KernelRank] += exp(-((double)(u*u))*alpha)*beta; } } else /* special case - generate a unity kernel */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; #else /* Direct calculation without curve averaging This is equivelent to a KernelRank of 1 */ /* Calculate a Positive Gaussian */ if ( sigma > MagickEpsilon ) { alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ beta = 1.0/(MagickSQ2PI*sigma); for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u))*alpha)*beta; } else /* special case - generate a unity kernel */ { (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } #endif /* Note the above kernel may have been 'clipped' by a user defined ** radius, producing a smaller (darker) kernel. Also for very small ** sigma's (> 0.1) the central value becomes larger than one, as a ** result of not generating a actual 'discrete' kernel, and thus ** producing a very bright 'impulse'. ** ** Becuase of these two factors Normalization is required! */ /* Normalize the 1D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. */ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); /* rotate the 1D kernel by given angle */ RotateKernelInfo(kernel, args->xi ); break; } case CometKernel: { double sigma = fabs(args->sigma), A; if ( args->rho < 1.0 ) kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1; else kernel->width = (size_t)args->rho; kernel->x = kernel->y = 0; kernel->height = 1; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* A comet blur is half a 1D gaussian curve, so that the object is ** blurred in one direction only. This may not be quite the right ** curve to use so may change in the future. The function must be ** normalised after generation, which also resolves any clipping. 
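**
** (Editorial aside, inferred from the loop below rather than stated in the
** original comment: the sampled curve is the positive half of an unscaled
** gaussian,
**     values[u] ~ exp( -(u*u) / (2*sigma*sigma) ),   u = 0 .. width-1,
** accumulated at KernelRank-times supersampling and then reduced to a
** unit-sum kernel by the NormalizeValue scaling.)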
** ** As we are normalizing and not subtracting gaussians, ** there is no need for a divisor in the gaussian formula ** ** It is less comples */ if ( sigma > MagickEpsilon ) { #if 1 #define KernelRank 3 v = (ssize_t) kernel->width*KernelRank; /* start/end points */ (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*sizeof(*kernel->values)); sigma *= KernelRank; /* simplify the loop expression */ A = 1.0/(2.0*sigma*sigma); /* B = 1.0/(MagickSQ2PI*sigma); */ for ( u=0; u < v; u++) { kernel->values[u/KernelRank] += exp(-((double)(u*u))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ } for (i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i]; #else A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */ /* B = 1.0/(MagickSQ2PI*sigma); */ for ( i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i] = exp(-((double)(i*i))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ #endif } else /* special case - generate a unity kernel */ { (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; } kernel->minimum = 0.0; kernel->maximum = kernel->values[0]; kernel->negative_range = 0.0; ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */ RotateKernelInfo(kernel, args->xi); /* Rotate by angle */ break; } case BinomialKernel: { size_t order_f; if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; order_f = fact(kernel->width-1); kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=0; v < (ssize_t)kernel->height; v++) { size_t alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) ); for ( u=0; u < (ssize_t)kernel->width; u++, i++) kernel->positive_range += kernel->values[i] = (double) (alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) )); } kernel->minimum = 1.0; kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width]; kernel->negative_range = 0.0; break; } /* Convolution Kernels - Well Known Named Constant Kernels */ case LaplacianKernel: { switch ( (int) args->rho ) { case 0: default: /* laplacian square filter -- default */ kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1"); break; case 1: /* laplacian diamond filter */ kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0"); break; case 2: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); break; case 3: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1"); break; case 5: /* a 5x5 laplacian */ kernel=ParseKernelArray( "5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4"); break; case 7: /* a 7x7 laplacian */ kernel=ParseKernelArray( "7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" ); break; case 15: /* a 5x5 LoG (sigma approx 1.4) */ kernel=ParseKernelArray( "5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0"); break; case 19: /* a 9x9 LoG (sigma approx 1.4) */ /* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */ 
kernel=ParseKernelArray( "9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; break; } case SobelKernel: { /* Simple Sobel Kernel */ kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case RobertsKernel: { kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case PrewittKernel: { kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case CompassKernel: { kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case KirschKernel: { kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case FreiChenKernel: /* Direction is set to be left to right positive */ /* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */ /* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */ { switch ( (int) args->rho ) { default: case 0: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +(MagickRealType) MagickSQ2; kernel->values[5] = -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ break; case 2: kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2; kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 10: { kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception); if (kernel == (KernelInfo *) NULL) return(kernel); break; } case 1: case 11: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +(MagickRealType) MagickSQ2; kernel->values[5] = -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 12: kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = +(MagickRealType) MagickSQ2; kernel->values[7] = +(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 13: kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[0] = +(MagickRealType) MagickSQ2; kernel->values[8] = -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 14: 
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[2] = -(MagickRealType) MagickSQ2; kernel->values[6] = +(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 15: kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 16: kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 17: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 18: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 19: kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/3.0, NoValue); break; } if ( fabs(args->sigma) >= MagickEpsilon ) /* Rotate by correctly supplied 'angle' */ RotateKernelInfo(kernel, args->sigma); else if ( args->rho > 30.0 || args->rho < -30.0 ) /* Rotate by out of bounds 'type' */ RotateKernelInfo(kernel, args->rho); break; } /* Boolean or Shaped Kernels */ case DiamondKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case SquareKernel: case RectangleKernel: { double scale; if ( type == SquareKernel ) { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = (size_t) (2*args->rho+1); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; scale = args->sigma; } else { /* NOTE: user defaults set in "AcquireKernelInfo()" */ if ( args->rho < 1.0 || args->sigma < 1.0 ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->width = (size_t)args->rho; kernel->height = (size_t)args->sigma; if ( args->xi < 0.0 || args->xi > (double)kernel->width || args->psi < 0.0 || args->psi > (double)kernel->height ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->x = (ssize_t) args->xi; kernel->y = (ssize_t) args->psi; scale = 1.0; } kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values to scale given */ u=(ssize_t) (kernel->width*kernel->height); for ( i=0; i < u; i++) kernel->values[i] = scale; kernel->minimum = kernel->maximum = 
scale; /* a flat shape */ kernel->positive_range = scale*u; break; } case OctagonKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius = 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= ((long)kernel->x + (long)(kernel->x/2)) ) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case DiskKernel: { ssize_t limit = (ssize_t)(args->rho*args->rho); if (args->rho < 0.4) /* default radius approx 4.3 */ kernel->width = kernel->height = 9L, limit = 18L; else kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ((u*u+v*v) <= limit) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case PlusKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along the axes to the given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } case CrossKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along the diagonals to the given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == v || u == -v) ?
args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } /* HitAndMiss Kernels */ case RingKernel: case PeaksKernel: { ssize_t limit1, limit2, scale; if (args->rho < args->sigma) { kernel->width = ((size_t)args->sigma)*2+1; limit1 = (ssize_t)(args->rho*args->rho); limit2 = (ssize_t)(args->sigma*args->sigma); } else { kernel->width = ((size_t)args->rho)*2+1; limit1 = (ssize_t)(args->sigma*args->sigma); limit2 = (ssize_t)(args->rho*args->rho); } if ( limit2 <= 0 ) kernel->width = 7L, limit1 = 7L, limit2 = 11L; kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */ scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi); for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { ssize_t radius=u*u+v*v; if (limit1 < radius && radius <= limit2) kernel->positive_range += kernel->values[i] = (double) scale; else kernel->values[i] = nan; } kernel->minimum = kernel->maximum = (double) scale; if ( type == PeaksKernel ) { /* set the central point in the middle */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; kernel->maximum = 1.0; } break; } case EdgesKernel: { kernel=AcquireKernelInfo("ThinSE:482",exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */ break; } case CornersKernel: { kernel=AcquireKernelInfo("ThinSE:87",exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */ break; } case DiagonalsKernel: { switch ( (int) args->rho ) { case 0: default: { KernelInfo *new_kernel; kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; ExpandMirrorKernelInfo(kernel); return(kernel); } case 1: kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); break; case 2: kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineEndsKernel: { /* Kernels for finding the end of thin lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all end of lines */ return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception)); case 1: /* kernel for 4-connected line ends - no rotation */ kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-"); break; case 2: /* kernel to add for 8-connected lines - no rotation */ kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1"); break; case 3: /* kernel to add for orthogonal line ends - does not find corners */ kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0"); break; case 4: /* traditional line end - fails on last T end */ kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case 
LineJunctionsKernel: { /* kernels for finding the junctions of multiple lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all line junctions */ return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception)); case 1: /* Y Junction */ kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-"); break; case 2: /* Diagonal T Junctions */ kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1"); break; case 3: /* Orthogonal T Junctions */ kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-"); break; case 4: /* Diagonal X Junctions */ kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1"); break; case 5: /* Orthogonal X Junctions - minimal diamond kernel */ kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case RidgesKernel: { /* Ridges - Ridge finding kernels */ KernelInfo *new_kernel; switch ( (int) args->rho ) { case 1: default: kernel=ParseKernelArray("3x1:0,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */ break; case 2: kernel=ParseKernelArray("4x1:0,1,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */ /* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */ /* Unfortunately we cannot yet rotate a non-square kernel */ /* But then we can't flip a non-symmetrical kernel either */ new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; break; } break; } case ConvexHullKernel: { KernelInfo *new_kernel; /* first set of 8 kernels */ kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* append the mirror versions too - no flip function yet */ new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; ExpandRotateKernelInfo(new_kernel, 90.0); LastKernelInfo(kernel)->next = new_kernel; break; } case SkeletonKernel: { switch ( (int) args->rho ) { case 1: default: /* Traditional Skeleton... ** A cyclically rotated single kernel */ kernel=AcquireKernelInfo("ThinSE:482",exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */ break; case 2: /* HIPR Variation of the cyclic skeleton ** Corners of the traditional method made more forgiving, ** but they retain the same cyclic order. */ kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception); if (kernel == (KernelInfo *) NULL) return(kernel); if (kernel->next == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); kernel->type = type; kernel->next->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */ break; case 3: /* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's ** "Connectivity-Preserving Morphological Image Transformations" ** by Dan S. Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf */ kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43", exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->next->type = type; kernel->next->next->type = type; ExpandMirrorKernelInfo(kernel); /* 12 kernels total */ break; } break; } case ThinSEKernel: { /* Special kernels for general thinning, while preserving connections ** "Connectivity-Preserving Morphological Image Transformations" ** by Dan S. Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf ** And ** http://tpgit.github.com/Leptonica/ccthin_8c_source.html ** ** Note kernels do not specify the origin pixel, allowing them ** to be used for both thickening and thinning operations.
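**
** For example (an illustrative sketch only; the exact CLI syntax may vary
** by version), the general edge kernel below can be iterated until no
** more pixels change:
**
**    magick shape.gif -morphology Thinning:-1 ThinSE:482 thinned.gif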
*/ switch ( (int) args->rho ) { /* SE for 4-connected thinning */ case 41: /* SE_4_1 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1"); break; case 42: /* SE_4_2 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-"); break; case 43: /* SE_4_3 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1"); break; case 44: /* SE_4_4 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-"); break; case 45: /* SE_4_5 */ kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-"); break; case 46: /* SE_4_6 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1"); break; case 47: /* SE_4_7 */ kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-"); break; case 48: /* SE_4_8 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1"); break; case 49: /* SE_4_9 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1"); break; /* SE for 8-connected thinning - negatives of the above */ case 81: /* SE_8_0 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-"); break; case 82: /* SE_8_2 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-"); break; case 83: /* SE_8_3 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-"); break; case 84: /* SE_8_4 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-"); break; case 85: /* SE_8_5 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-"); break; case 86: /* SE_8_6 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1"); break; case 87: /* SE_8_7 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-"); break; case 88: /* SE_8_8 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-"); break; case 89: /* SE_8_9 */ kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-"); break; /* Special combined SE kernels */ case 423: /* SE_4_2 , SE_4_3 Combined Kernel */ kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-"); break; case 823: /* SE_8_2 , SE_8_3 Combined Kernel */ kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-"); break; case 481: /* SE_48_1 - General Connected Corner Kernel */ kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-"); break; default: case 482: /* SE_48_2 - General Edge Kernel */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } /* Distance Measuring Kernels */ case ChebyshevKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*MagickMax(fabs((double)u),fabs((double)v)) ); kernel->maximum = kernel->values[0]; break; } case ManhattanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*(labs((long) u)+labs((long) v)) ); kernel->maximum = kernel->values[0]; break; } case 
OctagonalKernel: { if (args->rho < 2.0) kernel->width = kernel->height = 5; /* default/minimum radius = 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { double r1 = MagickMax(fabs((double)u),fabs((double)v)), r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5); kernel->positive_range += kernel->values[i] = args->sigma*MagickMax(r1,r2); } kernel->maximum = kernel->values[0]; break; } case EuclideanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*sqrt((double)(u*u+v*v)) ); kernel->maximum = kernel->values[0]; break; } default: { /* No-Op Kernel - Basically just a single pixel on its own */ kernel=ParseKernelArray("1:1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = UndefinedKernel; break; } break; } return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneKernelInfo() creates a new clone of the given Kernel List so that it % can be modified without affecting the original. The cloned kernel should % be destroyed using DestroyKernelInfo() when no longer needed.
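%
% For example (an illustrative sketch only; 'exception' is assumed to be a
% valid ExceptionInfo pointer and error checking is elided):
%
%    KernelInfo
%      *clone,
%      *kernel;
%
%    kernel=AcquireKernelInfo("Octagon:2",exception);
%    clone=CloneKernelInfo(kernel);       /* deep copy, including any list */
%    ScaleKernelInfo(clone,0.5,NoValue);  /* modify only the clone */
%    clone=DestroyKernelInfo(clone);
%    kernel=DestroyKernelInfo(kernel);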
% % The format of the CloneKernelInfo method is: % % KernelInfo *CloneKernelInfo(const KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel to be cloned % */ MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel) { register ssize_t i; KernelInfo *new_kernel; assert(kernel != (KernelInfo *) NULL); new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel)); if (new_kernel == (KernelInfo *) NULL) return(new_kernel); *new_kernel=(*kernel); /* copy values in structure */ /* replace the values with a copy of the values */ new_kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height*sizeof(*kernel->values))); if (new_kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(new_kernel)); for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++) new_kernel->values[i]=kernel->values[i]; /* Also clone the next kernel in the kernel list */ if ( kernel->next != (KernelInfo *) NULL ) { new_kernel->next = CloneKernelInfo(kernel->next); if ( new_kernel->next == (KernelInfo *) NULL ) return(DestroyKernelInfo(new_kernel)); } return(new_kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyKernelInfo() frees the memory used by a Convolution/Morphology % kernel. % % The format of the DestroyKernelInfo method is: % % KernelInfo *DestroyKernelInfo(KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel to be destroyed % */ MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel) { assert(kernel != (KernelInfo *) NULL); if (kernel->next != (KernelInfo *) NULL) kernel->next=DestroyKernelInfo(kernel->next); kernel->values=(MagickRealType *) RelinquishAlignedMemory(kernel->values); kernel=(KernelInfo *) RelinquishMagickMemory(kernel); return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + E x p a n d M i r r o r K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExpandMirrorKernelInfo() takes a single kernel, and expands it into a % sequence of 90-degree rotated kernels, but providing a reflected 180-degree % rotation before the -/+ 90-degree rotations. % % This special rotation order produces a better, more symmetrical thinning of % objects. % % The format of the ExpandMirrorKernelInfo method is: % % void ExpandMirrorKernelInfo(KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % This function is only internal to this module, as it is not finalized, % especially with regard to non-orthogonal angles, and rotation of larger % 2D kernels. */ #if 0 static void FlopKernelInfo(KernelInfo *kernel) { /* Do a Flop by reversing each row.
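   Each row is reversed in place, and the kernel origin is mirrored to
   column (width-1-x), so the origin stays over the same logical value.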
*/ size_t y; register ssize_t x,r; register double *k,t; for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width) for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--) t=k[x], k[x]=k[r], k[r]=t; kernel->x = kernel->width - kernel->x - 1; angle = fmod(angle+180.0, 360.0); } #endif static void ExpandMirrorKernelInfo(KernelInfo *kernel) { KernelInfo *clone, *last; last = kernel; clone = CloneKernelInfo(last); RotateKernelInfo(clone, 180); /* flip */ LastKernelInfo(last)->next = clone; last = clone; clone = CloneKernelInfo(last); RotateKernelInfo(clone, 90); /* transpose */ LastKernelInfo(last)->next = clone; last = clone; clone = CloneKernelInfo(last); RotateKernelInfo(clone, 180); /* flop */ LastKernelInfo(last)->next = clone; return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + E x p a n d R o t a t e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating % incrementally by the angle given, until the kernel repeats. % % WARNING: 45-degree rotations only work for 3x3 kernels, % while 90-degree rotations only work for linear and square kernels. % % The format of the ExpandRotateKernelInfo method is: % % void ExpandRotateKernelInfo(KernelInfo *kernel, double angle) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o angle: angle to rotate in degrees % % This function is only internal to this module, as it is not finalized, % especially with regard to non-orthogonal angles, and rotation of larger % 2D kernels. */ /* Internal Routine - Return true if two kernels are the same */ static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1, const KernelInfo *kernel2) { register size_t i; /* check size and origin location */ if ( kernel1->width != kernel2->width || kernel1->height != kernel2->height || kernel1->x != kernel2->x || kernel1->y != kernel2->y ) return MagickFalse; /* check actual kernel values */ for (i=0; i < (kernel1->width*kernel1->height); i++) { /* Test for Nan equivalence */ if ( IsNaN(kernel1->values[i]) && !IsNaN(kernel2->values[i]) ) return MagickFalse; if ( IsNaN(kernel2->values[i]) && !IsNaN(kernel1->values[i]) ) return MagickFalse; /* Test actual values are equivalent */ if ( fabs(kernel1->values[i] - kernel2->values[i]) >= MagickEpsilon ) return MagickFalse; } return MagickTrue; } static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle) { KernelInfo *clone, *last; last = kernel; DisableMSCWarning(4127) while(1) { RestoreMSCWarning clone = CloneKernelInfo(last); RotateKernelInfo(clone, angle); if ( SameKernelInfo(kernel, clone) != MagickFalse ) break; LastKernelInfo(last)->next = clone; last = clone; } clone = DestroyKernelInfo(clone); /* kernel has repeated - junk the clone */ return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a l c K e r n e l M e t a D a t a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel % only, using the kernel values. This should only be used if it is not % possible to calculate that meta-data in some easier way. % % It is important that the meta-data is correct before ScaleKernelInfo() is % used to perform kernel normalization.
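%
% For example, given the kernel "3: 1,0,-1 2,0,-2 1,0,-1", the recalculated
% meta-data is positive_range = +4, negative_range = -4, minimum = -2 and
% maximum = +2 (minimum and maximum always include zero, as noted below).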
% % The format of the CalcKernelMetaData method is: % % void CalcKernelMetaData(KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel to modify % % WARNING: Minimum and Maximum values are assumed to include zero, even if % zero is not part of the kernel (as in Gaussian Derived kernels). This % however is not true for flat-shaped morphological kernels. % % WARNING: Only the specific kernel pointed to is modified, not a list of % multiple kernels. % % This is an internal function and not expected to be useful outside this % module. This could change however. */ static void CalcKernelMetaData(KernelInfo *kernel) { register size_t i; kernel->minimum = kernel->maximum = 0.0; kernel->negative_range = kernel->positive_range = 0.0; for (i=0; i < (kernel->width*kernel->height); i++) { if ( fabs(kernel->values[i]) < MagickEpsilon ) kernel->values[i] = 0.0; ( kernel->values[i] < 0) ? ( kernel->negative_range += kernel->values[i] ) : ( kernel->positive_range += kernel->values[i] ); Minimize(kernel->minimum, kernel->values[i]); Maximize(kernel->maximum, kernel->values[i]); } return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o r p h o l o g y A p p l y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MorphologyApply() applies a morphological method, multiple times using % a list of multiple kernels. This is the method that should be called by % other 'operators' that internally use morphology operations as part of % their processing. % % It is basically equivalent to MorphologyImage() (see below) but without % any user controls. This allows internal programs to use this method to % perform a specific task without possible interference by any API user % supplied settings. % % It is MorphologyImage()'s task to extract any such user controls, and % pass them to this function for processing. % % More specifically all given kernels should already be scaled, normalised, % and blended appropriately before being passed to this routine. The % appropriate bias, and compose (typically 'UndefinedCompositeOp') given. % % The format of the MorphologyApply method is: % % Image *MorphologyApply(const Image *image,const MorphologyMethod method, % const ssize_t iterations,const KernelInfo *kernel, % const CompositeOperator compose,const double bias, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the source image % % o method: the morphology method to be applied. % % o iterations: apply the operation this many times (or until no change). % A value of -1 means loop until no change found. % How this is applied may depend on the morphology method. % Typically this is a value of 1. % % o kernel: An array of double representing the morphology kernel. % % o compose: How to handle or merge multi-kernel results. % If 'UndefinedCompositeOp' use default for the Morphology method. % If 'NoCompositeOp' force image to be re-iterated by each kernel. % Otherwise merge the results using the compose method given. % % o bias: Convolution Output Bias. % % o exception: return any errors or warnings in this structure.
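%
% A typical internal call might look as follows (an illustrative sketch
% only; the kernel is assumed to be already scaled and normalized):
%
%    morphology_image=MorphologyApply(image,DilateMorphology,1,kernel,
%      UndefinedCompositeOp,0.0,exception);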
% */ static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image, const MorphologyMethod method,const KernelInfo *kernel,const double bias, ExceptionInfo *exception) { #define MorphologyTag "Morphology/Image" CacheView *image_view, *morphology_view; OffsetInfo offset; register ssize_t j, y; size_t *changes, changed, width; MagickBooleanType status; MagickOffsetType progress; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(morphology_image != (Image *) NULL); assert(morphology_image->signature == MagickCoreSignature); assert(kernel != (KernelInfo *) NULL); assert(kernel->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); morphology_view=AcquireAuthenticCacheView(morphology_image,exception); width=image->columns+kernel->width-1; offset.x=0; offset.y=0; switch (method) { case ConvolveMorphology: case DilateMorphology: case DilateIntensityMorphology: case IterativeDistanceMorphology: { /* Kernel needs to be used with reflection about the origin. */ offset.x=(ssize_t) kernel->width-kernel->x-1; offset.y=(ssize_t) kernel->height-kernel->y-1; break; } case ErodeMorphology: case ErodeIntensityMorphology: case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: { offset.x=kernel->x; offset.y=kernel->y; break; } default: { assert("Not a Primitive Morphology Method" != (char *) NULL); break; } } changed=0; changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(), sizeof(*changes)); if (changes == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++) changes[j]=0; if ((method == ConvolveMorphology) && (kernel->width == 1)) { register ssize_t x; /* Special handling (for speed) of vertical (blur) kernels. This performs its handling in columns rather than in rows.
This is only done for convolve as it is the only method that generates very large 1-D vertical kernels (such as a 'BlurKernel') */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,morphology_image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t r; ssize_t center; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+ kernel->height-1,exception); q=GetCacheViewAuthenticPixels(morphology_view,x,0,1, morphology_image->rows,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*offset.y; for (r=0; r < (ssize_t) image->rows; r++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait morphology_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t v; size_t count; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); morphology_traits=GetPixelChannelTraits(morphology_image,channel); if ((traits == UndefinedPixelTrait) || (morphology_traits == UndefinedPixelTrait)) continue; if (((traits & CopyPixelTrait) != 0) || (GetPixelReadMask(image,p+center) == 0)) { SetPixelChannel(morphology_image,channel,p[center+i],q); continue; } k=(&kernel->values[kernel->height-1]); pixels=p; pixel=bias; gamma=0.0; count=0; if ((morphology_traits & BlendPixelTrait) == 0) for (v=0; v < (ssize_t) kernel->height; v++) { if (!IsNaN(*k)) { pixel+=(*k)*pixels[i]; gamma+=(*k); count++; } k--; pixels+=GetPixelChannels(image); } else for (v=0; v < (ssize_t) kernel->height; v++) { if (!IsNaN(*k)) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=alpha*(*k)*pixels[i]; gamma+=alpha*(*k); count++; } k--; pixels+=GetPixelChannels(image); } if (fabs(pixel-p[center+i]) > MagickEpsilon) changes[id]++; gamma=PerceptibleReciprocal(gamma); if (count != 0) gamma*=(double) kernel->height/count; SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma* pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(morphology_image); } if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MorphologyPrimitive) #endif proceed=SetImageProgress(image,MorphologyTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } morphology_image->type=image->type; morphology_view=DestroyCacheView(morphology_view); image_view=DestroyCacheView(image_view); for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++) changed+=changes[j]; changes=(size_t *) RelinquishMagickMemory(changes); return(status ? (ssize_t) changed : 0); } /* Normal handling of horizontal or rectangular kernels (row by row). 
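   Each row is processed against a virtual pixel window of
   (image->columns+kernel->width-1) x kernel->height pixels, so every
   output pixel sees its full kernel neighbourhood, including virtual
   pixels beyond the image edges.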
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,morphology_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; ssize_t center; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width, kernel->height,exception); q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) (GetPixelChannels(image)*width*offset.y+ GetPixelChannels(image)*offset.x); for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, intensity, maximum, minimum, pixel; PixelChannel channel; PixelTrait morphology_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; size_t count; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); morphology_traits=GetPixelChannelTraits(morphology_image,channel); if ((traits == UndefinedPixelTrait) || (morphology_traits == UndefinedPixelTrait)) continue; if (((traits & CopyPixelTrait) != 0) || (GetPixelReadMask(image,p+center) == 0)) { SetPixelChannel(morphology_image,channel,p[center+i],q); continue; } pixels=p; maximum=0.0; minimum=(double) QuantumRange; count=kernel->width*kernel->height; switch (method) { case ConvolveMorphology: pixel=bias; break; case HitAndMissMorphology: pixel=(double) QuantumRange; break; case ThinningMorphology: pixel=(double) QuantumRange; break; case ThickenMorphology: pixel=(double) QuantumRange; break; case ErodeMorphology: pixel=(double) QuantumRange; break; case DilateMorphology: pixel=0.0; break; case ErodeIntensityMorphology: case DilateIntensityMorphology: case IterativeDistanceMorphology: { pixel=(double) p[center+i]; break; } default: pixel=0; break; } gamma=1.0; switch (method) { case ConvolveMorphology: { /* Weighted Average of pixels using reflected kernel For correct working of this operation for asymmetrical kernels, the kernel needs to be applied in its reflected form. That is, its values need to be reversed. Correlation is actually the same as this but without reflecting the kernel, and thus 'lower-level' than Convolution. However, as Convolution is the more commonly used method, and it does not really cost us much in terms of processing to use a reflected kernel, it is Convolution that is implemented. Correlation will have its kernel reflected before calling this function to do a Convolve. For more details of Correlation vs Convolution see http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf */ k=(&kernel->values[kernel->width*kernel->height-1]); count=0; if ((morphology_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { pixel+=(*k)*pixels[i]; count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } /* Alpha blending.
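   Each kernel weight is modulated by the neighbour's normalized alpha:
      result = sum( alpha_j * k_j * v_j ) / sum( alpha_j * k_j )
   so fully-transparent neighbours contribute nothing to the average.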
*/ gamma=0.0; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=alpha*(*k)*pixels[i]; gamma+=alpha*(*k); count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case ErodeMorphology: { /* Minimum value within kernel neighbourhood. The kernel is not reflected for this operation. In normal Greyscale Morphology, the kernel value should be added to the real value; this is currently not done due to the nature of the boolean kernels being used. */ k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k >= 0.5)) { if ((double) pixels[i] < pixel) pixel=(double) pixels[i]; } k++; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case DilateMorphology: { /* Maximum value within kernel neighbourhood. For correct working of this operation for asymmetrical kernels, the kernel needs to be applied in its reflected form. That is, its values need to be reversed. In normal Greyscale Morphology, the kernel value should be added to the real value; this is currently not done due to the nature of the boolean kernels being used. */ count=0; k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k > 0.5)) { if ((double) pixels[i] > pixel) pixel=(double) pixels[i]; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: { /* Minimum of foreground pixels minus maximum of background pixels. The kernel is not reflected for this operation, and consists of both foreground and background pixel neighbourhoods, 0.0 for background, and 1.0 for foreground with either NaN or 0.5 values for don't care. The result is clamped at zero so it never produces a meaningless negative result; such results would cause Thinning/Thicken to not work correctly when used against a greyscale image. */ count=0; k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if (*k > 0.7) { if ((double) pixels[i] < pixel) pixel=(double) pixels[i]; } else if (*k < 0.3) { if ((double) pixels[i] > maximum) maximum=(double) pixels[i]; } count++; } k++; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } pixel-=maximum; if (pixel < 0.0) pixel=0.0; if (method == ThinningMorphology) pixel=(double) p[center+i]-pixel; else if (method == ThickenMorphology) pixel+=(double) p[center+i]+pixel; break; } case ErodeIntensityMorphology: { /* Select pixel with minimum intensity within kernel neighbourhood. The kernel is not reflected for this operation. */ count=0; k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k >= 0.5)) { intensity=(double) GetPixelIntensity(image,pixels); if (intensity < minimum) { pixel=(double) pixels[i]; minimum=intensity; } count++; } k++; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case DilateIntensityMorphology: { /* Select pixel with maximum intensity within kernel neighbourhood. The kernel is not reflected for this operation.
*/ count=0; k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k >= 0.5)) { intensity=(double) GetPixelIntensity(image,pixels); if (intensity > maximum) { pixel=(double) pixels[i]; maximum=intensity; } count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case IterativeDistanceMorphology: { /* Compute the iterative distance from the black edge of a white image shape. Essentially white values are decreased to the smallest 'distance from edge' it can find. It works by adding kernel values to the neighbourhood, and selecting the minimum value found. The kernel is rotated before use, so kernel distances match resulting distances, when a user provided asymmetric kernel is applied. This code is nearly identical to true grayscale morphology, but not quite: GrayDilate: kernel values added, maximum value found, kernel rotated before use; GrayErode: kernel values subtracted, minimum value found, no kernel rotation. Note that the Iterative Distance method is essentially a GrayErode, but with negative kernel values, and kernel rotation applied. */ count=0; k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case UndefinedMorphology: default: break; } if (fabs(pixel-p[center+i]) > MagickEpsilon) changes[id]++; gamma=PerceptibleReciprocal(gamma); if (count != 0) gamma*=(double) kernel->height*kernel->width/count; SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(morphology_image); } if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MorphologyPrimitive) #endif proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } morphology_view=DestroyCacheView(morphology_view); image_view=DestroyCacheView(image_view); for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++) changed+=changes[j]; changes=(size_t *) RelinquishMagickMemory(changes); return(status ? (ssize_t) changed : -1); } /* This is almost identical to the MorphologyPrimitive() function above, but applies the primitive directly to the actual image using two passes, once in each direction, with the results of the previous (and current) row being re-used. That is, after each row is 'Sync'ed back into the image, the next row makes use of those values as part of the calculation of the next row. It repeats, but going in the opposite (bottom-up) direction. Because of this 're-use of results' this function cannot make use of multi-threaded, parallel processing.
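  For example, with a unit-cost 3x3 kernel, a row whose edge pixels hold
  distance 0 and whose interior starts at 'infinity' (QuantumRange)
      0  inf  inf  inf  0
  becomes, after the top-down (left-to-right) pass
      0   1    2    3   0
  and after the bottom-up (right-to-left) pass
      0   1    2    1   0
  each pass taking the minimum of a pixel and its already-processed
  neighbours plus the corresponding kernel distance.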
*/ static ssize_t MorphologyPrimitiveDirect(Image *image, const MorphologyMethod method,const KernelInfo *kernel, ExceptionInfo *exception) { CacheView *morphology_view, *image_view; MagickBooleanType status; MagickOffsetType progress; OffsetInfo offset; size_t width, changed; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(kernel != (KernelInfo *) NULL); assert(kernel->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=MagickTrue; changed=0; progress=0; switch(method) { case DistanceMorphology: case VoronoiMorphology: { /* Kernel reflected about origin. */ offset.x=(ssize_t) kernel->width-kernel->x-1; offset.y=(ssize_t) kernel->height-kernel->y-1; break; } default: { offset.x=kernel->x; offset.y=kernel->y; break; } } /* Two views into same image, do not thread. */ image_view=AcquireVirtualCacheView(image,exception); morphology_view=AcquireAuthenticCacheView(image,exception); width=image->columns+kernel->width-1; for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; ssize_t center; /* Read virtual pixels, and authentic pixels, from the same image! We read using virtual to get virtual pixel handling, but write back into the same image. Only top half of kernel is processed as we do a single pass downward through the image iterating the distance function as we go. */ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t) offset.y+1,exception); q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) (GetPixelChannels(image)*width*offset.y+ GetPixelChannels(image)*offset.x); for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelTrait traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; traits=GetPixelChannelTraits(image,(PixelChannel) i); if (traits == UndefinedPixelTrait) continue; if (((traits & CopyPixelTrait) != 0) || (GetPixelReadMask(image,p+center) == 0)) continue; pixels=p; pixel=(double) QuantumRange; switch (method) { case DistanceMorphology: { k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v <= offset.y; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } k=(&kernel->values[kernel->width*(kernel->y+1)-1]); pixels=q-offset.x*GetPixelChannels(image); for (u=0; u < offset.x; u++) { if (!IsNaN(*k) && ((x+u-offset.x) >= 0)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; pixels+=GetPixelChannels(image); } break; } case VoronoiMorphology: { k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < offset.y; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } k=(&kernel->values[kernel->width*(kernel->y+1)-1]); pixels=q-offset.x*GetPixelChannels(image); for (u=0; u < offset.x; u++) { if (!IsNaN(*k) && 
((x+u-offset.x) >= 0)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; pixels+=GetPixelChannels(image); } break; } default: break; } if (fabs(pixel-q[i]) > MagickEpsilon) changed++; q[i]=ClampToQuantum(pixel); } p+=GetPixelChannels(image); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,MorphologyTag,progress++,2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } morphology_view=DestroyCacheView(morphology_view); image_view=DestroyCacheView(image_view); /* Do the reverse pass through the image. */ image_view=AcquireVirtualCacheView(image,exception); morphology_view=AcquireAuthenticCacheView(image,exception); for (y=(ssize_t) image->rows-1; y >= 0; y--) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; ssize_t center; /* Read virtual pixels, and authentic pixels, from the same image. We read using virtual to get virtual pixel handling, but write back into the same image. Only the bottom half of the kernel is processed as we move up the image. */ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t) kernel->y+1,exception); q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } p+=(image->columns-1)*GetPixelChannels(image); q+=(image->columns-1)*GetPixelChannels(image); center=(ssize_t) (offset.x*GetPixelChannels(image)); for (x=(ssize_t) image->columns-1; x >= 0; x--) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelTrait traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; traits=GetPixelChannelTraits(image,(PixelChannel) i); if (traits == UndefinedPixelTrait) continue; if (((traits & CopyPixelTrait) != 0) || (GetPixelReadMask(image,p+center) == 0)) continue; pixels=p; pixel=(double) QuantumRange; switch (method) { case DistanceMorphology: { k=(&kernel->values[kernel->width*(kernel->y+1)-1]); for (v=offset.y; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]); pixels=q; for (u=offset.x+1; u < (ssize_t) kernel->width; u++) { pixels+=GetPixelChannels(image); if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; } break; } case VoronoiMorphology: { k=(&kernel->values[kernel->width*(kernel->y+1)-1]); for (v=offset.y; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } k=(&kernel->values[kernel->width*(kernel->y+1)-1]); pixels=q; for (u=offset.x+1; u < (ssize_t) kernel->width; u++) { pixels+=GetPixelChannels(image); if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; } break; } default: break; } if
(fabs(pixel-q[i]) > MagickEpsilon) changed++; q[i]=ClampToQuantum(pixel); } p-=GetPixelChannels(image); q-=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,MorphologyTag,progress++,2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } morphology_view=DestroyCacheView(morphology_view); image_view=DestroyCacheView(image_view); return(status ? (ssize_t) changed : -1); } /* Apply a Morphology by calling one of the above low level primitive application functions. This function handles any iteration loops, composition or re-iteration of results, and compound morphology methods that are based on multiple low-level (staged) morphology methods. Basically this provides the complex glue between the requested morphology method and raw low-level implementation (above). */ MagickPrivate Image *MorphologyApply(const Image *image, const MorphologyMethod method, const ssize_t iterations, const KernelInfo *kernel, const CompositeOperator compose,const double bias, ExceptionInfo *exception) { CompositeOperator curr_compose; Image *curr_image, /* Image we are working with or iterating */ *work_image, /* secondary image for primitive iteration */ *save_image, /* saved image - for 'edge' method only */ *rslt_image; /* resultant image - after multi-kernel handling */ KernelInfo *reflected_kernel, /* A reflected copy of the kernel (if needed) */ *norm_kernel, /* the current normal un-reflected kernel */ *rflt_kernel, /* the current reflected kernel (if needed) */ *this_kernel; /* the kernel being applied */ MorphologyMethod primitive; /* the current morphology primitive being applied */ CompositeOperator rslt_compose; /* multi-kernel compose method for results to use */ MagickBooleanType special, /* do we use a direct modify function? */ verbose; /* verbose output of results */ size_t method_loop, /* Loop 1: number of compound method iterations (norm 1) */ method_limit, /* maximum number of compound method iterations */ kernel_number, /* Loop 2: the kernel number being applied */ stage_loop, /* Loop 3: primitive loop for compound morphology */ stage_limit, /* how many primitives are in this compound */ kernel_loop, /* Loop 4: iterate the kernel over image */ kernel_limit, /* number of times to iterate kernel */ count, /* total count of primitive steps applied */ kernel_changed, /* total count of changed using iterated kernel */ method_changed; /* total count of changed over method iteration */ ssize_t changed; /* number of pixels changed by the last primitive operation */ char v_info[MagickPathExtent]; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(kernel != (KernelInfo *) NULL); assert(kernel->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); count = 0; /* number of low-level morphology primitives performed */ if ( iterations == 0 ) return((Image *) NULL); /* null operation - nothing to do! */ kernel_limit = (size_t) iterations; if ( iterations < 0 ) /* negative iterations = infinite (well, almost) */ kernel_limit = image->columns>image->rows ?
image->columns : image->rows; verbose = IsStringTrue(GetImageArtifact(image,"debug")); /* initialise for cleanup */ curr_image = (Image *) image; curr_compose = image->compose; (void) curr_compose; work_image = save_image = rslt_image = (Image *) NULL; reflected_kernel = (KernelInfo *) NULL; /* Initialize specific methods * + which loop should use the given iterations * + how many primitives make up the compound morphology * + multi-kernel compose method to use (by default) */ method_limit = 1; /* just do method once, unless otherwise set */ stage_limit = 1; /* assume method is not a compound */ special = MagickFalse; /* assume it is NOT a direct modify primitive */ rslt_compose = compose; /* and we are composing multi-kernels as given */ switch( method ) { case SmoothMorphology: /* 4 primitive compound morphology */ stage_limit = 4; break; case OpenMorphology: /* 2 primitive compound morphology */ case OpenIntensityMorphology: case TopHatMorphology: case CloseMorphology: case CloseIntensityMorphology: case BottomHatMorphology: case EdgeMorphology: stage_limit = 2; break; case HitAndMissMorphology: rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */ /* FALL THRU */ case ThinningMorphology: case ThickenMorphology: method_limit = kernel_limit; /* iterate the whole method */ kernel_limit = 1; /* do not do kernel iteration */ break; case DistanceMorphology: case VoronoiMorphology: special = MagickTrue; /* use special direct primitive */ break; default: break; } /* Apply special methods with special requirements ** For example, single run only, or post-processing requirements */ if ( special != MagickFalse ) { rslt_image=CloneImage(image,0,0,MagickTrue,exception); if (rslt_image == (Image *) NULL) goto error_cleanup; if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse) goto error_cleanup; changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception); if (verbose != MagickFalse) (void) FormatLocaleFile(stderr, "%s:%.20g.%.20g #%.20g => Changed %.20g\n", CommandOptionToMnemonic(MagickMorphologyOptions, method), 1.0,0.0,1.0, (double) changed); if ( changed < 0 ) goto error_cleanup; if ( method == VoronoiMorphology ) { /* Preserve the alpha channel of input image - but turn it off */ (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel, exception); (void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp, MagickTrue,0,0,exception); (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel, exception); } goto exit_cleanup; } /* Handle user (caller) specified multi-kernel composition method */ if ( compose != UndefinedCompositeOp ) rslt_compose = compose; /* override default composition for method */ if ( rslt_compose == UndefinedCompositeOp ) rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */ /* Some methods require a reflected kernel to use with primitives. * Create the reflected kernel for those methods. */ switch ( method ) { case CorrelateMorphology: case CloseMorphology: case CloseIntensityMorphology: case BottomHatMorphology: case SmoothMorphology: reflected_kernel = CloneKernelInfo(kernel); if (reflected_kernel == (KernelInfo *) NULL) goto error_cleanup; RotateKernelInfo(reflected_kernel,180); break; default: break; } /* Loops around more primitive morphology methods ** erode, dilate, open, close, smooth, edge, etc...
*/ /* Loop 1: iterate the compound method */ method_loop = 0; method_changed = 1; while ( method_loop < method_limit && method_changed > 0 ) { method_loop++; method_changed = 0; /* Loop 2: iterate over each kernel in a multi-kernel list */ norm_kernel = (KernelInfo *) kernel; this_kernel = (KernelInfo *) kernel; rflt_kernel = reflected_kernel; kernel_number = 0; while ( norm_kernel != NULL ) { /* Loop 3: Compound Morphology Staging - Select Primitive to apply */ stage_loop = 0; /* the compound morphology stage number */ while ( stage_loop < stage_limit ) { stage_loop++; /* The stage of the compound morphology */ /* Select primitive morphology for this stage of compound method */ this_kernel = norm_kernel; /* default use unreflected kernel */ primitive = method; /* Assume method is a primitive */ switch( method ) { case ErodeMorphology: /* just erode */ case EdgeInMorphology: /* erode and image difference */ primitive = ErodeMorphology; break; case DilateMorphology: /* just dilate */ case EdgeOutMorphology: /* dilate and image difference */ primitive = DilateMorphology; break; case OpenMorphology: /* erode then dilate */ case TopHatMorphology: /* open and image difference */ primitive = ErodeMorphology; if ( stage_loop == 2 ) primitive = DilateMorphology; break; case OpenIntensityMorphology: primitive = ErodeIntensityMorphology; if ( stage_loop == 2 ) primitive = DilateIntensityMorphology; break; case CloseMorphology: /* dilate, then erode */ case BottomHatMorphology: /* close and image difference */ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = DilateMorphology; if ( stage_loop == 2 ) primitive = ErodeMorphology; break; case CloseIntensityMorphology: this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = DilateIntensityMorphology; if ( stage_loop == 2 ) primitive = ErodeIntensityMorphology; break; case SmoothMorphology: /* open, close */ switch ( stage_loop ) { case 1: /* start an open method, which starts with Erode */ primitive = ErodeMorphology; break; case 2: /* now Dilate the Erode */ primitive = DilateMorphology; break; case 3: /* Reflect kernel to start a close */ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = DilateMorphology; break; case 4: /* Finish the Close */ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = ErodeMorphology; break; } break; case EdgeMorphology: /* dilate and erode difference */ primitive = DilateMorphology; if ( stage_loop == 2 ) { save_image = curr_image; /* save the image difference */ curr_image = (Image *) image; primitive = ErodeMorphology; } break; case CorrelateMorphology: /* A Correlation is a Convolution with a reflected kernel. ** However a Convolution is a weighted sum using a reflected ** kernel. It may seem strange to convert a Correlation into a ** Convolution as the Correlation is the simpler method, but ** Convolution is much more commonly used, and it makes sense to ** implement it directly so as to avoid the need to duplicate the ** kernel when it is not required (which is typically the ** default).
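**
** In equation form, for any kernel K,
**
**    Correlate(I,K) == Convolve(I,Rotate180(K))
**
** which is why the reflected kernel prepared earlier (via
** RotateKernelInfo(reflected_kernel,180)) lets the Convolve primitive
** below implement the requested Correlation.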
*/ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = ConvolveMorphology; break; default: break; } assert( this_kernel != (KernelInfo *) NULL ); /* Extra information for debugging compound operations */ if (verbose != MagickFalse) { if ( stage_limit > 1 ) (void) FormatLocaleString(v_info,MagickPathExtent,"%s:%.20g.%.20g -> ", CommandOptionToMnemonic(MagickMorphologyOptions,method),(double) method_loop,(double) stage_loop); else if ( primitive != method ) (void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ", CommandOptionToMnemonic(MagickMorphologyOptions, method),(double) method_loop); else v_info[0] = '\0'; } /* Loop 4: Iterate the kernel with primitive */ kernel_loop = 0; kernel_changed = 0; changed = 1; while ( kernel_loop < kernel_limit && changed > 0 ) { kernel_loop++; /* the iteration of this kernel */ /* Create a clone as the destination image, if not yet defined */ if ( work_image == (Image *) NULL ) { work_image=CloneImage(image,0,0,MagickTrue,exception); if (work_image == (Image *) NULL) goto error_cleanup; if (SetImageStorageClass(work_image,DirectClass,exception) == MagickFalse) goto error_cleanup; } /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */ count++; changed = MorphologyPrimitive(curr_image, work_image, primitive, this_kernel, bias, exception); if (verbose != MagickFalse) { if ( kernel_loop > 1 ) (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */ (void) FormatLocaleFile(stderr, "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g", v_info,CommandOptionToMnemonic(MagickMorphologyOptions, primitive),(this_kernel == rflt_kernel ) ? "*" : "", (double) (method_loop+kernel_loop-1),(double) kernel_number, (double) count,(double) changed); } if ( changed < 0 ) goto error_cleanup; kernel_changed += changed; method_changed += changed; /* prepare next loop */ { Image *tmp = work_image; /* swap images for iteration */ work_image = curr_image; curr_image = tmp; } if ( work_image == image ) work_image = (Image *) NULL; /* replace input 'image' */ } /* End Loop 4: Iterate the kernel with primitive */ if (verbose != MagickFalse && kernel_changed != (size_t)changed) (void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed); if (verbose != MagickFalse && stage_loop < stage_limit) (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */ #if 0 (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image); (void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image); (void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image); (void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image); (void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image); #endif } /* End Loop 3: Primitive (staging) Loop for Compound Methods */ /* Final Post-processing for some Compound Methods ** ** The removal of any 'Sync' channel flag in the Image Composition ** below ensures the mathematical compose method is applied in a ** purely mathematical way, and only to the selected channels. ** Turn off SVG composition 'alpha blending'.
*/ switch( method ) { case EdgeOutMorphology: case EdgeInMorphology: case TopHatMorphology: case BottomHatMorphology: if (verbose != MagickFalse) (void) FormatLocaleFile(stderr, "\n%s: Difference with original image",CommandOptionToMnemonic( MagickMorphologyOptions, method) ); (void) CompositeImage(curr_image,image,DifferenceCompositeOp, MagickTrue,0,0,exception); break; case EdgeMorphology: if (verbose != MagickFalse) (void) FormatLocaleFile(stderr, "\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic( MagickMorphologyOptions, method) ); (void) CompositeImage(curr_image,save_image,DifferenceCompositeOp, MagickTrue,0,0,exception); save_image = DestroyImage(save_image); /* finished with save image */ break; default: break; } /* multi-kernel handling: re-iterate, or compose results */ if ( kernel->next == (KernelInfo *) NULL ) rslt_image = curr_image; /* just return the resulting image */ else if ( rslt_compose == NoCompositeOp ) { if (verbose != MagickFalse) { if ( this_kernel->next != (KernelInfo *) NULL ) (void) FormatLocaleFile(stderr, " (re-iterate)"); else (void) FormatLocaleFile(stderr, " (done)"); } rslt_image = curr_image; /* return result, and re-iterate */ } else if ( rslt_image == (Image *) NULL) { if (verbose != MagickFalse) (void) FormatLocaleFile(stderr, " (save for compose)"); rslt_image = curr_image; curr_image = (Image *) image; /* continue with original image */ } else { /* Add the new 'current' result to the composition ** ** The removal of any 'Sync' channel flag in the Image Composition ** below ensures the mathematical compose method is applied in a ** purely mathematical way, and only to the selected channels. ** IE: Turn off SVG composition 'alpha blending'. */ if (verbose != MagickFalse) (void) FormatLocaleFile(stderr, " (compose \"%s\")", CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) ); (void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue, 0,0,exception); curr_image = DestroyImage(curr_image); curr_image = (Image *) image; /* continue with original image */ } if (verbose != MagickFalse) (void) FormatLocaleFile(stderr, "\n"); /* loop to the next kernel in a multi-kernel list */ norm_kernel = norm_kernel->next; if ( rflt_kernel != (KernelInfo *) NULL ) rflt_kernel = rflt_kernel->next; kernel_number++; } /* End Loop 2: Loop over each kernel */ } /* End Loop 1: compound method iteration */ goto exit_cleanup; /* Yes goto's are bad, but it makes cleanup a lot more efficient */ error_cleanup: if ( curr_image == rslt_image ) curr_image = (Image *) NULL; if ( rslt_image != (Image *) NULL ) rslt_image = DestroyImage(rslt_image); exit_cleanup: if ( curr_image == rslt_image || curr_image == image ) curr_image = (Image *) NULL; if ( curr_image != (Image *) NULL ) curr_image = DestroyImage(curr_image); if ( work_image != (Image *) NULL ) work_image = DestroyImage(work_image); if ( save_image != (Image *) NULL ) save_image = DestroyImage(save_image); if ( reflected_kernel != (KernelInfo *) NULL ) reflected_kernel = DestroyKernelInfo(reflected_kernel); return(rslt_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o r p h o l o g y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MorphologyImage() applies a user supplied kernel to the image according to % the given morphology method. % % This function applies any and all user defined settings before calling % the above internal function MorphologyApply().
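%
% For example, a caller could supply these settings from the command line
% with something like the following (an illustrative invocation only):
%
%    magick input.png -define convolve:scale=50%! \
%           -morphology Convolve Gaussian:0x2 output.png
%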
% % User defined settings include... % * Output Bias for Convolution and correlation ("-define convolve:bias=??") % * Kernel Scale/normalize settings ("-define convolve:scale=??") % This can also include the addition of a scaled unity kernel. % * Show Kernel being applied ("-define morphology:showkernel=1") % % Other operators that do not want user supplied options interfering, % especially "convolve:bias" and "morphology:showkernel", should use % MorphologyApply() directly. % % The format of the MorphologyImage method is: % % Image *MorphologyImage(const Image *image,MorphologyMethod method, % const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o method: the morphology method to be applied. % % o iterations: apply the operation this many times (or until no change). % A value of -1 means loop until no change found. % How this is applied may depend on the morphology method. % Typically this is a value of 1. % % o kernel: An array of double representing the morphology kernel. % Warning: kernel may be normalized for the Convolve method. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MorphologyImage(const Image *image, const MorphologyMethod method,const ssize_t iterations, const KernelInfo *kernel,ExceptionInfo *exception) { const char *artifact; CompositeOperator compose; double bias; Image *morphology_image; KernelInfo *curr_kernel; curr_kernel = (KernelInfo *) kernel; bias=0.0; compose = UndefinedCompositeOp; /* use default for method */ /* Apply Convolve/Correlate Normalization and Scaling Factors. * This is done BEFORE the ShowKernelInfo() function is called so that * users can see the results of the 'option:convolve:scale' option. */ if ( method == ConvolveMorphology || method == CorrelateMorphology ) { /* Get the bias value as it will be needed */ artifact = GetImageArtifact(image,"convolve:bias"); if ( artifact != (const char *) NULL) { if (IsGeometry(artifact) == MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"InvalidSetting","'%s' '%s'", "convolve:bias",artifact); else bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0); } /* Scale kernel according to user wishes */ artifact = GetImageArtifact(image,"convolve:scale"); if ( artifact != (const char *) NULL ) { if (IsGeometry(artifact) == MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"InvalidSetting","'%s' '%s'", "convolve:scale",artifact); else { if ( curr_kernel == kernel ) curr_kernel = CloneKernelInfo(kernel); if (curr_kernel == (KernelInfo *) NULL) return((Image *) NULL); ScaleGeometryKernelInfo(curr_kernel, artifact); } } } /* display the (normalized) kernel via stderr */ artifact=GetImageArtifact(image,"morphology:showkernel"); if (IsStringTrue(artifact) != MagickFalse) ShowKernelInfo(curr_kernel); /* Override the default handling of multi-kernel morphology results * If 'Undefined' use the default method * If 'None' (default for 'Convolve') re-iterate previous result * Otherwise merge resulting images using compose method given. * Default for 'HitAndMiss' is 'Lighten'.
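* For example, "-define morphology:compose=Lighten" merges the per-kernel
* results with the Lighten compose method, the same as the 'HitAndMiss'
* default.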
*/ { ssize_t parse; artifact = GetImageArtifact(image,"morphology:compose"); if ( artifact != (const char *) NULL) { parse=ParseCommandOption(MagickComposeOptions, MagickFalse,artifact); if ( parse < 0 ) (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"UnrecognizedComposeOperator","'%s' '%s'", "morphology:compose",artifact); else compose=(CompositeOperator)parse; } } /* Apply the Morphology */ morphology_image = MorphologyApply(image,method,iterations, curr_kernel,compose,bias,exception); /* Cleanup and Exit */ if ( curr_kernel != kernel ) curr_kernel=DestroyKernelInfo(curr_kernel); return(morphology_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R o t a t e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RotateKernelInfo() rotates the kernel by the angle given. % % Currently it is restricted to 90 degree rotations, of either 1D kernels % or square kernels, and 'circular' rotations of 45 degrees for 3x3 kernels. % It will ignore useless rotations for specific 'named' built-in kernels. % % The format of the RotateKernelInfo method is: % % void RotateKernelInfo(KernelInfo *kernel, double angle) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o angle: angle to rotate in degrees % % This function is currently internal to this module only, but can be exported % to other modules if needed. */ static void RotateKernelInfo(KernelInfo *kernel, double angle) { /* rotate the lower kernels first */ if ( kernel->next != (KernelInfo *) NULL) RotateKernelInfo(kernel->next, angle); /* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical ** ** TODO: expand beyond simple 90 degree rotates, flips and flops */ /* Modulus the angle */ angle = fmod(angle, 360.0); if ( angle < 0 ) angle += 360.0; if ( 337.5 < angle || angle <= 22.5 ) return; /* Near zero angle - no change!
- At least not at this time */ /* Handle special cases */ switch (kernel->type) { /* These built-in kernels are cylindrical kernels, rotating is useless */ case GaussianKernel: case DoGKernel: case LoGKernel: case DiskKernel: case PeaksKernel: case LaplacianKernel: case ChebyshevKernel: case ManhattanKernel: case EuclideanKernel: return; /* These may be rotatable at non-90 angles in the future */ /* but simply rotating them in multiples of 90 degrees is useless */ case SquareKernel: case DiamondKernel: case PlusKernel: case CrossKernel: return; /* These only allow a +/-90 degree rotation (by transpose) */ /* A 180 degree rotation is useless */ case BlurKernel: if ( 135.0 < angle && angle <= 225.0 ) return; if ( 225.0 < angle && angle <= 315.0 ) angle -= 180; break; default: break; } /* Attempt rotations by 45 degrees -- 3x3 kernels only */ if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 ) { if ( kernel->width == 3 && kernel->height == 3 ) { /* Rotate a 3x3 square by 45 degree angle */ double t = kernel->values[0]; kernel->values[0] = kernel->values[3]; kernel->values[3] = kernel->values[6]; kernel->values[6] = kernel->values[7]; kernel->values[7] = kernel->values[8]; kernel->values[8] = kernel->values[5]; kernel->values[5] = kernel->values[2]; kernel->values[2] = kernel->values[1]; kernel->values[1] = t; /* rotate non-centered origin */ if ( kernel->x != 1 || kernel->y != 1 ) { ssize_t x,y; x = (ssize_t) kernel->x-1; y = (ssize_t) kernel->y-1; if ( x == y ) x = 0; else if ( x == 0 ) x = -y; else if ( x == -y ) y = 0; else if ( y == 0 ) y = x; kernel->x = (ssize_t) x+1; kernel->y = (ssize_t) y+1; } angle = fmod(angle+315.0, 360.0); /* angle reduced 45 degrees */ kernel->angle = fmod(kernel->angle+45.0, 360.0); } else perror("Unable to rotate non-3x3 kernel by 45 degrees"); } if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 ) { if ( kernel->width == 1 || kernel->height == 1 ) { /* Do a transpose of a 1 dimensional kernel, ** which results in a fast 90 degree rotation of some type.
*/ ssize_t t; t = (ssize_t) kernel->width; kernel->width = kernel->height; kernel->height = (size_t) t; t = kernel->x; kernel->x = kernel->y; kernel->y = t; if ( kernel->width == 1 ) { angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */ kernel->angle = fmod(kernel->angle+90.0, 360.0); } else { angle = fmod(angle+90.0, 360.0); /* angle increased 90 degrees */ kernel->angle = fmod(kernel->angle+270.0, 360.0); } } else if ( kernel->width == kernel->height ) { /* Rotate a square array of values by 90 degrees */ { register ssize_t i,j,x,y; register MagickRealType *k,t; k=kernel->values; for( i=0, x=(ssize_t) kernel->width-1; i<=x; i++, x--) for( j=0, y=(ssize_t) kernel->height-1; j<y; j++, y--) { t = k[i+j*kernel->width]; k[i+j*kernel->width] = k[j+x*kernel->width]; k[j+x*kernel->width] = k[x+y*kernel->width]; k[x+y*kernel->width] = k[y+i*kernel->width]; k[y+i*kernel->width] = t; } } /* rotate the origin - relative to center of array */ { register ssize_t x,y; x = (ssize_t) (kernel->x*2-kernel->width+1); y = (ssize_t) (kernel->y*2-kernel->height+1); kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2; kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2; } angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */ kernel->angle = fmod(kernel->angle+90.0, 360.0); } else perror("Unable to rotate a non-square, non-linear kernel 90 degrees"); } if ( 135.0 < angle && angle <= 225.0 ) { /* For a 180 degree rotation - also known as a reflection * This is actually a very very common operation! * Basically all that is needed is a reversal of the kernel data! * And a reflection of the origin */ MagickRealType t; register MagickRealType *k; ssize_t i, j; k=kernel->values; j=(ssize_t) (kernel->width*kernel->height-1); for (i=0; i < j; i++, j--) t=k[i], k[i]=k[j], k[j]=t; kernel->x = (ssize_t) kernel->width - kernel->x - 1; kernel->y = (ssize_t) kernel->height - kernel->y - 1; angle = fmod(angle-180.0, 360.0); /* angle+180 degrees */ kernel->angle = fmod(kernel->angle+180.0, 360.0); } /* At this point the angle should be between -45 (315) and +45 degrees * In the future some form of non-orthogonal angled rotates could be * performed here, possibly with a linear kernel restriction. */ return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e G e o m e t r y K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleGeometryKernelInfo() takes a geometry argument string, typically % provided as a "-set option:convolve:scale {geometry}" user setting, % and modifies the kernel according to the parsed arguments of that setting. % % The first argument (and any normalization flags) are passed to % ScaleKernelInfo() to scale/normalize the kernel. The second argument % is then passed to UnityAddKernelInfo() to add a scaled unity kernel % into the scaled/normalized kernel. % % The format of the ScaleGeometryKernelInfo method is: % % void ScaleGeometryKernelInfo(KernelInfo *kernel, % const char *geometry) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel to modify % % o geometry: % The geometry string to parse, typically from the user provided % "-set option:convolve:scale {geometry}" setting.
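%
% Illustrative geometry arguments (example values only, per the parsing
% performed below):
%
%    "2"        scale all kernel values by 2
%    "!"        normalize the kernel (the NormalizeValue flag), no scaling
%    "50%,50%"  scale the kernel by 0.5, then add a half-strength unity
%               kernel, blending the kernel's effect 50/50 with the
%               original image
%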
% */ MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel, const char *geometry) { MagickStatusType flags; GeometryInfo args; SetGeometryInfo(&args); flags = ParseGeometry(geometry, &args); #if 0 /* For Debugging Geometry Input */ (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n", flags, args.rho, args.sigma, args.xi, args.psi ); #endif if ( (flags & PercentValue) != 0 ) /* Handle Percentage flag*/ args.rho *= 0.01, args.sigma *= 0.01; if ( (flags & RhoValue) == 0 ) /* Set Defaults for missing args */ args.rho = 1.0; if ( (flags & SigmaValue) == 0 ) args.sigma = 0.0; /* Scale/Normalize the input kernel */ ScaleKernelInfo(kernel, args.rho, (GeometryFlags) flags); /* Add Unity Kernel, for blending with original */ if ( (flags & SigmaValue) != 0 ) UnityAddKernelInfo(kernel, args.sigma); return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleKernelInfo() scales the given kernel list by the given amount, with or % without normalization of the sum of the kernel values (as per given flags). % % By default (no flags given) the values within the kernel are scaled % directly by the given scaling factor. % % If either of the two 'normalize_flags' is given the kernel will first be % normalized and then further scaled by the scaling factor value given. % % Kernel normalization ('normalize_flags' given) is designed to ensure that % any use of the kernel scaling factor with 'Convolve' or 'Correlate' % morphology methods will fall into -1.0 to +1.0 range. Note that for % non-HDRI versions of IM this may cause images to have any negative results % clipped, unless some 'bias' is used. % % More specifically: kernels which only contain positive values (such as a % 'Gaussian' kernel) will be scaled so that those values sum to +1.0, % ensuring a 0.0 to +1.0 output range for non-HDRI images. % % For kernels that contain some negative values, (such as 'Sharpen' kernels) % the kernel will be scaled by the absolute of the sum of kernel values, so % that it will generally fall within the +/- 1.0 range. % % For kernels whose values sum to zero, (such as 'Laplacian' kernels) the % kernel will be scaled by just the sum of the positive values, so that its % output range will again fall into the +/- 1.0 range. % % For special kernels designed for locating shapes using 'Correlate', (often % only containing +1 and -1 values, representing foreground/background % matching) a special normalization method is provided to scale the positive % values separately to those of the negative values, so the kernel will be % forced to become a zero-sum kernel better suited to such searches. % % WARNING: Correct normalization of the kernel assumes that the '*_range' % attributes within the kernel structure have been correctly set during the % kernels creation. % % NOTE: The values used for 'normalize_flags' have been selected specifically % to match the use of geometry options, so that '!' means NormalizeValue, '^' % means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
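%
% As a worked example: a 3x3 'Square' kernel of nine 1.0 values has a
% positive_range of 9.0, so NormalizeValue scaling multiplies every value
% by 1/9 and the kernel then sums to +1.0; a scaling factor of 0.5 applied
% on top of that halves each of the resulting values.
%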
% % The format of the ScaleKernelInfo method is: % % void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor, % const MagickStatusType normalize_flags ) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o scaling_factor: % multiply all values (after normalization) by this factor, if it is % not zero. Whether the kernel is normalized is determined solely by % the given flags. % % o normalize_flags: % GeometryFlags defining normalization method to use. % specifically: NormalizeValue, CorrelateNormalizeValue, % and/or PercentValue % */ MagickExport void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,const GeometryFlags normalize_flags) { register double pos_scale, neg_scale; register ssize_t i; /* do the other kernels in a multi-kernel list first */ if ( kernel->next != (KernelInfo *) NULL) ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags); /* Normalization of Kernel */ pos_scale = 1.0; if ( (normalize_flags&NormalizeValue) != 0 ) { if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon ) /* non-zero-summing kernel (generally positive) */ pos_scale = fabs(kernel->positive_range + kernel->negative_range); else /* zero-summing kernel */ pos_scale = kernel->positive_range; } /* Force kernel into a normalized zero-summing kernel */ if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) { pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon ) ? kernel->positive_range : 1.0; neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon ) ? -kernel->negative_range : 1.0; } else neg_scale = pos_scale; /* finalize scaling_factor for positive and negative components */ pos_scale = scaling_factor/pos_scale; neg_scale = scaling_factor/neg_scale; for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++) if (!IsNaN(kernel->values[i])) kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale; /* convolution output range */ kernel->positive_range *= pos_scale; kernel->negative_range *= neg_scale; /* maximum and minimum values in kernel */ kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale; kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale; /* swap kernel settings if user's scaling factor is negative */ if ( scaling_factor < MagickEpsilon ) { double t; t = kernel->positive_range; kernel->positive_range = kernel->negative_range; kernel->negative_range = t; t = kernel->maximum; kernel->maximum = kernel->minimum; kernel->minimum = t; } return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h o w K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShowKernelInfo() outputs the details of the given kernel definition to % standard error, generally due to a user's 'morphology:showkernel' option % request.
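%
% Illustrative output (not captured from an actual run) for a 3x3 'Square'
% kernel of 1.0 values:
%
%    Kernel "Square" of size 3x3+1+1 with values from 1 to 1
%    Forming an output range from 0 to 9 (Sum 9)
%     0:    1    1    1
%     1:    1    1    1
%     2:    1    1    1
%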
% % The format of the ShowKernelInfo method is: % % void ShowKernelInfo(const KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % */ MagickPrivate void ShowKernelInfo(const KernelInfo *kernel) { const KernelInfo *k; size_t c, i, u, v; for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) { (void) FormatLocaleFile(stderr, "Kernel"); if ( kernel->next != (KernelInfo *) NULL ) (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c ); (void) FormatLocaleFile(stderr, " \"%s", CommandOptionToMnemonic(MagickKernelOptions, k->type) ); if ( fabs(k->angle) >= MagickEpsilon ) (void) FormatLocaleFile(stderr, "@%lg", k->angle); (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long) k->width,(unsigned long) k->height,(long) k->x,(long) k->y); (void) FormatLocaleFile(stderr, " with values from %.*lg to %.*lg\n", GetMagickPrecision(), k->minimum, GetMagickPrecision(), k->maximum); (void) FormatLocaleFile(stderr, "Forming an output range from %.*lg to %.*lg", GetMagickPrecision(), k->negative_range, GetMagickPrecision(), k->positive_range); if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Zero-Summing)\n"); else if ( fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Normalized)\n"); else (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n", GetMagickPrecision(), k->positive_range+k->negative_range); for (i=v=0; v < k->height; v++) { (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v ); for (u=0; u < k->width; u++, i++) if (IsNaN(k->values[i])) (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan"); else (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3, GetMagickPrecision(), (double) k->values[i]); (void) FormatLocaleFile(stderr,"\n"); } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n i t y A d d K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnityAddKernelInfo() adds a given amount of the 'Unity' Convolution Kernel % to the given pre-scaled and normalized Kernel. This in effect adds that % amount of the original image into the resulting convolution kernel. This % value is usually provided by the user as a percentage value in the % 'convolve:scale' setting. % % The resulting effect is to convert the defined kernels into blended % soft-blurs, unsharp kernels or into sharpening kernels. % % The format of the UnityAddKernelInfo method is: % % void UnityAddKernelInfo(KernelInfo *kernel, const double scale ) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o scale: % scaling factor for the unity kernel to be added to % the given kernel.
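%
% For example, a 'convolve:scale' setting of "50%,50%" halves a normalized
% blurring kernel and then calls this function with a scale of 0.5, so the
% convolution output becomes an equal blend of the original image and its
% blurred form (a soft blur).
%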
% */ MagickExport void UnityAddKernelInfo(KernelInfo *kernel, const double scale) { /* do the other kernels in a multi-kernel list first */ if ( kernel->next != (KernelInfo *) NULL) UnityAddKernelInfo(kernel->next, scale); /* Add the scaled unity kernel to the existing kernel */ kernel->values[kernel->x+kernel->y*kernel->width] += scale; CalcKernelMetaData(kernel); /* recalculate the meta-data */ return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Z e r o K e r n e l N a n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ZeroKernelNans() replaces any special 'nan' value that may be present in % the kernel with a zero value. This is typically done when the kernel will % be used in special hardware (GPU) convolution processors, to simplify % matters. % % The format of the ZeroKernelNans method is: % % void ZeroKernelNans (KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % */ MagickPrivate void ZeroKernelNans(KernelInfo *kernel) { register size_t i; /* do the other kernels in a multi-kernel list first */ if (kernel->next != (KernelInfo *) NULL) ZeroKernelNans(kernel->next); for (i=0; i < (kernel->width*kernel->height); i++) if (IsNaN(kernel->values[i])) kernel->values[i]=0.0; return; }
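/*
** A minimal standalone sketch (not part of MagickCore; all names here are
** illustrative) of the 180 degree kernel "reflection" performed by
** RotateKernelInfo() above: reverse the value array in place, then reflect
** the origin through the kernel center.
*/
#include <stdio.h>
#include <stddef.h>

static void reflect180(double *values, size_t width, size_t height,
  ptrdiff_t *x, ptrdiff_t *y)
{
  size_t i, j;
  double t;
  /* reverse the kernel data in place */
  for (i=0, j=width*height-1; i < j; i++, j--)
    t=values[i], values[i]=values[j], values[j]=t;
  /* reflect the origin through the kernel center */
  *x = (ptrdiff_t) width - *x - 1;
  *y = (ptrdiff_t) height - *y - 1;
}

int main(void)
{
  double k[6] = { 1, 2, 3, 4, 5, 6 };  /* a 3x2 kernel */
  ptrdiff_t x = 0, y = 0;              /* origin at top-left */
  size_t i;
  reflect180(k, 3, 2, &x, &y);
  for (i=0; i < 6; i++)
    printf("%g ", k[i]);               /* prints: 6 5 4 3 2 1 */
  printf("origin %+td%+td\n", x, y);   /* origin moves to +2+1 */
  return 0;
}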
configurator.c
/* Simple tool to create config.h. * Would be much easier with ccan modules, but deliberately standalone. * * Copyright 2011 Rusty Russell <rusty@rustcorp.com.au>. MIT license. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include <stdio.h> #include <stdbool.h> #include <stdlib.h> #include <unistd.h> #include <err.h> #include <sys/types.h> #include <sys/wait.h> #include <sys/stat.h> #include <fcntl.h> #include <string.h> #define DEFAULT_COMPILER "cc" #define DEFAULT_FLAGS "-g3 -ggdb -Wall -Wundef -Wmissing-prototypes -Wmissing-declarations -Wstrict-prototypes -Wold-style-definition" #define OUTPUT_FILE "configurator.out" #define INPUT_FILE "configuratortest.c" static int verbose; enum test_style { OUTSIDE_MAIN = 0x1, DEFINES_FUNC = 0x2, INSIDE_MAIN = 0x4, DEFINES_EVERYTHING = 0x8, MAY_NOT_COMPILE = 0x10, EXECUTE = 0x8000 }; struct test { const char *name; enum test_style style; const char *depends; const char *link; const char *fragment; const char *flags; const char *overrides; /* On success, force this to '1' */ bool done; bool answer; }; static struct test tests[] = { { "HAVE_32BIT_OFF_T", DEFINES_EVERYTHING|EXECUTE, NULL, NULL, "#include <sys/types.h>\n" "int main(void) {\n" " return sizeof(off_t) == 4 ? 0 : 1;\n" "}\n" }, { "HAVE_ALIGNOF", INSIDE_MAIN, NULL, NULL, "return __alignof__(double) > 0 ? 0 : 1;" }, { "HAVE_ASPRINTF", DEFINES_FUNC, NULL, NULL, "#ifndef _GNU_SOURCE\n" "#define _GNU_SOURCE\n" "#endif\n" "#include <stdio.h>\n" "static char *func(int x) {" " char *p;\n" " if (asprintf(&p, \"%u\", x) == -1) p = NULL;" " return p;\n" "}" }, { "HAVE_ATTRIBUTE_COLD", DEFINES_FUNC, NULL, NULL, "static int __attribute__((cold)) func(int x) { return x; }" }, { "HAVE_ATTRIBUTE_CONST", DEFINES_FUNC, NULL, NULL, "static int __attribute__((const)) func(int x) { return x; }" }, { "HAVE_ATTRIBUTE_PURE", DEFINES_FUNC, NULL, NULL, "static int __attribute__((pure)) func(int x) { return x; }" }, { "HAVE_ATTRIBUTE_MAY_ALIAS", OUTSIDE_MAIN, NULL, NULL, "typedef short __attribute__((__may_alias__)) short_a;" }, { "HAVE_ATTRIBUTE_NORETURN", DEFINES_FUNC, NULL, NULL, "#include <stdlib.h>\n" "static void __attribute__((noreturn)) func(int x) { exit(x); }" }, { "HAVE_ATTRIBUTE_PRINTF", DEFINES_FUNC, NULL, NULL, "static void __attribute__((format(__printf__, 1, 2))) func(const char *fmt, ...) 
{ (void)fmt; }" }, { "HAVE_ATTRIBUTE_UNUSED", OUTSIDE_MAIN, NULL, NULL, "static int __attribute__((unused)) func(int x) { return x; }" }, { "HAVE_ATTRIBUTE_USED", OUTSIDE_MAIN, NULL, NULL, "static int __attribute__((used)) func(int x) { return x; }" }, { "HAVE_BACKTRACE", DEFINES_FUNC, NULL, NULL, "#include <execinfo.h>\n" "static int func(int x) {" " void *bt[10];\n" " return backtrace(bt, 10) < x;\n" "}" }, { "HAVE_BIG_ENDIAN", INSIDE_MAIN|EXECUTE, NULL, NULL, "union { int i; char c[sizeof(int)]; } u;\n" "u.i = 0x01020304;\n" "return u.c[0] == 0x01 && u.c[1] == 0x02 && u.c[2] == 0x03 && u.c[3] == 0x04 ? 0 : 1;" }, { "HAVE_BSWAP_64", DEFINES_FUNC, "HAVE_BYTESWAP_H", NULL, "#include <byteswap.h>\n" "static int func(int x) { return bswap_64(x); }" }, { "HAVE_BUILTIN_CHOOSE_EXPR", INSIDE_MAIN, NULL, NULL, "return __builtin_choose_expr(1, 0, \"garbage\");" }, { "HAVE_BUILTIN_CLZ", INSIDE_MAIN, NULL, NULL, "return __builtin_clz(1) == (sizeof(int)*8 - 1) ? 0 : 1;" }, { "HAVE_BUILTIN_CLZL", INSIDE_MAIN, NULL, NULL, "return __builtin_clzl(1) == (sizeof(long)*8 - 1) ? 0 : 1;" }, { "HAVE_BUILTIN_CLZLL", INSIDE_MAIN, NULL, NULL, "return __builtin_clzll(1) == (sizeof(long long)*8 - 1) ? 0 : 1;" }, { "HAVE_BUILTIN_CTZ", INSIDE_MAIN, NULL, NULL, "return __builtin_ctz(1 << (sizeof(int)*8 - 1)) == (sizeof(int)*8 - 1) ? 0 : 1;" }, { "HAVE_BUILTIN_CTZL", INSIDE_MAIN, NULL, NULL, "return __builtin_ctzl(1UL << (sizeof(long)*8 - 1)) == (sizeof(long)*8 - 1) ? 0 : 1;" }, { "HAVE_BUILTIN_CTZLL", INSIDE_MAIN, NULL, NULL, "return __builtin_ctzll(1ULL << (sizeof(long long)*8 - 1)) == (sizeof(long long)*8 - 1) ? 0 : 1;" }, { "HAVE_BUILTIN_CONSTANT_P", INSIDE_MAIN, NULL, NULL, "return __builtin_constant_p(1) ? 0 : 1;" }, { "HAVE_BUILTIN_EXPECT", INSIDE_MAIN, NULL, NULL, "return __builtin_expect(argc == 1, 1) ? 0 : 1;" }, { "HAVE_BUILTIN_FFS", INSIDE_MAIN, NULL, NULL, "return __builtin_ffs(0) == 0 ? 0 : 1;" }, { "HAVE_BUILTIN_FFSL", INSIDE_MAIN, NULL, NULL, "return __builtin_ffsl(0L) == 0 ? 0 : 1;" }, { "HAVE_BUILTIN_FFSLL", INSIDE_MAIN, NULL, NULL, "return __builtin_ffsll(0LL) == 0 ? 0 : 1;" }, { "HAVE_BUILTIN_POPCOUNTL", INSIDE_MAIN, NULL, NULL, "return __builtin_popcountl(255L) == 8 ? 0 : 1;" }, { "HAVE_BUILTIN_TYPES_COMPATIBLE_P", INSIDE_MAIN, NULL, NULL, "return __builtin_types_compatible_p(char *, int) ? 1 : 0;" }, { "HAVE_ICCARM_INTRINSICS", DEFINES_FUNC, NULL, NULL, "#include <intrinsics.h>\n" "int func(int v) {\n" " return __CLZ(__RBIT(v));\n" "}" }, { "HAVE_BYTESWAP_H", OUTSIDE_MAIN, NULL, NULL, "#include <byteswap.h>\n" }, { "HAVE_CLOCK_GETTIME", DEFINES_FUNC, "HAVE_STRUCT_TIMESPEC", NULL, "#include <time.h>\n" "static struct timespec func(void) {\n" " struct timespec ts;\n" " clock_gettime(CLOCK_REALTIME, &ts);\n" " return ts;\n" "}\n" }, { "HAVE_CLOCK_GETTIME_IN_LIBRT", DEFINES_FUNC, "HAVE_STRUCT_TIMESPEC !HAVE_CLOCK_GETTIME", "-lrt", "#include <time.h>\n" "static struct timespec func(void) {\n" " struct timespec ts;\n" " clock_gettime(CLOCK_REALTIME, &ts);\n" " return ts;\n" "}\n", /* This means HAVE_CLOCK_GETTIME, too */ "HAVE_CLOCK_GETTIME" }, { "HAVE_COMPOUND_LITERALS", INSIDE_MAIN, NULL, NULL, "int *foo = (int[]) { 1, 2, 3, 4 };\n" "return foo[0] ? 0 : 1;" }, { "HAVE_FCHDIR", DEFINES_EVERYTHING|EXECUTE, NULL, NULL, "#include <sys/types.h>\n" "#include <sys/stat.h>\n" "#include <fcntl.h>\n" "#include <unistd.h>\n" "int main(void) {\n" " int fd = open(\"..\", O_RDONLY);\n" " return fchdir(fd) == 0 ? 
0 : 1;\n" "}\n" }, { "HAVE_ERR_H", DEFINES_FUNC, NULL, NULL, "#include <err.h>\n" "static void func(int arg) {\n" " if (arg == 0)\n" " err(1, \"err %u\", arg);\n" " if (arg == 1)\n" " errx(1, \"err %u\", arg);\n" " if (arg == 3)\n" " warn(\"warn %u\", arg);\n" " if (arg == 4)\n" " warnx(\"warn %u\", arg);\n" "}\n" }, { "HAVE_FILE_OFFSET_BITS", DEFINES_EVERYTHING|EXECUTE, "HAVE_32BIT_OFF_T", NULL, "#define _FILE_OFFSET_BITS 64\n" "#include <sys/types.h>\n" "int main(void) {\n" " return sizeof(off_t) == 8 ? 0 : 1;\n" "}\n" }, { "HAVE_FOR_LOOP_DECLARATION", INSIDE_MAIN, NULL, NULL, "for (int i = 0; i < argc; i++) { return 0; };\n" "return 1;" }, { "HAVE_FLEXIBLE_ARRAY_MEMBER", OUTSIDE_MAIN, NULL, NULL, "struct foo { unsigned int x; int arr[]; };" }, { "HAVE_GETPAGESIZE", DEFINES_FUNC, NULL, NULL, "#include <unistd.h>\n" "static int func(void) { return getpagesize(); }" }, { "HAVE_ISBLANK", DEFINES_FUNC, NULL, NULL, "#ifndef _GNU_SOURCE\n" "#define _GNU_SOURCE\n" "#endif\n" "#include <ctype.h>\n" "static int func(void) { return isblank(' '); }" }, { "HAVE_LITTLE_ENDIAN", INSIDE_MAIN|EXECUTE, NULL, NULL, "union { int i; char c[sizeof(int)]; } u;\n" "u.i = 0x01020304;\n" "return u.c[0] == 0x04 && u.c[1] == 0x03 && u.c[2] == 0x02 && u.c[3] == 0x01 ? 0 : 1;" }, { "HAVE_MEMMEM", DEFINES_FUNC, NULL, NULL, "#ifndef _GNU_SOURCE\n" "#define _GNU_SOURCE\n" "#endif\n" "#include <string.h>\n" "static void *func(void *h, size_t hl, void *n, size_t nl) {\n" "return memmem(h, hl, n, nl);" "}\n", }, { "HAVE_MEMRCHR", DEFINES_FUNC, NULL, NULL, "#ifndef _GNU_SOURCE\n" "#define _GNU_SOURCE\n" "#endif\n" "#include <string.h>\n" "static void *func(void *s, int c, size_t n) {\n" "return memrchr(s, c, n);" "}\n", }, { "HAVE_MMAP", DEFINES_FUNC, NULL, NULL, "#include <sys/mman.h>\n" "static void *func(int fd) {\n" " return mmap(0, 65536, PROT_READ, MAP_SHARED, fd, 0);\n" "}" }, { "HAVE_PROC_SELF_MAPS", DEFINES_EVERYTHING|EXECUTE, NULL, NULL, "#include <sys/types.h>\n" "#include <sys/stat.h>\n" "#include <fcntl.h>\n" "int main(void) {\n" " return open(\"/proc/self/maps\", O_RDONLY) != -1 ? 0 : 1;\n" "}\n" }, { "HAVE_QSORT_R_PRIVATE_LAST", DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE, NULL, NULL, "#ifndef _GNU_SOURCE\n" "#define _GNU_SOURCE\n" "#endif\n" "#include <stdlib.h>\n" "static int cmp(const void *lp, const void *rp, void *priv) {\n" " *(unsigned int *)priv = 1;\n" " return *(const int *)lp - *(const int *)rp; }\n" "int main(void) {\n" " int array[] = { 9, 2, 5 };\n" " unsigned int called = 0;\n" " qsort_r(array, 3, sizeof(int), cmp, &called);\n" " return called && array[0] == 2 && array[1] == 5 && array[2] == 9 ? 0 : 1;\n" "}\n" }, { "HAVE_STRUCT_TIMESPEC", DEFINES_FUNC, NULL, NULL, "#include <time.h>\n" "static void func(void) {\n" " struct timespec ts;\n" " ts.tv_sec = ts.tv_nsec = 1;\n" "}\n" }, { "HAVE_SECTION_START_STOP", DEFINES_FUNC, NULL, NULL, "static void *__attribute__((__section__(\"mysec\"))) p = &p;\n" "static int func(void) {\n" " extern void *__start_mysec[], *__stop_mysec[];\n" " return __stop_mysec - __start_mysec;\n" "}\n" }, { "HAVE_STACK_GROWS_UPWARDS", DEFINES_EVERYTHING|EXECUTE, NULL, NULL, "static long nest(const void *base, unsigned int i)\n" "{\n" " if (i == 0)\n" " return (const char *)&i - (const char *)base;\n" " return nest(base, i-1);\n" "}\n" "int main(int argc, char *argv[]) {\n" " (void)argv;\n" " return (nest(&argc, argc) > 0) ? 0 : 1;\n" "}\n" }, { "HAVE_STATEMENT_EXPR", INSIDE_MAIN, NULL, NULL, "return ({ int x = argc; x == argc ? 
0 : 1; });" }, { "HAVE_SYS_FILIO_H", OUTSIDE_MAIN, NULL, NULL, /* Solaris needs this for FIONREAD */ "#include <sys/filio.h>\n" }, { "HAVE_SYS_TERMIOS_H", OUTSIDE_MAIN, NULL, NULL, "#include <sys/termios.h>\n" }, { "HAVE_TYPEOF", INSIDE_MAIN, NULL, NULL, "__typeof__(argc) i; i = argc; return i == argc ? 0 : 1;" }, { "HAVE_UNALIGNED_ACCESS", DEFINES_EVERYTHING|EXECUTE, NULL, NULL, "#include <string.h>\n" "int main(int argc, char *argv[]) {\n" " (void)argc;\n" " char pad[sizeof(int *) * 1];\n" " strncpy(pad, argv[0], sizeof(pad));\n" " int *x = (int *)pad, *y = (int *)(pad + 1);\n" " return *x == *y;\n" "}\n" }, { "HAVE_UTIME", DEFINES_FUNC, NULL, NULL, "#include <sys/types.h>\n" "#include <utime.h>\n" "static int func(const char *filename) {\n" " struct utimbuf times = { 0 };\n" " return utime(filename, &times);\n" "}" }, { "HAVE_WARN_UNUSED_RESULT", DEFINES_FUNC, NULL, NULL, "#include <sys/types.h>\n" "#include <utime.h>\n" "static __attribute__((warn_unused_result)) int func(int i) {\n" " return i + 1;\n" "}" }, { "HAVE_OPENMP", INSIDE_MAIN, NULL, NULL, "int i;\n" "#pragma omp parallel for\n" "for(i = 0; i < 0; i++) {};\n" "return 0;\n", "-Werror -fopenmp" }, { "HAVE_VALGRIND_MEMCHECK_H", OUTSIDE_MAIN, NULL, NULL, "#include <valgrind/memcheck.h>\n" }, { "HAVE_UCONTEXT", DEFINES_EVERYTHING|EXECUTE, NULL, NULL, "#include <ucontext.h>\n" "static int x = 0;\n" "static char stack[2048];\n" "static ucontext_t a, b;\n" "static void fn(void) {\n" " x |= 2;\n" " setcontext(&b);\n" " x |= 4;\n" "}\n" "int main(void) {\n" " x |= 1;\n" " getcontext(&a);\n" " a.uc_stack.ss_sp = stack;\n" " a.uc_stack.ss_size = sizeof(stack);\n" " makecontext(&a, fn, 0);\n" " swapcontext(&b, &a);\n" " return (x == 3) ? 0 : 1;\n" "}\n" }, { "HAVE_POINTER_SAFE_MAKECONTEXT", DEFINES_EVERYTHING|EXECUTE, "HAVE_UCONTEXT", NULL, "#include <stddef.h>\n" "#include <ucontext.h>\n" "static int worked = 0;\n" "static char stack[1024];\n" "static ucontext_t a, b;\n" "static void fn(void *p, void *q) {\n" " void *cp = &worked;\n" " void *cq = (void *)(~((ptrdiff_t)cp));\n" " if ((p == cp) && (q == cq))\n" " worked = 1;\n" " setcontext(&b);\n" "}\n" "int main(void) {\n" " void *ap = &worked;\n" " void *aq = (void *)(~((ptrdiff_t)ap));\n" " getcontext(&a);\n" " a.uc_stack.ss_sp = stack;\n" " a.uc_stack.ss_size = sizeof(stack);\n" " makecontext(&a, (void (*)(void))fn, 2, ap, aq);\n" " swapcontext(&b, &a);\n" " return worked ? 0 : 1;\n" "}\n" }, }; static char *grab_fd(int fd) { int ret; size_t max, size = 0; char *buffer; max = 16384; buffer = malloc(max+1); while ((ret = read(fd, buffer + size, max - size)) > 0) { size += ret; if (size == max) buffer = realloc(buffer, max *= 2); } if (ret < 0) err(1, "reading from command"); buffer[size] = '\0'; return buffer; } static char *run(const char *cmd, int *exitstatus) { pid_t pid; int p[2]; char *ret; int status; if (pipe(p) != 0) err(1, "creating pipe"); pid = fork(); if (pid == -1) err(1, "forking"); if (pid == 0) { if (dup2(p[1], STDOUT_FILENO) != STDOUT_FILENO || dup2(p[1], STDERR_FILENO) != STDERR_FILENO || close(p[0]) != 0 || close(STDIN_FILENO) != 0 || open("/dev/null", O_RDONLY) != STDIN_FILENO) exit(128); status = system(cmd); if (WIFEXITED(status)) exit(WEXITSTATUS(status)); /* Here's a hint... */ exit(128 + WTERMSIG(status)); } close(p[1]); ret = grab_fd(p[0]); /* This shouldn't fail... 
*/ if (waitpid(pid, &status, 0) != pid) err(1, "Failed to wait for child"); close(p[0]); if (WIFEXITED(status)) *exitstatus = WEXITSTATUS(status); else *exitstatus = -WTERMSIG(status); return ret; } static char *connect_args(const char *argv[], const char *extra) { unsigned int i, len = strlen(extra) + 1; char *ret; for (i = 1; argv[i]; i++) len += 1 + strlen(argv[i]); ret = malloc(len); len = 0; for (i = 1; argv[i]; i++) { strcpy(ret + len, argv[i]); len += strlen(argv[i]); if (argv[i+1]) ret[len++] = ' '; } strcpy(ret + len, extra); return ret; } static struct test *find_test(const char *name) { unsigned int i; for (i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) { if (strcmp(tests[i].name, name) == 0) return &tests[i]; } abort(); } #define PRE_BOILERPLATE "/* Test program generated by configurator. */\n" #define MAIN_START_BOILERPLATE \ "int main(int argc, char *argv[]) {\n" \ " (void)argc;\n" \ " (void)argv;\n" #define USE_FUNC_BOILERPLATE "(void)func;\n" #define MAIN_BODY_BOILERPLATE "return 0;\n" #define MAIN_END_BOILERPLATE "}\n" static bool run_test(const char *cmd, struct test *test) { char *output, *newcmd; FILE *outf; int status; if (test->done) return test->answer; if (test->depends) { size_t len; const char *deps = test->depends; char *dep; /* Space-separated dependencies, could be ! for inverse. */ while ((len = strcspn(deps, " "))) { bool positive = true; if (deps[len]) { dep = strdup(deps); dep[len] = '\0'; } else { dep = (char *)deps; } if (dep[0] == '!') { dep++; positive = false; } if (run_test(cmd, find_test(dep)) != positive) { test->answer = false; test->done = true; return test->answer; } if (deps[len]) free(dep); deps += len; deps += strspn(deps, " "); } } outf = fopen(INPUT_FILE, "w"); if (!outf) err(1, "creating %s", INPUT_FILE); fprintf(outf, "%s", PRE_BOILERPLATE); switch (test->style & ~(EXECUTE|MAY_NOT_COMPILE)) { case INSIDE_MAIN: fprintf(outf, "%s", MAIN_START_BOILERPLATE); fprintf(outf, "%s", test->fragment); fprintf(outf, "%s", MAIN_END_BOILERPLATE); break; case OUTSIDE_MAIN: fprintf(outf, "%s", test->fragment); fprintf(outf, "%s", MAIN_START_BOILERPLATE); fprintf(outf, "%s", MAIN_BODY_BOILERPLATE); fprintf(outf, "%s", MAIN_END_BOILERPLATE); break; case DEFINES_FUNC: fprintf(outf, "%s", test->fragment); fprintf(outf, "%s", MAIN_START_BOILERPLATE); fprintf(outf, "%s", USE_FUNC_BOILERPLATE); fprintf(outf, "%s", MAIN_BODY_BOILERPLATE); fprintf(outf, "%s", MAIN_END_BOILERPLATE); break; case DEFINES_EVERYTHING: fprintf(outf, "%s", test->fragment); break; default: abort(); } fclose(outf); if (verbose > 1) if (system("cat " INPUT_FILE) == -1) ; newcmd = strdup(cmd); if (test->flags) { newcmd = realloc(newcmd, strlen(newcmd) + strlen(" ") + strlen(test->flags) + 1); strcat(newcmd, " "); strcat(newcmd, test->flags); if (verbose > 1) printf("Extra flags line: %s", newcmd); } if (test->link) { newcmd = realloc(newcmd, strlen(newcmd) + strlen(" ") + strlen(test->link) + 1); strcat(newcmd, " "); strcat(newcmd, test->link); if (verbose > 1) printf("Extra link line: %s", newcmd); } output = run(newcmd, &status); free(newcmd); if (status != 0 || strstr(output, "warning")) { if (verbose) printf("Compile %s for %s, status %i: %s\n", status ? "fail" : "warning", test->name, status, output); if ((test->style & EXECUTE) && !(test->style & MAY_NOT_COMPILE)) errx(1, "Test for %s did not compile:\n%s", test->name, output); test->answer = false; free(output); } else { /* Compile succeeded. */ free(output); /* We run INSIDE_MAIN tests for sanity checking. 
*/ if ((test->style & EXECUTE) || (test->style & INSIDE_MAIN)) { output = run("./" OUTPUT_FILE, &status); if (!(test->style & EXECUTE) && status != 0) errx(1, "Test for %s failed with %i:\n%s", test->name, status, output); if (verbose && status) printf("%s exited %i\n", test->name, status); free(output); } test->answer = (status == 0); } test->done = true; if (test->answer && test->overrides) { struct test *override = find_test(test->overrides); override->done = true; override->answer = true; } return test->answer; } int main(int argc, const char *argv[]) { char *cmd; unsigned int i; const char *default_args[] = { "", DEFAULT_COMPILER, DEFAULT_FLAGS, NULL }; if (argc > 1) { if (strcmp(argv[1], "--help") == 0) { printf("Usage: configurator [-v] [<compiler> <flags>...]\n" " <compiler> <flags> will have \"-o <outfile> <infile.c>\" appended\n" "Default: %s %s\n", DEFAULT_COMPILER, DEFAULT_FLAGS); exit(0); } if (strcmp(argv[1], "-v") == 0) { argc--; argv++; verbose = 1; } else if (strcmp(argv[1], "-vv") == 0) { argc--; argv++; verbose = 2; } } if (argc == 1) argv = default_args; cmd = connect_args(argv, " -o " OUTPUT_FILE " " INPUT_FILE); for (i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) run_test(cmd, &tests[i]); free(cmd); unlink(OUTPUT_FILE); unlink(INPUT_FILE); printf("/* Generated by CCAN configurator */\n" "#ifndef CCAN_CONFIG_H\n" "#define CCAN_CONFIG_H\n"); printf("#ifndef _GNU_SOURCE\n"); printf("#define _GNU_SOURCE /* Always use GNU extensions. */\n"); printf("#endif\n"); printf("#define CCAN_COMPILER \"%s\"\n", argv[1]); cmd = connect_args(argv+1, ""); printf("#define CCAN_CFLAGS \"%s\"\n\n", cmd); free(cmd); /* This one implies "#include <ccan/..." works, eg. for tdb2.h */ printf("#define HAVE_CCAN 1\n"); for (i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) printf("#define %s %u\n", tests[i].name, tests[i].answer); printf("#endif /* CCAN_CONFIG_H */\n"); return 0; }
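/* For reference: for an INSIDE_MAIN test such as HAVE_TYPEOF, run_test()
 * above assembles INPUT_FILE from PRE_BOILERPLATE, MAIN_START_BOILERPLATE,
 * the test fragment and MAIN_END_BOILERPLATE, producing (after the generated
 * header comment):
 *
 *   int main(int argc, char *argv[]) {
 *   (void)argc;
 *   (void)argv;
 *   __typeof__(argc) i; i = argc; return i == argc ? 0 : 1;
 *   }
 *
 * Compiling and running that file then answers the test: exit status 0
 * defines HAVE_TYPEOF to 1 in the emitted config.h.
 */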
GB_binop__ge_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ge_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__ge_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__ge_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__ge_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_uint64) // A*D function (colscale): GB (_AxD__ge_uint64) // D*A function (rowscale): GB (_DxB__ge_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__ge_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__ge_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_uint64) // C=scalar+B GB (_bind1st__ge_uint64) // C=scalar+B' GB (_bind1st_tran__ge_uint64) // C=A+scalar GB (_bind2nd__ge_uint64) // C=A'+scalar GB (_bind2nd_tran__ge_uint64) // C type: bool // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_UINT64 || GxB_NO_GE_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__ge_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ge_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ge_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ge_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ge_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ge_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) alpha_scalar_in)) 
; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ge_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ge_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ge_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ge_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ge_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ge_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__ge_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) 
{ // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__ge_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
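/* Illustrative sketch (not part of the generated file above): with the
   GBB/GBX bitmap macros stripped away, the _bind2nd kernel for GE_UINT64 is
   just the binary operator applied elementwise with its second argument
   bound to a scalar.  The function and variable names here are made up for
   the example. */
#if 0
#include <stdbool.h>
#include <stdint.h>
static void ge_uint64_bind2nd_sketch (bool *Cx, const uint64_t *Ax,
    uint64_t y, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        Cx [p] = (Ax [p] >= y) ;    /* cij = op (aij, y), with op = ">=" */
    }
}
#endif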
vq_train.c
/*Daala video codec
Copyright (c) 2012-2014 Daala project contributors.  All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

- Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

- Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.*/

#include <stdlib.h>
#include <stdio.h>
#include <string.h> /* for memset() */
#include <math.h>
#include <time.h>
#include "od_defs.h"
#include "../src/dct.h"

#define MAX(a,b) ((a)>(b)?(a):(b))

#define MAX_ENTRIES (4096)
#define MAX_DIMS (128)

#if 0
# undef NUM_PROCS
# define NUM_PROCS (1)
#endif

static double inner_prod(const double *x, const double *y, int n) {
  double sum;
  int i;
  sum = 0;
  for (i = 0; i < n; i++) sum += x[i]*y[i];
  return sum;
}

static void normalize(double *x, int n) {
  int i;
  double sum;
  sum = 1e-30;
  for (i = 0; i < n; i++) sum += x[i]*x[i];
  sum = 1./sqrt(sum);
  for (i = 0; i < n; i++) x[i] *= sum;
}

/* Returns the distance to the closest K=2 codeword. We can take a shortcut
   because there are only two possibilities: both pulses at the position with
   largest magnitude, or one pulse at each of the two largest magnitudes. */
static double pvq_dist_k2(const double *data, int n) {
  double xbest1;
  double xbest2;
  int i;
  xbest1 = xbest2 = -1;
  for (i = 0; i < n; i++) {
    if (fabs(data[i]) > xbest2) {
      if (fabs(data[i]) > xbest1) {
        xbest2 = xbest1;
        xbest1 = fabs(data[i]);
      }
      else {
        xbest2 = fabs(data[i]);
      }
    }
  }
  return 2 - 2*MAX(xbest1, M_SQRT1_2*(xbest1 + xbest2));
}

static int find_nearest(const double *data, const double *codebook,
 int nb_entries, int n, double *sign, double *err) {
  double best_dist;
  double best_sign;
  int best_id;
  int i;
  best_dist = -1;
  best_id = 0;
  best_sign = 1;
  for (i = 0; i < nb_entries; i++) {
    double dist;
    dist = inner_prod(data, &codebook[i*n], n);
    if (fabs(dist) > best_dist) {
      best_dist = fabs(dist);
      best_sign = dist > 0 ? 1 : -1;
      best_id = i;
    }
  }
  if (sign) *sign = best_sign;
  if (err) *err = 2 - 2*best_dist;
  return best_id;
}

void vq_rand_init(const double *data, int nb_vectors, double *codebook,
 int nb_entries, int n) {
  int i;
  int j;
  /* Start with a codebook made of randomly selected vectors. */
  for (i = 0; i < nb_entries; i++) {
    int id;
    id = rand()%nb_vectors;
    for (j = 0; j < n; j++) {
      /* Add some noise just in case we pick the same vector twice. */
      codebook[i*n + j] = data[id*n + j] + .01*(rand()%3 - 1);
    }
    normalize(&codebook[i*n], n);
  }
}

double vq_train(const double *data, int nb_vectors, double *codebook,
 int nb_entries, int n, int nb_iter, int exclude_pvq) {
  int i;
  int iter;
  double rms[NUM_PROCS];
  double *accum;
  accum = (double *)malloc(MAX_ENTRIES*MAX_DIMS*NUM_PROCS*sizeof(*accum));
  if (accum == NULL) {
    fprintf(stderr, "malloc() failed, giving up.\n");
    exit(EXIT_FAILURE);
  }
  rms[0] = 0; /* keep the return value defined even if nb_iter == 0 */
  for (iter = 0; iter < nb_iter; iter++) {
    for (i = 0; i < NUM_PROCS; i++) rms[i] = 0;
    memset(accum, 0, nb_entries*n*NUM_PROCS*sizeof(*accum));
#pragma omp parallel for schedule(dynamic)
    for (i = 0; i < nb_vectors; i++) {
      int tid;
      int id;
      double sign;
      double pvq_err;
      double err;
      tid = OD_OMP_GET_THREAD;
      id = find_nearest(&data[i*n], codebook, nb_entries, n, &sign, &err);
      pvq_err = pvq_dist_k2(&data[i*n], n);
      /*printf("%f ", err);*/
      if (!exclude_pvq || err < pvq_err) {
        int j;
        int offset;
        rms[tid] += err;
        offset = nb_entries*n*tid + id*n;
        for (j = 0; j < n; j++) accum[offset + j] += sign*data[i*n + j];
      }
      else rms[tid] += pvq_err;
    }
    for (i = 1; i < NUM_PROCS; i++) {
      int j;
      int offset;
      offset = nb_entries*n*i;
      for (j = 0; j < nb_entries*n; j++) accum[j] += accum[offset + j];
    }
    for (i = 1; i < NUM_PROCS; i++) rms[0] += rms[i];
    for (i = 0; i < nb_entries; i++) normalize(&accum[i*n], n);
    for (i = 0; i < nb_entries*n; i++) codebook[i] = accum[i];
    rms[0] = sqrt(rms[0]/nb_vectors);
    fprintf(stderr, "RMS: %f\n", rms[0]);
  }
  free(accum);
  return rms[0];
}

int main(int argc, char **argv) {
  int i;
  int j;
  int nb_vectors;
  int nb_entries;
  int ndim;
  double *data;
  double *codebook;
  double rms;
  unsigned seed;
  seed = time(NULL);
  srand(seed);
  if (argc != 4) {
    fprintf(stderr, "usage: %s <dimensions> <max vectors> <bits>\n",
     argc > 0 ? argv[0] : "vq_train");
    return 1;
  }
  ndim = atoi(argv[1]);
  nb_vectors = atoi(argv[2]);
  nb_entries = 1<<atoi(argv[3]);
  OD_OMP_SET_THREADS(NUM_PROCS);
  data = (double *)malloc(nb_vectors*ndim*sizeof(*data));
  codebook = (double *)malloc(nb_entries*ndim*sizeof(*codebook));
  if (data == NULL || codebook == NULL) {
    fprintf(stderr, "malloc() failed, giving up.\n");
    return 1;
  }
  for (i = 0; i < nb_vectors; i++) {
    if (feof(stdin)) break;
    for (j = 0; j < ndim; j++) {
      if (scanf("%lf ", &data[i*ndim + j]) != 1) exit(EXIT_FAILURE);
    }
    normalize(&data[i*ndim], ndim);
  }
  nb_vectors = i;
  fprintf(stderr, "read %d vectors\n", nb_vectors);
  vq_rand_init(data, nb_vectors, codebook, nb_entries, ndim);
  rms = vq_train(data, nb_vectors, codebook, nb_entries, ndim, 100, 1);
#if 0
  for (i = 0; i < nb_vectors; i++) {
    double sign;
    int nearest;
    nearest = find_nearest(&data[i*ndim], codebook, nb_entries, ndim, &sign,
     NULL);
    printf("%d %f\n", nearest, sign);
  }
#endif
  printf("/* Automatically generated by vq_train. */\n");
  printf("/* Seed was %u. */\n", seed);
  printf("/* RMS training error is %f. */\n", rms);
  printf("const double codebook[%d*%d] = {\n", nb_entries, ndim);
  for (i = 0; i < nb_entries; i++) {
    for (j = 0; j < ndim; j++) printf("%f, ", codebook[i*ndim + j]);
    printf("\n");
  }
  printf("};\n");
  free(data);
  free(codebook);
  return 0;
}
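/* Example usage (illustrative; the exact compiler flags and input data
   depend on the Daala build environment):

     cc -O2 -fopenmp vq_train.c -o vq_train -lm
     ./vq_train 16 100000 6 < training_vectors.txt > codebook.c

   stdin supplies whitespace-separated doubles, <dimensions> values per
   training vector; the trained codebook (here 2^6 = 64 unit-norm entries of
   dimension 16) is emitted on stdout as a C array. */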
GB_reduce_to_vector.c
//------------------------------------------------------------------------------ // GB_reduce_to_vector: reduce a matrix to a vector using a binary op //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // CALLS: GB_build // C<M> = accum (C,reduce(A)) where C is n-by-1. Reduces a matrix A or A' // to a vector. #include "GB_reduce.h" #include "GB_build.h" #include "GB_ek_slice.h" #include "GB_accum_mask.h" #ifndef GBCOMPACT #include "GB_red__include.h" #endif #define GB_FREE_WORK \ { \ GB_FREE_MEMORY (Wfirst_space, ntasks, zsize) ; \ GB_FREE_MEMORY (Wlast_space, ntasks, zsize) ; \ GB_ek_slice_free (&pstart_slice, &kfirst_slice, &klast_slice, ntasks) ; \ } #define GB_FREE_ALL \ { \ GB_FREE_WORK ; \ GB_MATRIX_FREE (&T) ; \ } GrB_Info GB_reduce_to_vector // C<M> = accum (C,reduce(A)) ( GrB_Matrix C, // input/output for results, size n-by-1 const GrB_Matrix M, // optional M for C, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(C,T) const GrB_BinaryOp reduce, // reduce operator for T=reduce(A) const GB_void *terminal, // for early exit (NULL if none) const GrB_Matrix A, // first input: matrix A const GrB_Descriptor desc, // descriptor for C, M, and A GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- // C may be aliased with M and/or A GB_RETURN_IF_NULL_OR_FAULTY (C) ; GB_RETURN_IF_FAULTY (M) ; GB_RETURN_IF_FAULTY (accum) ; GB_RETURN_IF_NULL_OR_FAULTY (A) ; GB_RETURN_IF_FAULTY (desc) ; ASSERT_MATRIX_OK (C, "C input for reduce_BinaryOp", GB0) ; ASSERT_MATRIX_OK_OR_NULL (M, "M for reduce_BinaryOp", GB0) ; ASSERT_BINARYOP_OK_OR_NULL (accum, "accum for reduce_BinaryOp", GB0) ; ASSERT_BINARYOP_OK (reduce, "reduce for reduce_BinaryOp", GB0) ; ASSERT_MATRIX_OK (A, "A input for reduce_BinaryOp", GB0) ; ASSERT_DESCRIPTOR_OK_OR_NULL (desc, "desc for reduce_BinaryOp", GB0) ; GrB_Matrix T = NULL ; int ntasks = 0 ; size_t zsize = 0 ; int64_t *pstart_slice = NULL, *kfirst_slice = NULL, *klast_slice = NULL ; GB_void *GB_RESTRICT Wfirst_space = NULL ; GB_void *GB_RESTRICT Wlast_space = NULL ; // get the descriptor GB_GET_DESCRIPTOR (info, desc, C_replace, Mask_comp, Mask_struct, A_transpose, xx1, xx2) ; // C and M are n-by-1 GrB_Vector objects, typecasted to GrB_Matrix ASSERT (GB_VECTOR_OK (C)) ; ASSERT (GB_IMPLIES (M != NULL, GB_VECTOR_OK (M))) ; // check domains and dimensions for C<M> = accum (C,T) GrB_Type ttype = reduce->ztype ; GB_OK (GB_compatible (C->type, C, M, accum, ttype, Context)) ; // check types of reduce if (reduce->xtype != reduce->ztype || reduce->ytype != reduce->ztype) { // all 3 types of z = reduce (x,y) must be the same. reduce must also // be associative but there is no way to check this in general. 
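        // For example, GrB_PLUS_FP64 (double = f(double,double)) satisfies
        // this requirement, while a comparator such as GrB_LT_FP64
        // (bool = f(double,double)) does not, since its ztype differs from
        // its xtype and ytype.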
        return (GB_ERROR (GrB_DOMAIN_MISMATCH, (GB_LOG,
            "All domains of reduction operator must be identical;\n"
            "operator is: [%s] = %s ([%s],[%s])",
            reduce->ztype->name, reduce->name,
            reduce->xtype->name, reduce->ytype->name))) ;
    }

    // T = reduce (T,A) must be compatible
    if (!GB_Type_compatible (A->type, reduce->ztype))
    {
        return (GB_ERROR (GrB_DOMAIN_MISMATCH, (GB_LOG,
            "incompatible type for reduction operator z=%s(x,y):\n"
            "input matrix A of type [%s]\n"
            "cannot be typecast to reduction operator of type [%s]",
            reduce->name, A->type->name, reduce->ztype->name))) ;
    }

    // check the dimensions
    int64_t n = GB_NROWS (C) ;
    if (A_transpose)
    {
        if (n != GB_NCOLS (A))
        {
            return (GB_ERROR (GrB_DIMENSION_MISMATCH, (GB_LOG,
                "w=reduce(A'): length of w is "GBd";\n"
                "it must match the number of columns of A, which is "GBd".",
                n, GB_NCOLS (A)))) ;
        }
    }
    else
    {
        if (n != GB_NROWS (A))
        {
            return (GB_ERROR (GrB_DIMENSION_MISMATCH, (GB_LOG,
                "w=reduce(A): length of w is "GBd";\n"
                "it must match the number of rows of A, which is "GBd".",
                n, GB_NROWS (A)))) ;
        }
    }

    // quick return if an empty mask is complemented
    GB_RETURN_IF_QUICK_MASK (C, C_replace, M, Mask_comp) ;

    //--------------------------------------------------------------------------
    // delete any lingering zombies and assemble any pending tuples
    //--------------------------------------------------------------------------

    // GB_WAIT (C) ;
    GB_WAIT (M) ;
    GB_WAIT (A) ;
    ASSERT (!GB_PENDING (A)) ;
    ASSERT (!GB_ZOMBIES (A)) ;

    //--------------------------------------------------------------------------
    // handle the CSR/CSC format of A
    //--------------------------------------------------------------------------

    // the result vector T is in CSC format
    if (!(A->is_csc))
    {
        A_transpose = !A_transpose ;
    }

    //--------------------------------------------------------------------------
    // T = reduce (A) or reduce (A')
    //--------------------------------------------------------------------------

    // T is created below so that it can be typecasted to a GrB_Vector when
    // done: non-hypersparse n-by-1 matrix in CSC format.

    // T = reduce_to_vector (A) or reduce_to_vector (A'), which is T = sum (A')
    // or sum (A), in MATLAB notation, except where 'sum' is any
    // associative operator.

    // By default, T(i) = op (A (i,:)) is a vector whose length is the same as
    // the number of rows of A.  T(i) is the reduction of all entries in the
    // ith row of A.  If A_transpose is true, then T is computed as if A were
    // transposed first, and thus its length is equal to the number of vectors
    // of the input matrix A.  The use of A_transpose is the opposite of
    // MATLAB, since sum(A) in MATLAB sums up the columns of A, and sum(A')
    // sums up the rows of A.

    // T is an n-by-1 GrB_Matrix that represents the vector.  It is computed
    // as a GrB_Matrix so it can be passed to GB_accum_mask without
    // typecasting.

    ASSERT (n == ((A_transpose) ?
A->vdim : A->vlen)) ; //-------------------------------------------------------------------------- // scalar workspace //-------------------------------------------------------------------------- size_t asize = A->type->size ; GB_Type_code acode = A->type->code ; const int64_t *GB_RESTRICT Ai = A->i ; const GB_void *GB_RESTRICT Ax = A->x ; int64_t anvec = A->nvec ; int64_t anz = GB_NNZ (A) ; zsize = reduce->ztype->size ; GB_Type_code zcode = reduce->ztype->code ; //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (anz + anvec, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // T = reduce(A) or reduce(A') //-------------------------------------------------------------------------- GxB_binary_function freduce = reduce->function ; GB_cast_function cast_A_to_Z = GB_cast_factory (zcode, acode) ; bool nocasting = (A->type == reduce->ztype) ; if (A_transpose) { //---------------------------------------------------------------------- // T = reduce(A'), where T(j) = reduce (A (:,j)) //---------------------------------------------------------------------- // Each vector A(:,j) is reduced to the scalar T(j) //---------------------------------------------------------------------- // allocate T, including T->p, T->i, and T->x. T is not hypersparse. //---------------------------------------------------------------------- // since T is a GrB_Vector, it is CSC and not hypersparse GB_CREATE (&T, ttype, n, 1, GB_Ap_calloc, true, GB_FORCE_NONHYPER, GB_HYPER_DEFAULT, 1, anvec, true, Context) ; GB_OK (info) ; ASSERT (GB_VECTOR_OK (T)) ; T->p [0] = 0 ; T->p [1] = anvec ; int64_t *GB_RESTRICT Ti = T->i ; GB_void *GB_RESTRICT Tx = T->x ; T->nvec_nonempty = (anvec > 0) ? 1 : 0 ; T->magic = GB_MAGIC ; //---------------------------------------------------------------------- // symbolic phase //---------------------------------------------------------------------- // Construct the pattern of T. The kth vector in A creates one entry // in T, but it is flagged as a zombie if it is empty. int64_t nzombies = 0 ; const int64_t *GB_RESTRICT Ah = A->h ; const int64_t *GB_RESTRICT Ap = A->p ; int nth = GB_nthreads (anvec, chunk, nthreads_max) ; int64_t k ; #pragma omp parallel for num_threads(nth) schedule(static) \ reduction(+:nzombies) for (k = 0 ; k < anvec ; k++) { // if A(:,j) is empty, then the entry in T becomes a zombie int64_t j = (Ah == NULL) ? k : Ah [k] ; int64_t jnz = Ap [k+1] - Ap [k] ; if (jnz == 0) { // A(:,j) is empty: T(j) is a zombie Ti [k] = GB_FLIP (j) ; nzombies++ ; } else { // A(:,j) has at least one entry; T(j) is live Ti [k] = j ; } } if (A->nvec_nonempty < 0) { A->nvec_nonempty = anvec - nzombies ; } ASSERT (A->nvec_nonempty == (anvec - nzombies)) ; T->nzombies = nzombies ; //---------------------------------------------------------------------- // slice the entries of A for the numeric phase //---------------------------------------------------------------------- // Task tid does entries pstart_slice [tid] to pstart_slice [tid+1]-1 // and vectors kfirst_slice [tid] to klast_slice [tid]. The first and // last vectors may be shared with prior slices and subsequent slices. ntasks = (nthreads == 1) ? 
1 : (8 * nthreads) ; ntasks = GB_IMIN (ntasks, anz) ; ntasks = GB_IMAX (ntasks, 1) ; GB_MALLOC_MEMORY (Wfirst_space, ntasks, zsize) ; GB_MALLOC_MEMORY (Wlast_space, ntasks, zsize) ; if (Wfirst_space == NULL || Wlast_space == NULL || !GB_ek_slice (&pstart_slice, &kfirst_slice, &klast_slice, A, ntasks)) { // out of memory GB_FREE_ALL ; return (GB_OUT_OF_MEMORY) ; } //---------------------------------------------------------------------- // numeric phase: launch the switch factory //---------------------------------------------------------------------- bool done = false ; #ifndef GBCOMPACT #define GB_red(opname,aname) GB_red_eachvec_ ## opname ## aname #define GB_RED_WORKER(opname,aname,atype) \ { \ info = GB_red (opname, aname) ((atype *) Tx, A, \ kfirst_slice, klast_slice, pstart_slice, \ Wfirst_space, Wlast_space, ntasks, nthreads) ; \ done = (info != GrB_NO_VALUE) ; \ } \ break ; if (nocasting) { // controlled by opcode and typecode. No typecasting is done. GB_Opcode opcode = reduce->opcode ; GB_Type_code typecode = acode ; ASSERT (typecode <= GB_UDT_code) ; #include "GB_red_factory.c" } #endif //---------------------------------------------------------------------- // generic worker: with typecasting //---------------------------------------------------------------------- if (!done) { GB_BURBLE_MATRIX (A, "generic ") ; #define GB_ATYPE GB_void #define GB_CTYPE GB_void // ztype s ; #define GB_SCALAR(s) \ GB_void s [GB_VLA(zsize)] // ztype s = (ztype) Ax [p], with typecast #define GB_CAST_ARRAY_TO_SCALAR(s,Ax,p) \ cast_A_to_Z (s, Ax +((p)*asize), zsize) ; \ // s += (ztype) Ax [p], with typecast #define GB_ADD_CAST_ARRAY_TO_SCALAR(s, Ax, p) \ GB_void awork [GB_VLA(zsize)] ; \ cast_A_to_Z (awork, Ax +((p)*asize), zsize) ; \ freduce (s, s, awork) ; // W [k] = s, no typecast #define GB_COPY_SCALAR_TO_ARRAY(W,k,s) \ memcpy (W +((k)*zsize), s, zsize) ; // W [k] = S [i], no typecast #define GB_COPY_ARRAY_TO_ARRAY(W,k,S,i) \ memcpy (W +((k)*zsize), S +((i)*zsize), zsize) ; // W [k] += S [i], no typecast #define GB_ADD_ARRAY_TO_ARRAY(W,k,S,i) \ freduce (W +((k)*zsize), W +((k)*zsize), S +((i)*zsize)) ; // W [k] += s, no typecast #define GB_ADD_SCALAR_TO_ARRAY(W,k,s) \ freduce (W +((k)*zsize), W +((k)*zsize), s) ; // break if terminal value reached #define GB_BREAK_IF_TERMINAL(t) \ if (terminal != NULL) \ { \ if (memcmp (t, terminal, zsize) == 0) break ; \ } #include "GB_reduce_each_vector.c" } //---------------------------------------------------------------------- // wrapup: delete any zombies //---------------------------------------------------------------------- ASSERT_MATRIX_OK (T, "T before wait", GB_FLIP (GB0)) ; if (nzombies > 0) { ASSERT (GB_VECTOR_OK (T)) ; ASSERT (!GB_PENDING (T)) ; ASSERT (GB_ZOMBIES (T)) ; GB_OK (GB_wait (T, Context)) ; } ASSERT_MATRIX_OK (T, "T output = reduce_each_vector (A)", GB0) ; } else { //---------------------------------------------------------------------- // T = reduce(A), where T(i) = reduce (A (i,:)) //---------------------------------------------------------------------- //---------------------------------------------------------------------- // select the method //---------------------------------------------------------------------- // When A_transpose is false (after flipping it to account for the // CSR/CSC format), n is A->vlen, the vector length of A. This is // the number of rows of a CSC matrix, or the # of columns of a CSR // matrix. The matrix A itself requires O(vdim+anz) memory if // non-hypersparse and O(anz) if hypersparse. 
This does not depend on // A->vlen. So if the vector length is really huge (when anz << n), // the bucket method would fail. Thus, the qsort method, below, is // used when A is very sparse. if (GB_CHOOSE_QSORT_INSTEAD_OF_BUCKET (anz, n)) { //------------------------------------------------------------------ // qsort method //------------------------------------------------------------------ // memory usage is O(anz) and time is O(anz*log(anz)). This is // more efficient than the bucket method, below, when A is very // hypersparse. The time and memory complexity does not depend // on n. // since T is a GrB_Vector, it is not hypersparse GB_NEW (&T, ttype, n, 1, GB_Ap_null, true, GB_FORCE_NONHYPER, GB_HYPER_DEFAULT, 1, Context) ; GB_OK (info) ; // GB_build treats Ai and Ax as read-only; they must not be modified GB_OK (GB_build ( T, // construct result in the T vector (GrB_Index *) Ai, // indices inside the vector NULL, // vector indices (none) Ax, // values, of size anz anz, // number of tuples reduce, // reduction operator acode, // type code of the Ax array false, // the input is a vector false, // indices do not need to be checked Context )) ; ASSERT (T->nvec_nonempty == GB_nvec_nonempty (T, NULL)) ; } else { //------------------------------------------------------------------ // bucket method //------------------------------------------------------------------ // Determine number of threads to use for constructing the buckets. // Each thread requires O(n) workspace, so this method does not // scale well when there are many threads compared to anz. Total // workspace is O(n*ntasks), so limit the # of threads used so that // at most anz workspace is used. Each thread takes a single task. ntasks = (n > 0) ? (anz / n) : 1 ; ntasks = GB_IMIN (ntasks, nthreads) ; ntasks = GB_IMAX (ntasks, 1) ; int nth = ntasks ; // one thread per task //------------------------------------------------------------------ // slice the entries for each thread //------------------------------------------------------------------ // Thread tid does entries pstart_slice [tid] to // pstart_slice [tid+1]-1. No need to compute kfirst or klast. GB_MALLOC_MEMORY (pstart_slice, ntasks+1, sizeof (int64_t)) ; if (pstart_slice == NULL) { // out of memory GB_FREE_ALL ; return (GB_OUT_OF_MEMORY) ; } GB_eslice (pstart_slice, anz, ntasks) ; //------------------------------------------------------------------ // sum across each index: T(i) = reduce (A (i,:)) //------------------------------------------------------------------ // Early exit cannot be exploited; ignore the terminal value. #undef GB_red #define GB_red(opname,aname) GB_red_eachindex_ ## opname ## aname #undef GB_RED_WORKER #define GB_RED_WORKER(opname,aname,atype) \ { \ info = GB_red (opname, aname) (&T, ttype, A, pstart_slice, \ ntasks, nthreads, Context) ; \ done = (info != GrB_NO_VALUE) ; \ } \ break ; bool done = false ; //------------------------------------------------------------------ // launch the switch factory //------------------------------------------------------------------ #ifndef GBCOMPACT if (nocasting) { // controlled by opcode and typecode. No typecasting GB_Opcode opcode = reduce->opcode ; GB_Type_code typecode = acode ; ASSERT (typecode <= GB_UDT_code) ; #include "GB_red_factory.c" if (! 
(info == GrB_SUCCESS || info == GrB_NO_VALUE)) { // out of memory GB_FREE_ALL ; return (info) ; } } #endif //------------------------------------------------------------------ // generic worker //------------------------------------------------------------------ if (!done) { // if this fails, the template frees all workspace with the // GB_FREE_ALL macro, defined above. GB_BURBLE_MATRIX (A, "generic ") ; #include "GB_reduce_each_index.c" } } ASSERT_MATRIX_OK (T, "T output for T = reduce_each_index (A)", GB0) ; } //-------------------------------------------------------------------------- // C<M> = accum (C,T): accumulate the results into C via the mask //-------------------------------------------------------------------------- GB_FREE_WORK ; return (GB_accum_mask (C, M, NULL, accum, &T, C_replace, Mask_comp, Mask_struct, Context)) ; }
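/* Illustrative sketch (not part of the library source): ignoring masks,
   typecasting, zombies, and parallelism, the reduction computed above is
   simply T(i) = reduce (A (i,:)).  For a dense row-major array this would
   be the following loop; the real code never materializes A densely. */
#if 0
static void reduce_each_index_sketch (double *T, const double *A,
    int64_t m, int64_t n, double (*freduce) (double, double))
{
    for (int64_t i = 0 ; i < m ; i++)
    {
        double t = A [i*n] ;                    // first entry of row i
        for (int64_t j = 1 ; j < n ; j++)
        {
            t = freduce (t, A [i*n + j]) ;      // t = reduce (t, A(i,j))
        }
        T [i] = t ;
    }
}
#endif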
rt_dtwonm.c
#include "runtime.h" void RT_CORE_dscal(Quark *quark, Quark_Task_Flags *task_flags, int m, double *alpha, double *A, int lda) { plasma_context_t *plasma; plasma = plasma_context_self(); if (plasma->runtime == PLASMA_QUARK) { cblas_dscal(m, 1.0/alpha[0], A, lda); } else if (plasma->runtime == PLASMA_OMPSS) { //double *Adel = A + (m-256)*m; //#pragma omp task in([1]alpha) inout([m]Adel) no_copy_deps label(dscal) #pragma omp target device (smp) copy_deps #pragma omp task in([1]alpha) inout([m]A) label(dscal) cblas_dscal(m, 1.0/alpha[0], A, lda); } } void RT_CORE_dconv(Quark *quark, Quark_Task_Flags *task_flags, double *alpha, double *beta, double *result, double *e0) { plasma_context_t *plasma; plasma = plasma_context_self(); #pragma omp target device (smp) copy_deps #pragma omp task in([1]alpha, [1]beta) inout([1]result) out([1]e0) label(scalars_for_normest) { e0[0] = result[0]; result[0] = alpha[0]/beta[0]; } } void RT_CORE_conv( Quark *quark, Quark_Task_Flags *task_flags, double *e, double *e0, double *conv) { plasma_context_t *plasma; plasma = plasma_context_self(); #pragma omp target device (smp) copy_deps #pragma omp task in([1]e, [1]e0) out([1]conv) label(conv_for_normest) { conv[0] = abs( e[0] - e0[0]); } } void RT_CORE_tolconv( Quark *quark, Quark_Task_Flags *task_flags, double *tol, double *result, double *tolconv, double *conv) { plasma_context_t *plasma; plasma = plasma_context_self(); #pragma omp target device (smp) copy_deps #pragma omp task in([1]tol, [1]result) out([1]tolconv, [1]conv) label(tolconv_for_normest) { tolconv[0] = tol[0] * result[0]; conv[0] = result[0]; } } void RT_CORE_dgemm_2norm( Quark *quark, Quark_Task_Flags *task_flags, PLASMA_enum transA, PLASMA_enum transB, double alpha, PLASMA_desc A, double *B, int szeB, double beta, double *C, int szeC) { plasma_context_t *plasma; plasma = plasma_context_self(); PLASMA_desc *descB2; PLASMA_Desc_Create( &descB2 , B, PlasmaRealDouble, A.nb, 1, A.nb, A.n, 1, 0, 0, A.n, 1); PLASMA_desc descB = plasma_desc_submatrix(*descB2, 0, 0, A.n, 1); PLASMA_desc *descC2; PLASMA_Desc_Create( &descC2, C, PlasmaRealDouble, A.nb, 1, A.nb, A.n, 1, 0, 0, A.n, 1); PLASMA_desc descC = plasma_desc_submatrix(*descC2, 0, 0, A.n, 1); int nnz, i; int IONE=1; int ISEED[4] = {0,0,0,1}; //double *Cdel = BLKADDR(descC, double,descC.mt-1, descC.nt-1); //double *Bdel = BLKADDR(descB, double, descC.mt-1, descC.nt-1); //#pragma omp task in([descC.nb*descC.nb]Bdel) inout([szeC]C) no_copy_deps label(dgemm_2norm) #pragma omp target device (smp) copy_deps #pragma omp task in([szeB]B) inout([szeC]C) label(dgemm_2norm) RT_CORE_dgemm_2norm1( transA, transB, alpha, A, B, descB, szeB, beta, C, descC, szeC); #pragma omp target device (smp) copy_deps #pragma omp task inout([szeC]C) label(dgemm_2norm) { for(i= 0 ; i < szeC ; i++ ){ if ( C[i] != 0) nnz = nnz + 1; } if ( nnz == 0){ LAPACKE_dlarnv_work(IONE, ISEED, szeC, C); } } //#pragma omp task in([descC.nb*descC.nb]Cdel) inout([szeB]B) no_copy_deps label(dgemm_2norm) #pragma omp target device (smp) copy_deps #pragma omp task in([szeC]C) inout([szeB]B) label(dgemm_2norm) RT_CORE_dgemm_2norm2( transA, transB, alpha, A, C, descC, szeC, beta, B, descB, szeB); } #define Ad(m, n) BLKADDR(A, double, m, n) #define descB(m, n) BLKADDR(descB, double, m, n) #define descC(m, n) BLKADDR(descC, double, m, n) void RT_CORE_dgemm_2norm1( PLASMA_enum transA, PLASMA_enum transB, double alpha, PLASMA_desc A, double *B, PLASMA_desc descB, int szeB, double beta, double *C, PLASMA_desc descC, int szeC) { plasma_context_t *plasma; plasma = 
plasma_context_self(); int ldam, ldak, ldbn, ldbk, ldcm, m, n, k; int tempmm, tempnn, tempkn, tempkm; double zbeta; double zone = (double)1.0; for (m = 0; m < descC.mt; m++) { tempmm = m == descC.mt-1 ? descC.m-m*descC.mb : descC.mb; ldcm = BLKLDD(descC, m); for (n = 0; n < descC.nt; n++) { tempnn = n == descC.nt-1 ? descC.n-n*descC.nb : descC.nb; ldam = BLKLDD(A, m); for (k = 0; k < A.nt; k++) { tempkn = k == A.nt-1 ? A.n-k*A.nb : A.nb; ldbk = BLKLDD(descB, k); zbeta = k == 0 ? beta : zone; CORE_dgemm( PlasmaNoTrans, PlasmaNoTrans, tempmm, tempnn, tempkn, 1.0, Ad(m, k), ldam, descB(k, n), ldbk, zbeta, descC(m, n), ldcm); } } } } void RT_CORE_dgemm_2norm2( PLASMA_enum transA, PLASMA_enum transB, double alpha, PLASMA_desc A, double *C, PLASMA_desc descC, int szeC, double beta, double *B, PLASMA_desc descB, int szeB) { plasma_context_t *plasma; plasma = plasma_context_self(); int ldam, ldak, ldbn, ldbk, ldcm, m, n, k; int tempmm, tempnn, tempkn, tempkm; double zbeta; double zone = (double)1.0; for (m = 0; m < descC.mt; m++) { tempmm = m == descC.mt-1 ? descC.m-m*descC.mb : descC.mb; ldcm = BLKLDD(descC, m); for (n = 0; n < descC.nt; n++) { tempnn = n == descC.nt-1 ? descC.n-n*descC.nb : descC.nb; for (k = 0; k < A.nt; k++) { tempkm = k == A.mt-1 ? A.m-k*A.mb : A.mb; ldak = BLKLDD(A, k); ldbk = BLKLDD(descC, k); zbeta = k == 0 ? beta : zone; CORE_dgemm( PlasmaTrans, PlasmaNoTrans, tempmm, tempnn, tempkm, 1.0, Ad(k, m), ldak, descC(k, n), ldbk, zbeta, descB(m, n), ldcm); } } } }
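/* Context (an assumption, not stated in this file): the tasks above look
   like the building blocks of a power-iteration two-norm estimate in the
   style of MATLAB's normest, i.e. repeat C = A*B, restart C randomly if it
   is all zero, then B = A'*C, stopping once |e - e0| <= tol*e.  A sequential
   sketch of that driver loop, with dgemv_sketch/dnrm2_sketch standing in as
   hypothetical dense helpers: */
#if 0
double dtwonm_sketch(int n, const double *A, double *b, double *c,
                     double tol, int maxiter)
{
  double e = dnrm2_sketch(n, b);
  double e0 = 0.0;
  while (fabs(e - e0) > tol*e && maxiter-- > 0) {
    e0 = e;
    dgemv_sketch('N', n, A, b, c);   /* c = A*b  */
    dgemv_sketch('T', n, A, c, b);   /* b = A'*c */
    e = dnrm2_sketch(n, b)/dnrm2_sketch(n, c);
  }
  return e;
}
#endif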
basic2.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> int main(){ int nthreads, tid; #pragma omp parallel private( nthreads, tid ) { tid = omp_get_thread_num(); printf("Hello World from thread = %d\n", tid); if (tid == 0){ nthreads = omp_get_num_threads(); printf( "Number of threads = %d\n", nthreads ); } } return EXIT_SUCCESS; }
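/* Build and run (illustrative):

     cc -fopenmp basic2.c -o basic2
     OMP_NUM_THREADS=4 ./basic2

   Every thread prints its own "Hello World" line and only thread 0 reports
   the team size.  Because nthreads and tid are declared private, each thread
   works on its own copy; the print order is nondeterministic. */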
constitute.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%         CCCC   OOO   N   N  SSSSS  TTTTT  IIIII  TTTTT  U   U  TTTTT EEEEE  %
%        C      O   O  NN  N  SS       T      I      T    U   U    T   E      %
%        C      O   O  N N N   SSS     T      I      T    U   U    T   EEE    %
%        C      O   O  N  NN     SS    T      I      T    U   U    T   E      %
%         CCCC   OOO   N   N  SSSSS    T    IIIII    T     UUU     T   EEEEE  %
%                                                                             %
%                                                                             %
%                 MagickCore Methods to Constitute an Image                   %
%                                                                             %
%                             Software Design                                 %
%                                  Cristy                                     %
%                               October 1998                                  %
%                                                                             %
%                                                                             %
%  Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.          %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/client.h"
#include "MagickCore/coder-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/constitute-private.h"
#include "MagickCore/delegate.h"
#include "MagickCore/geometry.h"
#include "MagickCore/identify.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/statistic.h"
#include "MagickCore/stream.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o n s t i t u t e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConstituteImage() returns an image from the pixel data you supply.
%  The pixel data must be in scanline order top-to-bottom.  The data can be
%  char, short int, int, float, or double.  Float and double require the
%  pixels to be normalized [0..1], otherwise [0..QuantumRange].  For example,
%  to create a 640x480 image from unsigned red-green-blue character data, use:
%
%      image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception);
%
%  The format of the ConstituteImage method is:
%
%      Image *ConstituteImage(const size_t columns,const size_t rows,
%        const char *map,const StorageType storage,const void *pixels,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o columns: width in pixels of the image.
%
%    o rows: height in pixels of the image.
%
%    o map: This string reflects the expected ordering of the pixel array.
%      It can be any combination or order of R = red, G = green, B = blue,
%      A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
%      Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
%      P = pad.
%
%    o storage: Define the data type of the pixels.  Float and double types
%      are expected to be normalized [0..1] otherwise [0..QuantumRange].
%      Choose from these types: CharPixel, DoublePixel, FloatPixel,
%      IntegerPixel, LongPixel, QuantumPixel, or ShortPixel.
%
%    o pixels: This array of values contains the pixel components as defined
%      by map and type.  You must preallocate this array where the expected
%      length varies depending on the values of width, height, map, and type.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConstituteImage(const size_t columns,const size_t rows,
  const char *map,const StorageType storage,const void *pixels,
  ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    length;

  /*
    Allocate image structure.
  */
  assert(map != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map);
  assert(pixels != (void *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage((ImageInfo *) NULL,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  switch (storage)
  {
    case CharPixel: image->depth=8*sizeof(unsigned char); break;
    case DoublePixel: image->depth=8*sizeof(double); break;
    case FloatPixel: image->depth=8*sizeof(float); break;
    case LongPixel: image->depth=8*sizeof(unsigned long); break;
    case LongLongPixel: image->depth=8*sizeof(MagickSizeType); break;
    case ShortPixel: image->depth=8*sizeof(unsigned short); break;
    default: break;
  }
  length=strlen(map);
  for (i=0; i < (ssize_t) length; i++)
  {
    switch (map[i])
    {
      case 'a':
      case 'A':
      case 'O':
      case 'o':
      {
        image->alpha_trait=BlendPixelTrait;
        break;
      }
      case 'C':
      case 'c':
      case 'm':
      case 'M':
      case 'Y':
      case 'y':
      case 'K':
      case 'k':
      {
        image->colorspace=CMYKColorspace;
        break;
      }
      case 'I':
      case 'i':
      {
        image->colorspace=GRAYColorspace;
        break;
      }
      default:
      {
        if (length == 1)
          image->colorspace=GRAYColorspace;
        break;
      }
    }
  }
  status=SetImageExtent(image,columns,rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,
    exception);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i n g I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PingImage() returns all the properties of an image or image sequence
%  except for the pixels.  It is much faster and consumes far less memory
%  than ReadImage().  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the PingImage method is:
%
%      Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: Ping the image defined by the file or filename members of
%      this structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

static size_t PingStream(const Image *magick_unused(image),
  const void *magick_unused(pixels),const size_t columns)
{
  magick_unreferenced(image);
  magick_unreferenced(pixels);
  return(columns);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

MagickExport Image *PingImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *ping_info;

  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  ping_info=CloneImageInfo(image_info);
  ping_info->ping=MagickTrue;
  image=ReadStream(ping_info,&PingStream,exception);
  if (image != (Image *) NULL)
    {
      ResetTimer(&image->timer);
      if (ping_info->verbose != MagickFalse)
        (void) IdentifyImage(image,stdout,MagickFalse,exception);
    }
  ping_info=DestroyImageInfo(ping_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i n g I m a g e s                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PingImages() pings one or more images and returns them as an image list.
%
%  The format of the PingImages method is:
%
%      Image *PingImages(ImageInfo *image_info,const char *filename,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o filename: the image filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PingImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    ping_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Ping image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  (void) SetImageOption(image_info,"filename",filename);
  (void) CopyMagickString(image_info->filename,filename,MagickPathExtent);
  (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename,
    (int) image_info->scene,ping_filename,exception);
  if (LocaleCompare(ping_filename,image_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].
*/ read_info=CloneImageInfo(image_info); sans=AcquireExceptionInfo(); (void) SetImageInfo(read_info,0,sans); sans=DestroyExceptionInfo(sans); if (read_info->number_scenes == 0) { read_info=DestroyImageInfo(read_info); return(PingImage(image_info,exception)); } (void) CopyMagickString(ping_filename,read_info->filename, MagickPathExtent); images=NewImageList(); extent=(ssize_t) (read_info->scene+read_info->number_scenes); for (scene=(ssize_t) read_info->scene; scene < (ssize_t) extent; scene++) { (void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename, (int) scene,read_info->filename,exception); image=PingImage(read_info,exception); if (image == (Image *) NULL) continue; AppendImageToList(&images,image); } read_info=DestroyImageInfo(read_info); return(images); } return(PingImage(image_info,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadImage() reads an image or image sequence from a file or file handle. % The method returns a NULL if there is a memory shortage or if the image % cannot be read. On failure, a NULL image is returned and exception % describes the reason for the failure. % % The format of the ReadImage method is: % % Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Read the image defined by the file or filename members of % this structure. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType IsCoderAuthorized(const char *coder, const PolicyRights rights,ExceptionInfo *exception) { if (IsRightsAuthorized(CoderPolicyDomain,rights,coder) == MagickFalse) { errno=EPERM; (void) ThrowMagickException(exception,GetMagickModule(),PolicyError, "NotAuthorized","`%s'",coder); return(MagickFalse); } return(MagickTrue); } MagickExport Image *ReadImage(const ImageInfo *image_info, ExceptionInfo *exception) { char filename[MagickPathExtent], magick[MagickPathExtent], magick_filename[MagickPathExtent]; const char *value; const DelegateInfo *delegate_info; const MagickInfo *magick_info; DecodeImageHandler *decoder; ExceptionInfo *sans_exception; GeometryInfo geometry_info; Image *image, *next; ImageInfo *read_info; MagickBooleanType status; MagickStatusType flags; /* Determine image type from filename prefix or suffix (e.g. image.jpg). */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image_info->filename != (char *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); read_info=CloneImageInfo(image_info); (void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent); (void) SetImageInfo(read_info,0,exception); (void) CopyMagickString(filename,read_info->filename,MagickPathExtent); (void) CopyMagickString(magick,read_info->magick,MagickPathExtent); /* Call appropriate image reader based on image type. 
*/ sans_exception=AcquireExceptionInfo(); magick_info=GetMagickInfo(read_info->magick,sans_exception); if (sans_exception->severity == PolicyError) InheritException(exception,sans_exception); sans_exception=DestroyExceptionInfo(sans_exception); if (magick_info != (const MagickInfo *) NULL) { if (GetMagickEndianSupport(magick_info) == MagickFalse) read_info->endian=UndefinedEndian; else if ((image_info->endian == UndefinedEndian) && (GetMagickRawSupport(magick_info) != MagickFalse)) { unsigned long lsb_first; lsb_first=1; read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian; } } if ((magick_info != (const MagickInfo *) NULL) && (GetMagickDecoderSeekableStream(magick_info) != MagickFalse)) { image=AcquireImage(read_info,exception); (void) CopyMagickString(image->filename,read_info->filename, MagickPathExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { read_info=DestroyImageInfo(read_info); image=DestroyImage(image); return((Image *) NULL); } if (IsBlobSeekable(image) == MagickFalse) { /* Coder requires a seekable stream. */ *read_info->filename='\0'; status=ImageToFile(image,read_info->filename,exception); if (status == MagickFalse) { (void) CloseBlob(image); read_info=DestroyImageInfo(read_info); image=DestroyImage(image); return((Image *) NULL); } read_info->temporary=MagickTrue; } (void) CloseBlob(image); image=DestroyImage(image); } image=NewImageList(); decoder=GetImageDecoder(magick_info); if (decoder == (DecodeImageHandler *) NULL) { delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { (void) SetImageInfo(read_info,0,exception); (void) CopyMagickString(read_info->filename,filename, MagickPathExtent); magick_info=GetMagickInfo(read_info->magick,exception); decoder=GetImageDecoder(magick_info); } } if (decoder != (DecodeImageHandler *) NULL) { /* Call appropriate image reader based on image type. */ if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception); image=(Image *) NULL; if (status != MagickFalse) image=decoder(read_info,exception); if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } else { delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'", read_info->magick); if (read_info->temporary != MagickFalse) (void) RelinquishUniqueFileResource(read_info->filename); read_info=DestroyImageInfo(read_info); return((Image *) NULL); } /* Let our decoding delegate process the image. 
*/ image=AcquireImage(read_info,exception); if (image == (Image *) NULL) { read_info=DestroyImageInfo(read_info); return((Image *) NULL); } (void) CopyMagickString(image->filename,read_info->filename, MagickPathExtent); *read_info->filename='\0'; if (GetDelegateThreadSupport(delegate_info) == MagickFalse) LockSemaphoreInfo(delegate_info->semaphore); status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL, exception); if (GetDelegateThreadSupport(delegate_info) == MagickFalse) UnlockSemaphoreInfo(delegate_info->semaphore); image=DestroyImageList(image); read_info->temporary=MagickTrue; if (status != MagickFalse) (void) SetImageInfo(read_info,0,exception); magick_info=GetMagickInfo(read_info->magick,exception); decoder=GetImageDecoder(magick_info); if (decoder == (DecodeImageHandler *) NULL) { if (IsPathAccessible(read_info->filename) != MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'", read_info->magick); else ThrowFileException(exception,FileOpenError,"UnableToOpenFile", read_info->filename); read_info=DestroyImageInfo(read_info); return((Image *) NULL); } /* Call appropriate image reader based on image type. */ if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception); image=(Image *) NULL; if (status != MagickFalse) image=(decoder)(read_info,exception); if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } if (read_info->temporary != MagickFalse) { (void) RelinquishUniqueFileResource(read_info->filename); read_info->temporary=MagickFalse; if (image != (Image *) NULL) (void) CopyMagickString(image->filename,filename,MagickPathExtent); } if (image == (Image *) NULL) { read_info=DestroyImageInfo(read_info); return(image); } if (exception->severity >= ErrorException) (void) LogMagickEvent(ExceptionEvent,GetMagickModule(), "Coder (%s) generated an image despite an error (%d), " "notify the developers",image->magick,exception->severity); if (IsBlobTemporary(image) != MagickFalse) (void) RelinquishUniqueFileResource(read_info->filename); if ((IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse) && (GetImageListLength(image) != 1)) { Image *clones; clones=CloneImages(image,read_info->scenes,exception); if (clones != (Image *) NULL) { image=DestroyImageList(image); image=GetFirstImageInList(clones); } } for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { char magick_path[MagickPathExtent], *property, timestamp[MagickTimeExtent]; const char *option; const StringInfo *profile; ssize_t option_type; static const char *source_date_epoch = (const char *) NULL; static MagickBooleanType epoch_initalized = MagickFalse; next->taint=MagickFalse; GetPathComponent(magick_filename,MagickPath,magick_path); if ((*magick_path == '\0') && (*next->magick == '\0')) (void) CopyMagickString(next->magick,magick,MagickPathExtent); (void) CopyMagickString(next->magick_filename,magick_filename, MagickPathExtent); if (IsBlobTemporary(image) != MagickFalse) (void) CopyMagickString(next->filename,filename,MagickPathExtent); if (next->magick_columns == 0) next->magick_columns=next->columns; if (next->magick_rows == 0) next->magick_rows=next->rows; (void) GetImageProperty(next,"exif:*",exception); (void) GetImageProperty(next,"icc:*",exception); (void) GetImageProperty(next,"iptc:*",exception); (void) 
GetImageProperty(next,"xmp:*",exception); value=GetImageProperty(next,"exif:Orientation",exception); if (value == (char *) NULL) value=GetImageProperty(next,"tiff:Orientation",exception); if (value != (char *) NULL) { next->orientation=(OrientationType) StringToLong(value); (void) DeleteImageProperty(next,"tiff:Orientation"); (void) DeleteImageProperty(next,"exif:Orientation"); } value=GetImageProperty(next,"exif:XResolution",exception); if (value != (char *) NULL) { geometry_info.rho=next->resolution.x; geometry_info.sigma=1.0; flags=ParseGeometry(value,&geometry_info); if (geometry_info.sigma != 0) next->resolution.x=geometry_info.rho/geometry_info.sigma; if (strchr(value,',') != (char *) NULL) next->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0; (void) DeleteImageProperty(next,"exif:XResolution"); } value=GetImageProperty(next,"exif:YResolution",exception); if (value != (char *) NULL) { geometry_info.rho=next->resolution.y; geometry_info.sigma=1.0; flags=ParseGeometry(value,&geometry_info); if (geometry_info.sigma != 0) next->resolution.y=geometry_info.rho/geometry_info.sigma; if (strchr(value,',') != (char *) NULL) next->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0; (void) DeleteImageProperty(next,"exif:YResolution"); } value=GetImageProperty(next,"exif:ResolutionUnit",exception); if (value == (char *) NULL) value=GetImageProperty(next,"tiff:ResolutionUnit",exception); if (value != (char *) NULL) { option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse, value); if (option_type >= 0) next->units=(ResolutionType) option_type; (void) DeleteImageProperty(next,"exif:ResolutionUnit"); (void) DeleteImageProperty(next,"tiff:ResolutionUnit"); } if (next->page.width == 0) next->page.width=next->columns; if (next->page.height == 0) next->page.height=next->rows; option=GetImageOption(read_info,"caption"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"caption",property,exception); property=DestroyString(property); } option=GetImageOption(read_info,"comment"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"comment",property,exception); property=DestroyString(property); } option=GetImageOption(read_info,"label"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"label",property,exception); property=DestroyString(property); } if (LocaleCompare(next->magick,"TEXT") == 0) (void) ParseAbsoluteGeometry("0x0+0+0",&next->page); if ((read_info->extract != (char *) NULL) && (read_info->stream == (StreamHandler) NULL)) { RectangleInfo geometry; SetGeometry(next,&geometry); flags=ParseAbsoluteGeometry(read_info->extract,&geometry); if ((next->columns != geometry.width) || (next->rows != geometry.height)) { if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { Image *crop_image; crop_image=CropImage(next,&geometry,exception); if (crop_image != (Image *) NULL) ReplaceImageInList(&next,crop_image); } else if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0)) { Image *size_image; flags=ParseRegionGeometry(next,read_info->extract,&geometry, exception); size_image=ResizeImage(next,geometry.width,geometry.height, next->filter,exception); if (size_image != (Image *) NULL) ReplaceImageInList(&next,size_image); } } } profile=GetImageProfile(next,"icc"); if (profile == (const StringInfo *) NULL) 
profile=GetImageProfile(next,"icm"); profile=GetImageProfile(next,"iptc"); if (profile == (const StringInfo *) NULL) profile=GetImageProfile(next,"8bim"); if (epoch_initalized == MagickFalse) { source_date_epoch=getenv("SOURCE_DATE_EPOCH"); epoch_initalized=MagickTrue; } if (source_date_epoch == (const char *) NULL) { (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime, sizeof(timestamp),timestamp); (void) SetImageProperty(next,"date:modify",timestamp,exception); (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime, sizeof(timestamp),timestamp); (void) SetImageProperty(next,"date:create",timestamp,exception); } option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if (next->delay > (size_t) floor(geometry_info.rho+0.5)) next->delay=(size_t) floor(geometry_info.rho+0.5); } else if ((flags & LessValue) != 0) { if (next->delay < (size_t) floor(geometry_info.rho+0.5)) next->ticks_per_second=CastDoubleToLong(floor( geometry_info.sigma+0.5)); } else next->delay=(size_t) floor(geometry_info.rho+0.5); if ((flags & SigmaValue) != 0) next->ticks_per_second=CastDoubleToLong(floor( geometry_info.sigma+0.5)); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) { option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse, option); if (option_type >= 0) next->dispose=(DisposeType) option_type; } if (read_info->verbose != MagickFalse) (void) IdentifyImage(next,stderr,MagickFalse,exception); image=next; } read_info=DestroyImageInfo(read_info); if (GetBlobError(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadImages() reads one or more images and returns them as an image list. % % The format of the ReadImage method is: % % Image *ReadImages(ImageInfo *image_info,const char *filename, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o filename: the image filename. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename, ExceptionInfo *exception) { char read_filename[MagickPathExtent]; Image *image, *images; ImageInfo *read_info; /* Read image list from a file. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); read_info=CloneImageInfo(image_info); *read_info->magick='\0'; (void) SetImageOption(read_info,"filename",filename); (void) CopyMagickString(read_info->filename,filename,MagickPathExtent); (void) InterpretImageFilename(read_info,(Image *) NULL,filename, (int) read_info->scene,read_filename,exception); if (LocaleCompare(read_filename,read_info->filename) != 0) { ExceptionInfo *sans; ssize_t extent, scene; /* Images of the form image-%d.png[1-5]. 
      */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes != 0)
        {
          (void) CopyMagickString(read_filename,read_info->filename,
            MagickPathExtent);
          images=NewImageList();
          extent=(ssize_t) (read_info->scene+read_info->number_scenes);
          scene=(ssize_t) read_info->scene;
          for ( ; scene < (ssize_t) extent; scene++)
          {
            (void) InterpretImageFilename(image_info,(Image *) NULL,
              read_filename,(int) scene,read_info->filename,exception);
            image=ReadImage(read_info,exception);
            if (image == (Image *) NULL)
              continue;
            AppendImageToList(&images,image);
          }
          read_info=DestroyImageInfo(read_info);
          return(images);
        }
    }
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  image=ReadImage(read_info,exception);
  read_info=DestroyImageInfo(read_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d I n l i n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadInlineImage() reads a Base64-encoded inline image or image sequence.
%  The method returns a NULL if there is a memory shortage or if the image
%  cannot be read.  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the ReadInlineImage method is:
%
%      Image *ReadInlineImage(const ImageInfo *image_info,const char *content,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o content: the image encoded in Base64.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadInlineImage(const ImageInfo *image_info,
  const char *content,ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *read_info;

  unsigned char
    *blob;

  size_t
    length;

  const char
    *p;

  /*
    Skip over header (e.g. data:image/gif;base64,).
  */
  image=NewImageList();
  for (p=content; (*p != ',') && (*p != '\0'); p++) ;
  if (*p == '\0')
    ThrowReaderException(CorruptImageError,"CorruptImage");
  blob=Base64Decode(++p,&length);
  if (length == 0)
    {
      blob=(unsigned char *) RelinquishMagickMemory(blob);
      ThrowReaderException(CorruptImageError,"CorruptImage");
    }
  read_info=CloneImageInfo(image_info);
  (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
    (void *) NULL);
  *read_info->filename='\0';
  *read_info->magick='\0';
  for (p=content; (*p != '/') && (*p != '\0'); p++) ;
  if (*p != '\0')
    {
      char
        *q;

      ssize_t
        i;

      /*
        Extract media type.
      */
      if (LocaleNCompare(++p,"x-",2) == 0)
        p+=2;
      (void) strcpy(read_info->filename,"data.");
      q=read_info->filename+5;
      for (i=0; (*p != ';') && (*p != '\0') && (i < (MagickPathExtent-6)); i++)
        *q++=(*p++);
      *q++='\0';
    }
  image=BlobToImage(read_info,blob,length,exception);
  blob=(unsigned char *) RelinquishMagickMemory(blob);
  read_info=DestroyImageInfo(read_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteImage() writes an image or an image sequence to a file or file handle.
%  When writing to a file on disk, the name is defined by the filename member
%  of the image structure.  WriteImage() returns MagickFalse if there is a
%  memory shortage or if the image cannot be written.  Check the exception
%  member of image to determine the cause for any failure.
% % The format of the WriteImage method is: % % MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { char filename[MagickPathExtent]; const char *option; const DelegateInfo *delegate_info; const MagickInfo *magick_info; EncodeImageHandler *encoder; ExceptionInfo *sans_exception; ImageInfo *write_info; MagickBooleanType status, temporary; /* Determine image type from filename prefix or suffix (e.g. image.jpg). */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); sans_exception=AcquireExceptionInfo(); write_info=CloneImageInfo(image_info); (void) CopyMagickString(write_info->filename,image->filename, MagickPathExtent); (void) SetImageInfo(write_info,1,sans_exception); if (*write_info->magick == '\0') (void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent); (void) CopyMagickString(filename,image->filename,MagickPathExtent); (void) CopyMagickString(image->filename,write_info->filename, MagickPathExtent); /* Call appropriate image writer based on image type. */ magick_info=GetMagickInfo(write_info->magick,sans_exception); if (sans_exception->severity == PolicyError) magick_info=GetMagickInfo(write_info->magick,exception); sans_exception=DestroyExceptionInfo(sans_exception); if (magick_info != (const MagickInfo *) NULL) { if (GetMagickEndianSupport(magick_info) == MagickFalse) image->endian=UndefinedEndian; else if ((image_info->endian == UndefinedEndian) && (GetMagickRawSupport(magick_info) != MagickFalse)) { unsigned long lsb_first; lsb_first=1; image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian; } } (void) SyncImageProfiles(image); DisassociateImageStream(image); option=GetImageOption(image_info,"delegate:bimodal"); if ((IsStringTrue(option) != MagickFalse) && (write_info->page == (char *) NULL) && (GetPreviousImageInList(image) == (Image *) NULL) && (GetNextImageInList(image) == (Image *) NULL) && (IsTaintImage(image) == MagickFalse) ) { delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception); if ((delegate_info != (const DelegateInfo *) NULL) && (GetDelegateMode(delegate_info) == 0) && (IsPathAccessible(image->magick_filename) != MagickFalse)) { /* Process image with bi-modal delegate. 
*/ (void) CopyMagickString(image->filename,image->magick_filename, MagickPathExtent); status=InvokeDelegate(write_info,image,image->magick, write_info->magick,exception); write_info=DestroyImageInfo(write_info); (void) CopyMagickString(image->filename,filename,MagickPathExtent); return(status); } } status=MagickFalse; temporary=MagickFalse; if ((magick_info != (const MagickInfo *) NULL) && (GetMagickEncoderSeekableStream(magick_info) != MagickFalse)) { char image_filename[MagickPathExtent]; (void) CopyMagickString(image_filename,image->filename,MagickPathExtent); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); (void) CopyMagickString(image->filename, image_filename,MagickPathExtent); if (status != MagickFalse) { if (IsBlobSeekable(image) == MagickFalse) { /* A seekable stream is required by the encoder. */ write_info->adjoin=MagickTrue; (void) CopyMagickString(write_info->filename,image->filename, MagickPathExtent); (void) AcquireUniqueFilename(image->filename); temporary=MagickTrue; } (void) CloseBlob(image); } } encoder=GetImageEncoder(magick_info); if (encoder != (EncodeImageHandler *) NULL) { /* Call appropriate image writer based on image type. */ if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(write_info->magick,WritePolicyRights,exception); if (status != MagickFalse) status=encoder(write_info,image,exception); if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } else { delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception); if (delegate_info != (DelegateInfo *) NULL) { /* Process the image with delegate. */ *write_info->filename='\0'; if (GetDelegateThreadSupport(delegate_info) == MagickFalse) LockSemaphoreInfo(delegate_info->semaphore); status=InvokeDelegate(write_info,image,(char *) NULL, write_info->magick,exception); if (GetDelegateThreadSupport(delegate_info) == MagickFalse) UnlockSemaphoreInfo(delegate_info->semaphore); (void) CopyMagickString(image->filename,filename,MagickPathExtent); } else { sans_exception=AcquireExceptionInfo(); magick_info=GetMagickInfo(write_info->magick,sans_exception); if (sans_exception->severity == PolicyError) magick_info=GetMagickInfo(write_info->magick,exception); sans_exception=DestroyExceptionInfo(sans_exception); if ((write_info->affirm == MagickFalse) && (magick_info == (const MagickInfo *) NULL)) { (void) CopyMagickString(write_info->magick,image->magick, MagickPathExtent); magick_info=GetMagickInfo(write_info->magick,exception); } encoder=GetImageEncoder(magick_info); if (encoder == (EncodeImageHandler *) NULL) { char extension[MagickPathExtent]; GetPathComponent(image->filename,ExtensionPath,extension); if (*extension != '\0') magick_info=GetMagickInfo(extension,exception); else magick_info=GetMagickInfo(image->magick,exception); (void) CopyMagickString(image->filename,filename, MagickPathExtent); encoder=GetImageEncoder(magick_info); } if (encoder == (EncodeImageHandler *) NULL) { magick_info=GetMagickInfo(image->magick,exception); encoder=GetImageEncoder(magick_info); if (encoder == (EncodeImageHandler *) NULL) (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoEncodeDelegateForThisImageFormat", "`%s'",write_info->magick); } if (encoder != (EncodeImageHandler *) NULL) { /* Call appropriate image writer based on image type. 
*/ if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(write_info->magick,WritePolicyRights, exception); if (status != MagickFalse) status=encoder(write_info,image,exception); if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } } } if (temporary != MagickFalse) { /* Copy temporary image file to permanent. */ status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception); if (status != MagickFalse) { (void) RelinquishUniqueFileResource(write_info->filename); status=ImageToFile(image,write_info->filename,exception); } (void) CloseBlob(image); (void) RelinquishUniqueFileResource(image->filename); (void) CopyMagickString(image->filename,write_info->filename, MagickPathExtent); } if ((LocaleCompare(write_info->magick,"info") != 0) && (write_info->verbose != MagickFalse)) (void) IdentifyImage(image,stdout,MagickFalse,exception); write_info=DestroyImageInfo(write_info); if (GetBlobError(image) != MagickFalse) ThrowWriterException(FileOpenError,"UnableToWriteFile"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteImages() writes an image sequence into one or more files. While % WriteImage() can write an image sequence, it is limited to writing % the sequence into a single file using a format which supports multiple % frames. WriteImages(), however, does not have this limitation, instead it % generates multiple output files if necessary (or when requested). When % ImageInfo's adjoin flag is set to MagickFalse, the file name is expected % to include a printf-style formatting string for the frame number (e.g. % "image%02d.png"). % % The format of the WriteImages method is: % % MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images, % const char *filename,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o images: the image list. % % o filename: the image filename. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info, Image *images,const char *filename,ExceptionInfo *exception) { #define WriteImageTag "Write/Image" ExceptionInfo *sans_exception; ImageInfo *write_info; MagickBooleanType proceed; MagickOffsetType progress; MagickProgressMonitor progress_monitor; MagickSizeType number_images; MagickStatusType status; Image *p; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); write_info=CloneImageInfo(image_info); *write_info->magick='\0'; images=GetFirstImageInList(images); if (filename != (const char *) NULL) for (p=images; p != (Image *) NULL; p=GetNextImageInList(p)) (void) CopyMagickString(p->filename,filename,MagickPathExtent); (void) CopyMagickString(write_info->filename,images->filename, MagickPathExtent); sans_exception=AcquireExceptionInfo(); (void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images), sans_exception); sans_exception=DestroyExceptionInfo(sans_exception); if (*write_info->magick == '\0') (void) CopyMagickString(write_info->magick,images->magick,MagickPathExtent); p=images; for ( ; GetNextImageInList(p) != (Image *) NULL; p=GetNextImageInList(p)) { Image *next; next=GetNextImageInList(p); if (next == (Image *) NULL) break; if (p->scene >= next->scene) { ssize_t i; /* Generate consistent scene numbers. */ i=(ssize_t) images->scene; for (p=images; p != (Image *) NULL; p=GetNextImageInList(p)) p->scene=(size_t) i++; break; } } /* Write images. */ status=MagickTrue; progress_monitor=(MagickProgressMonitor) NULL; progress=0; number_images=GetImageListLength(images); for (p=images; p != (Image *) NULL; p=GetNextImageInList(p)) { if (number_images != 1) progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL, p->client_data); status&=WriteImage(write_info,p,exception); if (number_images != 1) (void) SetImageProgressMonitor(p,progress_monitor,p->client_data); if (write_info->adjoin != MagickFalse) break; if (number_images != 1) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(p,WriteImageTag,progress,number_images); if (proceed == MagickFalse) break; } } write_info=DestroyImageInfo(write_info); return(status != 0 ? MagickTrue : MagickFalse); }
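/*
  Editor's usage sketch (not part of MagickCore; guarded out of any real build
  by the hypothetical MAGICKCORE_USAGE_SKETCH macro).  It shows how the
  ReadImages() and WriteImages() entry points above compose: a scene range in
  a printf-style filename such as image-%d.png[1-5] makes ReadImages() expand
  the pattern as its documentation describes, and WriteImages() then writes
  the resulting list, adjoined into a single file when the format supports
  multiple frames.  The file names are illustrative assumptions.
*/
#ifdef MAGICKCORE_USAGE_SKETCH
#include <MagickCore/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *images;

  ImageInfo
    *image_info;

  (void) argc;
  MagickCoreGenesis(argv[0],MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  images=ReadImages(image_info,"image-%d.png[1-5]",exception);
  if (images == (Image *) NULL)
    CatchException(exception);
  else
    {
      if (WriteImages(image_info,images,"animation.gif",exception) == MagickFalse)
        CatchException(exception);
      images=DestroyImageList(images);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}
#endif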
concom.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <sys/time.h> #include "app-desc.h" #include "bots.h" // bots_arg_size == Number of nodes // bots_arg_size_1 == Maximum number of neighbors per node // bots_arg_size_2 == Number of links in the entire graph node *nodes; int *visited, *components; // Checks to see if two nodes can be linked int linkable(int N1, int N2) { int i; if (N1 == N2) return (0); if (nodes[N1].n >= bots_arg_size_1) return (0); if (nodes[N2].n >= bots_arg_size_1) return (0); for (i = 0; i < nodes[N1].n; i++) if (nodes[N1].neighbor[i] == N2) return (0); return (1); } // Allocates and creates a graph with random links between nodes // also allocates visited and components vectors void initialize() { int i, l1, l2, N1, N2; double RN; nodes = (node *) malloc(bots_arg_size * sizeof(node)); visited = (int *) malloc(bots_arg_size * sizeof(int)); components = (int *) malloc(bots_arg_size * sizeof(int)); /* initialize nodes */ for (i = 0; i < bots_arg_size; i++) { nodes[i].n = 0; nodes[i].neighbor = (int *) malloc(bots_arg_size_1 * sizeof(int)); } /* for each link, generate end nodes and link */ for (i = 0; i < bots_arg_size_2; i++) { RN = rand() / (double) RAND_MAX; N1 = (int) ((bots_arg_size-1) * RN); RN = rand() / (double) RAND_MAX; N2 = (int) ((bots_arg_size-1) * RN); if (linkable(N1, N2)) { l1 = nodes[N1].n; l2 = nodes[N2].n; nodes[N1].neighbor[l1] = N2; nodes[N2].neighbor[l2] = N1; nodes[N1].n += 1; nodes[N2].n += 1; } } } // Writes the number of CCs void write_outputs(int n, int cc) { int i; printf("Graph %d, Number of components %d\n", n, cc); if (bots_verbose_mode) for (i = 0; i < cc; i++) printf("Component %d Size: %d\n", i, components[i]); } // Marks a node and all its neighbors as part of the CC void CC_par (int i, int cc) { int j, n; /* if node has not been visited */ if (visited[i] == 0) { /* add node to current component */ if (bots_verbose_mode) printf("Adding node %d to component %d\n", i, cc); #pragma omp critical { visited[i] = 1; components[cc]++; } /* add each neighbor's subtree to the current component */ for (j = 0; j < nodes[i].n; j++) { n = nodes[i].neighbor[j]; #pragma omp task firstprivate (i,cc) {CC_par(n, cc);} } #pragma omp taskwait } } void CC_seq (int i, int cc) { int j, n; /* if node has not been visited */ if (visited[i] == 0) { /* add node to current component */ if 
(bots_verbose_mode) printf("Adding node %d to component %d\n", i, cc); { visited[i] = 1; components[cc]++; } /* add each neighbor's subtree to the current component */ for (j = 0; j < nodes[i].n; j++) { n = nodes[i].neighbor[j]; CC_seq(n, cc); } } } void cc_init() { int i; /* initialize global data structures */ for (i = 0; i < bots_arg_size; i++) { visited[i] = 0; components[i] = 0; } } void cc_par(int *cc) { int i; *cc = 0; /* for all nodes ... unvisited nodes start a new component */ #pragma omp parallel #pragma omp single #pragma omp task for (i = 0; i < bots_arg_size; i++) { if (visited[i] == 0) { #pragma omp task firstprivate (i,cc) {CC_par(i, *cc);} #pragma omp taskwait (*cc)++; } } } void cc_seq(int *cc) { int i; (*cc) = 0; /* for all nodes ... unvisited nodes start a new component */ for (i = 0; i < bots_arg_size; i++) { if (visited[i] == 0) { CC_seq(i, *cc); (*cc)++; } } } int cc_check(int ccs, int ccp) { if (bots_verbose_mode) fprintf(stdout, "Sequential = %d CC, Parallel = %d CC\n", ccs, ccp); if (ccs == ccp) return BOTS_RESULT_SUCCESSFUL; else return BOTS_RESULT_UNSUCCESSFUL; }
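/* Editor's note: a minimal driver sketch, not part of the BOTS sources (the
 * benchmark harness normally wires these routines up through app-desc.h).
 * It shows the intended call order of the functions above: build the random
 * graph once, then reset the visited/components vectors before each traversal
 * so the sequential and task-parallel component counts can be compared with
 * cc_check().  The guard macro is an assumption, kept undefined so the sketch
 * stays out of the real build. */
#ifdef CONCOM_STANDALONE_SKETCH
int concom_demo(void)
{
   int ccs, ccp;
   initialize();   /* allocate nodes/visited/components and add random links */
   cc_init();      /* clear visited[] and components[] */
   cc_seq(&ccs);   /* sequential component count */
   cc_init();      /* reset before the parallel run */
   cc_par(&ccp);   /* task-parallel component count */
   write_outputs(0, ccp);
   return cc_check(ccs, ccp);
}
#endif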
IteratorTestHelper.h
/** * @file IteratorTestHelper.h * @author F. Gratl * @date 05.03.21 */ #pragma once #include <gtest/gtest.h> #include <array> #include <vector> #include "autopas/options/IteratorBehavior.h" #include "autopas/utils/ArrayMath.h" #include "autopas/utils/WrapOpenMP.h" #include "autopas/utils/inBox.h" #include "testingHelpers/commonTypedefs.h" namespace IteratorTestHelper { /** * Inserts particles around all corners of the given AutoPas object at critical distances. * @tparam AutoPasT * @param autoPas * @param boxOfInterestMin * @param boxOfInterestMax * @return Tuple of four vectors containing IDs of added particles. * 1. All owned particles * 2. All halo particles. * 3. All owned particles in the box of interest. * 3. All halo particles in the box of interest. */ template <class AutoPasT> auto fillContainerAroundBoundary(AutoPasT &autoPas, std::array<double, 3> boxOfInterestMin, std::array<double, 3> boxOfInterestMax) { constexpr size_t numParticles1dTotal = 10; auto cutoff = autoPas.getCutoff(); auto skin = autoPas.getVerletSkin(); // generator function for critical coordinates (along one dimension) auto generateInteresting1DPositions = [&](double min, double max) -> auto { // ensure that all particles are at most skin away from halo! // interesting cases are: // - outside of the halo by skin // - edge of halo // - in the halo // - edge of actual domain // - just inside the domain return std::array<double, numParticles1dTotal>{min - cutoff - skin + 1e-10, min - cutoff, min - skin / 4, min, min + skin / 4, max - skin / 4, max, max + skin / 4, max + cutoff, max + cutoff + skin - 1e-10}; }; // fill container size_t id = 0; auto boxMin = autoPas.getBoxMin(); auto boxMax = autoPas.getBoxMax(); std::vector<size_t> particleIDsInterestHalo; std::vector<size_t> particleIDsInterestOwned; std::vector<size_t> particleIDsHalo; std::vector<size_t> particleIDsOwned; for (auto x : generateInteresting1DPositions(boxMin[0], boxMax[0])) { for (auto y : generateInteresting1DPositions(boxMin[1], boxMax[1])) { for (auto z : generateInteresting1DPositions(boxMin[2], boxMax[2])) { std::array<double, 3> pos{x, y, z}; Molecule p(pos, {0., 0., 0.}, id++, 0); // add the particle as actual or halo particle if (autopas::utils::inBox(pos, boxMin, boxMax)) { autoPas.addParticle(p); particleIDsOwned.push_back(p.getID()); if (autopas::utils::inBox(pos, boxOfInterestMin, boxOfInterestMax)) { particleIDsInterestOwned.push_back(p.getID()); } } else { // AutoPas should set the ownership state of this particle to halo autoPas.addOrUpdateHaloParticle(p); particleIDsHalo.push_back(p.getID()); if (autopas::utils::inBox(pos, boxOfInterestMin, boxOfInterestMax)) { particleIDsInterestHalo.push_back(p.getID()); } } } } } // sanity check. Can not use ASSERT_EQ because this introduces a different return. EXPECT_EQ(particleIDsOwned.size() + particleIDsHalo.size(), numParticles1dTotal * numParticles1dTotal * numParticles1dTotal); // getNumberOfParticles works via counters in the logic handler EXPECT_EQ(autoPas.getNumberOfParticles(autopas::IteratorBehavior::owned), particleIDsOwned.size()); EXPECT_EQ(autoPas.getNumberOfParticles(autopas::IteratorBehavior::halo), particleIDsHalo.size()); return std::make_tuple(particleIDsOwned, particleIDsHalo, particleIDsInterestOwned, particleIDsInterestHalo); } /** * Inserts particles around all corners of the given AutoPas object at critical distances. * @tparam AutoPasT * @param autoPas * @return Tuple of two vectors containing IDs of added particles. First for owned, second for halo particles. 
*/ template <class AutoPasT> auto fillContainerAroundBoundary(AutoPasT &autoPas) { std::array<double, 3> numericLimitMin{std::numeric_limits<double>::min(), std::numeric_limits<double>::min(), std::numeric_limits<double>::min()}; std::array<double, 3> numericLimitMax{std::numeric_limits<double>::max(), std::numeric_limits<double>::max(), std::numeric_limits<double>::max()}; return fillContainerAroundBoundary(autoPas, numericLimitMin, numericLimitMax); } /** * Creates a grid of particles in the given AutoPas object. * Grid width is `sparsity * ((cutoff + skin) * cellSizeFactor)`. * E.g., for a sparsity of 1, one particle is inserted in every cell; for a sparsity of .5, eight particles are inserted per cell (two per dimension). * The lower corner of the grid is offset from boxMin by half the grid width in every dimension. * This way there should be one particle in every third Linked Cells cell. * @tparam AutoPasT * @param autoPas * @param sparsity * @return Vector of all particle IDs added. */ template <class AutoPasT> auto fillContainerWithGrid(AutoPasT &autoPas, double sparsity) { auto cutoff = autoPas.getCutoff(); auto skin = autoPas.getVerletSkin(); auto cellSizeFactor = *(autoPas.getAllowedCellSizeFactors().getAll().begin()); auto boxLength = autopas::utils::ArrayMath::sub(autoPas.getBoxMax(), autoPas.getBoxMin()); auto gridWidth1D = (cutoff + skin) * cellSizeFactor; auto gridEdgesPerDim = autopas::utils::ArrayMath::mulScalar(boxLength, 1 / gridWidth1D); auto gridWidth3D = autopas::utils::ArrayMath::div(boxLength, gridEdgesPerDim); size_t id = 0; std::vector<size_t> particleIDs; for (double x = gridWidth3D[0] / 2; x < boxLength[0]; x += sparsity * gridWidth3D[0]) { for (double y = gridWidth3D[1] / 2; y < boxLength[1]; y += sparsity * gridWidth3D[1]) { for (double z = gridWidth3D[2] / 2; z < boxLength[2]; z += sparsity * gridWidth3D[2]) { std::array<double, 3> pos{x, y, z}; Molecule p(pos, {0., 0., 0.}, id++, 0); autoPas.addParticle(p); particleIDs.push_back(p.getID()); } } } return particleIDs; } template <class AutoPasT> auto getHaloBoxMinMax(AutoPasT &autoPas) { const auto interactionLength = autoPas.getCutoff() + autoPas.getVerletSkin(); // halo has width of interactionLength const auto haloBoxMin = autopas::utils::ArrayMath::subScalar(autoPas.getBoxMin(), interactionLength); const auto haloBoxMax = autopas::utils::ArrayMath::addScalar(autoPas.getBoxMax(), interactionLength); return std::make_tuple(haloBoxMin, haloBoxMax); } /** * Creates a function to instantiate an iterator with the given properties and passes this function on to fun. * The iterator always covers the whole domain and, if necessary, the halo. * This is necessary so that fun can decide for itself if it wants iterators to be created in an OpenMP region or not. * @tparam AutoPasT * @tparam F f(AutoPas, Iterator) * @param useRegionIterator * @param useConstIterator * @param behavior * @param autoPas * @param fun Function taking the AutoPas object and the generated iterator.
*/ template <bool useConstIterator, class AutoPasT, class F> void provideIterator(AutoPasT &autoPas, autopas::IteratorBehavior behavior, bool useRegionIterator, F fun) { if (useRegionIterator) { std::array<double, 3> haloBoxMin, haloBoxMax; std::tie(haloBoxMin, haloBoxMax) = getHaloBoxMinMax(autoPas); if constexpr (useConstIterator) { const auto &autoPasRef = autoPas; auto getIter = [&]() -> typename AutoPasT::const_iterator_t { return autoPasRef.getRegionIterator(haloBoxMin, haloBoxMax, behavior); }; fun(autoPasRef, getIter); } else { auto getIter = [&]() -> typename AutoPasT::iterator_t { return autoPas.getRegionIterator(haloBoxMin, haloBoxMax, behavior); }; fun(autoPas, getIter); } } else { if constexpr (useConstIterator) { auto getIter = [&]() -> typename AutoPasT::const_iterator_t { return autoPas.cbegin(behavior); }; fun(autoPas, getIter); } else { auto getIter = [&]() -> typename AutoPasT::iterator_t { return autoPas.begin(behavior); }; fun(autoPas, getIter); } } } /** * Same as provideIterator() but `useConstIterator` is a run-time variable. * @tparam useConstIterator * @tparam AutoPasT * @tparam F f(AutoPas, Iterator) * @param useRegionIterator * @param behavior * @param autoPas * @param fun Function taking the AutoPas object and the generated iterator. */ template <class AutoPasT, class F> void provideIterator(bool useConstIterator, AutoPasT &autoPas, autopas::IteratorBehavior behavior, bool useRegionIterator, F fun) { if (useConstIterator) { provideIterator<true>(autoPas, behavior, useRegionIterator, fun); } else { provideIterator<false>(autoPas, behavior, useRegionIterator, fun); } } /** * Creates a function to instantiate a region-iterator with the given properties and passes this function on to fun. * This is necessary so that fun can decide for itself if it wants iterators to be created in an OpenMP region or not. * @tparam useConstIterator * @tparam AutoPasT * @tparam F f(AutoPas, Iterator) * @param autoPas * @param behavior * @param boxMin * @param boxMax * @param fun Function taking the AutoPas object and the generated iterator. */ template <bool useConstIterator, class AutoPasT, class F> void provideRegionIterator(AutoPasT &autoPas, autopas::IteratorBehavior behavior, const std::array<double, 3> &boxMin, const std::array<double, 3> &boxMax, F fun) { if constexpr (useConstIterator) { const auto &autoPasRef = autoPas; auto getIter = [&]() -> typename AutoPasT::const_iterator_t { return autoPasRef.getRegionIterator(boxMin, boxMax, behavior); }; fun(autoPasRef, getIter); } else { auto getIter = [&]() -> typename AutoPasT::iterator_t { return autoPas.getRegionIterator(boxMin, boxMax, behavior); }; fun(autoPas, getIter); } } /** * Same as provideRegionIterator() but `useConstIterator` is a run-time variable. * @tparam AutoPasT * @tparam F f(AutoPas, Iterator) * @param useConstIterator * @param autoPas * @param behavior * @param boxMin * @param boxMax * @param fun Function taking the AutoPas object and the generated iterator. 
*/ template <class AutoPasT, class F> void provideRegionIterator(bool useConstIterator, AutoPasT &autoPas, autopas::IteratorBehavior behavior, const std::array<double, 3> &boxMin, const std::array<double, 3> &boxMax, F fun) { if (useConstIterator) { provideRegionIterator<true>(autoPas, behavior, boxMin, boxMax, fun); } else { provideRegionIterator<false>(autoPas, behavior, boxMin, boxMax, fun); } } /** * Apply an iterator, track which particle IDs are found, and compare this to a vector of expected IDs. * @tparam AutoPasT * @tparam FgetIter * @param autopas * @param getIter Function returning the iterator to apply. * @param particleIDsExpected */ template <class AutoPasT, class FgetIter> void findParticles(AutoPasT &autopas, FgetIter getIter, const std::vector<size_t> &particleIDsExpected) { std::vector<size_t> particleIDsFound; #ifdef AUTOPAS_OPENMP // apparently the version from WrapOpenMP.h cannot be found #pragma omp declare reduction(vecMergeWorkaround : std::vector<size_t> : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end())) #pragma omp parallel reduction(vecMergeWorkaround : particleIDsFound) #endif { for (auto iterator = getIter(); iterator.isValid(); ++iterator) { auto id = iterator->getID(); particleIDsFound.push_back(id); } } // check that everything was found EXPECT_THAT(particleIDsFound, ::testing::UnorderedElementsAreArray(particleIDsExpected)); } /** * Generates a given number of cells where only the indicated cells contain a given number of particles. * Cells can be considered to be on the main diagonal through 3D space. So the xyz coordinates of each cell's lower * corner are {cellID, cellID, cellID}. The particles are also placed along this line within their cells. Within each * cell the particles are placed equidistant around the center. * @param numCells * @param cellsToFill * @param particlesPerCell * @return Vector of generated and filled cells. */ static std::vector<FMCell> generateCellsWithPattern(const size_t numCells, const std::vector<size_t> &cellsToFill, const size_t particlesPerCell) { constexpr double cellDiagonal = 1.; // distance between particles within one cell const double distBetweenParticles = cellDiagonal / (particlesPerCell + 1.); std::vector<FMCell> cells(numCells); size_t numParticlesAdded = 0; for (auto cellId : cellsToFill) { for (size_t i = 0; i < particlesPerCell; ++i) { auto position = cellId + distBetweenParticles * (i + 1.); Molecule m({position, position, position}, {0, 0, 0}, numParticlesAdded++, 0); cells[cellId].addParticle(m); } } return cells; } } // namespace IteratorTestHelper
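// Editor's usage sketch (not an actual test of the suite; guarded out of the build by the
// hypothetical ITERATOR_TEST_HELPER_USAGE_SKETCH macro): it shows how the helpers above are
// meant to compose. fillContainerAroundBoundary() seeds particles at the critical positions,
// provideIterator() builds the requested iterator flavor, and findParticles() verifies
// coverage. The AutoPas configuration calls (setBoxMin/setBoxMax/init) follow the usual
// AutoPas interface but are assumptions about the surrounding fixture here.
#ifdef ITERATOR_TEST_HELPER_USAGE_SKETCH
TEST(IteratorTestHelperSketch, OwnedIteratorFindsOwnedParticles) {
  autopas::AutoPas<Molecule> autoPas;
  autoPas.setBoxMin({0., 0., 0.});
  autoPas.setBoxMax({10., 10., 10.});
  autoPas.init();
  // Insert owned and halo particles around all corners of the domain.
  auto [idsOwned, idsHalo, idsOwnedInterest, idsHaloInterest] =
      IteratorTestHelper::fillContainerAroundBoundary(autoPas);
  // A plain (non-region) owned iterator must visit exactly the owned particles.
  IteratorTestHelper::provideIterator(
      /*useConstIterator*/ false, autoPas, autopas::IteratorBehavior::owned,
      /*useRegionIterator*/ false,
      [&](auto &ap, auto getIter) { IteratorTestHelper::findParticles(ap, getIter, idsOwned); });
}
#endif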
GB_binop__bor_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bor_int64) // A.*B function (eWiseMult): GB (_AemultB_01__bor_int64) // A.*B function (eWiseMult): GB (_AemultB_02__bor_int64) // A.*B function (eWiseMult): GB (_AemultB_03__bor_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_int64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bor_int64) // C+=b function (dense accum): GB (_Cdense_accumb__bor_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_int64) // C=scalar+B GB (_bind1st__bor_int64) // C=scalar+B' GB (_bind1st_tran__bor_int64) // C=A+scalar GB (_bind2nd__bor_int64) // C=A'+scalar GB (_bind2nd_tran__bor_int64) // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij) | (bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) | (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BOR || GxB_NO_INT64 || GxB_NO_BOR_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bor_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bor_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bor_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bor_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) 
; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bor_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bor_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bor_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bor_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bor_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = (x) | (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bor_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij) | (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x) | (aij) ; \ } GrB_Info GB (_bind1st_tran__bor_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) | (y) ; \ } GrB_Info GB (_bind2nd_tran__bor_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
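//------------------------------------------------------------------------------
// editor's illustration (not generated code): the bind1st pattern in miniature
//------------------------------------------------------------------------------

// GB (_bind1st__bor_int64) above applies cij = x | bij across B, skipping
// entries whose bitmap bit is clear.  This standalone sketch reproduces that
// loop without the GBB/GBX macros or GraphBLAS types, as a plain-C reference
// for what the kernel computes.  It is illustrative only and is kept out of
// the build with #if 0, matching how this file disables unused variants.
#if 0
static void bor_int64_bind1st_demo (void)
{
    int64_t Bx [4] = { 1, 2, 4, 8 } ;      // values of B
    int8_t  Bb [4] = { 1, 0, 1, 1 } ;      // bitmap: entry 1 is not present
    int64_t Cx [4] = { 0, 0, 0, 0 } ;      // output
    int64_t x = 16 ;                       // the bound first argument
    for (int64_t p = 0 ; p < 4 ; p++)
    {
        if (!Bb [p]) continue ;            // skip entries not in the bitmap
        Cx [p] = x | Bx [p] ;              // cij = (x) | (bij)
    }
    // Cx is now { 17, 0, 20, 24 }: untouched where Bb is 0
}
#endif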
displacement_op_opencl.h
// ----------------------------------------------------------------------------- // // Copyright (C) The BioDynaMo Project. // All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // See the LICENSE file distributed with this work for details. // See the NOTICE file distributed with this work for additional information // regarding copyright ownership. // // ----------------------------------------------------------------------------- #ifndef DISPLACEMENT_OP_OPENCL_H_ #define DISPLACEMENT_OP_OPENCL_H_ #include <vector> #ifdef USE_OPENCL #define __CL_ENABLE_EXCEPTIONS #ifdef __APPLE__ #include <OpenCL/cl.hpp> #else #include <CL/cl.hpp> #endif #endif #include "bound_space_op.h" #include "gpu/gpu_helper.h" #include "grid.h" #include "resource_manager.h" #include "shape.h" #include "type_util.h" namespace bdm { using std::array; /// Defines the 3D physical interactions between physical objects template <typename TGrid = Grid<>, typename TResourceManager = ResourceManager<>> class DisplacementOpOpenCL { public: DisplacementOpOpenCL() {} ~DisplacementOpOpenCL() {} template <typename TContainer> typename std::enable_if<is_soa_sphere<TContainer>::value>::type operator()( TContainer* cells, uint16_t type_idx) const { #ifdef USE_OPENCL auto& grid = TGrid::GetInstance(); auto rm = TResourceManager::Get(); auto context = rm->GetOpenCLContext(); auto queue = rm->GetOpenCLCommandQueue(); auto programs = rm->GetOpenCLProgramList(); std::vector<cl_double> mass(cells->size()); std::vector<array<cl_double, 3>> cell_movements(cells->size()); std::vector<cl_uint> gpu_starts; std::vector<cl_ushort> gpu_lengths; std::vector<cl_uint> successors(cells->size()); cl_uint box_length; std::array<cl_uint, 3> num_boxes_axis; std::array<cl_int, 3> grid_dimensions; cl_double squared_radius = grid.GetLargestObjectSize() * grid.GetLargestObjectSize(); // We need to create a mass vector, because it is not stored by default in // a cell container cells->FillMassVector(&mass); grid.GetSuccessors(&successors); grid.GetBoxInfo(&gpu_starts, &gpu_lengths); grid.GetGridInfo(&box_length, &num_boxes_axis, &grid_dimensions); // Allocate GPU buffers cl::Buffer positions_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, cells->size() * 3 * sizeof(cl_double), cells->GetPositionPtr()); cl::Buffer diameters_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, cells->size() * sizeof(cl_double), cells->GetDiameterPtr()); cl::Buffer tractor_force_arg( *context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, cells->size() * 3 * sizeof(cl_double), cells->GetTractorForcePtr()); cl::Buffer adherence_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, cells->size() * sizeof(cl_double), cells->GetAdherencePtr()); cl::Buffer box_id_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, cells->size() * sizeof(cl_uint), cells->GetBoxIdPtr()); cl::Buffer mass_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, cells->size() * sizeof(cl_double), mass.data()); cl::Buffer cell_movements_arg( *context, CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, cells->size() * 3 * sizeof(cl_double), cell_movements.data()->data()); cl::Buffer starts_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, gpu_starts.size() * sizeof(cl_uint), gpu_starts.data()); cl::Buffer lengths_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, gpu_lengths.size() * sizeof(cl_short), gpu_lengths.data()); cl::Buffer successors_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, 
successors.size() * sizeof(cl_uint), successors.data()); cl::Buffer nba_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, 3 * sizeof(cl_uint), num_boxes_axis.data()); cl::Buffer gd_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, 3 * sizeof(cl_int), grid_dimensions.data()); // Create the kernel object from our program // TODO(ahmad): generalize the program selection, in case we have more than // one. We can maintain an unordered map of programs maybe cl::Kernel collide((*programs)[0], "collide"); // Set kernel parameters collide.setArg(0, positions_arg); collide.setArg(1, diameters_arg); collide.setArg(2, tractor_force_arg); collide.setArg(3, adherence_arg); collide.setArg(4, box_id_arg); collide.setArg(5, mass_arg); collide.setArg(6, Param::simulation_time_step_); collide.setArg(7, Param::simulation_max_displacement_); collide.setArg(8, squared_radius); collide.setArg(9, static_cast<cl_int>(cells->size())); collide.setArg(10, starts_arg); collide.setArg(11, lengths_arg); collide.setArg(12, successors_arg); collide.setArg(13, box_length); collide.setArg(14, nba_arg); collide.setArg(15, gd_arg); collide.setArg(16, cell_movements_arg); // The number of threads for each work group (analogous to a CUDA thread // block) int block_size = 256; auto num_objects = cells->size(); try { // The global size determines the total number of threads that will be // spawned on the GPU, in groups of local_size cl::NDRange global_size = cl::NDRange(num_objects + (block_size - (num_objects % block_size))); cl::NDRange local_size = cl::NDRange(block_size); queue->enqueueNDRangeKernel(collide, cl::NullRange, global_size, local_size); } catch (const cl::Error& err) { Log::Error("DisplacementOpOpenCL", err.what(), "(", err.err(), ") = ", GetErrorString(err.err())); throw; } try { queue->enqueueReadBuffer(cell_movements_arg, CL_TRUE, 0, cells->size() * 3 * sizeof(cl_double), cell_movements.data()->data()); } catch (const cl::Error& err) { Log::Error("DisplacementOpOpenCL", err.what(), "(", err.err(), ") = ", GetErrorString(err.err())); throw; } // set new positions after all updates have been calculated // otherwise some cells would see neighbors with already updated positions // which would lead to inconsistencies #pragma omp parallel for for (size_t i = 0; i < cells->size(); i++) { auto&& cell = (*cells)[i]; cell.UpdatePosition(cell_movements[i]); if (Param::bound_space_) { ApplyBoundingBox(&cell, Param::min_bound_, Param::max_bound_); } cell.SetPosition(cell.GetPosition()); // Reset biological movement to 0. cell.SetTractorForce({0, 0, 0}); } #endif } template <typename TContainer> typename std::enable_if<!is_soa_sphere<TContainer>::value>::type operator()( TContainer* cells, uint16_t type_idx) { Fatal("DisplacementOpOpenCL", "You tried to compile GPU-specific function calls for a non-SOA data " "structure or non-spherical simulation object."); } }; } // namespace bdm #endif // DISPLACEMENT_OP_OPENCL_H_
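// Editor's usage sketch (not part of the header): the op is applied once per container of
// simulation objects, mirroring how the CPU and CUDA displacement ops are driven.  The
// ResourceManager traversal below is an assumption for illustration only; what is certain
// from the code above is that only SOA sphere containers take the OpenCL path, while any
// other container type hits the Fatal() overload.
//
//   bdm::DisplacementOpOpenCL<> displacement_op;
//   rm->ApplyOnAllTypes([&](auto* cells, uint16_t type_idx) {   // hypothetical traversal
//     displacement_op(cells, type_idx);
//   });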
Perf2.cpp.pluto.c
#include <omp.h> #pragma warning(disable : 4996) #include <math.h> #include <stdio.h> #include <stdlib.h> #define _CRT_SECURE_NO_WARNINGS #define min(x,y) ((x) < (y) ? (x) : (y)) #define max(x,y) ((x) > (y) ? (x) : (y)) #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d)) #define S0(a, i, j, k) c[i][j] = c[i][k] + c[k][j] //#define match(b1, b2) (((b1)+(b2)) == 3 ? 1 : 0) #define sigma(i, j) (match(seq[i], seq[j])) int max_score(int s1, int s2) { if (s1 >= s2) return s1; return s2; } int max_sc(int s1, int s2, int s3) { if (s1>=s2 && s1>=s3) return s1; if (s2>=s3) return s2; return s3; } int match(const int e1, const int e2) { /* * 'A' => 0 -> bitwise 0001 -> 1 * 'G' => 1 -> bitwise 0010 -> 2 * 'C' => 2 -> bitwise 0100 -> 4 * 'U' => 3 -> bitwise 1000 -> 8 */ //const bool match = // (e1 == 0 && e2 == 3) || (e1 == 3 && e2 == 0) || // (e1 == 1 && e2 == 2) || (e1 == 2 && e2 == 1) || // (e1 == 1 && e2 == 3) || (e1 == 3 && e2 == 1); //return match; const int match = (e1 + e2 == 9) || (e1 + e2 == 6) || (e1 + e2 == 10) ; return match; } void printMatrix(int**, int, int); int ** getFullCopy(int ** table, int N); int** allocateMatrix(int); void deallocateMatrix(int**, int); void write_results_full(int , double , char ); void write_results(int , double ); void computeDYN2Perfect(int** table, int n, int *seq) { int** S = getFullCopy(table, n); double start = omp_get_wtime(); //Listing 1.2: Perfectly nested Nussinov loops int t1, t2, t3, t4, t5, t6; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; if (n >= 3) { for (t1=1;t1<=n-2;t1++) { lbp=ceild(t1-25,26); ubp=floord(-t1+2*n-2,26); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(-t1+13*t2-28,30)),ceild(26*t2-n-28,30));t3<=min(floord(-t1+n-1,30),floord(-t1+26*t2+25,60));t3++) { if (t1 >= 2) { for (t4=max(30*t3,-t1+13*t2+1);t4<=min(min(-2*t1+n,30*t3+29),-t1+13*t2+13);t4++) { lbv=max(26*t2,t1+2*t4); ubv=2*t1+2*t4-2; #pragma ivdep #pragma vector always for (t5=lbv;t5<=ubv;t5++) { S[t4][(-t4+t5)] = max_sc(S[t4][-t1 + (-t4+t5)] + S[-t1 + (-t4+t5) + 1][(-t4+t5)], S[t4][(-t4+t5)], S[t4 + 1][(-t4+t5) - 1] + sigma(t4, (-t4+t5)));; S[t4][(-t4+t5)] = max_sc(S[t4][t1 + t4 - 1] + S[t1 + t4 - 1 + 1][(-t4+t5)], S[t4][(-t4+t5)], S[t4 + 1][(-t4+t5) - 1] + sigma(t4, (-t4+t5)));; } S[t4][(2*t1+t4-1)] = max_sc(S[t4][t1 + t4 - 1] + S[t1 + t4 - 1 + 1][(2*t1+t4-1)], S[t4][(2*t1+t4-1)], S[t4 + 1][(2*t1+t4-1) - 1] + sigma(t4, (2*t1+t4-1)));; } } for (t4=max(max(30*t3,-2*t1+n+1),26*t2-n+1);t4<=min(min(30*t3+29,-t1+n-1),-t1+13*t2+13);t4++) { lbv=max(26*t2,t1+2*t4); ubv=t4+n-1; #pragma ivdep #pragma vector always for (t5=lbv;t5<=ubv;t5++) { S[t4][(-t4+t5)] = max_sc(S[t4][-t1 + (-t4+t5)] + S[-t1 + (-t4+t5) + 1][(-t4+t5)], S[t4][(-t4+t5)], S[t4 + 1][(-t4+t5) - 1] + sigma(t4, (-t4+t5)));; S[t4][(-t4+t5)] = max_sc(S[t4][t1 + t4 - 1] + S[t1 + t4 - 1 + 1][(-t4+t5)], S[t4][(-t4+t5)], S[t4 + 1][(-t4+t5) - 1] + sigma(t4, (-t4+t5)));; } } for (t4=max(max(30*t3,-t1+13*t2+14),26*t2-n+1);t4<=min(min(floord(-t1+26*t2+25,2),30*t3+29),-t1+n-1);t4++) { lbv=max(26*t2,t1+2*t4); ubv=min(26*t2+25,t4+n-1); #pragma ivdep #pragma vector always for (t5=lbv;t5<=ubv;t5++) { S[t4][(-t4+t5)] = max_sc(S[t4][-t1 + (-t4+t5)] + S[-t1 + (-t4+t5) + 1][(-t4+t5)], S[t4][(-t4+t5)], S[t4 + 1][(-t4+t5) - 1] + sigma(t4, (-t4+t5)));; S[t4][(-t4+t5)] = max_sc(S[t4][t1 + t4 - 1] + S[t1 + t4 - 1 + 1][(-t4+t5)], S[t4][(-t4+t5)], S[t4 + 1][(-t4+t5) - 1] + sigma(t4, (-t4+t5)));; } } if (t1 == 1) { for
(t4=max(13*t2,30*t3);t4<=min(min(n-2,13*t2+12),30*t3+29);t4++) { S[t4][(t4+1)] = max_sc(S[t4][1 + t4 - 1] + S[1 + t4 - 1 + 1][(t4+1)], S[t4][(t4+1)], S[t4 + 1][(t4+1) - 1] + sigma(t4, (t4+1)));; } } } } } } double execution_time = omp_get_wtime() - start; printf("PERF: %lf\n", execution_time); write_results(n, execution_time); printMatrix(S, n, 2); deallocateMatrix(S, n); } void printMatrix(int** matrix, int N, int fileno) { char filename[10]; sprintf(filename, "nontiled%d", fileno); FILE* f = fopen(filename, "wt"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) fprintf(f, "%d ", matrix[i][j]); fprintf(f, "\n"); } fclose(f); } int **getFullCopy(int ** table, int N) { int **S = allocateMatrix(N); for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) S[i][j] = table[i][j]; return S; } int** allocateMatrix(int N) { int** t = (int**)malloc(sizeof(int*) * N); for (int i = 0; i < N; i++) { t[i] = (int*)malloc(sizeof(int) * N); } return t; } int* allocateVector(int N) { int* t = (int*)malloc(sizeof(int) * N); return t; } void deallocateMatrix(int **t, int N) { for (int i = 0; i < N; i++) { free(t[i]); } free(t); } void write_results_full(int n, double execution_time, char end_char) { FILE* f = fopen("results.txt", "at"); fprintf(f, "%d:%lf%c", n, execution_time, end_char); fclose(f); } void write_results(int n, double execution_time) { write_results_full(n, execution_time, ';'); } int getValue(const char c) { /* * 'A' => 0 -> bitwise 0001 -> 1 * 'G' => 1 -> bitwise 0010 -> 2 * 'C' => 2 -> bitwise 0100 -> 4 * 'U' => 3 -> bitwise 1000 -> 8 */ if(c=='A') return 1; if(c=='G') return 2; if(c=='C') return 4; if(c=='U') return 8; return 16; } #define PERFORMANCE_TEST 0 int main(void) { #if PERFORMANCE_TEST==1 const int ZMAX = 1600; #else const int ZMAX = 16; #endif int** graph = allocateMatrix(ZMAX); int* seq = allocateVector(ZMAX); for (int i = 0; i < ZMAX; i++) for (int j = 0; j < ZMAX; j++) graph[i][j] = 0; for (int i = 0; i < ZMAX; i++) graph[i][i] = 0; const char* seqTest = "GCGUCCACGGCUAGCU"; #if PERFORMANCE_TEST==1 for (int i=0 ; i<ZMAX ; i++) { seq[i] = 1 << (rand()%4); } #else for (int i = 0; i < ZMAX; i++) seq[i] = getValue(seqTest[i]); #endif int N = ZMAX - 10; //while (N < ZMAX) //{ N += 10; computeDYN2Perfect(graph, N, seq); //N += 10; //} deallocateMatrix(graph, ZMAX); free(seq); return 0; }
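/* Editor's note: a small standalone check, not part of the Pluto output above.
 * The one-hot base encoding turns complementarity into a sum test: the only
 * sums produced by valid pairs are A(1)+U(8)=9, G(2)+C(4)=6, and the wobble
 * pair G(2)+U(8)=10, which is exactly what match() tests.  Kept out of the
 * build with #if 0; it only documents the arithmetic. */
#if 0
#include <assert.h>
static void match_demo(void)
{
    const int A = 1, G = 2, C = 4, U = 8;
    assert(match(A, U) && match(U, A));   /* Watson-Crick A-U */
    assert(match(G, C) && match(C, G));   /* Watson-Crick G-C */
    assert(match(G, U) && match(U, G));   /* wobble G-U */
    assert(!match(A, A) && !match(A, G) && !match(A, C));
    assert(!match(G, G) && !match(C, C) && !match(U, U) && !match(C, U));
}
#endif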
5287.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp parallel for private(j) collapse(2) schedule(dynamic, 1) num_threads(4) for (i = 1; i < _PB_NI - 1; ++i) { for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
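/* Editor's note: an equivalent formulation (illustrative, not part of the
 * benchmark, and kept out of the build with #if 0).  The unrolled expression
 * in kernel_conv2d is a 3x3 correlation with the fixed kernel below, which
 * may be easier to read in array form.  Same iteration space and the same
 * products, up to floating-point summation order; only the presentation
 * differs. */
#if 0
static const DATA_TYPE K[3][3] = {
  { 0.2,  0.5, -0.8},
  {-0.3,  0.6, -0.9},
  { 0.4,  0.7,  0.1},
};

static void kernel_conv2d_tabular(int ni, int nj,
                                  DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                                  DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j, di, dj;
  for (i = 1; i < _PB_NI - 1; ++i)
    for (j = 1; j < _PB_NJ - 1; ++j) {
      DATA_TYPE acc = 0.0;
      /* accumulate the 3x3 neighborhood weighted by K */
      for (di = -1; di <= 1; ++di)
        for (dj = -1; dj <= 1; ++dj)
          acc += K[di + 1][dj + 1] * A[i + di][j + dj];
      B[i][j] = acc;
    }
}
#endif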
CPUMatrixImpl.h
// // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE.md file in the project root for full license information. // // CPUMatrixImpl.h : template implementation of all matrix functions on the CPU side // #pragma once #include "Basics.h" #include "File.h" #include "CPUMatrix.h" #include "TensorOps.h" #include <assert.h> #include <stdexcept> #include <omp.h> #include <math.h> #include <random> #include <chrono> #include <exception> #include <thread> #include <iostream> #include <algorithm> #include <numeric> #pragma warning(push) #pragma warning(disable:4244) // 'conversion' conversion from 'type1' to 'type2', possible loss of data #include <boost/random/normal_distribution.hpp> #pragma warning(pop) #include <boost/random/uniform_real_distribution.hpp> #ifdef _WIN32 #define NOMINMAX #include "Windows.h" #else #include <cfloat> #endif #ifdef LEAKDETECT #include <vld.h> #endif #pragma warning(disable : 4100) // unreferenced formal parameter; "struct TensorOpReduction<ElemType, OPFN, typename ReductionOp, N, -1>" triggers this #pragma warning(disable : 4127) // conditional expression is constant; "if (sizeof(ElemType)==sizeof(float))" triggers this #pragma warning(disable : 4244) // conversion from 'double' to 'float', possible loss of data #pragma warning(disable : 4702) // unreachable code; triggered for unknown reasons #ifdef USE_MKL // requires MKLML 0.11 and above #include <mkl_cblas.h> #include <mkl_lapacke.h> #include <mkl_service.h> #else #ifdef _MSC_VER // Visual Studio doesn't define standard complex types properly #define HAVE_LAPACK_CONFIG_H #define LAPACK_COMPLEX_STRUCTURE #endif #include <cblas.h> #include <lapacke.h> #endif #define SWAP(a, b) \ { \ (a) ^= (b); \ (b) ^= (a); \ (a) ^= (b); \ } #define IDX2C(i, j, ld) (((j) * (ld)) + (i)) // 0 based indexing namespace Microsoft { namespace MSR { namespace CNTK { #pragma region Helpful Enum Definitions enum class MatrixOrder { RowMajor = 101, // row-major arrays ColMajor = 102 // column-major arrays }; enum class MatrixTranspose : char { NoTrans = 'N', // trans='N' Trans = 'T', // trans='T' ConjTrans = 'C' // trans='C' }; enum class SymMatrixType : char { Up = 'U', // symmetric matrix is stored in the upper part Low = 'L', // symmetric matrix is stored in the lower part Full = 'F', // fully populated NotSymmetric = 'N' // not a symmetric matrix }; enum class MatrixOpSide : char { Left = 'L', // left multiply Right = 'R', // right multiply }; #pragma endregion Helpful Enum Definitions #pragma region Constructors and Destructor template <class ElemType> CPUMatrix<ElemType>::CPUMatrix() { ZeroInit(); } // helper to allocate an array of ElemType // Use this instead of new[] to get NaN initialization for debugging. template <class ElemType> static ElemType* NewArray(size_t n) { // We need to allocate possibly one more element for the following reason. // At some point we might want to fill a buffer with the result of a random // number generator. The RNG is oblivious to whether the buffer is on the // CPU or GPU but it needs to keep an accurate tally of how many numbers it // has generated. The trouble stems from the fact that generating an odd // number of gaussians on the GPU is not supported so we must always // generate an even number. So since we wouldn't know how to update the tally // we are making this allocate one more element in the worst case.
ElemType* p = new ElemType[AsMultipleOf(n, 2)](); #if 0 // _DEBUG ElemType nan = Matrix<ElemType>::MakeNan(__LINE__); for (size_t i = 0; i < n; i++) p[i] = nan; #endif return p; } template <class ElemType> CPUMatrix<ElemType>::CPUMatrix(const size_t numRows, const size_t numCols) { ZeroInit(); m_numRows = numRows; m_numCols = numCols; SetSizeAllocated(GetNumElements()); if (GetNumElements() != 0) { SetBuffer(NewArray<ElemType>(GetNumElements()), GetNumElements() * sizeof(ElemType)); } } template <class ElemType> CPUMatrix<ElemType>::CPUMatrix(const size_t numRows, const size_t numCols, ElemType* pArray, const size_t matrixFlags) { ZeroInit(); SetValue(numRows, numCols, pArray, matrixFlags); } //copy constructor, deep copy template <class ElemType> CPUMatrix<ElemType>::CPUMatrix(const CPUMatrix<ElemType>& deepCopyFrom) { ZeroInit(); SetValue(deepCopyFrom); } //assignment operator, deep copy template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator=(const CPUMatrix<ElemType>& deepCopyFrom) { SetValue(deepCopyFrom); return *this; } //move constructor, shallow copy template <class ElemType> CPUMatrix<ElemType>::CPUMatrix(CPUMatrix<ElemType>&& moveFrom) : Base(/* shallow */ true) { ShallowCopyFrom(moveFrom); moveFrom.ZeroValues(); } // Shortcut of default constructor + shallow copy, to avoid one initialization template <class ElemType> CPUMatrix<ElemType>::CPUMatrix(const CPUMatrix<ElemType>& shallowCopyFrom, bool shallow) : Base(shallow) { ShallowCopyFrom(shallowCopyFrom); } //move assignment operator, shallow copy template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator=(CPUMatrix<ElemType>&& moveFrom) { if (this != &moveFrom) { ShallowCopyFrom(moveFrom); // release the pointer from the source object so that the destructor won't release it twice moveFrom.ZeroValues(); } return *this; } template <class ElemType> void CPUMatrix<ElemType>::Clear() { ZeroInit(); } #pragma endregion Constructors and Destructor #pragma region Basic Operators template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::ColumnSlice(size_t startColumn, size_t numCols) const { if (startColumn + numCols > m_numCols) InvalidArgument("The slice (%d+%d) is out of range of the source matrix (%d).", (int) startColumn, (int) numCols, (int) m_numCols); CPUMatrix<ElemType> slice(*this, /* shallow= */ true); slice.m_numCols = numCols; slice.m_sliceViewOffset = m_sliceViewOffset + startColumn * m_numRows; return slice; } // set this(:, 0:numCols-1) = fromMatrix(:, startColumn : startColumn+numCols-1) // TODO: why not say *this = ColumnSlice()? 
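// Slice views are pure pointer arithmetic in this column-major layout:
// column j of the view is column (startColumn + j) of the source, so
// ColumnSlice() above only bumps m_sliceViewOffset by startColumn * m_numRows
// and shrinks m_numCols -- no element is copied. A worked example with
// made-up sizes: for a 4x10 matrix, ColumnSlice(3, 2) returns a 4x2 view
// whose data starts 3 * 4 == 12 elements into the source buffer, and writes
// through the view are visible in the original matrix.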
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignColumnSlice(const CPUMatrix<ElemType>& fromMatrix, size_t startColumn, size_t numCols)
{
    if (startColumn + numCols > fromMatrix.m_numCols)
        InvalidArgument("The slice (%d+%d) is out of range of the source matrix (%d).", (int) startColumn, (int) numCols, (int) fromMatrix.m_numCols);

    Clear();

    ShallowCopyFrom(fromMatrix);
    m_numCols = numCols;
    m_sliceViewOffset = fromMatrix.m_sliceViewOffset + startColumn * m_numRows;

    return *this;
}

// set this(: , startColumn:startColumn+numCols-1) = fromMatrix;
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::SetColumnSlice(const CPUMatrix<ElemType>& fromMatrix, size_t startColumn, size_t numCols)
{
    if (startColumn + numCols > m_numCols)
        LogicError("The slice is out of range of the destination matrix.");
    if (numCols > fromMatrix.GetNumCols())
        InvalidArgument("The slice (%d) is out of range of the source matrix (%d).", (int) numCols, (int) fromMatrix.GetNumCols());
    if (m_numRows != fromMatrix.m_numRows)
        LogicError("The number of rows in source and destination matrices does not match");

    memcpy(Data() + startColumn * m_numRows, fromMatrix.Data(), numCols * m_numRows * sizeof(ElemType));

    return *this;
}

template <class ElemType>
void CPUMatrix<ElemType>::CopyColumnsStrided(const CPUMatrix<ElemType>& fromMatrix, size_t numCols, size_t srcNumColsStride, size_t destNumColsStride)
{
    if ((((numCols - 1) * srcNumColsStride) + 1) > fromMatrix.m_numCols)
        LogicError("The numCols to copy and srcNumColsStride specified is out of range of the source matrix.");
    if ((((numCols - 1) * destNumColsStride) + 1) > m_numCols)
        LogicError("The numCols to copy and destNumColsStride specified is out of range of the destination matrix.");
    if (m_numRows != fromMatrix.m_numRows)
        LogicError("The number of rows in source and destination matrices does not match");

    long n = (long) numCols, m = (long) m_numRows;

    auto& us = *this;

#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (size_t i = 0; i < (m & ~3); i += 4)
        {
            us(i, j * destNumColsStride) = fromMatrix(i, j * srcNumColsStride);
            us(i + 1, j * destNumColsStride) = fromMatrix(i + 1, j * srcNumColsStride);
            us(i + 2, j * destNumColsStride) = fromMatrix(i + 2, j * srcNumColsStride);
            us(i + 3, j * destNumColsStride) = fromMatrix(i + 3, j * srcNumColsStride);
        }
        // handle remaining
        for (size_t i = m & ~3; i < m; i++)
        {
            us(i, j * destNumColsStride) = fromMatrix(i, j * srcNumColsStride);
        }
    }
}

//for each column of a, we assign all rows of a to this starting from startIndex
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignToRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
    if (a.GetNumRows() != numRows)
        LogicError("AssignToRowSliceValuesOf: a.GetNumRows() != numRows.");
    if (startIndex + numRows > GetNumRows())
        LogicError("AssignToRowSliceValuesOf: startIndex + numRows exceeds GetNumRows().");
    if (a.GetNumCols() != GetNumCols())
        LogicError("AssignToRowSliceValuesOf: number of columns does not match.");

    long n = (long) a.GetNumCols(), m = (long) numRows;

    auto& us = *this;

#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (size_t i = 0, startRow = startIndex; i < (m & ~3); i += 4, startRow += 4)
        {
            us(startRow, j) = a(i, j);
            us(startRow + 1, j) = a(i + 1, j);
            us(startRow + 2, j) = a(i + 2, j);
            us(startRow + 3, j) = a(i + 3, j);
        }
        // handle remaining stuffs
        for (size_t i = m & ~3, startRow = startIndex + (m & ~3); i < m; i++, startRow++)
        {
            us(startRow, j) =
a(i, j); } } return *this; } //for each column of a, we assign numRows starting from startIndex to this template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows) { if (startIndex + numRows > a.GetNumRows()) LogicError("AssignRowSliceValuesOf: startIndex + numRows exceeds a.GetNumRows()."); RequireSize(numRows, a.GetNumCols()); long n = (long) a.GetNumCols(); // note: OpenMP requires loop indices to be long, not size_t long k = (long) a.GetNumRows(); #pragma omp parallel for for (long j = 0; j < n; j++) { // memory copy might be faster? memcpy(Data() + j * numRows, a.Data() + j * k + startIndex, sizeof(ElemType) * numRows); // //four-way unrolling // for (long i=0, startRow = startIndex; i<(m & ~3); i+=4, startRow+=4) // { // us(i,j) = a(startRow,j); // us(i+1,j) = a(startRow+1,j); // us(i+2,j) = a(startRow+2,j); // us(i+3,j) = a(startRow+3,j); // } // //handle remaining stuffs // for (long i=m & ~3, startRow = startIndex+(m & ~3); i<m; i++, startRow++) // { // us(i,j) = a(startRow,j); // } } return *this; } //for the row slice of this starting from startIndex we add a to it. template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddToRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows) { if (a.IsEmpty()) LogicError("AddToRowSliceValuesOf: input matrix a is empty."); if (a.GetNumRows() != numRows) LogicError("AddToRowSliceValuesOf: a.GetNumRows() != numRows."); if (startIndex + numRows > GetNumRows()) LogicError("AddToRowSliceValuesOf: startIndex + numRows exceeds GetNumRows()."); if (a.GetNumCols() != GetNumCols()) LogicError("AddToRowSliceValuesOf: columns does not match."); long n = (long) a.GetNumCols(), m = (long) numRows; auto& us = *this; #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0, startRow = (long) startIndex; i < (m & ~3); i += 4, startRow += 4) { us(startRow, j) += a(i, j); us(startRow + 1, j) += a(i + 1, j); us(startRow + 2, j) += a(i + 2, j); us(startRow + 3, j) += a(i + 3, j); } // handle remaining stuffs for (long i = m & ~3, startRow = (long) startIndex + (m & ~3); i < m; i++, startRow++) { us(startRow, j) += a(i, j); } } return *this; } //for each column of this, we add row slice of a starting from startIndex template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddWithRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows) { if (a.IsEmpty()) LogicError("AddWithRowSliceValuesOf: input matrix a is empty."); if (GetNumRows() != numRows) LogicError("AddWithRowSliceValuesOf: GetNumRows() != numRows."); if (startIndex + numRows > a.GetNumRows()) LogicError("AddWithRowSliceValuesOf: startIndex + numRows exceeds a.GetNumRows()."); if (a.GetNumCols() != GetNumCols()) LogicError("AddWithRowSliceValuesOf: columns does not match."); long n = (long) a.GetNumCols(), m = (long) numRows; auto& us = *this; #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0, startRow = (long) startIndex; i < (m & ~3); i += 4, startRow += 4) { us(i, j) += a(startRow, j); us(i + 1, j) += a(startRow + 1, j); us(i + 2, j) += a(startRow + 2, j); us(i + 3, j) += a(startRow + 3, j); } // handle remaining stuffs for (long i = m & ~3, startRow = (long) startIndex + (m & ~3); i < m; i++, startRow++) { us(i, j) += a(startRow, j); } } return *this; } template <class ElemType> CPUMatrix<ElemType> 
CPUMatrix<ElemType>::Diagonal() const { if (m_numRows != m_numCols) LogicError("Diagonal can be called only for square matrix. (rows=%d, cols=%d)", (int) m_numRows, (int) m_numCols); CPUMatrix<ElemType> diag(1, m_numCols); auto& us = *this; #pragma omp parallel for for (long i = 0; i < m_numRows; i++) { diag(0, (size_t) i) = us(i, i); } return diag; } template <class ElemType> void CPUMatrix<ElemType>::MinusOneAt(CPUMatrix<ElemType>& c, const size_t position) { if (position < c.GetNumElements()) c.Data()[position] -= 1.0; else RuntimeError("MinusOneAt: position is out of CPU matrix size"); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignRepeatOf(const CPUMatrix<ElemType>& a, const size_t numRowRepeats, const size_t numColRepeats) { if (this == &a) LogicError("AssignRepeatOf: a is the same as [this]. Does not support inplace repeat."); if (a.IsEmpty()) LogicError("AssignRepeatOf: Matrix a is empty."); RequireSize(a.GetNumRows() * numRowRepeats, a.GetNumCols() * numColRepeats); long n = (long) a.GetNumCols(), m = (long) a.GetNumRows(); auto& us = *this; #pragma omp parallel for for (long q = 0; q < numColRepeats; q++) { for (long p = 0; p < numRowRepeats; p++) { long colOffset = q * n; for (long j = 0; j < n; j++, colOffset++) { long rowOffset = p * m; // four-way unrolling for (long i = 0; i < (m & ~3); i += 4, rowOffset += 4) { us(rowOffset, colOffset) = a(i, j); us(rowOffset + 1, colOffset) = a(i + 1, j); us(rowOffset + 2, colOffset) = a(i + 2, j); us(rowOffset + 3, colOffset) = a(i + 3, j); } // handle remaining stuffs for (long i = m & ~3; i < m; i++, rowOffset++) { us(rowOffset, colOffset) = a(i, j); } } } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddToRowRepeatValuesOf(const CPUMatrix<ElemType>& a, const size_t numRepeats) { if (a.IsEmpty()) LogicError("AddToRowRepeatValuesOf: input matrix a is empty."); if (a.GetNumRows() != GetNumRows() * numRepeats) LogicError("AddToRowRepeatValuesOf: a.GetNumRows() != GetNumRows() * numRepeats."); long n = (long) a.GetNumCols(), m = (long) GetNumRows(); auto& us = *this; #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { for (long k = 0; k < numRepeats; k++) { us(i, j) += a(k * m + i, j); us(i + 1, j) += a(k * m + i + 1, j); us(i + 2, j) += a(k * m + i + 2, j); us(i + 3, j) += a(k * m + i + 3, j); } } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { for (long k = 0; k < numRepeats; k++) { us(i, j) += a(k * m + i, j); } } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignPositiveAndShiftedNegSample(const CPUMatrix<ElemType>& a, const size_t posNumber, const size_t negNumber, const size_t shiftNumber) { a; posNumber; negNumber; shiftNumber; NOT_IMPLEMENTED; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddFoldedPositiveAndShiftedNegSample(const CPUMatrix<ElemType>& a, const size_t posNumber, const size_t negNumber, const size_t shiftNumber) { a; posNumber; negNumber; shiftNumber; NOT_IMPLEMENTED; } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::Transpose() { if (IsEmpty()) LogicError("Transpose: Matrix is empty."); CPUMatrix<ElemType> c; c.AssignTransposeOf(*this); return c; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTransposeOf(const CPUMatrix<ElemType>& a) { if (this == &a) LogicError("AssignTransposeOf: a is the same as [this]. 
Does not support inplace transpose."); if (a.IsEmpty()) LogicError("AssignTransposeOf: Matrix a is empty."); RequireSize(a.GetNumCols(), a.GetNumRows()); long n = (long) a.GetNumCols(), m = (long) a.GetNumRows(); auto& us = *this; #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(j, i) = a(i, j); us(j, i + 1) = a(i + 1, j); us(j, i + 2) = a(i + 2, j); us(j, i + 3) = a(i + 3, j); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(j, i) = a(i, j); } } return *this; } // dst[i] = src[i] * alpha + dst[i] * beta // scale a column vector and add it to another // The usual special case: If beta = 0, then dst[] is not read, and may be uninitialized or NaN. template <class ElemType> static void ScaleAndAddColumn(ElemType beta, ElemType* dst, const ElemType* src, size_t numRows, ElemType alpha) { if (alpha != 1) // rare case: just do the full thing for (size_t i = 0; i < numRows; i++) dst[i] = beta * dst[i] + alpha * src[i]; else if (beta == 1) // used in backprop for (size_t i = 0; i < numRows; i++) dst[i] += src[i]; else if (beta == 0) // plain assignment memcpy(dst, src, sizeof(ElemType) * numRows); else // alpha=1, arbitrary beta: also rare case for (size_t i = 0; i < numRows; i++) dst[i] = beta * dst[i] + src[i]; } // *this[:,j] = a[:,idx[j]] * alpha + *this[:,j] * beta template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::DoGatherColumnsOf(ElemType beta, const CPUMatrix<ElemType>& idx, const CPUMatrix<ElemType>& a, ElemType alpha) { if (idx.GetNumRows() != 1) // index is 1-dimensional only InvalidArgument("DoGatherColumnsOf: Map must be a row vector."); if (beta) VerifySize(a.GetNumRows(), idx.GetNumCols()); else Resize(a.GetNumRows(), idx.GetNumCols()); auto& us = *this; // race-condition consideration: Since this loops over independent output columns, this has no race condition. Cf. DoScatterColumnsOf(). #pragma omp parallel for // TODO: Depending in circumstance, it may be more efficient to parallelize over rows. foreach_column(jOut, us) { auto jInF = idx(0, jOut); // this is the column we need to get if (std::isnan(jInF) || jInF < 0) // negative index means gap continue; size_t jIn = (size_t)jInF; if (jIn >= a.GetNumCols()) InvalidArgument("DoGatherColumnsOf: Map out of bounds. %ld >= %ld", (long int)jIn, (long int)a.GetNumCols()); ScaleAndAddColumn(beta, &us(0,jOut), &a(0,jIn), us.GetNumRows(), alpha); } return *this; } // *this[:,idx[j]] = a[:,j] * alpha + *this[:,idx[j]] * beta template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::DoScatterColumnsOf(ElemType beta, const CPUMatrix<ElemType>& idx, const CPUMatrix<ElemType>& a, ElemType alpha) { if (idx.GetNumRows() != 1) // index is 1-dimensional only InvalidArgument("DoScatterColumnsOf: Map must be a row vector."); if (idx.GetNumCols() != a.GetNumCols()) InvalidArgument("DoScatterColumnsOf: Map must have width of input vector."); if (a.GetNumRows() != GetNumRows()) InvalidArgument("DoScatterColumnsOf: Output must have same height as input vector."); auto& us = *this; // pre-scale with beta upfront // Scatter may add more than one source column to the same target, so we must pre-scale with beta, and then just keep adding. 
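    // Unlike DoGatherColumnsOf() above, scatter cannot simply parallelize over
    // output columns: with a made-up map idx = [0, 2, 0], input columns 0 and 2
    // both land in output column 0 and their contributions must accumulate.
    // Pre-scaling the whole target by beta turns every update into a plain
    // "+= alpha * source" that ScatterValues() can then apply.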
Scale(beta, us); // if beta is 0, then this will be a memset() ScatterValues(idx.Data(), a.Data(), us.Data(), alpha, idx.GetNumCols(), a.GetNumRows(), GetNumCols(), idx.GetNumRows()); return *this; } template <class ElemType> void CPUMatrix<ElemType>::SetValue(const ElemType v) { if (IsEmpty()) LogicError("SetValue: Matrix is empty."); bool isFinite = std::numeric_limits<ElemType>::is_integer || std::isfinite((double) v); if (isFinite && v == 0) { memset(Data(), 0, sizeof(ElemType) * GetNumElements()); } else { ElemType* bufPtr = Data(); long m = (long) GetNumElements(); // 2-way thread parallelism is sufficient for the memory bound // operation of just setting the values of an array. const unsigned SETVALUE_NUM_THREADS = 2; UNUSED(SETVALUE_NUM_THREADS); // in case OMP is turned off. #pragma omp parallel for num_threads(SETVALUE_NUM_THREADS) // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { bufPtr[i] = v; bufPtr[i + 1] = v; bufPtr[i + 2] = v; bufPtr[i + 3] = v; } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { bufPtr[i] = v; } } } template <class ElemType> void CPUMatrix<ElemType>::MaskColumnsValue(const CPUMatrix<char>& columnsMask, ElemType val, size_t numColsPerMaskEntry) { if (GetNumCols() != (columnsMask.GetNumCols() * numColsPerMaskEntry)) RuntimeError("MaskColumnsValue: Matrix number of columns must equal 'column mask number of columns * numColsPerMaskEntry'."); auto& us = *this; long n = (long)columnsMask.GetNumCols(), m = (long) GetNumRows(); #pragma omp parallel for for (long j = 0; j < n; j++) { if (columnsMask(0, j) == 1) continue; for (long k = 0; k < numColsPerMaskEntry; ++k) { // four-way unrolling for (size_t i = 0; i < (m & ~3); i += 4) { us(i, (j * numColsPerMaskEntry) + k) = val; us(i + 1, (j * numColsPerMaskEntry) + k) = val; us(i + 2, (j * numColsPerMaskEntry) + k) = val; us(i + 3, (j * numColsPerMaskEntry) + k) = val; } // handle remaining for (size_t i = m & ~3; i < m; i++) { us(i, (j * numColsPerMaskEntry) + k) = val; } } } } template <class ElemType> void CPUMatrix<ElemType>::SetColumn(const ElemType* colPointer, size_t j) { if (IsEmpty()) LogicError("SetColumn: Matrix is empty."); if (colPointer == NULL) return; auto& us = *this; long m = (long) GetNumRows(); #pragma omp parallel for // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, j) = colPointer[i]; us(i + 1, j) = colPointer[i + 1]; us(i + 2, j) = colPointer[i + 2]; us(i + 3, j) = colPointer[i + 3]; } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, j) = colPointer[i]; } } template <class ElemType> void CPUMatrix<ElemType>::SetColumn(const ElemType val, size_t j) { if (IsEmpty()) LogicError("SetColumn: Matrix is empty."); auto& us = *this; long m = (long) GetNumRows(); #pragma omp parallel for // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, j) = val; us(i + 1, j) = val; us(i + 2, j) = val; us(i + 3, j) = val; } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, j) = val; } } template <class ElemType> void CPUMatrix<ElemType>::SetColumn(const CPUMatrix<ElemType>& valMat, size_t j) { if (IsEmpty()) LogicError("SetColumn: Matrix is empty."); if (valMat.GetNumRows() != GetNumRows() || valMat.GetNumCols() != 1) LogicError("The valMat matrix has incorrect number of rows or columns."); auto& us = *this; long m = (long) GetNumRows(); #pragma omp parallel for // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, j) = valMat(i, 0); us(i + 1, j) = valMat(i + 1, 0); us(i + 2, j) = valMat(i + 2, 0); us(i 
+ 3, j) = valMat(i + 3, 0); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, j) = valMat(i, 0); } } template <class ElemType> void CPUMatrix<ElemType>::SetValue(const CPUMatrix<ElemType>& deepCopyFrom) { if (this == &deepCopyFrom) return; SetValue(deepCopyFrom.GetNumRows(), deepCopyFrom.GetNumCols(), deepCopyFrom.Data(), 0); } #if 0 template <class ElemType> void CPUMatrix<ElemType>::SetValue(const GPUMatrix<ElemType>& /*deepCopyFrom*/) { NOT_IMPLEMENTED; } template <class ElemType> void CPUMatrix<ElemType>::SetValue(const CPUSparseMatrix<ElemType>& deepCopyFrom) { deepCopyFrom.AssignColumnSliceToDense(*this, 0, deepCopyFrom.GetNumCols()); } template <class ElemType> void CPUMatrix<ElemType>::SetValue(const GPUSparseMatrix<ElemType>& /*deepCopyFrom*/) { NOT_IMPLEMENTED; } #endif template <class ElemType> void CPUMatrix<ElemType>::SetValue(const size_t numRows, const size_t numCols, ElemType* pArray, const size_t matrixFlags) { if (pArray == nullptr && numRows * numCols > 0) InvalidArgument("Invalid pArray. pArray == nullptr, but matrix is of size %d * %d = %d.", (int)numRows, (int)numCols, (int)(numRows * numCols)); SetFormat(matrixFormatDense); SetComputeDeviceId(CPUDEVICE); // if it's externally managed, then populate the structure if (matrixFlags & matrixFlagDontOwnBuffer) { // free previous array allocation if any before overwriting delete[] Buffer(); m_numRows = numRows; m_numCols = numCols; SetBuffer(pArray, GetNumElements() * sizeof(ElemType), true); SetSizeAllocated(GetNumElements()); } else { RequireSize(numRows, numCols); if (!IsEmpty()) { if (!(matrixFlags & matrixFormatRowMajor)) // compatible to internal structure memcpy(Data(), pArray, GetNumElements() * sizeof(ElemType)); else // need to transpose { ElemType* bufPtr = Data(); auto& us = *this; if (std::is_same<ElemType, double>::value) { #pragma omp parallel for foreach_column (j, us) { cblas_dcopy((int) numRows, reinterpret_cast<double*>(pArray + j), (int) numCols, reinterpret_cast<double*>(bufPtr + LocateColumn(j)), 1); } } else if (std::is_same<ElemType, float>::value) { #pragma omp parallel for foreach_column (j, us) { { #pragma warning(suppress : 4244) cblas_scopy((int) numRows, reinterpret_cast<float*>(pArray + j), (int) numCols, reinterpret_cast<float*>(bufPtr + LocateColumn(j)), 1); } } } else { RuntimeError("Unsupported data format"); } } } } } template <class ElemType> void CPUMatrix<ElemType>::SetDiagonalValue(const ElemType v) { auto& us = *this; long m = static_cast<long>(GetDiagSize()); #pragma omp parallel for // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, i) = v; us(i + 1, i + 1) = v; us(i + 2, i + 2) = v; us(i + 3, i + 3) = v; } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, i) = v; } } template <class ElemType> void CPUMatrix<ElemType>::SetDiagonalValue(const CPUMatrix<ElemType>& vector) { if (IsEmpty() || vector.IsEmpty()) LogicError("SetDiagonalValue: Matrix is empty."); if (vector.GetNumRows() != 1 && vector.GetNumCols() != 1) LogicError("SetDiagonalValue: input vector must be a vector."); if (vector.GetNumElements() == 1) // reduce to simple form SetDiagonalValue(vector(0, 0)); else if (vector.GetNumRows() != GetDiagSize() && vector.GetNumCols() != GetDiagSize()) LogicError("SetDiagonalValue: input vector's dimension does not agree with [this]."); else { auto& us = *this; long m = (long) GetDiagSize(); if (vector.GetNumRows() == 1) // row vector { #pragma omp parallel for // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { 
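            // m & ~3 clears the two low bits, i.e. rounds m down to a multiple
            // of 4: for m == 10 this loop covers i = 0 and i = 4 (eight
            // elements) and the scalar tail loop below picks up i = 8, 9.
            // The same unrolling pattern is used throughout this file.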
us(i, i) = vector(0, i); us(i + 1, i + 1) = vector(0, i + 1); us(i + 2, i + 2) = vector(0, i + 2); us(i + 3, i + 3) = vector(0, i + 3); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, i) = vector(0, i); } } else { #pragma omp parallel for // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, i) = vector(i, 0); us(i + 1, i + 1) = vector(i + 1, 0); us(i + 2, i + 2) = vector(i + 2, 0); us(i + 3, i + 3) = vector(i + 3, 0); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, i) = vector(i, 0); } } } } template <class ElemType> void CPUMatrix<ElemType>::SetUniformRandomValue(const ElemType low, const ElemType high, unsigned long seed) { if (IsEmpty()) LogicError("SetUniformRandomValue: Matrix is empty."); std::mt19937_64 generator; generator.seed(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed); boost::random::uniform_real_distribution<double> r((double)low, (double)high); ElemType* bufPtr = Data(); long m = (long) GetNumElements(); // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { bufPtr[i] = (ElemType)r(generator); bufPtr[i + 1] = (ElemType)r(generator); bufPtr[i + 2] = (ElemType)r(generator); bufPtr[i + 3] = (ElemType)r(generator); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { bufPtr[i] = (ElemType)r(generator); } } template <class ElemType> void CPUMatrix<ElemType>::SetUniformRandomValue(RNGHandle& rngHandle, const ElemType low, const ElemType high) { if (IsEmpty()) LogicError("SetUniformRandomValue: Matrix is empty."); CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle); if (cpuRNGHandle == nullptr) LogicError("rngHandle must be a CPURNGHandle."); boost::random::uniform_real_distribution<double> r((double)low, (double)high); std::generate(Data(), Data() + GetNumElements(), [&cpuRNGHandle, &r]() {return (ElemType)r(cpuRNGHandle->Generator()); }); } template <class ElemType> void CPUMatrix<ElemType>::SetGaussianRandomValue(RNGHandle& rngHandle, const ElemType mean, const ElemType stdev) { if (IsEmpty()) LogicError("SetGaussianRandomValue: Matrix is empty."); CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle); if (cpuRNGHandle == nullptr) LogicError("rngHandle must be a CPURNGHandle."); boost::random::normal_distribution<double> r((double)mean, (double)stdev); auto n = AsMultipleOf(GetNumElements(), 2); std::generate(Data(), Data() + n, [&cpuRNGHandle, &r]() {return (ElemType)r(cpuRNGHandle->Generator()); }); } template <class ElemType> void CPUMatrix<ElemType>::SetGumbelRandomValue(RNGHandle& rngHandle, const ElemType loc, const ElemType scale) { if (IsEmpty()) LogicError("SetGumbelRandomValue: Matrix is empty."); CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle); if (cpuRNGHandle == nullptr) LogicError("rngHandle must be a CPURNGHandle."); boost::random::uniform_real_distribution<double> r(0, 1); std::generate(Data(), Data() + GetNumElements(), [&cpuRNGHandle, &r, loc, scale]() {return (ElemType)(loc - scale * log(-log1p(-r(cpuRNGHandle->Generator())))); }); } template <class ElemType> void CPUMatrix<ElemType>::SetGaussianRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed) { if (sigma <= 0) InvalidArgument("SetGaussianRandomValue: sigma must be a positive value."); if (IsEmpty()) LogicError("SetGaussianRandomValue: Matrix is empty."); auto& us = *this; std::mt19937_64 generator(seed == USE_TIME_BASED_SEED ? 
        (unsigned long) time(NULL) : seed);
    boost::random::normal_distribution<double> r((double)mean, (double)sigma);

    // #pragma omp parallel for is not thread safe. Also the results would not be deterministic
    foreach_coord (i, j, us)
    {
        us(i, j) = (ElemType)r(generator);
    }
}

template <class ElemType>
void CPUMatrix<ElemType>::SetTruncatedNormalRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
    if (sigma <= 0)
        InvalidArgument("SetTruncatedNormalRandomValue: sigma must be a positive value.");

    if (IsEmpty())
        LogicError("SetTruncatedNormalRandomValue: Matrix is empty.");

    auto& us = *this;
    std::mt19937_64 generator(seed == USE_TIME_BASED_SEED ? (unsigned long)time(NULL) : seed);
    boost::random::normal_distribution<double> r((double)mean, (double)sigma);
    const ElemType high = mean + 2 * sigma;
    const ElemType low = mean - 2 * sigma;
    // #pragma omp parallel for is not thread safe. Also the results would not be deterministic
    foreach_coord(i, j, us)
    {
        ElemType tmp = 0;
        do
            tmp = (ElemType)r(generator);
        while (tmp < low || tmp > high); // Rejection sampling is fine here because the acceptance probability is about 0.9545
        us(i, j) = tmp;
    }
}

template <class ElemType>
void CPUMatrix<ElemType>::AddGaussianRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
    if (sigma <= 0)
        InvalidArgument("AddGaussianRandomValue: sigma must be a positive value.");

    if (IsEmpty())
        LogicError("AddGaussianRandomValue: Matrix is empty.");

    auto& us = *this;
    std::mt19937_64 generator;
    generator.seed(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
    boost::random::normal_distribution<double> r((double)mean, (double)sigma);

    long m = (long) GetNumRows(), n = (long) GetNumCols();
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) = (ElemType)r(generator);
            us(i + 1, j) = (ElemType)r(generator);
            us(i + 2, j) = (ElemType)r(generator);
            us(i + 3, j) = (ElemType)r(generator);
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j) = (ElemType)r(generator);
        }
    }
}

//maskRate: percentage of values masked out (similar to dropout rate)
//scaleValue: the scale applied to the surviving (unmasked) entries
template <class ElemType>
void CPUMatrix<ElemType>::SetUniformRandomMask(const ElemType maskRate, const ElemType scaleValue, RNGHandle& rngHandle)
{
    if (IsEmpty())
        LogicError("SetUniformRandomMask: Matrix is empty.");

    CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
    if (cpuRNGHandle == nullptr)
        LogicError("rngHandle must be a CPURNGHandle.");

    auto& us = *this;
    boost::random::uniform_real_distribution<double> r(0, 1);

    long m = (long) GetNumRows(), n = (long) GetNumCols();
    ElemType v;
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            v = (ElemType)r(cpuRNGHandle->Generator());
            us(i, j) = v <= maskRate ? (ElemType)0 : scaleValue;
            v = (ElemType)r(cpuRNGHandle->Generator());
            us(i + 1, j) = v <= maskRate ? (ElemType)0 : scaleValue;
            v = (ElemType)r(cpuRNGHandle->Generator());
            us(i + 2, j) = v <= maskRate ? (ElemType)0 : scaleValue;
            v = (ElemType)r(cpuRNGHandle->Generator());
            us(i + 3, j) = v <= maskRate ? (ElemType)0 : scaleValue;
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            v = (ElemType)r(cpuRNGHandle->Generator());
            us(i, j) = v <= maskRate ? (ElemType)0 : scaleValue;
        }
    }
}

template <class ElemType>
ElemType CPUMatrix<ElemType>::Adagrad(CPUMatrix<ElemType>& gradients, const bool needAveMultiplier)
{
    ElemType aveMultiplier = 0;

    if (IsEmpty() || gradients.GetNumCols() != GetNumCols() || gradients.GetNumRows() != GetNumRows())
    {
        RequireSize(gradients.GetNumRows(), gradients.GetNumCols());
        SetValue(0.0);
    }

    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != gradients.GetNumCols())
        LogicError("The matrix gradients must have the same rows and columns as this matrix.");

    ElemType *a = Data(), *d_v = gradients.Data();
    size_t n = GetNumElements();

    const ElemType floor = 1e-16f;
    ElemType a0, a1, a2, a3;

    // disable omp here because aveMultiplier needs to be added atomically. However, it seems the result is incorrect even if omp atomic and omp critical are used.
    // #pragma omp parallel for
    for (long i = 0; i < (n & ~3); i += 4) // four-way unrolling
    {
        a[i] += d_v[i] * d_v[i];
        a[i + 1] += d_v[i + 1] * d_v[i + 1];
        a[i + 2] += d_v[i + 2] * d_v[i + 2];
        a[i + 3] += d_v[i + 3] * d_v[i + 3];

        a0 = sqrt(a[i] + floor);
        a1 = sqrt(a[i + 1] + floor);
        a2 = sqrt(a[i + 2] + floor);
        a3 = sqrt(a[i + 3] + floor);

        d_v[i] /= a0;
        d_v[i + 1] /= a1;
        d_v[i + 2] /= a2;
        d_v[i + 3] /= a3;

        if (needAveMultiplier)
        {
            aveMultiplier += 1 / a0 + 1 / a1 + 1 / a2 + 1 / a3;
        }
    }

    // get the last few elements if any
    for (long i = n & ~3; i < n; i++)
    {
        a[i] += d_v[i] * d_v[i];
        a0 = sqrt(a[i] + floor);
        d_v[i] /= a0;

        if (needAveMultiplier)
        {
            aveMultiplier += 1 / a0;
        }
    }

    if (needAveMultiplier && n > 0)
        return aveMultiplier / n;
    else
        return 1;
}

template <class ElemType>
void CPUMatrix<ElemType>::FSAdagrad(CPUMatrix<ElemType>& gradients, CPUMatrix<ElemType>& functionValues, ElemType learnRatePerSample, ElemType momentum, ElemType adaWeight, ElemType adaMul, ElemType unitGainFactor)
{
    size_t numColsNeeded = 2 * gradients.GetNumCols();

    if (IsEmpty() || (GetNumCols() < numColsNeeded))
    {
        RequireSize(gradients.GetNumRows(), numColsNeeded);
        SetValue(0.0);
    }

    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
        LogicError("The matrix gradients does not have the expected dimensions.");

    size_t n = gradients.GetNumElements();
    ElemType* grad = gradients.Data();
    ElemType* smoothAda = Data();
    ElemType* smoothMom = Data() + n;
    ElemType* val = functionValues.Data();
#pragma omp parallel for
    // TODO: Unroll 4-times for better performance leveraging vectorization
    for (long i = 0; i < n; i++)
    {
        ElemType g = grad[i];
        ElemType adaSqr = adaWeight * smoothAda[i] + (1.0f - adaWeight) * g * g;
        smoothAda[i] = adaSqr;
        if (adaSqr != 0.0f)
        {
            ElemType ada = sqrt(adaSqr);
            ElemType w = adaMul * ((ElemType) 1.0 / ada);

            if (w > 10.0f)
                w = 10.0f;
            g *= w;
        }

        if (momentum > 0.0f)
        {
            g = momentum * smoothMom[i] + unitGainFactor * g;
            smoothMom[i] = g;
        }

        g *= learnRatePerSample;
        val[i] -= g;
    }
}

template <class ElemType>
void CPUMatrix<ElemType>::Adam(CPUMatrix<ElemType>& gradients, CPUMatrix<ElemType>& functionValues, ElemType learnRatePerSample, ElemType momentum, ElemType adaWeight, ElemType adaMul, ElemType epsilon, ElemType unitGainFactor, bool adamax)
{
    size_t numColsNeeded = 2 * gradients.GetNumCols();

    if (IsEmpty() || (GetNumCols() < numColsNeeded))
    {
        RequireSize(gradients.GetNumRows(), numColsNeeded);
        SetValue(0.0);
    }

    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
        LogicError("The matrix gradients does not have the expected dimensions.");

    size_t n = gradients.GetNumElements();
    ElemType* grad = gradients.Data();
    ElemType* smoothAda = Data();
    ElemType* smoothMom =
Data() + n; ElemType* val = functionValues.Data(); #pragma omp parallel for // TODO: Unroll 4-times for better performance leveraging vectorization for (long i = 0; i < n; i++) { ElemType g = grad[i]; ElemType ada; if (!adamax) { ElemType adaSqr = adaWeight * smoothAda[i] + (1.0f - adaWeight) * g * g; smoothAda[i] = adaSqr; ada = sqrt(adaSqr); } else ada = smoothAda[i] = std::max(adaWeight * smoothAda[i], fabs_(g)); ElemType w = adaMul * (ElemType)( 1.0 / (ada + epsilon)); g = momentum * smoothMom[i] + unitGainFactor * g; smoothMom[i] = g; val[i] -= g * w * learnRatePerSample; } } template <class ElemType> ElemType CPUMatrix<ElemType>::RmsProp(CPUMatrix<ElemType>& gradients, ElemType RMS_GAMMA, ElemType RMS_WGT_INC, ElemType RMS_WGT_MAX, ElemType RMS_WGT_DEC, ElemType RMS_WGT_MIN, const bool needAveMultiplier, const bool initialized) { const ElemType floor = 1e-6f; size_t n = gradients.GetNumElements(); ElemType* curr_grad = gradients.Data(); if (IsEmpty() || GetNumCols() < gradients.GetNumCols() * 3 || !initialized) { RequireSize(gradients.GetNumRows(), gradients.GetNumCols() * 3); SetValue(0.0); ElemType* avars = Data(); // accumulated variances for RMS scaling ElemType* steps = Data() + 2 * n; // current step size // initialize moving average of gradient-squared for (long i = 0; i < n; i++) avars[i] = curr_grad[i] * curr_grad[i]; // initialize starting step size for (long i = 0; i < n; i++) steps[i] = ElemType(0.02); } ElemType* avars = Data(); // accumulated variances for RMS scaling ElemType* signs = Data() + n; // sign of previous gradient ElemType* steps = Data() + 2 * n; // current step size if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != gradients.GetNumCols() * 3) LogicError("The matrix gradients does not have expected dimensions."); ElemType ONE_MINUS_GAMMA = ElemType(1.0) - RMS_GAMMA; // int upd[] = { // 2,2,0, // 2,2,0, // 1,1,1, // 2,2,0, // 1,2,1, // 0,2,2, // 1,1,1, // 0,2,2, // 0,2,2, // }; // for (long i=0; i<n; i++) // { // avars[i] = RMS_GAMMA * avars[i] + ONE_MINUS_GAMMA * (curr_grad[i] * curr_grad[i]); // // grad sign base 3: 0->neg, 1->zero, 2->pos // const int grad_sign = 1 + (ElemType(0) < curr_grad[i]) - (curr_grad[i] < ElemType(0)); // // signs[i] contains three consecutive grad_sign // signs[i] = 3*(int(signs[i]) % 9) + grad_sign; // switch(upd[int(signs[i])]) // { // case 0: // steps[i] = max(steps[i] * RMS_WGT_DEC, RMS_WGT_MIN); // break; // case 2: // steps[i] = min(steps[i] * RMS_WGT_INC, RMS_WGT_MAX); // break; // } // curr_grad[i] *= steps[i] / sqrt(avars[i] + floor); // } ElemType aveMultiplier = 0, a; for (long i = 0; i < n; i++) { avars[i] = RMS_GAMMA * avars[i] + ONE_MINUS_GAMMA * (curr_grad[i] * curr_grad[i]); const int grad_sign = (ElemType(0) < curr_grad[i]) - (curr_grad[i] < ElemType(0)); if (signs[i] * grad_sign > 0) steps[i] = std::min(steps[i] * RMS_WGT_INC, RMS_WGT_MAX); else steps[i] = std::max(steps[i] * RMS_WGT_DEC, RMS_WGT_MIN); a = steps[i] / sqrt(avars[i] + floor); curr_grad[i] *= a; signs[i] = (ElemType) grad_sign; if (needAveMultiplier) aveMultiplier += a; } if (needAveMultiplier) return aveMultiplier / n; else return 1; } template <class ElemType> template <typename GradType> void CPUMatrix<ElemType>::AdaDelta(CPUMatrix<GradType>& gradients, CPUMatrix<ElemType>& functionValues, ElemType learningRate, ElemType rho, ElemType epsilon) { size_t numColsNeeded = 2 * gradients.GetNumCols(); if (IsEmpty() || (GetNumCols() < numColsNeeded)) { RequireSize(gradients.GetNumRows(), numColsNeeded); SetValue(0.0); } if (GetNumRows() != 
gradients.GetNumRows() || GetNumCols() != numColsNeeded) LogicError("The matrix gradients does not have expected dimensions."); size_t n = gradients.GetNumElements(); GradType* grad = gradients.Data(); ElemType* smoothAda = Data(); ElemType* smoothX2 = Data() + n; ElemType* val = functionValues.Data(); #pragma omp parallel for // TODO: Unroll 4-times for better performance leveraging vectorization for (long i = 0; i < n; i++) { ElemType g = (ElemType)grad[i]; ElemType adaSqr = rho * smoothAda[i] + (1 - rho) * g * g; smoothAda[i] = adaSqr; ElemType x2 = smoothX2[i]; ElemType deltaX = -sqrt(x2 + epsilon) / sqrt(adaSqr + epsilon) * g; smoothX2[i] = rho * smoothX2[i] + (1 - rho) * deltaX * deltaX; val[i] += learningRate * deltaX; } } template <class ElemType> void CPUMatrix<ElemType>::AdaDeltaFlushTimestamps(size_t cols, ElemType rho, int* timestamps, int currentTimestamp) { // Sets all timestamps to 0 and updates the two logical buffers that this object holds // so that their values are the same as if a dense implementation of adadelta had been used. // This basically means that the values of these buffers are set to decay * original value // where decay is rho ** (currentTimestamp - timestamp for that column) auto rows = GetNumRows(); auto smoothAda = Data(); auto smoothX2 = Data() + cols * rows; #pragma omp parallel for for (auto col = 0; col < cols; ++col) { ElemType decay = std::pow(rho, ElemType(currentTimestamp - timestamps[col])); auto offset = rows * col; timestamps[col] = 0; for (auto row = 0; row < rows; ++row) { smoothAda[offset + row] *= decay; smoothX2[offset + row] *= decay; } } } template <class ElemType> void CPUMatrix<ElemType>::Reshape(const size_t numRows, const size_t numCols) { if (numRows * numCols != GetNumElements()) InvalidArgument("Reshape: Total number of elements does not match."); m_numRows = numRows; m_numCols = numCols; } // RequireSize() -- Tests if the matrix is the right size. If not, resizes the matrix. This avoids the VerifyResizable check if we're already the right size. template <class ElemType> void CPUMatrix<ElemType>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly /*=true*/) { if (GetNumRows() != numRows || GetNumCols() != numCols) Resize(numRows, numCols, growOnly); } // Resize() -- change matrix size // This function is cheap if the matrix size does not change. // Current content is not preserved. // If growOnly is true, resize will not reallocate memory if the current memory is large enough (i.e., will not shrink). // If this object does not own its memory then new memory cannot be allocated (one can still shrink and/or reshape). 
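// A usage sketch of the contract described above (sizes are made up):
//
//   CPUMatrix<float> m(4, 8);   // allocates 32 elements
//   m.Resize(2, 8);             // growOnly: 16 <= 32, keeps the allocation
//   m.Resize(8, 8);             // 64 > 32: reallocates (contents are lost)
//   m.Resize(2, 2, false);      // !growOnly: shrinks the allocation to 4
//
// Use Reshape() instead when the element count is unchanged and the data
// must be preserved.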
template <class ElemType> void CPUMatrix<ElemType>::Resize(const size_t numRows, const size_t numCols, bool growOnly /*=true*/) { if (GetNumRows() == numRows && GetNumCols() == numCols) return; VerifyResizable(__func__); size_t numElements = numRows * numCols; if (numElements > GetSizeAllocated() || // grow allocation (!growOnly && (numElements != GetSizeAllocated()))) // shrink allocation (not if 'growOnly') { // reallocate buffer ElemType* pArray = nullptr; if (numElements > 0) { pArray = NewArray<ElemType>(numElements); } // success: update the object delete[] Buffer(); SetBuffer(pArray, numElements * sizeof(ElemType)); SetSizeAllocated(numElements); } // success m_sliceViewOffset = 0; m_numRows = numRows; m_numCols = numCols; } // allocated by the callee but should be deleted by the caller // TODO: change to use STL vector instead template <class ElemType> ElemType* CPUMatrix<ElemType>::CopyToArray() const { size_t numElements = GetNumElements(); if (numElements != 0) { ElemType* arrayCopyTo = NewArray<ElemType>(numElements); memcpy(arrayCopyTo, Data(), sizeof(ElemType) * numElements); return arrayCopyTo; } else { return nullptr; } } //memory will be allocated by the callee if not enough but need to be deleted by the caller after it's done //return number of elements copied template <class ElemType> size_t CPUMatrix<ElemType>::CopyToArray(ElemType*& arrayCopyTo, size_t& currentArraySize) const { size_t numElements = GetNumElements(); if (numElements > currentArraySize) { delete arrayCopyTo; arrayCopyTo = NewArray<ElemType>(numElements); currentArraySize = numElements; } if (numElements != 0) { memcpy(arrayCopyTo, Data(), sizeof(ElemType) * numElements); } return numElements; } template <typename ElemType> void CPUMatrix<ElemType>::CopySection(size_t /*numRows*/, size_t /*numCols*/, ElemType* /*dst*/, size_t /*colStride*/) const { // REVIEW alexeyk: currently not used by CPU, but implement when possible. RuntimeError("Not implemented."); } template <class ElemType> inline size_t CPUMatrix<ElemType>::LocateColumn(const size_t col) const { // For performance reason avoid extra validation in release. assert(col == 0 || col < GetNumCols()); return col * m_numRows; // matrix in column-wise storage } template <class ElemType> inline size_t CPUMatrix<ElemType>::LocateElement(const size_t row, const size_t col) const { // For performance reason avoid extra validation in release. 
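    // Column-major addressing, matching the IDX2C macro above: element
    // (row, col) lives at offset col * m_numRows + row. E.g. in a 3x4
    // matrix, (row 1, col 2) maps to 2 * 3 + 1 == 7.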
assert(row < m_numRows); return LocateColumn(col) + row; // matrix in column-wise storage } #pragma endregion Basic Operators #pragma region Member BLAS Functions template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator+=(ElemType alpha) { return AssignSumOf(alpha, *this); } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::operator+(ElemType alpha) const { CPUMatrix<ElemType> c(GetNumRows(), GetNumCols()); c.AssignSumOf(alpha, *this); return c; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOf(const ElemType alpha, const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignSumOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); long m = (long) GetNumRows(), n = (long) GetNumCols(); #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, j) = alpha + a(i, j); us(i + 1, j) = alpha + a(i + 1, j); us(i + 2, j) = alpha + a(i + 2, j); us(i + 3, j) = alpha + a(i + 3, j); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, j) = alpha + a(i, j); } } return *this; } //if [this] and a have same dimension then [this]=[this]+a //if a is a column vector, add to all columns of [this] //if a is a row vector, add to all rows of [this] //if a is a scalar, add it to all elements. template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator+=(const CPUMatrix<ElemType>& a) { // if (a.GetNumElements() == 1) // *this += a(0,0); // else ScaleAndAdd(1, a, *this); return *this; } //if [this] and a have same dimension then OUTPUT=[this]+a //if a is a column vector, add to all columns of [this] //if a is a row vector, add to all rows of [this] template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::operator+(const CPUMatrix<ElemType>& a) const { if (GetNumElements() == 1) { CPUMatrix<ElemType> c(a); c += (*this)(0, 0); return c; } else if (a.GetNumElements() == 1) { CPUMatrix<ElemType> c(*this); c += a(0, 0); return c; } else { CPUMatrix<ElemType> c(*this); // this implementation will introduce a copy overhead. 
        // but reuses the code
        c += a;
        return c;
    }
}

template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    if (a.GetNumElements() == 1)
    {
        SetValue(b);
        (*this) += a;
    }
    else
    {
        SetValue(a);
        (*this) += b;
    }
    return *this;
}

template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator-=(ElemType alpha)
{
    return AssignDifferenceOf(*this, alpha);
}

template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator-(ElemType alpha) const
{
    CPUMatrix<ElemType> c(GetNumRows(), GetNumCols());
    c.AssignDifferenceOf(*this, alpha);
    return c;
}

template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) = alpha - a(i, j);
            us(i + 1, j) = alpha - a(i + 1, j);
            us(i + 2, j) = alpha - a(i + 2, j);
            us(i + 3, j) = alpha - a(i + 3, j);
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j) = alpha - a(i, j);
        }
    }
    return *this;
}

template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const CPUMatrix<ElemType>& a, const ElemType alpha)
{
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) = a(i, j) - alpha;
            us(i + 1, j) = a(i + 1, j) - alpha;
            us(i + 2, j) = a(i + 2, j) - alpha;
            us(i + 3, j) = a(i + 3, j) - alpha;
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j) = a(i, j) - alpha;
        }
    }
    return *this;
}

//if [this] and a have same dimension then [this]=[this]-a
//if a is a column vector, subtract it from all columns of [this]
//if a is a row vector, subtract it from all rows of [this]
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator-=(const CPUMatrix<ElemType>& a)
{
    ScaleAndAdd(-1, a, *this);
    return *this;
}

//if [this] and a have same dimension then output=[this]-a
//if a is a column vector, subtract it from all columns of [this]
//if a is a row vector, subtract it from all rows of [this]
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator-(const CPUMatrix<ElemType>& a) const
{
    CPUMatrix<ElemType> c(*this); // this implementation will introduce a copy overhead, but reuses the code
    c -= a;
    return c;
}

template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    if (this != &a)
    {
        RequireSize(a.GetNumRows(), a.GetNumCols());
        SetValue(a);
    }
    (*this) -= b;
    return *this;
}

template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator*=(ElemType alpha)
{
    Scale(alpha, *this);
    return *this;
}

template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator*(ElemType alpha) const
{
    CPUMatrix<ElemType> c(GetNumRows(), GetNumCols());
    Scale(alpha, *this, c);
    return c;
}

template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignProductOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
    Scale(alpha, a, *this);
    return *this;
}

// [this]=a*b
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignProductOf(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB)
{
    if (a.GetNumElements() == 1)
    {
        if (transposeB)
            AssignTransposeOf(b);
        (*this) *= a(0, 0);
    }
    else if (b.GetNumElements() == 1)
    {
        if (transposeA)
            AssignTransposeOf(a);
        (*this) *= b(0, 0);
    }
    else
        Multiply(a, transposeA, b, transposeB, *this);

    return *this;
}

template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator*(const CPUMatrix<ElemType>& a) const
{
    auto& us = *this;
    if (GetNumElements() == 1)
    {
        CPUMatrix<ElemType> c;
        c.AssignProductOf(us(0, 0), a);
        return c;
    }
    else if (a.GetNumElements() == 1)
    {
        CPUMatrix<ElemType> c;
        c.AssignProductOf(a(0, 0), us);
        return c;
    }
    else
    {
        CPUMatrix<ElemType> c;
        Multiply(*this, a, c);
        return c;
    }
}

template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator/=(ElemType alpha)
{
    (*this) *= 1 / alpha;
    return (*this);
}

template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator/(ElemType alpha) const
{
    return ((*this) * (1 / alpha));
}

//element-wise power
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator^=(ElemType alpha)
{
    auto& us = *this;
    ElementWisePower(alpha, us, us);
    return us;
}

//element-wise power
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator^(ElemType alpha) const
{
    CPUMatrix<ElemType> c(GetNumRows(), GetNumCols());
    ElementWisePower(alpha, *this, c);
    return c;
}

template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementPowerOf(const CPUMatrix<ElemType>& a, const ElemType power)
{
    ElementWisePower(power, a, *this);
    return *this;
}

//[this]=[this] .* a (we cannot override operator .* in c++)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
    return AssignElementProductOf(*this, a);
}

//[this]=[this] ./ a (we cannot override operator ./ in c++)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementDivideBy(const CPUMatrix<ElemType>& a)
{
    return AssignElementDivisionOf(*this, a);
}

//[this]=a .* b
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AssignElementProductOf: Matrix is empty.");

    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
        InvalidArgument("AssignElementProductOf: The input matrix dimensions do not match.");

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel
for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, j) = a(i, j) * b(i, j); us(i + 1, j) = a(i + 1, j) * b(i + 1, j); us(i + 2, j) = a(i + 2, j) * b(i + 2, j); us(i + 3, j) = a(i + 3, j) * b(i + 3, j); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, j) = a(i, j) * b(i, j); } } return *this; } //[this] +=a .* b template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddElementProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b) { if (a.IsEmpty() || b.IsEmpty()) LogicError("AddElementProductOf: Matrix is empty."); if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols())) InvalidArgument("AddElementProductOf : The input matrix dimensions do not match."); if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == GetNumCols())) InvalidArgument("AddElementProductOf : The input matrix dimensions do not match [this]."); auto& us = *this; long m = (long) GetNumRows(), n = (long) GetNumCols(); #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, j) += a(i, j) * b(i, j); us(i + 1, j) += a(i + 1, j) * b(i + 1, j); us(i + 2, j) += a(i + 2, j) * b(i + 2, j); us(i + 3, j) += a(i + 3, j) * b(i + 3, j); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, j) += a(i, j) * b(i, j); } } return *this; } //[this]=a ./ b // TODO: This clips the divisor by a small value. Is that really what one would want? template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementDivisionOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b) { if (a.IsEmpty() || b.IsEmpty()) LogicError("AssignElementDivisionOf: Matrix is empty."); if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols())) InvalidArgument("AssignElementDivisionOf : The input matrix dimensions do not match."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); ElemType smallValue = EPS_IN_INVERSE; #pragma omp parallel for foreach_coord (i, j, us) { ElemType v = b(i, j); if (v >= 0 && v < smallValue) us(i, j) = a(i, j) / smallValue; else if (v < 0 && v > -smallValue) us(i, j) = a(i, j) / (-smallValue); else us(i, j) = a(i, j) / v; } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::ColumnElementMultiplyWith(const CPUMatrix<ElemType>& a) { if (a.IsEmpty() || IsEmpty()) LogicError("ColumnElementMultiplyWith: Matrix is empty."); if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == 1)) InvalidArgument("ColumnElementMultiplyWith: The input matrix should be a col vector and match [this]'s rows."); auto& us = *this; long m = (long) GetNumRows(), n = (long) GetNumCols(); #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, j) *= a(i, 0); us(i + 1, j) *= a(i + 1, 0); us(i + 2, j) *= a(i + 2, 0); us(i + 3, j) *= a(i + 3, 0); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, j) *= a(i, 0); } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::RowElementMultiplyWith(const CPUMatrix<ElemType>& a) { if (a.IsEmpty() || IsEmpty()) LogicError("RowElementMultiplyWith: Matrix is empty."); if (!(a.GetNumRows() == 1 && a.GetNumCols() == GetNumCols())) InvalidArgument("RowElementMultiplyWith: The input matrix should be a row vector and match [this]'s columns."); auto& us = *this; long m = (long) GetNumRows(), n = (long) GetNumCols(); 
#pragma omp parallel for for (long j = 0; j < n; j++) { ElemType v = a(0, j); // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, j) *= v; us(i + 1, j) *= v; us(i + 2, j) *= v; us(i + 3, j) *= v; } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, j) *= v; } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::RowElementDivideBy(const CPUMatrix<ElemType>& a) { if (a.IsEmpty() || IsEmpty()) LogicError("RowElementDivideBy: Matrix is empty."); if (!(a.GetNumRows() == 1 && a.GetNumCols() == GetNumCols())) InvalidArgument("RowElementDivideBy: The input matrix should be a row vector and match [this]'s columns."); auto& us = *this; long m = (long) GetNumRows(), n = (long) GetNumCols(); #pragma omp parallel for for (long j = 0; j < n; j++) { ElemType v = a(0, j); if (v >= 0 && v < EPS_IN_INVERSE) v = EPS_IN_INVERSE; else if (v < 0 && v > -EPS_IN_INVERSE) v = (-EPS_IN_INVERSE); // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, j) /= v; us(i + 1, j) /= v; us(i + 2, j) /= v; us(i + 3, j) /= v; } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, j) /= v; } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::ColumnElementDivideBy(const CPUMatrix<ElemType>& a) { if (a.IsEmpty() || IsEmpty()) LogicError("ColumnElementDivideBy: Matrix is empty."); if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == 1)) InvalidArgument("ColumnElementDivideBy: The input matrix should be a col vector and match [this]'s rows."); auto& us = *this; long m = (long) GetNumRows(), n = (long) GetNumCols(); ElemType smallValue = EPS_IN_INVERSE; #pragma omp parallel for for (long j = 0; j < n; j++) { for (long i = 0; i < m; i++) { ElemType v = a(i, 0); if (v >= 0 && v < smallValue) us(i, j) /= smallValue; else if (v < 0 && v > -smallValue) us(i, j) /= (-smallValue); else us(i, j) /= v; } } return *this; } //[this]=1 ./ a template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementInverse() { return AssignElementInverseOf(*this); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementInverseOf(const CPUMatrix<ElemType>& a) { ElemType smallValue = EPS_IN_INVERSE; if (a.IsEmpty()) LogicError("AssignElementInverseOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_coord (i, j, us) { if (a(i, j) < 0 && a(i, j) > -smallValue) us(i, j) = 1 / (-smallValue); else if (a(i, j) >= 0 && a(i, j) < smallValue) us(i, j) = 1 / smallValue; else us(i, j) = 1 / a(i, j); } return *this; } //[this]=sigmoid([this]) element wise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSigmoid() { return AssignSigmoidOf(*this); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSigmoidOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignSigmoidOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_coord (i, j, us) { if (a(i, j) >= 0) us(i, j) = 1 / (1 + exp(-a(i, j))); else { ElemType v = exp(a(i, j)); us(i, j) = v / (1 + v); } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLinearRectifierDerivative() { return AssignLinearRectifierDerivativeOf(*this); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLinearRectifierDerivativeOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) 
LogicError("AssignLinearRectifierDerivativeOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); long m = (long) GetNumRows(), n = (long) GetNumCols(); #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, j) = a(i, j) > 0.0f ? 1.0f : 0.0f; us(i + 1, j) = a(i + 1, j) > 0.0f ? 1.0f : 0.0f; us(i + 2, j) = a(i + 2, j) > 0.0f ? 1.0f : 0.0f; us(i + 3, j) = a(i + 3, j) > 0.0f ? 1.0f : 0.0f; } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, j) = a(i, j) > 0.0f ? 1.0f : 0.0f; } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSigmoidDerivative() { return AssignSigmoidDerivativeOf(*this); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSigmoidDerivativeOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignSigmoidDerivativeOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); long m = (long) GetNumRows(), n = (long) GetNumCols(); #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { ElemType v = a(i, j); us(i, j) = v * (1 - v); ElemType v1 = a(i + 1, j); us(i + 1, j) = v1 * (1 - v1); ElemType v2 = a(i + 2, j); us(i + 2, j) = v2 * (1 - v2); ElemType v3 = a(i + 3, j); us(i + 3, j) = v3 * (1 - v3); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { ElemType v = a(i, j); us(i, j) = v * (1 - v); } } return *this; } //[this]=tanh([this]) element wise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTanh() { return AssignTanhOf(*this); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTanhOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignTanhOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); long m = (long) GetNumRows(), n = (long) GetNumCols(); #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, j) = tanh(a(i, j)); us(i + 1, j) = tanh(a(i + 1, j)); us(i + 2, j) = tanh(a(i + 2, j)); us(i + 3, j) = tanh(a(i + 3, j)); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, j) = tanh(a(i, j)); } } return *this; } //[this]=atanh([this]) element wise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAtanh() { return AssignAtanhOf(*this); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAtanhOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignAtanhOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); long m = (long) GetNumRows(), n = (long) GetNumCols(); #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, j) = atanh(a(i, j)); us(i + 1, j) = atanh(a(i + 1, j)); us(i + 2, j) = atanh(a(i + 2, j)); us(i + 3, j) = atanh(a(i + 3, j)); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, j) = atanh(a(i, j)); } } return *this; } //[this]=softmax([this]) element wise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLogSoftmax(const bool isColWise) { return AssignLogSoftmaxOf(*this, isColWise); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLogSoftmaxOf(const CPUMatrix<ElemType>& a, const bool 
isColWise) { if (a.IsEmpty()) LogicError("AssignLogSoftmaxOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); if (isColWise) { #pragma omp parallel for foreach_column (j, a) { // we need to extract max before applying exp to avoid overflow ElemType maxV = a(0, j); foreach_row (i, a) maxV = std::max(maxV, a(i, j)); ElemType sum = 0; foreach_row (i, a) sum += exp(us(i, j) = a(i, j) - maxV); sum = log(sum); foreach_row (i, us) us(i, j) -= sum; } } else { #pragma omp parallel for foreach_row (i, a) { // we need to extract max before applying exp to avoid overflow ElemType maxV = a(i, 0); foreach_column (j, a) maxV = std::max(maxV, a(i, j)); ElemType sum = 0; foreach_column (j, a) sum += exp(us(i, j) = a(i, j) - maxV); sum = log(sum); foreach_column (j, us) us(i, j) -= sum; } } return *this; } //[this]=hardmax([this]) //the max element is set to 1, all others to 0 template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceHardmax(const bool isColWise) { return AssignHardmaxOf(*this, isColWise); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignHardmaxOf(const CPUMatrix<ElemType>& a, const bool isColWise) { if (a.IsEmpty()) LogicError("AssignHardmaxOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); bool isInplace = (us.Data() == a.Data()); if (!isInplace) memset(us.Data(), 0, a.GetNumElements() * sizeof(ElemType)); if (isColWise) { foreach_column (j, a) { // we need to extract max ElemType maxV = a(0, j); long maxI = 0; foreach_row (i, a) { if (maxV < a(i, j)) { maxV = a(i, j); maxI = i; } } if (isInplace) memset(us.Data() + j * a.GetNumRows(), 0, a.GetNumRows() * sizeof(ElemType)); us(maxI, j) = 1.0f; } } else { foreach_row (i, a) { // we need to extract max ElemType maxV = a(i, 0); long maxJ = 0; foreach_column (j, a) { if (maxV < a(i, j)) { maxV = a(i, j); maxJ = j; } } if (isInplace) { foreach_column(j, us) us(i, j) = (j == maxJ) ? 1.0f : 0.0f; } else us(i, maxJ) = 1.0f; } } return *this; } //[this]=sqrt([this]) element wise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSqrt() { return AssignSqrtOf(*this); } //to prevent negative values caused by floating-point operations, we force inputs to be >=0 //this may, however, hide problems in the caller.
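// Editor's illustration (a sketch added for clarity, not part of the original source;
// assumes the CPUMatrix(rows, cols) constructor and element accessor used throughout
// this file): the clamp below turns a tiny negative rounding residue into 0 instead of NaN.
//
//   CPUMatrix<float> v(1, 1);
//   v(0, 0) = -1e-9f; // e.g. left over from a variance computed as E[x^2] - E[x]^2
//   v.InplaceSqrt();  // input is clamped to >= 0 first, so v(0, 0) == 0.0f, not NaN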
template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSqrtOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignSqrtOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); long m = (long) GetNumRows(), n = (long) GetNumCols(); #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, j) = sqrt(max((ElemType)0, a(i, j))); us(i + 1, j) = sqrt(max((ElemType)0, a(i + 1, j))); us(i + 2, j) = sqrt(max((ElemType)0, a(i + 2, j))); us(i + 3, j) = sqrt(max((ElemType)0, a(i + 3, j))); } // remaining for (long i = m & ~3; i < m; i++) { us(i, j) = sqrt(max((ElemType)0, a(i, j))); } } return *this; } //[this]=exp([this]) element wise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceExp() { return AssignExpOf(*this); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignExpOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignExpOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); long m = (long) GetNumRows(), n = (long) GetNumCols(); #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, j) = exp(a(i, j)); us(i + 1, j) = exp(a(i + 1, j)); us(i + 2, j) = exp(a(i + 2, j)); us(i + 3, j) = exp(a(i + 3, j)); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, j) = exp(a(i, j)); } } return *this; } //[this]=abs([this]) element wise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAbs() { return AssignAbsOf(*this); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAbsOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignAbsOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); long m = (long) GetNumRows(), n = (long) GetNumCols(); #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, j) = abs(a(i, j)); us(i + 1, j) = abs(a(i + 1, j)); us(i + 2, j) = abs(a(i + 2, j)); us(i + 3, j) = abs(a(i + 3, j)); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, j) = abs(a(i, j)); } } return *this; } //[this]=log([this]) element wise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLog() { return AssignLogOf(*this); } //[this]=log10([this]) element wise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLog10() { return AssignLog10Of(*this); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLogOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignLogOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_coord (i, j, a) { const ElemType v = a(i, j); if (v < EPS_IN_LOG) { us(i, j) = LOG_OF_EPS_IN_LOG; } else us(i, j) = log(v); } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLog10Of(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignLog10Of: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_coord (i, j, a) { const ElemType v = a(i, j); if (v <= 0) LogicError("AssignLog10Of: Log10 can only be applied to numbers larger than 0."); else if (v < EPS_IN_LOG) { us(i, j) = 
LOG10_OF_EPS_IN_LOG; } else us(i, j) = log10(v); } return *this; } //[this]=cos([this]) element wise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceCosine() { return AssignCosineOf(*this); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCosineOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignCosineOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_coord (i, j, a) { const ElemType v = a(i, j); us(i, j) = cos(v); } return *this; } //[this]=-sin([this]) element wise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceNegativeSine() { return AssignNegativeSineOf(*this); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNegativeSineOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignNegativeSineOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_coord (i, j, a) { const ElemType v = a(i, j); us(i, j) = -sin(v); } return *this; } //[this]=tan([this]) element wise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTan() { return AssignTanOf(*this); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTanOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignTanOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_coord(i, j, a) { const ElemType v = a(i, j); us(i, j) = tan(v); } return *this; } //[this]=acos([this]) element wise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAcos() { return AssignAcosOf(*this); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAcosOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignAcosOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_coord (i, j, a) { const ElemType v = a(i, j); us(i, j) = acos(v); } return *this; } //[this]=asin([this]) element wise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAsin() { return AssignAsinOf(*this); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAsinOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignAsinOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_coord (i, j, a) { const ElemType v = a(i, j); us(i, j) = asin(v); } return *this; } //[this]=atan([this]) element wise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAtan() { return AssignAtanOf(*this); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAtanOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignAtanOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_coord(i, j, a) { const ElemType v = a(i, j); us(i, j) = atan(v); } return *this; } //[this]=cosh([this]) element wise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceCosh() { return AssignCoshOf(*this); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCoshOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignCoshOf: Matrix a is 
empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_coord (i, j, a) { const ElemType v = a(i, j); us(i, j) = cosh(v); } return *this; } //[this]=sinh([this]) element wise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSinh() { return AssignSinhOf(*this); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSinhOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignSinhOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_coord (i, j, a) { const ElemType v = a(i, j); us(i, j) = sinh(v); } return *this; } //[this]=asinh([this]) element wise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAsinh() { return AssignAsinhOf(*this); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAsinhOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignAsinhOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_coord (i, j, a) { const ElemType v = a(i, j); us(i, j) = asinh(v); } return *this; } //Threshold truncating: this[i] = max( this[i], threshold ) template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncateBottom(const ElemType threshold) { if (IsEmpty()) LogicError("InplaceTruncateBottom: Matrix is empty."); auto& us = *this; long m = (long) GetNumRows(), n = (long) GetNumCols(); #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { if (us(i, j) < threshold) us(i, j) = threshold; if (us(i + 1, j) < threshold) us(i + 1, j) = threshold; if (us(i + 2, j) < threshold) us(i + 2, j) = threshold; if (us(i + 3, j) < threshold) us(i + 3, j) = threshold; } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { if (us(i, j) < threshold) us(i, j) = threshold; } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncate(const ElemType threshold) { if (IsEmpty()) LogicError("InplaceTruncate: Matrix is empty."); auto& us = *this; ElemType locThresholdPos = abs(threshold); ElemType locTHresholdNeg = -locThresholdPos; long m = (long) GetNumRows(), n = (long) GetNumCols(); #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { if (us(i, j) > locThresholdPos) us(i, j) = locThresholdPos; else if (us(i, j) < locTHresholdNeg) us(i, j) = locTHresholdNeg; if (us(i + 1, j) > locThresholdPos) us(i + 1, j) = locThresholdPos; else if (us(i + 1, j) < locTHresholdNeg) us(i + 1, j) = locTHresholdNeg; if (us(i + 2, j) > locThresholdPos) us(i + 2, j) = locThresholdPos; else if (us(i + 2, j) < locTHresholdNeg) us(i + 2, j) = locTHresholdNeg; if (us(i + 3, j) > locThresholdPos) us(i + 3, j) = locThresholdPos; else if (us(i + 3, j) < locTHresholdNeg) us(i + 3, j) = locTHresholdNeg; } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { if (us(i, j) > locThresholdPos) us(i, j) = locThresholdPos; else if (us(i, j) < locTHresholdNeg) us(i, j) = locTHresholdNeg; } } return *this; } //x= x-threshold if x>threshold, x+threshold if x<-threshold, 0 otherwise template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSoftThreshold(const ElemType threshold) { if (IsEmpty()) LogicError("InplaceTruncate: Matrix is empty."); long m = (long) GetNumElements(); 
ElemType* bufPtr = Data(); #pragma omp parallel for for (long i = 0; i < (m & ~3); i += 4) // four-way unrolling { if (bufPtr[i] > threshold) bufPtr[i] -= threshold; else if (bufPtr[i] < -threshold) bufPtr[i] += threshold; else bufPtr[i] = 0; if (bufPtr[i + 1] > threshold) bufPtr[i + 1] -= threshold; else if (bufPtr[i + 1] < -threshold) bufPtr[i + 1] += threshold; else bufPtr[i + 1] = 0; if (bufPtr[i + 2] > threshold) bufPtr[i + 2] -= threshold; else if (bufPtr[i + 2] < -threshold) bufPtr[i + 2] += threshold; else bufPtr[i + 2] = 0; if (bufPtr[i + 3] > threshold) bufPtr[i + 3] -= threshold; else if (bufPtr[i + 3] < -threshold) bufPtr[i + 3] += threshold; else bufPtr[i + 3] = 0; } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { if (bufPtr[i] > threshold) bufPtr[i] -= threshold; else if (bufPtr[i] < -threshold) bufPtr[i] += threshold; else bufPtr[i] = 0; } return *this; } //Threshold truncating: this[i] = max( a[i], threshold ) template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTruncateBottomOf(const CPUMatrix<ElemType>& a, const ElemType threshold) { if (a.IsEmpty()) LogicError("AssignTruncateBottomOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_coord (i, j, a) { if (a(i, j) < threshold) us(i, j) = threshold; else us(i, j) = a(i, j); } return *this; } //Threshold truncating: this[i] = min( this[i], threshold ) template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncateTop(const ElemType threshold) { if (IsEmpty()) LogicError("InplaceTruncateTop: Matrix is empty."); auto& us = *this; #pragma omp parallel for foreach_coord (i, j, us) { if (us(i, j) > threshold) us(i, j) = threshold; } return *this; } //Threshold truncating: this[i] = min( a[i], threshold ) template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTruncateTopOf(const CPUMatrix<ElemType>& a, const ElemType threshold) { if (a.IsEmpty()) LogicError("AssignTruncateTopOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_coord (i, j, a) { if (a(i, j) > threshold) us(i, j) = threshold; else us(i, j) = a(i, j); } return *this; } //Threshold truncating: this[i] = 0 if abs(this[i]) < threshold. 
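// Editor's usage sketch (hypothetical values, not part of the original source):
//
//   CPUMatrix<float> w(2, 2);
//   w(0, 0) = 1e-8f; w(0, 1) = 0.5f; w(1, 0) = -1e-9f; w(1, 1) = -0.5f;
//   w.SetToZeroIfAbsLessThan(1e-6f); // only the +/-0.5 entries remain non-zero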
template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::SetToZeroIfAbsLessThan(const ElemType threshold) { if (IsEmpty()) LogicError("SetToZeroIfAbsLessThan: Matrix is empty."); auto& us = *this; #pragma omp parallel for foreach_coord (i, j, us) { if (abs(us(i, j)) < threshold) us(i, j) = 0; } return *this; } //sum of all abs(elements) template <class ElemType> ElemType CPUMatrix<ElemType>::SumOfAbsElements() const { if (IsEmpty()) LogicError("SumOfAbsElements: Matrix is empty."); if (std::is_same<ElemType, double>::value) { return (ElemType) cblas_dasum((int) GetNumElements(), reinterpret_cast<double*>(Data()), 1); } else if (std::is_same<ElemType, float>::value) { #pragma warning(suppress : 4244) return cblas_sasum((int) GetNumElements(), reinterpret_cast<float*>(Data()), 1); } else { RuntimeError("Unsupported data format"); } } //sum of all elements template <class ElemType> ElemType CPUMatrix<ElemType>::SumOfElements() const { if (IsEmpty()) LogicError("SumOfElements: Matrix is empty."); ElemType sum = 0; long m = (long) GetNumElements(); // note: OpenMP requires loop indices to be long, not size_t ElemType* bufPtr = Data(); //four-way unrolling #pragma omp parallel for reduction(+ : sum) for (long i = 0; i < (m & ~3); i += 4) { sum += bufPtr[i] + bufPtr[i + 1] + bufPtr[i + 2] + bufPtr[i + 3]; } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { sum += bufPtr[i]; } return sum; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOfElements(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignSumOfElements: Matrix a is empty."); auto& us = *this; us.RequireSize(1, 1); us(0, 0) = a.SumOfElements(); return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignOneHot(const CPUMatrix<ElemType>& a, vector<size_t>& shape, size_t axis) { if (a.IsEmpty()) LogicError("AssignOneHot: Matrix a is empty."); if (axis >= shape.size()) LogicError("AssignOneHot: axis is out of range"); size_t item_size = 1; for (size_t i = 0; i < shape.size() && i < axis; i++) item_size *= shape[i]; size_t num_class = shape[axis]; auto& us = *this; auto nCols = a.GetNumCols(); auto nRows = num_class * a.GetNumRows(); us.RequireSize(nRows, nCols); ElemType* bufPtr = Data(); ElemType* aBufPtr = a.Data(); memset(bufPtr, 0, sizeof(ElemType) * nRows * nCols); #pragma omp parallel for for (long i = 0; i < a.GetNumElements(); i++) { if (aBufPtr[i] >= 0 && aBufPtr[i] < num_class) { size_t block_id = i / item_size; size_t item_id = i % item_size; bufPtr[block_id * num_class * item_size + item_id + item_size * (size_t)aBufPtr[i]] = 1; } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::GatherFromTarget(const CPUMatrix<ElemType>& indices, const CPUMatrix<ElemType>& target, size_t row_elements) { if (indices.IsEmpty() || target.IsEmpty()) LogicError("GatherFromTarget: input matrix is empty."); if (row_elements == 0) LogicError("GatherFromTarget: target matrix needs at least 1 dim."); auto nCols = indices.GetNumCols(); auto nRows = indices.GetNumRows() * row_elements; this->RequireSize(nRows, nCols); ElemType* indicesBufPtr = indices.Data(); ElemType* targetBufPtr = target.Data(); ElemType* buffer = Data(); #pragma omp parallel for for (int i = 0; i < indices.GetNumElements(); i++) { memcpy(buffer + i * row_elements, targetBufPtr + ((size_t)indicesBufPtr[i] * row_elements), sizeof(ElemType) * row_elements); } return *this; } template <class ElemType> CPUMatrix<ElemType>& 
CPUMatrix<ElemType>::ScatterToIndices(const CPUMatrix<ElemType>& values, const CPUMatrix<ElemType>& indices, size_t row_elements, const CPUMatrix<char>* mask/*= nullptr*/) { if (indices.IsEmpty() || values.IsEmpty() || (mask && mask->IsEmpty())) LogicError("ScatterToIndices: input matrix is empty."); if (mask && (indices.GetNumCols() % mask->GetNumCols() != 0)) LogicError("ScatterToIndices: The number of columns (%zu) of the matrix slice to be masked is not a multiple of the number of columns (%zu) of the mask slice.", indices.GetNumCols(), mask->GetNumCols()); ElemType* indicesBufPtr = indices.Data(); ElemType* valueBufPtr = values.Data(); char* maskBufPtr = mask ? mask->Data() : nullptr; ElemType* buffer = Data(); size_t numElemsPerMaskEntry = mask ? indices.GetNumCols() / mask->GetNumCols() * indices.GetNumRows() : 0; ScatterValues(indicesBufPtr, valueBufPtr, buffer, static_cast<ElemType>(1), indices.GetNumElements(), row_elements, this->GetNumCols(), maskBufPtr, numElemsPerMaskEntry); return *this; } template <class ElemType> bool CPUMatrix<ElemType>::IsEqualTo(const CPUMatrix<ElemType>& a, const ElemType threshold /*= 1e-8*/) const { return AreEqual(*this, a, threshold); } template <class ElemType> void CPUMatrix<ElemType>::VectorSum(const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c, const bool isColWise) { if (a.IsEmpty()) LogicError("VectorSum: Input matrix a is empty."); const int m = (int) a.GetNumRows(); const int n = (int) a.GetNumCols(); assert(m > 0 && n > 0); // converting from size_t to int may cause overflow if (isColWise) // col-wise { c.RequireSize(1, n); #pragma omp parallel for foreach_column (j, a) { ElemType v = 0; foreach_row (i, a) { #pragma omp atomic v += a(i, j); } c(0, j) = v; } } else { c.RequireSize(m, 1); #pragma omp parallel for foreach_row (i, a) { ElemType v = 0; foreach_column (j, a) { #pragma omp atomic v += a(i, j); } c(i, 0) = v; } } } template <class ElemType> void CPUMatrix<ElemType>::VectorNorm1(CPUMatrix<ElemType>& c, const bool isColWise) const { if (IsEmpty()) LogicError("VectorNorm1: Matrix is empty."); auto& us = *this; const int m = (int) us.GetNumRows(); const int n = (int) us.GetNumCols(); assert(m > 0 && n > 0); // converting from size_t to int may cause overflow if (isColWise) // col-wise { c.RequireSize(1, n); #pragma omp parallel for foreach_column (j, us) { ElemType v = 0; foreach_row (i, us) { #pragma omp atomic v += abs(us(i, j)); } c(0, j) = v; } } else { c.RequireSize(m, 1); #pragma omp parallel for foreach_row (i, us) { ElemType v = 0; foreach_column (j, us) { #pragma omp atomic v += abs(us(i, j)); } c(i, 0) = v; } } } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNorm1Of(CPUMatrix<ElemType>& a, const bool isColWise) { a.VectorNorm1(*this, isColWise); return *this; } template <class ElemType> void CPUMatrix<ElemType>::VectorNorm2(CPUMatrix<ElemType>& c, const bool isColWise) const { if (IsEmpty()) LogicError("VectorNorm2: Matrix is empty."); auto& us = *this; const int m = (int) us.GetNumRows(); const int n = (int) us.GetNumCols(); assert(m > 0 && n > 0); // converting from size_t to int may cause overflow ElemType* bufPtr = us.Data(); if (isColWise) // col-wise { c.RequireSize(1, n); if (std::is_same<ElemType, double>::value) { #pragma omp parallel for foreach_column (j, c) { c(0, j) = (ElemType) cblas_dnrm2(m, reinterpret_cast<double*>(bufPtr + us.LocateColumn(j)), 1); } } else if (std::is_same<ElemType, float>::value) { #pragma omp parallel for foreach_column (j, c) { #pragma 
warning(suppress : 4244) c(0, j) = cblas_snrm2(m, reinterpret_cast<float*>(bufPtr + us.LocateColumn(j)), 1); } } else { RuntimeError("Unsupported data format"); } } else { c.RequireSize(m, 1); if (std::is_same<ElemType, double>::value) { #pragma omp parallel for foreach_row (i, c) { c(i, 0) = cblas_dnrm2(n, reinterpret_cast<double*>(bufPtr + i), m); } } else if (std::is_same<ElemType, float>::value) { #pragma omp parallel for foreach_row (i, c) { #pragma warning(suppress : 4244) c(i, 0) = cblas_snrm2(n, reinterpret_cast<float*>(bufPtr + i), m); } } else { RuntimeError("Unsupported data format"); } } } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNorm2Of(CPUMatrix<ElemType>& a, const bool isColWise) { a.VectorNorm2(*this, isColWise); return *this; } template <class ElemType> void CPUMatrix<ElemType>::VectorNormInf(CPUMatrix<ElemType>& c, const bool isColWise) const { if (IsEmpty()) LogicError("VectorNormInf: Matrix is empty."); auto& us = *this; const int m = (int) us.GetNumRows(); const int n = (int) us.GetNumCols(); assert(m > 0 && n > 0); // converting from size_t to int may cause overflow if (isColWise) // col-wise { c.RequireSize(1, n); // #pragma omp parallel for foreach_column (j, us) { ElemType v = 0; foreach_row (i, us) { v = std::max(v, fabs_(us(i, j))); } c(0, j) = v; } } else { c.RequireSize(m, 1); // #pragma omp parallel for foreach_row (i, us) { ElemType v = 0; foreach_column (j, us) { v = std::max(v, fabs_(us(i, j))); } c(i, 0) = v; } } } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNormInfOf(CPUMatrix<ElemType>& a, const bool isColWise) { a.VectorNormInf(*this, isColWise); return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignInnerProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const bool isColWise) { InnerProduct(a, b, *this, isColWise); return *this; } //column-wise crossproduct template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignKhatriRaoProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b) { if (a.IsEmpty() || b.IsEmpty()) LogicError("AssignKhatriRaoProductOf: Matrix is empty."); long cols = (long) a.GetNumCols(); if (cols != b.GetNumCols()) InvalidArgument("a.GetNumCols() != b.GetNumCols()"); long rowsA = (long) a.GetNumRows(); long rowsB = (long) b.GetNumRows(); RequireSize(rowsA * rowsB, cols); #ifdef __INTEL_COMPILER // TODO: check this #pragma simd statement #endif #pragma omp parallel for for (long k = 0; k < cols; k++) { long jj = 0; for (long j = 0; j < rowsB; j++) { for (long i = 0; i < rowsA; i++) { (*this)(jj++, k) = a(i, k) * b(j, k); } } } return *this; } //column-wise reshaped product. Used to compute KhatriRaoProduct Gradient // this = reshape each column of a from (K1xK2,1) to (K1, K2) // if each column of a is not transposed, each (K1, K2) times each column of b (K2, frames). 
// the output is a (K1, frames) matrix // if each column of a is transposed, each (K1, K2)^T times each column of b (K1, frames) and output is (K2, frames) template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddColumnReshapeProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const bool transposeAColumn) { if (a.IsEmpty() || b.IsEmpty()) LogicError("AddColumnReshapeProductOf: Matrix is empty."); long cols = (long) a.GetNumCols(); if (cols != b.GetNumCols()) InvalidArgument("AddColumnReshapeProductOf: a.GetNumCols() != b.GetNumCols()"); long rowsA = (long) a.GetNumRows(); long rowsB = (long) b.GetNumRows(); if (rowsA % rowsB != 0) InvalidArgument("AddColumnReshapeProductOf: number of rows in a should be a multiple of that in b."); long rowsC = rowsA / rowsB; if (rowsC != GetNumRows() || cols != GetNumCols()) InvalidArgument("AddColumnReshapeProductOf: This matrix does not have the right size."); auto& us = *this; if (transposeAColumn) { // find nrows and ncols of the reshaped a long nrows = rowsB; long ncols = rowsC; #ifdef __INTEL_COMPILER // TODO: check this #pragma simd statement #endif #pragma omp parallel for foreach_column (t, a) { size_t k = 0; for (size_t j = 0; j < ncols; j++) // row and col are transposed { ElemType v = 0; for (size_t i = 0; i < nrows; i++) { v += a(k, t) * b(i, t); k++; } us(j, t) += v; } } } else { size_t ncols = rowsB; size_t nrows = rowsC; #ifdef __INTEL_COMPILER // TODO: check this #pragma simd statement #endif #pragma omp parallel for foreach_column (t, a) { size_t k = 0; for (size_t j = 0; j < ncols; j++) { for (size_t i = 0; i < nrows; i++) { us(i, t) += a(k, t) * b(j, t); k++; } } } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddWithScaleOf(ElemType alpha, const CPUMatrix<ElemType>& a) { ScaleAndAdd(alpha, a, *this); return *this; } template <class ElemType> ElemType CPUMatrix<ElemType>::FrobeniusNorm() const { if (IsEmpty()) LogicError("FrobeniusNorm: Matrix is empty."); ElemType v = 0; long m = (long) GetNumElements(); ElemType* bufPtr = Data(); //four-way unrolling #pragma omp parallel for reduction(+ : v) for (long i = 0; i < (m & ~3); i += 4) { v += bufPtr[i] * bufPtr[i] + bufPtr[i + 1] * bufPtr[i + 1] + bufPtr[i + 2] * bufPtr[i + 2] + bufPtr[i + 3] * bufPtr[i + 3]; } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { v += bufPtr[i] * bufPtr[i]; } return sqrt(v); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignFrobeniusNormOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignFrobeniusNormOf: Matrix a is empty."); auto& us = *this; us.RequireSize(1, 1); us(0, 0) = a.FrobeniusNorm(); return us; } template <class ElemType> ElemType CPUMatrix<ElemType>::MatrixNormInf() const { if (IsEmpty()) LogicError("MatrixNormInf: Matrix is empty."); auto& us = *this; ElemType v = 0; #pragma omp parallel for foreach_coord (i, j, us) { #pragma omp critical { v = std::max(v, fabs_(us(i, j))); } } return v; } template <class ElemType> ElemType CPUMatrix<ElemType>::MatrixNorm0() const { if (IsEmpty()) LogicError("MatrixNorm0: Matrix is empty."); auto& us = *this; ElemType v = 0; #pragma omp parallel for foreach_coord (i, j, us) { if (us(i, j) != 0) { #pragma omp critical { ++v; } } } return v; } template <class ElemType> ElemType CPUMatrix<ElemType>::MatrixNorm1() const { if (IsEmpty()) LogicError("MatrixNorm1: Matrix is empty."); auto& us = *this; ElemType sum = 0; #pragma omp parallel for reduction(+ : sum) foreach_coord (i, j, us) { sum += 
abs(us(i, j)); } return sum; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSignOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignSignOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_column (j, us) { foreach_row (i, us) { ElemType v = a(i, j); if (!std::isnan(v)) us(i, j) = (v == (ElemType) 0 ? (ElemType) 0 : (v > 0 ? (ElemType) 1 : (ElemType)(-1))); else us(i, j) = v; } } return us; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddSignOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AddSignOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_column (j, us) { foreach_row (i, us) { ElemType v = a(i, j); if (!std::isnan(v)) us(i, j) += (v == (ElemType) 0 ? (ElemType) 0 : (v > 0 ? (ElemType) 1 : (ElemType)(-1))); else us(i, j) = v; } } return us; } //I decided to use CPUMatrix<ElemType>& maxIndexes instead of an integer vector because the result may be used in additional calculations template <class ElemType> void CPUMatrix<ElemType>::VectorMax(CPUMatrix<ElemType>& maxIndexes, CPUMatrix<ElemType>& maxValues, const bool isColWise, int topK) const { if (IsEmpty()) LogicError("VectorMax: Matrix is empty."); auto& us = *this; const int m = (int) GetNumRows(); const int n = (int) GetNumCols(); if (topK > m) InvalidArgument("VectorMax: topK must be less than or equal to the number of rows"); assert(m > 0 && n > 0); // converting from size_t to int may cause overflow if (isColWise) // col-wise { maxValues.RequireSize(topK, n); maxIndexes.RequireSize(topK, n); if (topK == 1) { #pragma omp parallel for for (int j = 0; j < n; j++) { ElemType v = us(0, j); size_t index = 0; foreach_row (i, us) { if (v < us(i, j)) { index = i; v = us(i, j); } } maxValues(0, j) = v; maxIndexes(0, j) = (ElemType) index; } } else { std::vector<int> indices(m); const ElemType* curVal = Data(); ElemType* curIdx = maxIndexes.Data(); ElemType* curMax = maxValues.Data(); for (int icol = 0; icol < n; icol++, curVal += m, curIdx += topK, curMax += topK) { std::iota(indices.begin(), indices.end(), 0); // Partial sort, descending order. std::partial_sort(indices.begin(), indices.begin() + topK, indices.end(), [curVal](const int& a, const int& b) { return curVal[a] > curVal[b]; }); // REVIEW alexeyk: the following produces a warning (see SCL_SECURE_NO_WARNINGS) so use a loop instead. 
// std::transform(indices.begin(), indices.begin() + topK, curIdx, [](const int& a) { return static_cast<ElemType>(a); }); for (int i2 = 0; i2 < topK; i2++) { curIdx[i2] = static_cast<ElemType>(indices[i2]); curMax[i2] = curVal[indices[i2]]; } } } } else { if (topK > 1) RuntimeError("Row-wise TopK max is not supported."); maxValues.RequireSize(m, 1); maxIndexes.RequireSize(m, 1); #pragma omp parallel for for (int i = 0; i < m; i++) { ElemType v = us(i, 0); size_t index = 0; foreach_column (j, us) { if (v < us(i, j)) { index = j; v = us(i, j); } } maxValues(i, 0) = v; maxIndexes(i, 0) = (ElemType) index; } } } template <class ElemType> void CPUMatrix<ElemType>::VectorMin(CPUMatrix<ElemType>& minIndexes, CPUMatrix<ElemType>& minValues, const bool isColWise) const { if (IsEmpty()) LogicError("VectorMin: Matrix is empty."); auto& us = *this; const int m = (int) GetNumRows(); const int n = (int) GetNumCols(); assert(m > 0 && n > 0); // converting from size_t to int may cause overflow if (isColWise) // col-wise { minValues.RequireSize(1, n); minIndexes.RequireSize(1, n); #pragma omp parallel for for (int j = 0; j < n; j++) { ElemType v = us(0, j); size_t index = 0; foreach_row (i, us) { if (v > us(i, j)) { index = i; v = us(i, j); } } minValues(0, j) = v; minIndexes(0, j) = (ElemType) index; } } else { minValues.RequireSize(m, 1); minIndexes.RequireSize(m, 1); #pragma omp parallel for for (int i = 0; i < m; i++) { ElemType v = us(i, 0); size_t index = 0; foreach_column (j, us) { if (v > us(i, j)) { index = j; v = us(i, j); } } minValues(i, 0) = v; minIndexes(i, 0) = (ElemType) index; } } } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNumOfDiff(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, bool searchInCol) { if (a.GetNumCols() != b.GetNumCols()) throw std::invalid_argument("AssignNumOfDiff: a and b must have the same number of columns."); if (!searchInCol && a.GetNumRows() != b.GetNumRows()) throw std::invalid_argument("AssignNumOfDiff: a and b must have the same number of rows."); ElemType n = 0; if (!searchInCol) { foreach_coord (i, j, a) { n += (a(i, j) != b(i, j)); } } else { size_t crow = b.GetNumRows(); const ElemType* curCol = b.Data(); for (size_t icol = 0; icol < a.GetNumCols(); icol++, curCol += crow) { auto res = std::find(curCol, curCol + crow, a(0, icol)); if (res == curCol + crow) n++; } } RequireSize(1, 1); // result should be one element (*this)(0, 0) = n; return *this; } #pragma endregion Member BLAS Functions #pragma region Other helper Functions struct PrintRange { // print from begin to skipBegin, then from skipEnd to end // skipBegin = end if no split size_t begin; size_t skipBegin; size_t skipEnd; size_t end; bool IsEmpty() const { return end <= begin; } // examples: // * 3..10 // * -3..-3: include end-3..end and 0..3 PrintRange(ptrdiff_t first, ptrdiff_t last, size_t total) { if (first >= 0 && last >= 0) { begin = (size_t)first; end = (size_t)last + 1; if (end > total) // allow INT_MAX, meaning to end end = total; skipBegin = end; skipEnd = end; } else if (first < 0 && last < 0) { begin = 0; skipBegin = (size_t)(-last); skipEnd = (size_t)(total + first); if (skipEnd <= skipBegin) skipBegin = skipEnd = total; end = total; } else // if other combinations are ever of interest then implement them here LogicError("Print: Bounds must be either both positive or both negative."); } }; // use negative ranges to print corners, e.g. 
Print("name", -3, -3, -3, -3) will print the first 3 and last 3 rows/cols template <class ElemType> void CPUMatrix<ElemType>::Print(const char* matrixName, ptrdiff_t rowFirst, ptrdiff_t rowLast, ptrdiff_t colFirst, ptrdiff_t colLast) const { fprintf(stderr, "\n###### "); if (matrixName != nullptr) fprintf(stderr, "%s ", matrixName); fprintf(stderr, "(%lu, %lu)", (unsigned long)GetNumRows(), (unsigned long)GetNumCols()); if (rowFirst != 0 || colFirst != 0 || (size_t)(rowLast + 1) != GetNumRows() || (size_t)(colLast + 1) != GetNumCols()) fprintf(stderr, " [%ld:%ld, %ld:%ld]", (long)rowFirst, (long)rowLast, (long)colFirst, (long)colLast); fprintf(stderr, " ######\n\n"); if (IsEmpty()) { fprintf(stderr, "(empty)\n"); return; } PrintRange rowRange(rowFirst, rowLast, GetNumRows()); PrintRange colRange(colFirst, colLast, GetNumCols()); if (rowRange.IsEmpty() || colRange.IsEmpty()) { fprintf(stderr, "(empty)\n"); return; } const auto& us = *this; if (rowRange.begin > 0) fprintf(stderr, "...\n"); for (size_t i = rowRange.begin; i < rowRange.end; i++) { if (i == rowRange.skipBegin) // insert ... between the two blocks if any { fprintf(stderr, "...\n"); i = rowRange.skipEnd; } if (colRange.begin > 0) // ... at line start fprintf(stderr, "...\t"); for (size_t j = colRange.begin; j < colRange.end; j++) { if (j == colRange.skipBegin) { fprintf(stderr, "...\t"); j = colRange.skipEnd; } fprintf(stderr, "%.10f\t", (double)us(i, j)); } if (colRange.end < GetNumCols()) // ... at line end fprintf(stderr, "..."); fprintf(stderr, "\n"); } if (rowRange.end < GetNumRows()) fprintf(stderr, "...\n"); } template <class ElemType> void CPUMatrix<ElemType>::Print(const char* matrixName /*=nullptr*/) const { Print(matrixName, 0, GetNumRows() - 1, 0, GetNumCols() - 1); } // file I/O //matrixName is used to verify that correct matrix is read. template <class ElemType> void CPUMatrix<ElemType>::ReadFromFile(FILE*, const char* /*matrixName*/) { RuntimeError("not implemented."); } //matrixName is used to verify that correct matrix is read. template <class ElemType> void CPUMatrix<ElemType>::WriteToFile(FILE*, const char* /*matrixName*/) { RuntimeError("not implemented."); } //assume each column is an input sample. 
Each sample is stored in [channel, row, col] (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11) template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignPackedConvolutionInput(const CPUMatrix<ElemType>& inputSubBatch, const size_t inputWidth, const size_t inputHeight, const size_t inputChannels, const size_t outputWidth, const size_t outputHeight, const size_t /*outputChannels*/, const size_t kernelWidth, const size_t kernelHeight, const size_t horizontalSubsample, const size_t verticalSubsample, const bool zeroPadding) { if (verticalSubsample > kernelHeight || horizontalSubsample > kernelWidth) LogicError("Arguments verticalSubsample (or horizontalSubsample) must be less than or equal to kernelHeight (or kernelWidth)."); const size_t packedInputRows = kernelWidth * kernelHeight * inputChannels; const size_t packedInputColsPerSample = outputWidth * outputHeight; // output size per channel const size_t inputDim = inputWidth * inputHeight * inputChannels; const size_t smallBatchSize = inputSubBatch.GetNumCols(); const long inputHeightTimesChannel = (long) (inputHeight * inputChannels); RequireSize(packedInputRows, packedInputColsPerSample * smallBatchSize); if (zeroPadding) SetValue((ElemType) 0); const long halfKernelWidth = (long) kernelWidth / 2; const long halfKernelHeight = (long) kernelHeight / 2; #pragma omp parallel for // each input element is copied to many places for (long sample = 0; sample < smallBatchSize; sample++) { for (long id = 0; id < inputDim; id++) { // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * inputChannels) // IN_ELEM_COLPOS = sample const long y = id / inputHeightTimesChannel; // inputCol const long nXC = id % inputHeightTimesChannel; // channel + inputRow*inputChannels const long x = nXC / (long) inputChannels; // inputRow const long c = nXC % (long) inputChannels; // channel long x0 = 0, y0 = 0, x1 = 0, y1 = 0; if (zeroPadding) { x0 = (long) max((ElemType)0, ceil((x - (ElemType)kernelHeight + 1.0f + halfKernelHeight) / (ElemType)verticalSubsample)); // row : first wrow in which x is in x1 = (long) (x + halfKernelHeight - x0 * verticalSubsample); // first posxInKernel y0 = (long) max((ElemType)0, ceil((y - (ElemType)kernelWidth + 1.0f + halfKernelWidth) / (ElemType)horizontalSubsample)); // col : first wcol in which y is in y1 = (long) (y + halfKernelWidth - y0 * horizontalSubsample); // first posyInKernel } else { x0 = (long) max((ElemType)0, ceil((x - (ElemType)kernelHeight + 1) / (ElemType)verticalSubsample)); // row : first wrow in which x is in x1 = (long) (x - x0 * verticalSubsample); // first posxInKernel y0 = (long) max((ElemType)0, ceil((y - (ElemType)kernelWidth + 1) / (ElemType)horizontalSubsample)); // col : first wcol in which y is in y1 = (long) (y - y0 * horizontalSubsample); // first posyInKernel } assert(x1 >= 0 && x1 < kernelHeight && y1 >= 0 && y1 < kernelWidth); // PACK_ELEM_ROWPOS(channel, posxInKernel, posyInKernel) = (channel * kernelWidth * kernelHeight + posxInKernel + posyInKernel * kernelHeight) // PACK_ELEM_COLPOS(sample, wrow, wcol) = (sample*packedInputColsPerSample + outputHeight*wcol + wrow) ElemType currentInputValue = inputSubBatch(id, sample); long packColBase = (long) (sample * packedInputColsPerSample + y0 * outputHeight); for (long wcol = y0, posyInKernel = y1; wcol < (long) outputWidth && posyInKernel >= 0; wcol++, posyInKernel -= (long) horizontalSubsample) { long packRowBase = (long) (c * kernelWidth * kernelHeight + posyInKernel * kernelHeight); for (long wrow = x0, 
posxInKernel = x1; wrow < (long) outputHeight && posxInKernel >= 0; wrow++, posxInKernel -= (long) verticalSubsample) { const long packRow = packRowBase + posxInKernel; const long packCol = packColBase + wrow; (*this)(packRow, packCol) = currentInputValue; } packColBase += (long) outputHeight; } } } return *this; } //assume each column is an input sample. Each sample is stored in [channel, row, col] (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11) template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::UnpackConvolutionInput(CPUMatrix<ElemType>& inputSubBatch, const size_t inputWidth, const size_t inputHeight, const size_t inputChannels, const size_t outputWidth, const size_t outputHeight, const size_t /*outputChannels*/, const size_t kernelWidth, const size_t kernelHeight, const size_t horizontalSubsample, const size_t verticalSubsample, const bool zeroPadding) const { if (verticalSubsample > kernelHeight || horizontalSubsample > kernelWidth) LogicError("Arguments verticalSubsample (or horizontalSubsample) must be less than or equal to kernelHeight (or kernelWidth)."); const size_t packedInputColsPerSample = outputWidth * outputHeight; // output size per channel const size_t inputDim = inputWidth * inputHeight * inputChannels; const size_t smallBatchSize = inputSubBatch.GetNumCols(); const long inputHeightTimesChannel = (long) (inputHeight * inputChannels); const long halfKernelWidth = (long) kernelWidth / 2; const long halfKernelHeight = (long) kernelHeight / 2; #pragma omp parallel for // each input element is copied to many places for (long sample = 0; sample < smallBatchSize; sample++) { for (long id = 0; id < inputDim; id++) { // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * inputChannels) // IN_ELEM_COLPOS = sample const long y = id / inputHeightTimesChannel; // inputCol const long nXC = id % inputHeightTimesChannel; // channel + inputRow*inputChannels const long x = nXC / (long) inputChannels; // inputRow const long c = nXC % (long) inputChannels; // channel long x0 = 0, y0 = 0, x1 = 0, y1 = 0; if (zeroPadding) { x0 = (long) max((ElemType)0, ceil((x - (ElemType) kernelHeight + 1.0f + halfKernelHeight) / (ElemType) verticalSubsample)); // row : first wrow in which x is in x1 = (long) (x + halfKernelHeight - x0 * verticalSubsample); // first posxInKernel y0 = (long) max((ElemType)0, ceil((y - (ElemType) kernelWidth + 1.0f + halfKernelWidth) / (ElemType) horizontalSubsample)); // col : first wcol in which y is in y1 = (long) (y + halfKernelWidth - y0 * horizontalSubsample); // first posyInKernel } else { x0 = (long) max((ElemType)0, ceil((x - (ElemType) kernelHeight + 1) / (ElemType) verticalSubsample)); // row : first wrow in which x is in x1 = (long) (x - x0 * verticalSubsample); // first posxInKernel y0 = (long) max((ElemType)0, ceil((y - (ElemType) kernelWidth + 1) / (ElemType) horizontalSubsample)); // col : first wcol in which y is in y1 = (long) (y - y0 * horizontalSubsample); // first posyInKernel } assert(x1 >= 0 && x1 < kernelHeight && y1 >= 0 && y1 < kernelWidth); // PACK_ELEM_ROWPOS(channel, posxInKernel, posyInKernel) = (channel * kernelWidth * kernelHeight + posxInKernel + posyInKernel * kernelHeight) // PACK_ELEM_COLPOS(sample, wrow, wcol) = (sample*packedInputColsPerSample + outputHeight*wcol + wrow) ElemType currentInputValue = inputSubBatch(id, sample); long packColBase = (long) (sample * packedInputColsPerSample + y0 * outputHeight); for (long wcol = y0, posyInKernel = y1; wcol < (long) outputWidth && posyInKernel >= 
0; wcol++, posyInKernel -= (long) horizontalSubsample) { long packRowBase = (long) (c * kernelWidth * kernelHeight + posyInKernel * kernelHeight); for (long wrow = x0, posxInKernel = x1; wrow < (long) outputHeight && posxInKernel >= 0; wrow++, posxInKernel -= (long) verticalSubsample) { const long packRow = packRowBase + posxInKernel; const long packCol = packColBase + wrow; currentInputValue += (*this)(packRow, packCol); } packColBase += (long) outputHeight; } inputSubBatch(id, sample) = currentInputValue; } } return inputSubBatch; } //assume each column is an input sample. Each sample is stored in (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11) template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignMaxPoolingResult(const CPUMatrix<ElemType>& inputBatch, const size_t channels, const size_t /*inputWidth*/, const size_t inputHeight, const size_t /*inputSizePerSample*/, const size_t /*outputWidth*/, const size_t outputHeight, const size_t outputSizePerSample, const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample) { const long inputHeightTimesChannel = (long) (inputHeight * channels); const long outputHeightTimesChannel = (long) (outputHeight * channels); const size_t batchSize = inputBatch.GetNumCols(); RequireSize(outputSizePerSample, batchSize); // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels) // IN_ELEM_COLPOS = sample // OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels) // OUT_ELEM_COLPOS = sample #pragma omp parallel for for (long sample = 0; sample < (long) batchSize; sample++) { for (long outputIndexWithinSample = 0; outputIndexWithinSample < outputSizePerSample; outputIndexWithinSample++) { const long y = outputIndexWithinSample / outputHeightTimesChannel; // wcol const long nXC = outputIndexWithinSample % outputHeightTimesChannel; // channel + wrow*channels const long x = (long) (nXC / channels); // wrow const long c = (long) (nXC % channels); // channel ElemType maxVal = -FLT_MAX; ElemType minVal = FLT_MAX; const long rowInWindowBase = (long) ((x * verticalSubsample + y * horizontalSubsample * inputHeight) * channels + c); for (long colInWindow = 0; colInWindow < windowWidth; colInWindow++) { long rowInInput = rowInWindowBase + colInWindow * inputHeightTimesChannel; for (long rowInWindow = 0; rowInWindow < windowHeight; rowInWindow++) { const ElemType val = inputBatch(rowInInput, sample); // pf[rowInWindow*channels]; maxVal = std::max(maxVal, val); minVal = std::min(minVal, val); rowInInput += (long) channels; } } (*this)(outputIndexWithinSample, sample) = maxVal; } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddMaxPoolingGradient(const CPUMatrix<ElemType>& outputGradientBatch, const CPUMatrix<ElemType>& inputBatch, const CPUMatrix<ElemType>& outputBatch, const size_t channels, const size_t /*inputWidth*/, const size_t inputHeight, const size_t inputSizePerSample, const size_t outputWidth, const size_t outputHeight, const size_t /*outputSizePerSample*/, const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample) { size_t batchSize = inputBatch.GetNumCols(); const long inputHeightTimesChannel = (long) (inputHeight * channels); const long outputHeightTimesChannel = (long) (outputHeight * channels); // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels) // IN_ELEM_COLPOS = sample 
// OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels) // OUT_ELEM_COLPOS = sample #pragma omp parallel for for (long sample = 0; sample < batchSize; sample++) { for (long inputIndexWithinSample = 0; inputIndexWithinSample < inputSizePerSample; inputIndexWithinSample++) { const long y = inputIndexWithinSample / inputHeightTimesChannel; // col in input const long nXC = inputIndexWithinSample % inputHeightTimesChannel; // channel + row*channels const long x = (long) (nXC / channels); // row in input const long c = (long) (nXC % channels); // channel long startOutX = (long) max((ElemType)0, ceil((x - (ElemType) windowHeight + 1) / (ElemType) verticalSubsample)); // inclusive start long endOutX = (long) ((x / verticalSubsample < outputHeight - 1) ? x / verticalSubsample : outputHeight - 1); // inclusive end long startOutY = (long) max((ElemType)0, ceil((y - (ElemType) windowWidth + 1) / (ElemType) horizontalSubsample)); // inclusive start long endOutY = (long) ((y / horizontalSubsample < outputWidth - 1) ? y / horizontalSubsample : outputWidth - 1); // inclusive end ElemType inputValue = inputBatch(inputIndexWithinSample, sample); for (long outY = startOutY; outY <= endOutY; outY++) { for (long outX = startOutX; outX <= endOutX; outX++) { long outputIndex = (long) (outY * outputHeightTimesChannel + outX * channels + c); if (inputValue == outputBatch(outputIndex, sample)) (*this)(inputIndexWithinSample, sample) += outputGradientBatch(outputIndex, sample); } } } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAveragePoolingResult(const CPUMatrix<ElemType>& inputBatch, const size_t channels, const size_t /*inputWidth*/, const size_t inputHeight, const size_t /*inputSizePerSample*/, const size_t /*outputWidth*/, const size_t outputHeight, const size_t outputSizePerSample, const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample) { const long inputHeightTimesChannel = (long) (inputHeight * channels); const long outputHeightTimesChannel = (long) (outputHeight * channels); const size_t batchSize = inputBatch.GetNumCols(); const size_t windowSize = windowWidth * windowHeight; RequireSize(outputSizePerSample, batchSize); // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels) // IN_ELEM_COLPOS = sample // OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels) // OUT_ELEM_COLPOS = sample #pragma omp parallel for for (long sample = 0; sample < batchSize; sample++) { for (long outputIndexWithinSample = 0; outputIndexWithinSample < outputSizePerSample; outputIndexWithinSample++) { const long y = outputIndexWithinSample / outputHeightTimesChannel; // wcol const long nXC = outputIndexWithinSample % outputHeightTimesChannel; // channel + wrow*channels const long x = (long) (nXC / channels); // wrow const long c = (long) (nXC % channels); // channel ElemType sum = 0; const long rowInWindowBase = (long) ((x * verticalSubsample + y * horizontalSubsample * inputHeight) * channels + c); for (long colInWindow = 0; colInWindow < windowWidth; colInWindow++) { long rowInInput = rowInWindowBase + colInWindow * inputHeightTimesChannel; for (long rowInWindow = 0; rowInWindow < windowHeight; rowInWindow++) { sum += inputBatch(rowInInput, sample); rowInInput += (long) channels; } } (*this)(outputIndexWithinSample, sample) = sum / windowSize; } } return *this; } template <class ElemType> CPUMatrix<ElemType>& 
CPUMatrix<ElemType>::AddAveragePoolingGradient(const CPUMatrix<ElemType>& outputGradientBatch, const size_t channels, const size_t /*inputWidth*/, const size_t inputHeight, const size_t inputSizePerSample, const size_t outputWidth, const size_t outputHeight, const size_t /*outputSizePerSample*/, const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample) { size_t batchSize = outputGradientBatch.GetNumCols(); const long inputHeightTimesChannel = (long) (inputHeight * channels); const long outputHeightTimesChannel = (long) (outputHeight * channels); const long windowSize = (long) (windowWidth * windowHeight); // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels) // IN_ELEM_COLPOS = sample // OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels) // OUT_ELEM_COLPOS = sample #pragma omp parallel for for (long sample = 0; sample < batchSize; sample++) { for (long inputIndexWithinSample = 0; inputIndexWithinSample < inputSizePerSample; inputIndexWithinSample++) { const long y = inputIndexWithinSample / inputHeightTimesChannel; // col in input const long nXC = inputIndexWithinSample % inputHeightTimesChannel; // channel + row*channels const long x = nXC / (long) channels; // row in input const long c = nXC % (long) channels; // channel long startOutX = (long) max((ElemType)0, ceil((x - (ElemType) windowHeight + 1) / (ElemType) verticalSubsample)); // inclusive start long endOutX = (long) ((x / verticalSubsample < outputHeight - 1) ? x / (long) verticalSubsample : outputHeight - 1); // inclusive end long startOutY = (long) max((ElemType)0, ceil((y - (ElemType) windowWidth + 1) / (ElemType) horizontalSubsample)); // inclusive start long endOutY = (long) ((y / horizontalSubsample < outputWidth - 1) ? 
y / horizontalSubsample : outputWidth - 1); // inclusive end for (long outY = startOutY; outY <= endOutY; outY++) { for (long outX = startOutX; outX <= endOutX; outX++) { long outputIndex = outY * outputHeightTimesChannel + outX * (long) channels + c; (*this)(inputIndexWithinSample, sample) += outputGradientBatch(outputIndex, sample) / windowSize; } } } } return *this; } #pragma endregion Other Helper Functions template <class ElemType> void CPUMatrix<ElemType>::ConvolutionForward(const CPUMatrix<ElemType>& kernel, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht, const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const { #pragma omp parallel for for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++) { for (size_t row = 0; row < output.GetNumRows(); row++) { int colBase = mpRowCol(row, 0); int ivBase = mpRowIwht(row, 0); assert(0 <= colBase && colBase < GetNumRows()); ElemType sum = 0; int i0 = mpRowRun(row, 0); int skip = runs(i0++, 0); int size = runs(i0++, 0); int imask = i0 + size; for (int i = 0; i < size; i++) { if (runs(imask + i, 0) == 0) continue; int dcol = runs(i0 + i, 0); assert(0 <= colBase + dcol && colBase + dcol < GetNumRows()); sum += kernel.Data()[ivBase + skip + i] * (*this)(colBase + dcol, sample); } output(row, sample) = sum; } } } template <class ElemType> void CPUMatrix<ElemType>::ConvolutionBackwardData(const CPUMatrix<ElemType>& kernel, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht, const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& grad) const { #pragma omp parallel for for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++) { for (size_t row = 0; row < GetNumRows(); row++) { int colBase = mpRowCol(row, 0); int ivBase = mpRowIwht(row, 0); assert(0 <= colBase && colBase < grad.GetNumRows()); ElemType curGrad = (*this)(row, sample); int i0 = mpRowRun(row, 0); int skip = runs(i0++, 0); int size = runs(i0++, 0); int imask = i0 + size; for (int i = 0; i < size; i++) { if (runs(imask + i, 0) == 0) continue; int dcol = runs(i0 + i, 0); assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows()); grad(colBase + dcol, sample) += curGrad * kernel.Data()[ivBase + skip + i]; } } } } template <class ElemType> void CPUMatrix<ElemType>::ConvolutionBackwardKernel(const CPUMatrix<ElemType>& in, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht, const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& kernelGrad) const { // Do NOT parallelize these loops! 
for (size_t sample = 0; sample < GetNumCols(); sample++) { for (size_t row = 0; row < GetNumRows(); row++) { int colBase = mpRowCol(row, 0); int ivBase = mpRowIwht(row, 0); assert(0 <= colBase && colBase < in.GetNumRows()); ElemType curGrad = (*this)(row, sample); int i0 = mpRowRun(row, 0); int skip = runs(i0++, 0); int size = runs(i0++, 0); int imask = i0 + size; for (int i = 0; i < size; i++) { if (runs(imask + i, 0) == 0) continue; int dcol = runs(i0 + i, 0); assert(0 <= colBase + dcol && colBase + dcol < in.GetNumRows()); kernelGrad.Data()[ivBase + skip + i] += curGrad * in(colBase + dcol, sample); } } } } template <class ElemType> void CPUMatrix<ElemType>::UnrollConvolutionInput(size_t unrollCols, size_t mapOutSize, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const { size_t batchSize = GetNumCols(); #pragma omp parallel for for (int64_t sample = 0; sample < (int64_t)batchSize; sample++) { for (size_t row = 0; row < mapOutSize; row++) { int colBase = mpRowCol(row, 0); assert(0 <= colBase && colBase < GetNumRows()); int i0 = mpRowRun(row, 0); int skip = runs(i0++, 0); int size = runs(i0++, 0); int imask = i0 + size; for (int i = 0; i < size; i++) { if (runs(imask + i, 0) == 0) continue; int dcol = runs(i0 + i, 0); assert(0 <= colBase + dcol && colBase + dcol < GetNumRows()); output.Data()[(row * batchSize + sample) * unrollCols + skip + i] = (*this)(colBase + dcol, sample); } } } } template <class ElemType> void CPUMatrix<ElemType>::UnrollConvolutionOutput(size_t unrollCols, size_t mapInCount, size_t mapOutCount, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const { if (mpRowCol.GetNumRows() % mapOutCount != 0) InvalidArgument("The number of rows in mpRowCol must be multiple of mapOutCount."); size_t mapOutSize = mpRowCol.GetNumRows() / mapOutCount; size_t batchSize = GetNumCols(); size_t kernelSize = runs(1, 0); if (kernelSize % mapInCount != 0) InvalidArgument("kernelSize must be multiple of mapInCount."); size_t kernelMapSize = kernelSize / mapInCount; #pragma omp parallel for for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++) { for (size_t row = 0; row < mapOutSize; row++) { int colBase = mpRowCol(row, 0); int i0 = mpRowRun(row, 0); int skip = runs(i0++, 0); int size = runs(i0++, 0); int imask = i0 + size; for (int i = 0; i < std::min(size, (int)kernelMapSize); i++) { if (runs(imask + i, 0) == 0) continue; int dcol = runs(i0 + i, 0); size_t isrc = row; size_t idst = ((colBase + dcol) * batchSize + sample) * unrollCols + ((skip + i) % kernelMapSize) * mapOutCount; for (size_t outMap = 0; outMap < mapOutCount; outMap++, isrc += mapOutSize) { assert(isrc < GetNumElements()); assert(idst + outMap < output.GetNumElements()); output.Data()[idst + outMap] = (*this)(isrc, sample); } } } } } template <class ElemType> void CPUMatrix<ElemType>::UnrollConvolutionInputForKernelBackprop(size_t mapOutSize, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const { size_t batchSize = GetNumCols(); size_t unrollCols = mapOutSize * batchSize; #pragma omp parallel for for (int64_t sample = 0; sample < (int64_t)batchSize; sample++) { for (size_t row = 0; row < mapOutSize; row++) { int colBase = mpRowCol(row, 0); assert(0 <= colBase && colBase < GetNumRows()); int i0 = mpRowRun(row, 0); int skip = runs(i0++, 0); int size = runs(i0++, 0); int imask = i0 + size; for (int 
i = 0; i < size; i++) { if (runs(imask + i, 0) == 0) continue; int dcol = runs(i0 + i, 0); assert(0 <= colBase + dcol && colBase + dcol < GetNumRows()); size_t idst = (skip + i) * unrollCols + row * batchSize + sample; assert(idst < output.GetNumElements()); output.Data()[idst] = (*this)(colBase + dcol, sample); } } } } template <class ElemType> void CPUMatrix<ElemType>::MaxPoolingForward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& output) const { #pragma omp parallel for for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++) { for (size_t row = 0; row < output.GetNumRows(); row++) { int colBase = mpRowCol(row, 0); assert(0 <= colBase && colBase < GetNumRows()); assert(std::numeric_limits<ElemType>::has_infinity); ElemType res = -std::numeric_limits<ElemType>::infinity(); int i0 = mpRowIndices(row, 0); int size = indices(i0++, 0); assert(size > 0); for (int i = 0; i < size; i++) { int dcol = indices(i0 + i, 0); assert(0 <= colBase + dcol && colBase + dcol < GetNumRows()); res = std::max(res, (*this)(colBase + dcol, sample)); } output(row, sample) = res; } } } template <class ElemType> void CPUMatrix<ElemType>::MaxPoolingBackward(const CPUMatrix<ElemType>& out, const CPUMatrix<ElemType>& in, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& grad, bool accumulateGradient) const { if (!accumulateGradient) grad.SetValue((ElemType)0); #pragma omp parallel for for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++) { for (size_t row = 0; row < GetNumRows(); row++) { int colBase = mpRowCol(row, 0); assert(0 <= colBase && colBase < grad.GetNumRows()); int i0 = mpRowIndices(row, 0); int size = indices(i0++, 0); assert(size > 0); ElemType g = (*this)(row, sample); ElemType m = out(row, sample); for (int i = 0; i < size; i++) { int dcol = indices(i0 + i, 0); assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows()); if (in(colBase + dcol, sample) >= m) { #pragma omp atomic grad(colBase + dcol, sample) += g; break; } } } } } // For each image, for each ROI, this function treats that ROI as an image // and does max pooling so that it has output size pooledHeight x pooledWidth. // It loops over each location in the output tensor, computes which ROI // and image should populate that location, computes the subset of the image // corresponding to the ROI and which pixels in that subset should go into the // output location, then takes the max value over that window. // src: Images [W x H x C x N] // roiData: ROIs [4 x numROIs x N], // dst: Pooled ROIs [PW x PH x C x numROIs x N] // argmax: max positions [PW x PH x C x numROIs x N] // spatialScale ratio of input feature map to the original image. 
// where PW = Pooled Width, PH = Pooled Height, C = Channels, N = Batch Size
template <class ElemType>
void CPUMatrix<ElemType>::MaxROIPoolingForward(const size_t numRois, const size_t numImg, const size_t channels, const size_t width, const size_t height,
                                               const size_t pooledWidth, const size_t pooledHeight, const CPUMatrix<ElemType>& roiData, CPUMatrix<ElemType>& output,
                                               CPUMatrix<ElemType>& argmax, double spatialScale) const
{
    size_t roiOutputSize = pooledHeight * pooledWidth * channels;

#pragma omp parallel for
    for (int imgIdx = 0; imgIdx < (int) numImg; imgIdx++)
    {
        auto img = ColumnSlice(imgIdx, 1);
        auto rois = roiData.ColumnSlice(imgIdx, 1);
#pragma omp parallel for
        for (int roiIdx = 0; roiIdx < (int) numRois; roiIdx++)
        {
            // each ROI is 4 elements: (x1, y1, x2, y2), the corner coordinates of the ROI
            // given as absolute locations in the original image.
            int base = roiIdx * 4;
            ElemType scX1 = rois(base + 0, 0);
            ElemType scY1 = rois(base + 1, 0);
            ElemType scX2 = rois(base + 2, 0);
            ElemType scY2 = rois(base + 3, 0);

            // compute actual spatial location of the ROI in our featuremap.
            size_t x1 = (size_t) round(scX1 * spatialScale);
            size_t y1 = (size_t) round(scY1 * spatialScale);
            size_t x2 = (size_t) round(scX2 * spatialScale);
            size_t y2 = (size_t) round(scY2 * spatialScale);

            ElemType roiW = (ElemType) max(x2 - x1 + 1, (size_t) 1);
            ElemType roiH = (ElemType) max(y2 - y1 + 1, (size_t) 1);

            const ElemType winW = roiW / (ElemType) pooledWidth;
            const ElemType winH = roiH / (ElemType) pooledHeight;

            // inspired by Ross Girshick fast-rcnn caffe cpu: https://github.com/rbgirshick/fast-rcnn
            // loop over spatial locations in output.
#pragma omp parallel for
            for (int outw = 0; outw < (int) pooledWidth; outw++)
            {
                for (int outh = 0; outh < (int) pooledHeight; outh++)
                {
                    // compute the top left corner of the input
                    // spatial window corresponding to this output unit
                    size_t hstart = (size_t) floor(outh * winH);
                    size_t wstart = (size_t) floor(outw * winW);

                    // compute bottom right corner (not included)
                    size_t hend = (size_t) ceil((outh + 1) * winH);
                    size_t wend = (size_t) ceil((outw + 1) * winW);

                    // offset window based on ROI top left corner.
                    // these indices are into the input slice.
                    hstart = min(max(hstart + y1, (size_t) 0), height);
                    wstart = min(max(wstart + x1, (size_t) 0), width);
                    hend = min(max(hend + y1, (size_t) 0), height);
                    wend = min(max(wend + x1, (size_t) 0), width);

                    bool isempty = (hend <= hstart) || (wend <= wstart);

                    for (size_t c = 0; c < channels; c++)
                    {
                        // [W x H x C x R x N]; R = ROIs per image
                        size_t outputIdx = roiIdx * roiOutputSize + outw + outh * pooledWidth + c * pooledHeight * pooledWidth;
                        size_t maxidx = 0;
                        ElemType maxval = isempty ? (ElemType) 0 : (ElemType) -FLT_MAX;
                        size_t baseIdx = c * height * width;

                        for (size_t h = hstart; h < hend; h++)
                        {
                            for (size_t w = wstart; w < wend; w++)
                            {
                                // stored argmax indices are relative to the current channel.
                                size_t dataIdx = w + h * width;
                                if (img(baseIdx + dataIdx, 0) > maxval)
                                {
                                    maxval = img(baseIdx + dataIdx, 0);
                                    maxidx = dataIdx;
                                }
                            }
                        }
                        output(outputIdx, imgIdx) = maxval;
                        argmax(outputIdx, imgIdx) = (ElemType) maxidx;
                    }
                }
            }
        }
    }
}

// This function loops over locations in the input to the ROIPoolingNode (image locations).
// It loops over the ROIs corresponding to that image, seeing which ones could contain the current location
// in their output. For each ROI, it checks the argmax data to see if that ROI indeed chose
// this pixel location as the maximum. If so, it increments the gradient term for the input location.
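// Worked example (illustrative numbers only): with spatialScale = 1 and an ROI whose scaled
// corners give (roiStartH, roiEndH) = (0, 3), roiHeight = 4. With pooledHeight = 2 each pooled
// cell covers winH = 4 / 2 = 2 input rows, so input row h = 2 can only have been pooled by cells
// ph in [phstart, phend) = [(2 - 0) / 2, ceil((2 - 0 + 1) / 2)) = [1, 2). The argmax check then
// decides whether that cell actually selected this pixel.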
template <class ElemType>
void CPUMatrix<ElemType>::MaxROIPoolingBackward(const size_t numRois, const size_t numImg, const size_t channels, const size_t width, const size_t height,
                                                const size_t pooledWidth, const size_t pooledHeight, const CPUMatrix<ElemType>& roiData, CPUMatrix<ElemType>& grad,
                                                CPUMatrix<ElemType>& argmax, double spatialScale) const
{
    // loop over images in the batch.
#pragma omp parallel for
    for (int imgIdx = 0; imgIdx < (int) numImg; imgIdx++)
    {
        // ROIs for this image. length 4*numRois;
        auto rois = roiData.ColumnSlice(imgIdx, 1).Data();
        // gradient values for all ROIs from this image. length numRois*pooledHeight*pooledWidth*channels;
        auto pooledGrad = ColumnSlice(imgIdx, 1).Data();
        auto argmaxCol = argmax.ColumnSlice(imgIdx, 1).Data();

        // loop over spatial locations in the image.
#pragma omp parallel for
        for (int w = 0; w < (int) width; w++)
        {
#pragma omp parallel for
            for (int h = 0; h < (int) height; h++)
            {
                // loop over the ROIs seeing which ones contain this location.
                for (int roiN = 0; roiN < (int) numRois; roiN++)
                {
                    // each ROI is 4 elements: (x1, y1, x2, y2), corner coordinates
                    // in the original image (same convention as the forward pass above).
                    int roiOffset = roiN * 4;

                    size_t roiStartW = (size_t) round(rois[roiOffset + 0] * spatialScale);
                    size_t roiStartH = (size_t) round(rois[roiOffset + 1] * spatialScale);
                    size_t roiEndW   = (size_t) round(rois[roiOffset + 2] * spatialScale);
                    size_t roiEndH   = (size_t) round(rois[roiOffset + 3] * spatialScale);

                    size_t roiWidth  = max(roiEndW - roiStartW + 1, (size_t) 1);
                    size_t roiHeight = max(roiEndH - roiStartH + 1, (size_t) 1);

                    // skip this ROI if it doesn't contain the current input location.
                    const bool inROI = (w >= roiStartW && w < roiStartW + roiWidth &&
                                        h >= roiStartH && h < roiStartH + roiHeight);
                    if (!inROI)
                        continue;

                    ElemType winH = (ElemType) roiHeight / (ElemType) pooledHeight;
                    ElemType winW = (ElemType) roiWidth / (ElemType) pooledWidth;

                    // what pooled nodes in the output for this ROI could have pooled this input location?
                    size_t phstart = (size_t) ((h - roiStartH) / winH);
                    size_t pwstart = (size_t) ((w - roiStartW) / winW);
                    size_t phend = (size_t) (ceil((h - roiStartH + 1) / winH));
                    size_t pwend = (size_t) (ceil((w - roiStartW + 1) / winW));

                    phstart = min(max(phstart, (size_t) 0), pooledHeight);
                    phend   = min(max(phend,   (size_t) 0), pooledHeight);
                    pwstart = min(max(pwstart, (size_t) 0), pooledWidth);
                    pwend   = min(max(pwend,   (size_t) 0), pooledWidth);

                    for (size_t c = 0; c < channels; c++)
                    {
                        ElemType gradient = 0;
                        // [W x H x C x N]
                        size_t index = w + h * width + c * height * width;
                        // go right up to channel c of the current ROI.
                        size_t offset = (roiN * channels + c) * pooledWidth * pooledHeight;
                        const ElemType* offsetPoolGrad = pooledGrad + offset;
                        const ElemType* offsetArgmax = argmaxCol + offset;
                        for (size_t ph = phstart; ph < phend; ph++)
                        {
                            for (size_t pw = pwstart; pw < pwend; pw++)
                            {
                                if ((size_t) offsetArgmax[ph * pooledWidth + pw] == (w + h * width))
                                {
                                    gradient += offsetPoolGrad[ph * pooledWidth + pw];
                                }
                            }
                        }
#pragma omp atomic
                        grad(index, imgIdx) += gradient;
                    }
                }
            }
        }
    }
}

template <class ElemType>
void CPUMatrix<ElemType>::MaxUnpooling(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices,
                                       const CPUMatrix<ElemType>& poolInput, CPUMatrix<ElemType>& input) const
{
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t) GetNumCols(); sample++)
    {
        for (size_t row = 0; row < GetNumRows(); row++)
        {
            int colBase = mpRowCol(row, 0);
            assert(0 <= colBase && colBase < input.GetNumRows());

            int i0 = mpRowIndices(row, 0);
            int size = indices(i0++, 0);
            assert(size > 0);

            // find which position in the pooling window held the max in the forward pass.
            ElemType curMax = poolInput(colBase + indices(i0, 0), sample);
            ElemType prevMax = curMax;
            int imax = 0;
            for (int i = 1; i < size; i++)
            {
                int dcol = indices(i0 + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < poolInput.GetNumRows());
                curMax = std::max(curMax, poolInput(colBase + dcol, sample));
                if (curMax > prevMax)
                {
                    prevMax = curMax;
                    imax = i;
                }
            }

            int dcol = indices(i0 + imax, 0);
            assert(0 <= colBase + dcol && colBase + dcol < input.GetNumRows());
            input(colBase + dcol, sample) = (*this)(row, sample);

            //int i = (int)poolIn(row, sample);
            //assert(0 <= i && i < size);
            //int dcol = indices(i0 + i, 0);
            //assert(0 <= colBase + dcol && colBase + dcol < input.GetNumRows());
            //input(colBase + dcol, sample) = (*this)(row, sample);
        }
    }
}

template <class ElemType>
void CPUMatrix<ElemType>::AveragePoolingForward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices,
                                                CPUMatrix<ElemType>& output, const bool poolIncludePad) const
{
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t) output.GetNumCols(); sample++)
    {
        for (size_t row = 0; row < output.GetNumRows(); row++)
        {
            int colBase = mpRowCol(row, 0);
            assert(0 <= colBase && colBase < GetNumRows());

            ElemType sum = 0;

            int i0 = mpRowIndices(row, 0);
            int size = indices(i0++, 0);
            assert(size > 0);
            for (int i = 0; i < size; i++)
            {
                int dcol = indices(i0 + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
                sum += (*this)(colBase + dcol, sample);
            }
            // Note that we divide by size which is the number of actual elements (does not include padding).
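            // For example, a 3x3 window at an image corner may overlap only 4 valid input
            // cells: excluding padding the divisor is size = 4; including padding it is the
            // full window size 9, which indices(0, 0) is assumed to hold (see below).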
            // if poolIncludePad == true, use avg_pool_include_pad (divide by the full window size)
            if (poolIncludePad)
                size = indices(0, 0);
            output(row, sample) = sum / size;
        }
    }
}

template <class ElemType>
void CPUMatrix<ElemType>::AveragePoolingBackward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices,
                                                 CPUMatrix<ElemType>& grad, const bool poolIncludePad, bool accumulateGradient) const
{
    if (!accumulateGradient)
        grad.SetValue((ElemType) 0);

#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t) GetNumCols(); sample++)
    {
        for (size_t row = 0; row < GetNumRows(); row++)
        {
            int colBase = mpRowCol(row, 0);
            assert(0 <= colBase && colBase < grad.GetNumRows());

            int i0 = mpRowIndices(row, 0);
            int size = indices(i0++, 0);
            int tmp = size;
            if (poolIncludePad)
                size = indices(0, 0);
            assert(size > 0);
            ElemType g = (*this)(row, sample) / size;
            size = tmp;
            for (int i = 0; i < size; i++)
            {
                int dcol = indices(i0 + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows());
#pragma omp atomic
                grad(colBase + dcol, sample) += g;
            }
        }
    }
}

template <class ElemType>
template <class StatType>
void CPUMatrix<ElemType>::BatchNormalizationForward(const CPUMatrix<StatType>& scale, const CPUMatrix<StatType>& bias, bool inferenceOnly,
                                                    double expAvgFactor, double blendFactor, CPUMatrix<StatType>& runMean, CPUMatrix<StatType>& runVariance,
                                                    CPUMatrix<ElemType>& out, double epsilon, CPUMatrix<StatType>& saveMean, CPUMatrix<StatType>& saveInvStdDev) const
{
    if (GetNumRows() % scale.GetNumRows() != 0)
        LogicError("The number of rows of this matrix must be a multiple of the number of rows of the scale matrix.");

    if (!inferenceOnly || expAvgFactor != 0 || blendFactor != 1)
        RuntimeError("Batch normalization training on CPU is not yet implemented.");

    saveMean.Resize(0, 0); // only doing inference: these two are not produced
    saveInvStdDev.Resize(0, 0);

    bool spatial = GetNumRows() != scale.GetNumRows();
    if (spatial)
    {
        size_t spatialSize = GetNumRows() / scale.GetNumRows();
#pragma omp parallel for
        for (long icol = 0; icol < (long) out.GetNumCols(); icol++)
        {
            for (long irow = 0; irow < (long) out.GetNumRows(); irow++)
            {
                size_t imap = irow / spatialSize;
                ElemType stdDev = (ElemType) sqrt(runVariance(imap, 0) + epsilon);
                out(irow, icol) = (ElemType) (scale(imap, 0) * ((*this)(irow, icol) - runMean(imap, 0)) / stdDev + bias(imap, 0));
            }
        }
    }
    else
    {
#pragma omp parallel for
        for (long icol = 0; icol < (long) out.GetNumCols(); icol++)
        {
            for (long irow = 0; irow < (long) out.GetNumRows(); irow++)
            {
                ElemType stdDev = (ElemType) sqrt(runVariance(irow, 0) + epsilon);
                out(irow, icol) = (ElemType) (scale(irow, 0) * ((*this)(irow, icol) - runMean(irow, 0)) / stdDev + bias(irow, 0));
            }
        }
    }
}

template <class ElemType>
template <class StatType>
void CPUMatrix<ElemType>::BatchNormalizationBackward(const CPUMatrix<ElemType>& in, CPUMatrix<ElemType>& grad, const CPUMatrix<StatType>& scale, double blendFactor,
                                                     const CPUMatrix<StatType>& saveMean, const CPUMatrix<StatType>& saveInvStdDev,
                                                     CPUMatrix<StatType>& scaleGrad, CPUMatrix<StatType>& biasGrad) const
{
    UNUSED(in); UNUSED(grad); UNUSED(scale); UNUSED(blendFactor); UNUSED(saveMean); UNUSED(saveInvStdDev); UNUSED(scaleGrad); UNUSED(biasGrad);
    RuntimeError("Batch normalization training on CPU is not yet implemented.");
}

#pragma region Static BLAS Functions

/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = alpha * op(a) * op(b) + beta*c</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is
transposed</param> /// <param name="b">Input matrix</param> /// <param name="transposeB">Whether matrix b is transposed</param> /// <param name="beta">Scalar</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> void CPUMatrix<ElemType>::MultiplyAndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB, ElemType beta, CPUMatrix<ElemType>& c, shared_ptr<QuantizedMultiplier<ElemType>> pQuantizedMultiplier) { if (a.IsEmpty() || b.IsEmpty()) return; int m, n, k, l; int lda, ldb, ldc; CBLAS_TRANSPOSE mklTransA; CBLAS_TRANSPOSE mklTransB; if (transposeA) { m = (int) a.GetNumCols(); k = (int) a.GetNumRows(); lda = k; mklTransA = CBLAS_TRANSPOSE::CblasTrans; } else { m = (int) a.GetNumRows(); k = (int) a.GetNumCols(); lda = m; mklTransA = CBLAS_TRANSPOSE::CblasNoTrans; } if (transposeB) { l = (int) b.GetNumCols(); n = (int) b.GetNumRows(); ldb = n; mklTransB = CBLAS_TRANSPOSE::CblasTrans; } else { l = (int) b.GetNumRows(); n = (int) b.GetNumCols(); ldb = l; mklTransB = CBLAS_TRANSPOSE::CblasNoTrans; } assert(m > 0 && k > 0 && l > 0 && n > 0); // converting from size_t to int may cause overflow if (k != l) InvalidArgument("CPUMatrix<ElemType>::MultiplyAndWeightedAdd : The inner dimensions of a and b must match."); if (beta == 0) c.RequireSize(m, n); else c.VerifySize(m, n); // Can't resize if beta != 0 ldc = (int) c.GetNumRows(); if (pQuantizedMultiplier == nullptr) { if (std::is_same<ElemType, double>::value) { cblas_dgemm((CBLAS_ORDER) (int)MatrixOrder::ColMajor, mklTransA, mklTransB, m, n, k, alpha, reinterpret_cast<double*>(a.Data()), lda, reinterpret_cast<double*>(b.Data()), ldb, beta, reinterpret_cast<double*>(c.Data()), ldc); } else if (std::is_same<ElemType, float>::value) { #pragma warning(suppress : 4244) cblas_sgemm((CBLAS_ORDER) (int)MatrixOrder::ColMajor, mklTransA, mklTransB, m, n, k, alpha, reinterpret_cast<float*>(a.Data()), lda, reinterpret_cast<float*>(b.Data()), ldb, beta, reinterpret_cast<float*>(c.Data()), ldc); } else { RuntimeError("Unsupported data format"); } } else { // TODO: support transpose product if (mklTransA == CBLAS_TRANSPOSE::CblasTrans || mklTransB == CBLAS_TRANSPOSE::CblasTrans) LogicError("Quantized multiplier currently doesn't support transpose."); pQuantizedMultiplier->Multiply(m, n, k, a.Data(), b.Data(), c.Data()); } } template <class ElemType> void CPUMatrix<ElemType>::Multiply1x1AndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, ElemType beta, CPUMatrix<ElemType>& c) { if (a.GetNumElements() != 1) InvalidArgument("the argument a must be a scalar"); // a is a scalar ElemType f = alpha * a.Get00Element(); if (beta == 0) // don't even read the memory if beta is 0 #pragma omp parallel for foreach_coord (i, j, c) c(i, j) = b(i, j) * f; else #pragma omp parallel for foreach_coord (i, j, c) c(i, j) = b(i, j) * f + c(i, j) * beta; } template <class ElemType> void CPUMatrix<ElemType>::ColumnwiseScaleAndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& v, ElemType beta, CPUMatrix<ElemType>& c) { if (v.GetNumRows() != 1 && v.GetNumCols() != 1) InvalidArgument("the argument v must be a vector"); // v is a vector if (beta == 0) c.RequireSize(a.GetNumRows(), a.GetNumCols()); else c.VerifySize(a.GetNumRows(), a.GetNumCols()); // Can't resize if beta != 0 const ElemType* vd = v.Data(); if (beta == 0) // don't even read the memory if beta is 0 #pragma omp 
parallel for foreach_coord(i, j, c) c(i, j) = alpha * a(i, j) * vd[j]; else #pragma omp parallel for foreach_coord(i, j, c) c(i, j) = alpha * a(i, j) * vd[j] + c(i, j) * beta; } /* compute singular value decomposition as A = U*SIGMA*VT W is used as temp working memory */ template <class ElemType> void CPUMatrix<ElemType>::SVD(const CPUMatrix<ElemType>& A, CPUMatrix<ElemType>& SIGMA, CPUMatrix<ElemType>& U, CPUMatrix<ElemType>& VT, CPUMatrix<ElemType>& W) { if (A.IsEmpty()) LogicError("SVD: input matrix is empty."); int info; int m, n, lda, ldu, ldvt; m = (int) A.GetNumRows(); n = (int) A.GetNumCols(); W.GetNumRows(); // W is used as temp working memory lda = m; ldu = m; ldvt = n; U.RequireSize(m, m); SIGMA.RequireSize(std::min(m, n), 1); VT.RequireSize(n, n); #if CNTK_UWP RuntimeError("Error, LAPACKE_*gesvd is not supported for UWP.\n"); #else if (std::is_same<ElemType, double>::value) { std::vector<double> superb(std::max(std::min(m, n) - 1, 1)); info = LAPACKE_dgesvd((int) MatrixOrder::ColMajor, 'A', 'A', (int) m, (int) n, reinterpret_cast<double*>(A.Data()), (int) lda, reinterpret_cast<double*>(SIGMA.Data()), reinterpret_cast<double*>(U.Data()), (int) ldu, reinterpret_cast<double*>(VT.Data()), (int) ldvt, &superb[0]); } else if (std::is_same<ElemType, float>::value) { std::vector<float> superb(std::max(std::min(m, n) - 1, 1)); info = LAPACKE_sgesvd((int) MatrixOrder::ColMajor, 'A', 'A', (int) m, (int) n, reinterpret_cast<float*>(A.Data()), (int) lda, reinterpret_cast<float*>(SIGMA.Data()), reinterpret_cast<float*>(U.Data()), (int) ldu, reinterpret_cast<float*>(VT.Data()), (int) ldvt, &superb[0]); } else { RuntimeError("Unsupported data format"); } #endif if (info > 0) { RuntimeError("The algorithm computing SVD failed to converge.\n"); } } /// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = op(a) * op(b) + c</summary> /// <param name="a">Input matrix</param> /// <param name="transposeA">Whether matrix a is transposed</param> /// <param name="b">Input matrix</param> /// <param name="transposeB">Whether matrix b is transposed</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> void CPUMatrix<ElemType>::MultiplyAndAdd(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB, CPUMatrix<ElemType>& c) { return CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, transposeA, b, transposeB, 1.0, c); } template <class ElemType> void CPUMatrix<ElemType>::AssignSoftmaxSum(const CPUMatrix<ElemType>& softmax, CPUMatrix<ElemType>& c) { ElemType log_likelihood = 0.0; size_t batch_size = GetNumCols(); #pragma omp parallel for reduction(+ : log_likelihood) for (int instance_id = 0; instance_id < batch_size; instance_id++) { int sample = (int) (*this)(0, instance_id); log_likelihood += softmax(instance_id, sample); } c(0, 0) = -log_likelihood; } template <class ElemType> void CPUMatrix<ElemType>::AssignNCEUnnormalizedEval(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& bias, CPUMatrix<ElemType>& c) //this: samples+probs // a: hidden // b: embedding // tmp: softmax // c: loglikelihood { ElemType log_likelihood = 0.0; size_t batch_size = GetNumCols(); #pragma omp parallel for reduction(+ : log_likelihood) for (int instance_id = 0; instance_id < batch_size; instance_id++) { int sample = -(int) (*this)(0, instance_id); ElemType score = bias(sample, 0); for (int dim = 0; dim < b.GetNumRows(); dim++) score += b(dim, 
sample) * a(dim, instance_id); log_likelihood += score; } c(0, 0) = -log_likelihood; } //samples+prob gradient hidden embedding embedding/hidden //a.m_CPUMatrix->AssignNCEDerivative(*tmp.m_CPUMatrix, *a.m_CPUMatrix, *b.m_CPUMatrix, inputIndex, *c.m_CPUMatrix); template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNCEDerivative(const CPUMatrix<ElemType>& tmp, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t inputIndex, CPUMatrix<ElemType>& c) { size_t sample_size = GetNumRows() / 2; size_t batch_size = GetNumCols(); if (inputIndex == 1) { #pragma omp parallel for for (int instance_id = 0; instance_id < batch_size; instance_id++) for (int sample_id = 0; sample_id < sample_size; sample_id++) { int sample = (int) (*this)(2 * sample_id, instance_id); for (int dim = 0; dim < b.GetNumRows(); dim++) c(dim, instance_id) -= b(dim, sample) * tmp(sample_id, instance_id); } } else if (inputIndex == 2) { int i_blocks = omp_get_num_threads() * 16; // Assume only one block in k direction. // We don't need to explicitly block in the j direction. #pragma omp parallel for for (int ib = 0; ib < i_blocks; ib++) for (int instance_id = 0; instance_id < batch_size; instance_id++) for (int sample_id = 0; sample_id < sample_size; sample_id++) { int sample = (int) (*this)(2 * sample_id, instance_id); if (sample % i_blocks == ib) for (int dim = 0; dim < b.GetNumRows(); dim++) c(dim, sample) -= a(dim, instance_id) * tmp(sample_id, instance_id); } } else if (inputIndex == 3) { // Assume only one block in k direction. // We don't need to explicitly block in the j direction. for (int instance_id = 0; instance_id < batch_size; instance_id++) for (int sample_id = 0; sample_id < sample_size; sample_id++) { int sample = (int) (*this)(2 * sample_id, instance_id); c(0, sample) -= tmp(sample_id, instance_id); } } else InvalidArgument("The argument inputIndex must be 1 or 2 or 3."); return *this; } template <class ElemType> void CPUMatrix<ElemType>::AssignNoiseContrastiveEstimation(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& bias, CPUMatrix<ElemType>& tmp, CPUMatrix<ElemType>& c) //this: samples+probs // a: hidden // b: embedding // tmp: softmax // c: loglikelihood { double log_likelihood = 0.0; size_t sample_size = GetNumRows() / 2; size_t batch_size = GetNumCols(); size_t num_noise_samples = sample_size - 1; double log_num_noise_samples = std::log(num_noise_samples); #pragma omp parallel for reduction(+ : log_likelihood) for (int instance_id = 0; instance_id < batch_size; instance_id++) for (int sample_id = 0; sample_id < sample_size; sample_id++) { int sample = (int) (*this)(2 * sample_id, instance_id); double score = bias(0, sample); for (int dim = 0; dim < b.GetNumRows(); dim++) score += (double)(a(dim, instance_id) * b(dim, sample)); double sample_prob = -(*this)(2 * sample_id + 1, instance_id); if (sample_id == 0) sample_prob = -sample_prob; double score_noise = log_num_noise_samples + sample_prob; double z = LogAdd(score, score_noise); double logprob = score - z; double logprob_noise = score_noise - z; tmp(sample_id, instance_id) = (ElemType) -std::exp(logprob); if (sample_id == 0) tmp(sample_id, instance_id) += 1; log_likelihood += sample_id == 0 ? 
logprob : logprob_noise; } c(0, 0) = (ElemType) -log_likelihood; } /// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = op(a) * op(b)</summary> /// <param name="a">Input matrix</param> /// <param name="transposeA">Whether matrix a is transposed</param> /// <param name="b">Input matrix</param> /// <param name="transposeB">Whether matrix b is transposed</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> void CPUMatrix<ElemType>::Multiply(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB, CPUMatrix<ElemType>& c) { return CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, transposeA, b, transposeB, 0.0, c); } /// <summary>Matrix-matrix multiply with col-major matrices (a and b are not transposed): c = a * b</summary> /// <param name="a">Input matrix</param> /// <param name="b">Input matrix</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> void CPUMatrix<ElemType>::Multiply(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c) { return CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, false, b, false, 0.0, c); } /// <summary>Matrix-scalar multiply with col-major matrices: c = alpha * a + c</summary> /// if a is a column vector, add to all columns of c /// if a is a row vector, add to all rows of c /// if a is a scalar, add to all rows of c /// <param name="alpha">Scalar</param> /// <param name="a">Input matrix</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> void CPUMatrix<ElemType>::ScaleAndAdd(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c) { if (a.IsEmpty() || c.IsEmpty()) LogicError("ScaleAndAdd: one of the input matrices is empty."); if (a.GetNumRows() != 1 && a.GetNumCols() != 1) // a is not a col or row vector { const int m = (int) a.GetNumRows(); const int n = (int) a.GetNumCols(); const int len = m * n; const int incx = 1; const int incy = 1; assert(m > 0 && n > 0 && len > 0); // converting from size_t to int may cause overflow if ((int) c.GetNumRows() != m || (int) c.GetNumCols() != n) InvalidArgument("Dimension of matrix c does not match dimension of matrix a."); if (std::is_same<ElemType, double>::value) { cblas_daxpy(len, alpha, reinterpret_cast<double*>(a.Data()), incx, reinterpret_cast<double*>(c.Data()), incy); } else if (std::is_same<ElemType, float>::value) { #pragma warning(suppress : 4244) cblas_saxpy(len, alpha, reinterpret_cast<float*>(a.Data()), incx, reinterpret_cast<float*>(c.Data()), incy); } else { RuntimeError("Unsupported data format"); } } else if (a.GetNumElements() == 1) // scalar, add to all elements { ElemType v = alpha * a(0, 0); long m = (long) c.GetNumRows(), n = (long) c.GetNumCols(); #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { c(i, j) += v; c(i + 1, j) += v; c(i + 2, j) += v; c(i + 3, j) += v; } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { c(i, j) += v; } } } else if (a.GetNumCols() == 1) // col vector, add it to all columns { int m = (int) c.GetNumRows(); if (m != (int) a.GetNumRows()) InvalidArgument("To add column vector, rows should match."); ElemType* aBufPtr = a.Data(); ElemType* cBufPtr = c.Data(); if (std::is_same<ElemType, double>::value) { #pragma omp parallel for foreach_column (j, c) { cblas_daxpy(m, 
alpha, reinterpret_cast<double*>(aBufPtr), 1, reinterpret_cast<double*>(cBufPtr + c.LocateColumn(j)), 1); } } else if (std::is_same<ElemType, float>::value) { #pragma omp parallel for foreach_column (j, c) { #pragma warning(suppress : 4244) cblas_saxpy(m, alpha, reinterpret_cast<float*>(aBufPtr), 1, reinterpret_cast<float*>(cBufPtr + c.LocateColumn(j)), 1); } } else { RuntimeError("Unsupported data format"); } } else // row vector, add it to all rows { int m = (int) c.GetNumRows(); int n = (int) c.GetNumCols(); if (n != (int) a.GetNumCols()) InvalidArgument("To add row vector, cols should match."); ElemType* aBufPtr = a.Data(); ElemType* cBufPtr = c.Data(); if (std::is_same<ElemType, double>::value) { #pragma omp parallel for foreach_row (i, c) { cblas_daxpy(n, alpha, reinterpret_cast<double*>(aBufPtr), 1, reinterpret_cast<double*>(cBufPtr + i), m); } } else if (std::is_same<ElemType, float>::value) { #pragma omp parallel for foreach_row (i, c) { #pragma warning(suppress : 4244) cblas_saxpy(n, alpha, reinterpret_cast<float*>(aBufPtr), 1, reinterpret_cast<float*>(cBufPtr + i), m); } } else { RuntimeError("Unsupported data format"); } } } /// <summary>c += alpha * (a-b)</summary> /// if a, b, c must have same dim /// <param name="alpha">Scalar</param> /// <param name="a">Input matrix</param> /// <param name="b">Input matrix</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> void CPUMatrix<ElemType>::AddScaledDifference(const ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c) { if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumRows() == c.GetNumRows() && a.GetNumCols() == b.GetNumCols() && a.GetNumCols() == c.GetNumCols())) { InvalidArgument("AddScaledDifference: a, b, and c must have same dimension."); } if (a.IsEmpty()) LogicError("AddScaledDifference: Input matrix a is empty."); ElemType* aBufPtr = a.Data(); ElemType* bBufPtr = b.Data(); ElemType* cBufPtr = c.Data(); long m = (long) c.GetNumElements(); #pragma omp parallel for // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { cBufPtr[i] += alpha * (aBufPtr[i] - bBufPtr[i]); cBufPtr[i + 1] += alpha * (aBufPtr[i + 1] - bBufPtr[i + 1]); cBufPtr[i + 2] += alpha * (aBufPtr[i + 2] - bBufPtr[i + 2]); cBufPtr[i + 3] += alpha * (aBufPtr[i + 3] - bBufPtr[i + 3]); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { cBufPtr[i] += alpha * (aBufPtr[i] - bBufPtr[i]); } } /// <summary> c = alpha * (a-b)</summary> /// if a, b, c must have same dim /// <param name="alpha">Scalar</param> /// <param name="a">Input matrix</param> /// <param name="b">Input matrix</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> void CPUMatrix<ElemType>::AssignScaledDifference(const ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c) { if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols())) { InvalidArgument("AssignScaledDifference: a, b must have same dimension."); } if (a.IsEmpty()) LogicError("AssignScaledDifference: Input matrix a is empty."); if (&c != &a && &c != &b) c.RequireSize(a.GetNumRows(), a.GetNumCols()); ElemType* aBufPtr = a.Data(); ElemType* bBufPtr = b.Data(); ElemType* cBufPtr = c.Data(); long m = (long) c.GetNumElements(); #pragma omp parallel for // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { cBufPtr[i] = alpha * (aBufPtr[i] - bBufPtr[i]); cBufPtr[i + 1] = alpha * 
(aBufPtr[i + 1] - bBufPtr[i + 1]); cBufPtr[i + 2] = alpha * (aBufPtr[i + 2] - bBufPtr[i + 2]); cBufPtr[i + 3] = alpha * (aBufPtr[i + 3] - bBufPtr[i + 3]); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { cBufPtr[i] = alpha * (aBufPtr[i] - bBufPtr[i]); } } // c[ci,cj] += a[ai,aj] template <class ElemType> void CPUMatrix<ElemType>::AddElementToElement(ElemType beta, const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj) { if (ai >= a.GetNumRows() || aj >= a.GetNumCols() || ci >= c.GetNumRows() || cj >= c.GetNumCols()) InvalidArgument("AddElementToElement: index out of range."); ElemType us = beta ? beta * c(ci, cj) : (ElemType)0; // do not multiply if beta is 0, could be a NaN us += a(ai, aj); c(ci, cj) = us; } ////c[ci,cj] += a[ai,aj] //template<class ElemType> //void CPUMatrix<ElemType>::AddLogElementToElement(const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj) //{ // if (ai >= a.GetNumRows() || aj >=a.GetNumCols() || // ci >= c.GetNumRows() || cj >=c.GetNumCols()) // InvalidArgument("AddElementToElement: index out of range."); // // ElemType v = a(ai,aj); // c(ci, cj) += ((v < EPS_IN_LOG) ? LOG_OF_EPS_IN_LOG : log(v)); //} #if 0 // now done as AddElementToElement (beta=0) // c[ci,cj] = a[ai,aj] template <class ElemType> void CPUMatrix<ElemType>::AssignElementToElement(const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj) { if (ai >= a.GetNumRows() || aj >= a.GetNumCols() || ci >= c.GetNumRows() || cj >= c.GetNumCols()) InvalidArgument("AssignElementToElement: index out of range."); c(ci, cj) = a(ai, aj); } #endif /// <summary>c += alpha * (a-b)</summary> /// if a, b, c must have same dim /// <param name="alpha">1X1 matrix</param> /// <param name="a">Input matrix</param> /// <param name="b">Input matrix</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> void CPUMatrix<ElemType>::AddScaledDifference(const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c) { if (alpha.GetNumElements() != 1) InvalidArgument("AddScaledDifference: alpha must be a 1X1 matrix."); AddScaledDifference(alpha(0, 0), a, b, c); } /// <summary> c = alpha * (a-b)</summary> /// if a, b, c must have same dim /// <param name="alpha">1X1 matrix</param> /// <param name="a">Input matrix</param> /// <param name="b">Input matrix</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> void CPUMatrix<ElemType>::AssignScaledDifference(const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c) { if (alpha.GetNumElements() != 1) InvalidArgument("AddScaledDifference: alpha must be a 1X1 matrix."); AssignScaledDifference(alpha(0, 0), a, b, c); } /// <summary>Matrix-scalar multiply with col-major matrices: c = alpha * a</summary> /// <param name="alpha">Scalar</param> /// <param name="a">Input matrix</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> /*static*/ void CPUMatrix<ElemType>::Scale(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c) { if (a.IsEmpty()) LogicError("Scale: Input matrix a is empty."); const int m = (int) a.GetNumRows(); const int n = (int) a.GetNumCols(); 
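    // The alpha == 0 case below is handled with memset rather than a multiply pass:
    // it never reads 'a', so c becomes exactly zero even where 'a' holds NaN or Inf
    // (0 * NaN would be NaN), and it avoids touching the source memory entirely.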
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow c.RequireSize(m, n); ElemType* aBufPtr = a.Data(); ElemType* cBufPtr = c.Data(); if (alpha == 0) { memset(cBufPtr, 0, sizeof(ElemType) * c.GetNumElements()); return; } long size = (long) c.GetNumElements(); #pragma omp parallel for // four-way unrolling for (long i = 0; i < (size & ~3); i += 4) { cBufPtr[i] = alpha * aBufPtr[i]; cBufPtr[i + 1] = alpha * aBufPtr[i + 1]; cBufPtr[i + 2] = alpha * aBufPtr[i + 2]; cBufPtr[i + 3] = alpha * aBufPtr[i + 3]; } // remaining elements for (long i = size & ~3; i < size; i++) { cBufPtr[i] = alpha * aBufPtr[i]; } } /// <summary>Matrix-scalar multiply with col-major matrices: a = alpha * a</summary> /// <param name="alpha">Scalar</param> /// <param name="a">Input matrix</param> template <class ElemType> /*static*/ void CPUMatrix<ElemType>::Scale(ElemType alpha, CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("Scale: Input matrix a is empty."); const int m = (int) a.GetNumRows(); const int n = (int) a.GetNumCols(); const int len = m * n; const int incx = 1; assert(m > 0 && n > 0 && len > 0); // converting from size_t to int may cause overflow if (alpha == 0 && incx == 1) { memset(a.Data(), 0, sizeof(ElemType) * len); } else if (std::is_same<ElemType, double>::value) { cblas_dscal(len, alpha, reinterpret_cast<double*>(a.Data()), incx); } else if (std::is_same<ElemType, float>::value) { #pragma warning(suppress : 4244) cblas_sscal(len, alpha, reinterpret_cast<float*>(a.Data()), incx); } else { RuntimeError("Unsupported data format"); } } /// <summary>Matrix multiply with col-major matrices: a = alpha[1,1] * a</summary> /// <param name="alpha">1x1 matrix</param> /// <param name="a">Input matrix</param> template <class ElemType> /*static*/ void CPUMatrix<ElemType>::Scale(CPUMatrix<ElemType> alpha, CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("Scale: Input matrix a is empty."); if (alpha.GetNumElements() != 1) LogicError("Matrix alpha must be 1x1"); CPUMatrix<ElemType>::Scale(alpha(0, 0), a); } template <class ElemType> void CPUMatrix<ElemType>::InnerProduct(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, const bool isColWise) { if (a.IsEmpty() || b.IsEmpty()) LogicError("InnerProduct: one of the input matrices is empty."); const int m = (int) a.GetNumRows(); const int n = (int) a.GetNumCols(); const int k = (int) b.GetNumRows(); const int l = (int) b.GetNumCols(); assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow if (m != k || n != l) InvalidArgument("InnerProduct: Matrices a and b should have same dimension."); if ((isColWise && m == 1) || !isColWise && n == 1) // in this case it's equivalent to element-wise product { c.AssignElementProductOf(a, b); } else if (isColWise) // col-wise { c.RequireSize(1, n); ElemType* aBufPtr = a.Data(); ElemType* bBufPtr = b.Data(); if (std::is_same<ElemType, double>::value) { #pragma omp parallel for foreach_column (j, c) { c(0, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn(j)), 1); } } else if (std::is_same<ElemType, float>::value) { #pragma omp parallel for foreach_column (j, c) { #pragma warning(suppress : 4244) c(0, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn(j)), 1); } } else { RuntimeError("Unsupported data format"); } } else { c.RequireSize(m, 1); ElemType* aBufPtr = 
a.Data(); ElemType* bBufPtr = b.Data(); if (std::is_same<ElemType, double>::value) { #pragma omp parallel for foreach_row (i, c) { c(i, 0) = cblas_ddot(n, reinterpret_cast<double*>(aBufPtr + i), m, reinterpret_cast<double*>(bBufPtr + i), m); } } else if (std::is_same<ElemType, float>::value) { #pragma omp parallel for foreach_row (i, c) { #pragma warning(suppress : 4244) c(i, 0) = cblas_sdot(n, reinterpret_cast<float*>(aBufPtr + i), m, reinterpret_cast<float*>(bBufPtr + i), m); } } else { RuntimeError("Unsupported data format"); } } } // treat matrices as vectors. do vec(a)^T vec(b) template <class ElemType> ElemType CPUMatrix<ElemType>::InnerProductOfMatrices(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b) { if (a.IsEmpty() || b.IsEmpty()) LogicError("InnerProductOfMatrices: one of the input matrices is empty."); const int m = (int) a.GetNumRows(); const int n = (int) a.GetNumCols(); const int k = (int) b.GetNumRows(); const int l = (int) b.GetNumCols(); assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow if (m != k || n != l) InvalidArgument("InnerProductOfMatrices: Matrices a and b should have same dimension."); if (std::is_same<ElemType, double>::value) { return (ElemType) cblas_ddot((int) a.GetNumElements(), reinterpret_cast<double*>(a.Data()), 1, reinterpret_cast<double*>(b.Data()), 1); } else if (std::is_same<ElemType, float>::value) { #pragma warning(suppress : 4244) return (ElemType) cblas_sdot((int) a.GetNumElements(), reinterpret_cast<float*>(a.Data()), 1, reinterpret_cast<float*>(b.Data()), 1); } else { RuntimeError("Unsupported data format"); } } template <class ElemType> void CPUMatrix<ElemType>::ElementWisePower(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c) { if (a.IsEmpty()) LogicError("Scale: The input matrix a is empty."); c.RequireSize(a.GetNumRows(), a.GetNumCols()); if (alpha == 2) { #pragma omp parallel for foreach_coord (i, j, c) { c(i, j) = a(i, j) * a(i, j); } } else if (alpha == 3) { #pragma omp parallel for foreach_coord (i, j, c) { c(i, j) = a(i, j) * a(i, j) * a(i, j); } } else { #pragma omp parallel for foreach_coord (i, j, c) { c(i, j) = pow(a(i, j), alpha); } } } template <class ElemType> void CPUMatrix<ElemType>::BatchMatMul(ElemType beta, const CPUMatrix<ElemType>& a, const bool transposeA, const int m, const CPUMatrix<ElemType>& b, const bool transposeB, const int n, CPUMatrix<ElemType>& c, const bool isColWise) { if (a.IsEmpty() || b.IsEmpty()) LogicError("BatchMatMul: one of the input matrices is empty."); if (!isColWise) LogicError("Only column wise is supported."); const int aSampleElemNum = (int)a.GetNumRows(); const int aBatchSize = (int)a.GetNumCols(); const int bSampleElemNum = (int)b.GetNumRows(); const int bBatchSize = (int)b.GetNumCols(); assert(aSampleElemNum > 0 && aBatchSize > 0 && bSampleElemNum > 0 && bBatchSize > 0); if (aBatchSize != bBatchSize) InvalidArgument("BatchMatMul: Matrices a and b should have same batch size."); int k = aSampleElemNum / m; int kb = bSampleElemNum / n; if (k != kb) InvalidArgument("BatchMatMul: Matrices a's cols number should match Matrices b's rows number."); size_t cSampleElemNum = m * n; if (beta == 0) c.RequireSize(cSampleElemNum, aBatchSize); else c.VerifySize(cSampleElemNum, aBatchSize); // Can't resize if beta != 0 #ifdef USE_OPENBLAS int lda, ldb, ldc; CBLAS_TRANSPOSE blasTransA; CBLAS_TRANSPOSE blasTransB; lda = transposeA ? k : m; ldb = transposeB ? n : k; blasTransA = transposeA ? 
CblasTrans : CblasNoTrans; blasTransB = transposeB ? CblasTrans : CblasNoTrans; ldc = m; std::vector<const ElemType *> a_array; std::vector<const ElemType *> b_array; std::vector<ElemType *> c_array; a_array.reserve(aBatchSize); b_array.reserve(aBatchSize); c_array.reserve(aBatchSize); ElemType* aBufPtr = a.Data(); ElemType* bBufPtr = b.Data(); ElemType* cBufPtr = c.Data(); for (size_t i = 0; i < aBatchSize; i++) { a_array.push_back(aBufPtr + a.LocateColumn(i)); b_array.push_back(bBufPtr + b.LocateColumn(i)); c_array.push_back(cBufPtr + c.LocateColumn(i)); } for (size_t i = 0; i < aBatchSize; i++) { if (sizeof(ElemType) == sizeof(double)) { double alpha = 1.0; cblas_dgemm((CBLAS_ORDER)(int)MatrixOrder::ColMajor, blasTransA, blasTransB, m, n, k, alpha, reinterpret_cast<const double*>(a_array[i]), lda, reinterpret_cast<const double*>(b_array[i]), ldb, double(beta), reinterpret_cast<double*>(c_array[i]), ldc); } else { float alpha = 1.0f; cblas_sgemm((CBLAS_ORDER)(int)MatrixOrder::ColMajor, blasTransA, blasTransB, m, n, k, alpha, reinterpret_cast<const float*>(a_array[i]), lda, reinterpret_cast<const float*>(b_array[i]), ldb, float(beta), reinterpret_cast<float*>(c_array[i]), ldc); } } #else std::vector<int> m_array(aBatchSize, m); std::vector<int> n_array(aBatchSize, n); std::vector<int> k_array(aBatchSize, k); std::vector<int> lda_array(aBatchSize, transposeA ? k : m); std::vector<int> ldb_array(aBatchSize, transposeB ? n : k); std::vector<int> ldc_array(aBatchSize, m); std::vector<int> group_size(1, aBatchSize); std::vector<CBLAS_TRANSPOSE> transa_array(aBatchSize, transposeA ? CblasTrans : CblasNoTrans); std::vector<CBLAS_TRANSPOSE> transb_array(aBatchSize, transposeB ? CblasTrans : CblasNoTrans); std::vector<const ElemType *> a_array; std::vector<const ElemType *> b_array; std::vector<ElemType *> c_array; a_array.reserve(aBatchSize); b_array.reserve(aBatchSize); c_array.reserve(aBatchSize); ElemType* aBufPtr = a.Data(); ElemType* bBufPtr = b.Data(); ElemType* cBufPtr = c.Data(); for (size_t i = 0; i < aBatchSize; i++) { a_array.push_back(aBufPtr + a.LocateColumn(i)); b_array.push_back(bBufPtr + b.LocateColumn(i)); c_array.push_back(cBufPtr + c.LocateColumn(i)); } if (sizeof(ElemType) == sizeof(double)) { std::vector<double> alpha_array(group_size[0], 1.0); std::vector<double> beta_array(group_size[0], double(beta)); cblas_dgemm_batch(CblasColMajor, &transa_array[0], &transb_array[0], &m_array[0], &n_array[0], &k_array[0], &alpha_array[0], reinterpret_cast<const double**>(&a_array[0]), &lda_array[0], reinterpret_cast<const double**>(&b_array[0]), &ldb_array[0], &beta_array[0], reinterpret_cast<double**>(&c_array[0]), &ldc_array[0], 1, &group_size[0]); } else { std::vector<float> alpha_array(group_size[0], 1.0f); std::vector<float> beta_array(group_size[0], float(beta)); cblas_sgemm_batch(CblasColMajor, &transa_array[0], &transb_array[0], &m_array[0], &n_array[0], &k_array[0], &alpha_array[0], reinterpret_cast<const float**>(&a_array[0]), &lda_array[0], reinterpret_cast<const float**>(&b_array[0]), &ldb_array[0], &beta_array[0], reinterpret_cast<float**>(&c_array[0]), &ldc_array[0], 1, &group_size[0]); } #endif } template <class ElemType> bool CPUMatrix<ElemType>::AreEqual(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const ElemType threshold /*= 1e-8*/) { if (a.GetNumRows() != b.GetNumRows() || a.GetNumCols() != b.GetNumCols()) return false; bool result = true; #pragma omp parallel for foreach_coord (i, j, a) { if (abs(a(i, j) - b(i, j)) > threshold) { result = false; break; 
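            // break only exits the inner (row) loop of foreach_coord; other threads keep
            // scanning their own columns. The unsynchronized writes to 'result' are benign
            // in practice since every write stores the same value (false).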
} } return result; } // see Matrix<ElemType>::TensorShuffleScaleAndAdd() for comments template <class ElemType> void CPUMatrix<ElemType>::TensorShuffleScaleAndAdd(ElemType keepWeight, const CPUMatrix<ElemType>& a, size_t D, size_t S, size_t M, size_t K, size_t T, ElemType scaleFactor, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c) { size_t N = D * S * M * K * T; const auto pa = a.Data(); const auto pb = b.Data(); auto pc = c.Data(); // Note: This code is written to match a GPU implementation. It is not super-efficient on the CPU. for (size_t na = 0; na < N; na++) // loop over all elements { // recover the 5 indices from the loop counter size_t d = na % D; size_t s = (na / D) % S; size_t m = (na / D / S) % M; size_t k = (na / D / S / M) % K; size_t t = (na / D / S / M / K) % T; // compute index for the a and b/c tensors assert(na == (((t * K + k) * M + m) * S + s) * D + d); // input tensor of dimension (D x S x M x K x T) size_t nb = (((t * S + s) * M + m) * K + k) * D + d; // output tensor of dimension (D x K x M x S x T): k/K and s/S swapped assert(nb < N); // perform the computation ElemType cval = keepWeight ? keepWeight * pb[nb] : (ElemType)0; // if weight is 0 then don't bother to read memory (efficiency) or to multiply (NaN-safe) cval += scaleFactor * pa[na]; pc[nb] = cval; } } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::Ones(const size_t rows, const size_t cols) { CPUMatrix<ElemType> c(rows, cols); // will initialize to 0 c.SetValue(1); return c; } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::Zeros(const size_t rows, const size_t cols) { CPUMatrix<ElemType> c(rows, cols); // will initialize to 0 c.SetValue(0); return c; } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::Eye(const size_t rows) { CPUMatrix<ElemType> c(rows, rows); // will initialize to 0 c.SetDiagonalValue(1); return c; } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::RandomUniform(const size_t rows, const size_t cols, const ElemType low, const ElemType high, unsigned long seed) { CPUMatrix<ElemType> c(rows, cols); // will initialize to 0 c.SetUniformRandomValue(low, high, seed); return c; } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::RandomGaussian(const size_t rows, const size_t cols, const ElemType mean, const ElemType sigma, unsigned long seed) { CPUMatrix<ElemType> c(rows, cols); // will initialize to 0 c.SetGaussianRandomValue(mean, sigma, seed); return c; } template <class ElemType> bool CPUMatrix<ElemType>::HasElement(const CPUMatrix<ElemType>& mat, const ElemType v) { bool bHas = false; bool isvFinite = std::isfinite(v); #pragma omp parallel for for (long j = 0; j < mat.GetNumElements(); j++) { #pragma omp flush(bHas) if (!bHas) { ElemType cur = mat.Data()[j]; if (isvFinite && std::isfinite(cur)) { if (cur == v) bHas = true; } else if (std::isnan(v) && std::isnan(cur)) bHas = true; else if (std::isinf(v) && std::isinf(cur) && std::signbit(v) == std::signbit(cur)) bHas = true; } } return bHas; } // CPUMatrix<ElemType>& AssignElementProductOfWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift, size_t negnumber); //[this]=a .* b // here, a and b must be two row vectors of the same size, i.e. 
[1,m] // the inputs are two rwo vectors // the output is a matrix of size(neg+1, col) template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOfWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift, size_t negnumber) { if (a.IsEmpty() || b.IsEmpty()) LogicError("AssignElementProductOfWithShiftNeg: Matrix is empty."); if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols())) InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix dimensions do not match."); if (a.GetNumRows() != 1) InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix must be a row vector."); auto& us = *this; if (this != &a) { RequireSize(negnumber + 1, a.GetNumCols()); // RequireSize(a.GetNumRows(), a.GetNumCols()); } long m = (long) GetNumRows(), n = (long) GetNumCols(); // a and b are of size (1,n) // #pragma omp parallel for for (long j = 0; j < n; j++) { us(0, j) = a(0, j) * b(0, j); } for (long j = 0; j < n; j++) { for (long i = 1; i < m; i++) { us(i, j) = a(0, j) * b(0, (j + shift + i - 1) % n); } } return *this; } template <class ElemType> void CPUMatrix<ElemType>::InnerProductWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, const bool isColWise, size_t shift, size_t negnumber) { if (a.IsEmpty() || b.IsEmpty()) LogicError("InnerProduct: one of the input matrices is empty."); const int m = (int) a.GetNumRows(); const int n = (int) a.GetNumCols(); const int k = (int) b.GetNumRows(); const int l = (int) b.GetNumCols(); assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow if (m != k || n != l) InvalidArgument("InnerProduct: Matrices a and b should have same dimension."); if ((isColWise && m == 1) || !isColWise && n == 1) // in this case it's equivalent to element-wise product { InvalidArgument("InnerProduct: Both matrices should be normal ones, not vectors"); // c.AssignElementProductOf(a, b); } else if (isColWise) // col-wise { c.RequireSize(negnumber + 1, n); // this line ischanged ElemType* aBufPtr = a.Data(); ElemType* bBufPtr = b.Data(); if (std::is_same<ElemType, double>::value) { for (long j = 0; j < n; j++) { c(0, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn(j)), 1); } for (long j = 0; j < n; j++) { for (long i = 1; i < negnumber + 1; i++) { c(i, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn((j + shift + i - 1) % n)), 1); } } } else if (std::is_same<ElemType, float>::value) { for (long j = 0; j < n; j++) { c(0, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn(j)), 1); } for (long j = 0; j < n; j++) { for (long i = 1; i < negnumber + 1; i++) { c(i, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn((j + shift + i - 1) % n)), 1); } } } else { RuntimeError("Unsupported data format"); } } else { InvalidArgument("InnerProduct: Rowwise is not supported yet"); c.RequireSize(m, 1); ElemType* aBufPtr = a.Data(); ElemType* bBufPtr = b.Data(); if (std::is_same<ElemType, double>::value) { #pragma omp parallel for foreach_row (i, c) { c(i, 0) = (ElemType) cblas_ddot(n, reinterpret_cast<double*>(aBufPtr + i), m, reinterpret_cast<double*>(bBufPtr + i), m); } } else if 
(std::is_same<ElemType, float>::value) { #pragma omp parallel for foreach_row (i, c) { #pragma warning(suppress : 4244) c(i, 0) = cblas_sdot(n, reinterpret_cast<float*>(aBufPtr + i), m, reinterpret_cast<float*>(bBufPtr + i), m); } } else { RuntimeError("Unsupported data format"); } } } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::GetARowByIndex(const CPUMatrix<ElemType>& a, size_t index) { if (a.IsEmpty()) LogicError("GetARowByIndex: the input matrix is empty."); const int m = (int) a.GetNumRows(); const int n = (int) a.GetNumCols(); if (index >= (size_t) m) LogicError("GetARowByIndex: the row index is out of range."); assert(m > 0 && n > 0); // converting from size_t to int may cause overflow auto& us = *this; RequireSize(1, n); for (long j = 0; j < n; j++) { us(0, j) = a(index, j); } return *this; } // input: a, a row vector // input: b, a matrix. b.col == a.col // input firstmatrixfixed: If true, keep a's order. Otherwise, keep b's order // output: c, a matrix. c.size == b.size /* Example, a = [a1 a2 a3] b = [b11 b12 b13; b21 b22 b23 ] if true: shift = 1 then c = [a1*b12 a2*b13 a3*b11 a1*b22 a2*b23 a3*b21] if shift = 2 then c = [ a1*b13 a2*b11 a3*b12 a1*b23 a2*b21 a3*b22] i.e. we do column-wise shift if false: shift = 1 then c = [a2*b11 a3*b12 a1*b13 a2*b21 a3*b22 a1*b23] shift = 2 then c = [ a3*b11 a1*b12 a2*b13 a3*b21 a1*b22 a2*b23] */ template <class ElemType> void CPUMatrix<ElemType>::ConductRowElementMultiplyWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, size_t shift, bool bFirstmatrixfixed) { if (a.IsEmpty() || b.IsEmpty()) LogicError("ConductRowElementMultiplyWithShift: one of the input matrices is empty."); const int m = (int) a.GetNumRows(); const int n = (int) a.GetNumCols(); const int k = (int) b.GetNumRows(); const int l = (int) b.GetNumCols(); assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow if (m != 1 || n != l) InvalidArgument("ConductRowElementMultiplyWithShift: a must be a row vector with the same number of columns as b."); c.RequireSize(k, l); // c must be the same size as b if (bFirstmatrixfixed) { for (long j = 0; j < l; j++) { for (long i = 0; i < k; i++) { c(i, j) = a(0, j) * b(i, (j + shift) % l); } } } else { for (long j = 0; j < l; j++) { for (long i = 0; i < k; i++) { c(i, j) = a(0, (j + shift) % l) * b(i, j); } } } } // CPUMatrix<ElemType>& AssignElementProductOfWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift); //[this]=a .* b // here, a and b must be two row vectors of the same size, i.e. [1,m]. We will do element product with shift.
// inputs are 2 row vectors // output is a row vector template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOfWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift) { if (a.IsEmpty() || b.IsEmpty()) LogicError("AssignElementProductOfWithShift: Matrix is empty."); if (a.GetNumRows() != b.GetNumRows() || a.GetNumCols() != b.GetNumCols()) InvalidArgument("AssignElementProductOfWithShift: The input matrix dimensions do not match."); if (a.GetNumRows() != 1) InvalidArgument("AssignElementProductOfWithShift: The input matrix must be a row vector."); auto& us = *this; if (this != &a) { RequireSize(1, a.GetNumCols()); // RequireSize(a.GetNumRows(), a.GetNumCols()); } // long m = (long)GetNumRows(), n = (long)GetNumCols(); // a and b are of size (1,n) long n = (long) GetNumCols(); // a and b are of size (1,n) #pragma omp parallel for for (long j = 0; j < n; j++) { us(0, j) = a(0, j) * b(0, (j + shift) % n); } return *this; } #pragma endregion Static BLAS Functions // 'double' version of LogAdd inline double LogAddD(double x, double y) { return LogAdd(x, y); } template <class ElemType> ElemType CPUMatrix<ElemType>::LogSumOfElements() const { ElemType fAlpha = (ElemType) LZERO; ElemType* bufPtr = Data(); for (int k = 0; k < GetNumElements(); k++) fAlpha = (ElemType) LogAddD(fAlpha, bufPtr[k]); return fAlpha; } template <class ElemType> void CPUMatrix<ElemType>::RCRFBackwardCompute(const CPUMatrix<ElemType>& alpha, CPUMatrix<ElemType>& beta, const CPUMatrix<ElemType>& lbls, const CPUMatrix<ElemType>& pair_scores) { int iNumPos = (int) lbls.GetNumCols(); int iNumLab = (int) lbls.GetNumRows(); int lastLbl = -1; for (int ik = 0; ik < lbls.GetNumRows(); ik++) if (lbls(ik, iNumPos - 1) != 0) { lastLbl = ik; break; } beta.RequireSize(iNumLab, iNumPos); for (int t = iNumPos - 1; t >= 0; t--) { #pragma omp parallel for for (int k = 0; k < iNumLab; k++) { _rcrfBackwardCompute(t, k, alpha, beta, pair_scores); } } }; // Calculate alpha in forward-backward calculation. equation (6), (7) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf // GPU x dimension corresponds to utterances, y dimension corresponds to phone sequence in each utterance // prob (input): the posterior output from the network // alpha (output): alpha for forward-backward calculation. // phoneSeq (input): phone ID sequence for each utterance in this minibatch, each col is one utterance // phoneBound (input): phone boundary (frame index) of each phone for each utterance in this minibatch, each col is one utterance // uttToChanInd (input): map from utterance ID to minibatch channel ID. We need this because each channel may contain more than one utterance. // uttFrameNum (input): the frame number of each utterance. The size of this vector = the number of all utterances in this minibatch // uttBeginFrame(input): the position of the first frame of each utterance in the minibatch channel. We need this because each channel may contain more than one utterance. // uttPhoneNum (input): the number of phones in each utterance.
The size of this vector = the number of all utterances in this minibatch // numChannels (input): the number of channels in this minibatch // uttNum (input): number of utterances // t (input): time stamp to process // maxPhoneNum (input): the max number of phones across utterances // totalPhoneNum (input): the total number of phones of all utterances // blankTokenId (input): id of the CTC blank token // delayConstraint -- label output delay constraint introduced during training that allows a shorter delay during inference. // Alpha and Beta scores outside of the delay boundary are set to zero. // Setting this parameter smaller will result in a shorter delay between label outputs during decoding. // delayConstraint=-1 means no constraint template<class ElemType> void _assignAlphaScore( const ElemType *prob, ElemType *alphaScore, ElemType *phoneSeq, ElemType *phoneBound, const std::vector<size_t>& uttToChanInd, const std::vector<size_t>& uttFrameNum, const std::vector<size_t>& uttBeginFrame, const std::vector<size_t>& uttPhoneNum, size_t numChannels, const size_t uttNum, const size_t t, const size_t maxPhoneNum, // Maximum length of utterance in this MB const size_t totalPhoneNum, // Total number of phones const size_t blankTokenId, const int delayConstraint) { for (size_t uttId = 0;uttId < uttNum;uttId++) { // Number of phones and frames in this utterance size_t frameNum = uttFrameNum[uttId]; if (t >= frameNum) continue; size_t phoneNum = uttPhoneNum[uttId]; #pragma omp parallel for for (int phoneSeqId = 1;phoneSeqId < phoneNum - 1;phoneSeqId++) { // Index of the label in the sequence // Current and previous phone indices in phoneSeq matrix size_t labelid = uttId*maxPhoneNum + phoneSeqId; // Actual current phone label size_t phoneId = (size_t)(phoneSeq[labelid]); // Index of the current frame in minibatch size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId]; // Index of probability of observing phoneId at frame timeId size_t probId = timeId*totalPhoneNum + phoneId; size_t alphaId = maxPhoneNum* timeId + phoneSeqId; // alpha_t(s) if (t == 0) { // Initialize recursion if (phoneSeqId == 1 || phoneSeqId == 2) { alphaScore[alphaId] = prob[probId]; } } else { if (phoneSeqId >= 1) { size_t timeId_1 = timeId - numChannels; // Index corresponding to (t-1) size_t alphaId_0 = maxPhoneNum* timeId_1 + phoneSeqId; // alpha_{t-1}(s) size_t alphaId_1 = alphaId_0 - 1; // alpha_{t-1}(s-1) size_t alphaId_2 = alphaId_0 - 2; // alpha_{t-1}(s-2) ElemType x = LZERO; ElemType ascore; if (phoneSeqId > 2) { size_t labelid_2 = labelid - 2; // if the current label is not blank and not equal to the previous non-blank label if ((size_t)(phoneSeq[labelid]) != blankTokenId && phoneId != (size_t)(phoneSeq[labelid_2])) { x = LogAdd(x, alphaScore[alphaId_2]); } } if (phoneSeqId > 1) { x = LogAdd(x, alphaScore[alphaId_1]); } x = LogAdd(x, alphaScore[alphaId_0]); if (phoneId != SIZE_MAX) ascore = prob[probId]; // Probability of observing given label at given time else ascore = 0; alphaScore[alphaId] = (ElemType)x + ascore; if (delayConstraint != -1) { size_t labelid_r = labelid + 2; size_t phoneBoundId_r = (size_t)(phoneBound[labelid_r]); if (phoneId == blankTokenId) { // only constrain the right side if (t > phoneBoundId_r + delayConstraint - 1) alphaScore[alphaId] = LZERO; } else if (phoneId != blankTokenId) { if (t > phoneBoundId_r + delayConstraint) alphaScore[alphaId] = LZERO; } } } } } } } // Calculate beta in forward-backward calculation, equation (10), (11) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf // See
_assignAlphaScore for the explanation of parameters template<class ElemType> void _assignBetaScore( const ElemType *prob, ElemType *betaScore, ElemType *phoneSeq, ElemType *phoneBound, const std::vector<size_t>& uttToChanInd, const std::vector<size_t>& uttFrameNum, const std::vector<size_t>& uttBeginFrame, const std::vector<size_t>& uttPhoneNum, const size_t numChannels, const size_t uttNum, const long t, const size_t maxPhoneNum, const size_t totalPhoneNum, const size_t blankTokenId, const int delayConstraint) { for (size_t uttId = 0;uttId < uttNum;uttId++) { // Number of phones and frames in this utterance size_t frameNum = uttFrameNum[uttId]; if (t >= frameNum) continue; size_t phoneNum = uttPhoneNum[uttId]; #pragma omp parallel for for (int phoneSeqId = 1;phoneSeqId < phoneNum - 1;phoneSeqId++) { size_t labelid = uttId*maxPhoneNum + phoneSeqId; size_t labelid_2 = labelid + 2; size_t phoneId = (LONG64)(phoneSeq[labelid]); size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId]; size_t probId = timeId*totalPhoneNum + phoneId; size_t betaid = maxPhoneNum* timeId + phoneSeqId; size_t timeId_1 = timeId + numChannels; size_t betaid_0 = maxPhoneNum* timeId_1 + phoneSeqId; size_t betaid_1 = betaid_0 + 1; size_t betaid_2 = betaid_0 + 2; if (t == frameNum - 1) { if (phoneSeqId == phoneNum - 3 || phoneSeqId == phoneNum - 2) { betaScore[betaid] = prob[probId]; } } else { if (phoneSeqId >= 1) { ElemType x = LZERO; ElemType ascore; if (phoneSeqId < phoneNum - 3) { if (phoneSeq[labelid] != blankTokenId && phoneId != phoneSeq[labelid_2]) { x = LogAdd(x, betaScore[betaid_2]); } } if (phoneSeqId < phoneNum - 2) { x = LogAdd(x, betaScore[betaid_1]); } x = LogAdd(x, betaScore[betaid_0]); if (phoneId != SIZE_MAX) ascore = prob[probId]; else ascore = 0; betaScore[betaid] = (ElemType)x + ascore; if (delayConstraint != -1) { size_t phoneBoundId_r = (size_t)(phoneBound[labelid_2]); if (phoneId == blankTokenId) { if (t > phoneBoundId_r + delayConstraint - 1) betaScore[betaid] = LZERO; } else if (phoneId != blankTokenId) { if (t > phoneBoundId_r + delayConstraint) betaScore[betaid] = LZERO; } } } } } } } // Calculate CTC score. 
equation (8) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf template<class ElemType> void _assignTotalScore(ElemType *betaScore, std::vector<ElemType>& totalScore, const size_t uttNum, const std::vector<size_t>& uttToChanInd, const std::vector<size_t>& uttBeginFrame, const size_t numChannels, const size_t maxPhoneNum) { #pragma omp parallel for for (int uttId = 0; uttId < uttNum; uttId++) { if (uttId < uttNum) { LONG64 alphaId_0 = (uttBeginFrame[uttId] * numChannels + uttToChanInd[uttId]) * maxPhoneNum; betaScore[alphaId_0] = LogAdd(betaScore[alphaId_0 + 1], betaScore[alphaId_0 + 2]); totalScore[uttId] = betaScore[alphaId_0]; } } } // Calculate derivative, equation (15) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf // See _assignAlphaScore for the explanation of parameters template<class ElemType> void _assignCTCScore( ElemType *CTCscore, ElemType *prob, ElemType *alphaScore, ElemType *betaScore, ElemType *phoneSeq, const size_t uttNum, const std::vector<size_t>& uttToChanInd, const std::vector<size_t>& uttBeginFrame, const std::vector<size_t>& uttPhoneNum, const std::vector<size_t>& uttFrameNum, const size_t numChannels, const size_t maxPhoneNum, const size_t totalPhoneNum) { for (size_t uttId = 0;uttId < uttNum;uttId++) { #pragma omp parallel for for (int t = 0; t < uttFrameNum[uttId]; t++) { size_t phoneNum = uttPhoneNum[uttId]; size_t alphaId_0 = (uttBeginFrame[uttId] * numChannels + uttToChanInd[uttId]) * maxPhoneNum; size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId]; ElemType P_lx = betaScore[alphaId_0]; for (int s = 1; s < phoneNum - 1; s++) { long phoneId = phoneSeq[uttId*maxPhoneNum + s]; size_t alphaId = maxPhoneNum* timeId + s; size_t probId = timeId*totalPhoneNum + phoneId; if (phoneId != SIZE_MAX) { ElemType logoccu = alphaScore[alphaId] + betaScore[alphaId] - prob[probId] - (ElemType)P_lx; CTCscore[probId] = LogAdd(CTCscore[probId], logoccu); } } for (int s = 0; s < totalPhoneNum; s++) { size_t probId = timeId*totalPhoneNum + s; ElemType logoccu = CTCscore[probId]; if (logoccu < LZERO) CTCscore[probId] = 0.0f; else CTCscore[probId] = exp(logoccu); } } } } template<class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCTCScore( const CPUMatrix<ElemType>& prob, CPUMatrix<ElemType>& alpha, CPUMatrix<ElemType>& beta, const CPUMatrix<ElemType>& phoneSeq, const CPUMatrix<ElemType>& phoneBoundary, CPUMatrix<ElemType> & totalScore, const std::vector<size_t>& uttToChanInd, const std::vector<size_t> & uttBeginFrame, const std::vector<size_t> & uttFrameNum, const std::vector<size_t> & uttPhoneNum, const size_t numParallelSequences, const size_t maxFrameNum, const size_t blankTokenId, const int delayConstraint, const bool isColWise) { // Column wise representation of sequences in input matrices (each column is one sequence/utterance) if (isColWise) { // Total number of phones size_t totalPhoneNum = prob.GetNumRows(); size_t uttNum = uttFrameNum.size(); // Max number of phones in utterances in this minibatch size_t maxPhoneNum = phoneSeq.GetNumRows(); for (size_t t = 0; t < maxFrameNum; t++) { _assignAlphaScore(prob.Data(), alpha.Data(), phoneSeq.Data(), phoneBoundary.Data(), uttToChanInd, uttFrameNum, uttBeginFrame, uttPhoneNum, numParallelSequences, uttNum, t, maxPhoneNum, totalPhoneNum, blankTokenId, delayConstraint); } for (LONG64 t = maxFrameNum - 1; t >= 0; t--) { _assignBetaScore(prob.Data(), beta.Data(), phoneSeq.Data(), phoneBoundary.Data(), uttToChanInd, uttFrameNum, uttBeginFrame, uttPhoneNum, numParallelSequences, uttNum, t, maxPhoneNum, 
totalPhoneNum, blankTokenId, delayConstraint); } std::vector<ElemType> scores(uttNum); _assignTotalScore(beta.Data(), scores, uttNum, uttToChanInd, uttBeginFrame, numParallelSequences, maxPhoneNum); _assignCTCScore(Data(), prob.Data(), alpha.Data(), beta.Data(), phoneSeq.Data(), uttNum, uttToChanInd, uttBeginFrame, uttPhoneNum, uttFrameNum, numParallelSequences, maxPhoneNum, totalPhoneNum); totalScore(0, 0) = 0.0; for (size_t utt = 0; utt < uttNum; utt++) { totalScore(0,0) -= scores[utt]; } return *this; } else { LogicError("Only ColWise minibatch layout is supported."); } return *this; } /// the kernel function for RCRF backward computation template <class ElemType> void CPUMatrix<ElemType>::_rcrfBackwardCompute(size_t t, size_t k, const CPUMatrix<ElemType>& alpha, CPUMatrix<ElemType>& beta, const CPUMatrix<ElemType>& pair_scores) { size_t iNumLab = alpha.GetNumRows(); size_t iNumPos = alpha.GetNumCols(); ElemType fSum; ElemType fTmp = (ElemType) LZERO; if (t == iNumPos - 1) { fSum = (ElemType) LZERO; for (int j = 0; j < iNumLab; j++) { fSum = (ElemType) LogAddD(fSum, alpha(j, t)); } fTmp = alpha(k, t) - fSum; beta(k, t) = fTmp; } else { for (int j = 0; j < iNumLab; j++) { fSum = (ElemType) LZERO; for (int m = 0; m < iNumLab; m++) { fSum = (ElemType) LogAddD(fSum, alpha(m, t) + pair_scores(j, m)); } fTmp = (ElemType) LogAddD(fTmp, beta(j, t + 1) + alpha(k, t) + pair_scores(j, k) - fSum); } beta(k, t) = fTmp; } } template <class ElemType> void CPUMatrix<ElemType>::RCRFTransGrdCompute(const CPUMatrix<ElemType>& lbls, const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& beta, const CPUMatrix<ElemType>& pair_scores, CPUMatrix<ElemType>& grd) { int iNumPos = (int) alpha.GetNumCols(); int iNumLab = (int) alpha.GetNumRows(); int firstLbl = -1; for (int ik = 0; ik < lbls.GetNumRows(); ik++) if (lbls(ik, 0) != 0) { firstLbl = ik; break; } for (size_t tPos = 0; tPos < iNumPos; tPos++) { CPUMatrix<ElemType> b = beta.ColumnSlice(tPos, 1); CPUMatrix<ElemType> a; if (tPos > 0) a = alpha.ColumnSlice(tPos - 1, 1); #pragma omp parallel for for (int i = 0; i < iNumLab; i++) { _rcrfTransGrdCompute(i, lbls, alpha, beta, pair_scores, grd, tPos); } // transition score int i = -1; if (tPos == 0) i = firstLbl; else { for (int ik = 0; ik < lbls.GetNumRows(); ik++) if (lbls(ik, tPos - 1) != 0) { i = ik; break; } } int j = -1; for (int ik = 0; ik < lbls.GetNumRows(); ik++) { if (lbls(ik, tPos) != 0) { j = ik; break; } } grd(j, i) -= 1.0; } }; template <class ElemType> void CPUMatrix<ElemType>::_rcrfTransGrdCompute(size_t i, const CPUMatrix<ElemType>& lbls, const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& beta, const CPUMatrix<ElemType>& pair_scores, CPUMatrix<ElemType>& grd, const size_t tPos // position ) { int iNumLab = (int) alpha.GetNumRows(); int firstLbl = -1; for (int ik = 0; ik < lbls.GetNumRows(); ik++) if (lbls(ik, 0) != 0) { firstLbl = ik; break; } CPUMatrix<ElemType> b = beta.ColumnSlice(tPos, 1); CPUMatrix<ElemType> a; if (tPos > 0) a = alpha.ColumnSlice(tPos - 1, 1); { ElemType fTmp = (ElemType) LZERO; for (int j = 0; j < iNumLab; j++) { if (tPos == 0) { if (i == firstLbl) { fTmp = 0; } else { fTmp = (ElemType) LZERO; } } else { fTmp = a(i, 0); } fTmp += pair_scores(j, i); ElemType fSum = (ElemType) LZERO; for (int k = 0; k < iNumLab; k++) { ElemType fTmp2; if (tPos == 0) { if (k == firstLbl) { fTmp2 = 0; } else { fTmp2 = (ElemType) LZERO; } } else { fTmp2 = a(k, 0); } fSum = (ElemType) LogAddD(fSum, fTmp2 + pair_scores(j, k)); } fTmp -= fSum; fTmp += b(j, 0); grd(j, i) += 
exp(fTmp); } } }; template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::DropFrame(const CPUMatrix<ElemType>& label, const CPUMatrix<ElemType>& gamma, const ElemType& threshhold) { auto& us = *this; if (us.GetNumCols() != gamma.GetNumCols() || us.GetNumRows() != gamma.GetNumRows()) LogicError("DropFrame: target matrix is not in the same size as gamma matrix."); #pragma omp parallel for foreach_column (j, label) { bool dropframe = false; foreach_row (i, label) { if (fabs(label(i, j) - 1.0f) < 0.1) { if (gamma(i, j) < threshhold) dropframe = true; break; } } if (dropframe) { foreach_row (i, label) { us(i, j) = 0.0f; } } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSequenceError(const ElemType hsmoothingWeight, const CPUMatrix<ElemType>& label, const CPUMatrix<ElemType>& dnnoutput, const CPUMatrix<ElemType>& gamma, ElemType alpha) { auto& us = *this; foreach_coord (i, j, us) us(i, j) += alpha * (label(i, j) - (1 - hsmoothingWeight) * dnnoutput(i, j) - hsmoothingWeight * gamma(i, j)); return *this; } // note: this function does not depend on the <ElemType> parameter template <class ElemType> int CPUMatrix<ElemType>::SetNumThreads(int numThreads) { if (numThreads == 0) // use default return numThreads; int mthreads = (int) std::thread::hardware_concurrency(); if (numThreads <= 0) numThreads = std::max(1, mthreads + numThreads); if (numThreads > mthreads) numThreads = mthreads; #ifdef _OPENMP omp_set_num_threads(numThreads); numThreads = omp_get_max_threads(); #ifdef USE_MKL mkl_set_num_threads(numThreads); #elif defined(USE_OPENBLAS) openblas_set_num_threads(numThreads); #endif #endif return numThreads; } template <class ElemType> int CPUMatrix<ElemType>::GetMaxNumThreads() { int numThreads = (int)std::thread::hardware_concurrency(); #ifdef _OPENMP numThreads = omp_get_max_threads(); #endif return numThreads; } // To ensure Intel MKL calls return the same results on all Intel or Intel compatible CPUs, // the function sets CBWR compatible mode. template <class ElemType> void CPUMatrix<ElemType>::SetCompatibleMode() { // mkl_cbwr_set not supported in MKLML yet // Explanation on numeric diff: https://software.intel.com/en-us/articles/introduction-to-the-conditional-numerical-reproducibility-cnr // #ifdef USE_MKL // if (mkl_cbwr_set(MKL_CBWR_COMPATIBLE) != MKL_CBWR_SUCCESS) // RuntimeError("Could not set MKL compatible mode."); // #endif } template <class ElemType> void CPUMatrix<ElemType>::SetOptimizationFlags(int flags) { m_optimizationFlags = flags; } template <class ElemType> int CPUMatrix<ElemType>::GetOptimizationFlags() { return m_optimizationFlags; } // ----------------------------------------------------------------------- // entry points from Matrix.cpp; calls into CPUMatrixTensorOpImpl // ----------------------------------------------------------------------- // perform unary operation 'op' on a giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides // This maps 'op' to a lambda.
template <class ElemType> void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 2>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides) { CPUMatrixTensorOpImpl<ElemType>(beta, a, *this, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides); } // perform binary operation 'op' on a and b giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides // This maps 'op' to a lambda. template <class ElemType> void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 3>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides) { CPUMatrixTensorOpImpl<ElemType>(beta, a, b, *this, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides); } // perform ternary operation 'op' on a, b, and c giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides // This maps 'op' to a lambda. template <class ElemType> void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& c, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 4>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 4>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 4>& reducingStrides) { CPUMatrixTensorOpImpl<ElemType>(beta, a, b, c, *this, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides); } template <class ElemType> int CPUMatrix<ElemType>::Argmin() const { int minArg = -1; ElemType minValue = std::numeric_limits<ElemType>::max(); #pragma omp parallel { int localMinArg = -1; ElemType localMinValue = std::numeric_limits<ElemType>::max(); #pragma omp for for (int index = 0; index < (int)GetNumElements(); ++index) { if (localMinValue > Data()[index]) { localMinArg = index; localMinValue = Data()[index]; } // If we have more than one min value, select the one with lower index. else if ((localMinValue == Data()[index]) && (localMinArg > index)) { localMinArg = index; } } #pragma omp critical { if (minValue > localMinValue) { minArg = localMinArg; minValue = localMinValue; } // If we have more than one min value, select the one with lower index. else if ((minValue == localMinValue) && (minArg > localMinArg)) { minArg = localMinArg; } } } return minArg; } template <class ElemType> int CPUMatrix<ElemType>::Argmax() const { int maxArg = -1; ElemType maxValue = std::numeric_limits<ElemType>::lowest(); #pragma omp parallel { int localMaxArg = -1; ElemType localMaxValue = std::numeric_limits<ElemType>::lowest(); #pragma omp for for (int index = 0; index < (int)GetNumElements(); ++index) { if (localMaxValue < Data()[index]) { localMaxArg = index; localMaxValue = Data()[index]; } // If we have more than one max value, select the one with lower index.
else if ((localMaxValue == Data()[index]) && (localMaxArg > index)) { localMaxArg = index; } } #pragma omp critical { if (maxValue < localMaxValue) { maxArg = localMaxArg; maxValue = localMaxValue; } // If we have more than one max value, select the one with lower index. else if ((maxValue == localMaxValue) && (maxArg > localMaxArg)) { maxArg = localMaxArg; } } } return maxArg; } template <class ElemType> int CPUMatrix<ElemType>::ArgOp(ElementWiseOperator reductionOp) const { switch (reductionOp) { case ElementWiseOperator::opArgmin: return Argmin(); break; case ElementWiseOperator::opArgmax: return Argmax(); break; } InvalidArgument("ArgOp: Arg reduction operations other than opArgmax and opArgmin are not implemented."); return -1; } template <class ElemType> void CPUMatrix<ElemType>::TensorArgOp(const CPUMatrix<ElemType>& a, ElementWiseOperator reductionOp, const array<size_t, 2>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides) { CPUMatrixTensorArgOpImpl<ElemType>(a, *this, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides); } template <class ElemType> void CPUMatrix<ElemType>::ScatterValues(ElemType* indices, ElemType* value, ElemType* data, ElemType alpha, size_t num_indices, size_t rows, size_t cols, size_t indices_step/*=1*/) { ScatterValues(indices, value, data, alpha, num_indices, rows, cols, /*mask*/nullptr, /*numElemsPerMaskEntry*/0, indices_step); } template <class ElemType> void CPUMatrix<ElemType>::ScatterValues(ElemType* indices, ElemType* value, ElemType* data, ElemType alpha, size_t num_indices, size_t rows, size_t cols, char* mask, size_t numElemsPerMaskEntry, size_t indices_step/*=1*/) { if (!indices || !value || !data) LogicError("ScatterValues: input data is null."); if (mask && (numElemsPerMaskEntry == 0)) RuntimeError("ScatterValues: numElemsPerMaskEntry must not be 0 when a mask is provided."); #pragma omp parallel { int ithread = omp_get_thread_num(); int nthread = omp_get_num_threads(); for (auto i = 0; i < num_indices; i++) { auto col_r = indices[i * indices_step]; if (std::isnan(col_r) || col_r < 0) continue; auto col = (size_t)col_r; // ignore the elements that are not partitioned into this thread if (col % nthread != ithread) continue; // skip if the corresponding mask entry is invalid if (mask && mask[i * indices_step / numElemsPerMaskEntry] == 0) continue; if (col >= cols) InvalidArgument("ScatterValues: Indices map out of bounds. 
%ld >= %ld", (long int)col, (long int)cols); auto index = col * rows; auto offset = i * rows; for (auto j = 0; j < rows; j++) data[index + j] = data[index + j] + alpha * value[offset + j]; } } } // We use Matrix<char> as the backing store for QuantizedMatrix // Let's explicitly instantiate the methods we need for that purpose template CPUMatrix<char>::CPUMatrix(const size_t numRows, const size_t numCols); template CPUMatrix<char>::CPUMatrix(const size_t numRows, const size_t numCols, char* pArray, const size_t matrixFlags); template CPUMatrix<char>::CPUMatrix(); template CPUMatrix<char>::CPUMatrix(CPUMatrix<char> const&); template CPUMatrix<char>::CPUMatrix(CPUMatrix<char>&&); template size_t CPUMatrix<char>::LocateElement(size_t, size_t) const; template CPUMatrix<char> CPUMatrix<char>::ColumnSlice(size_t startColumn, size_t numCols) const; template CPUMatrix<char>& CPUMatrix<char>::operator=(CPUMatrix<char>&&); template void CPUMatrix<char>::SetValue(const char); template void CPUMatrix<char>::SetValue(const size_t numRows, const size_t numCols, char* pArray, size_t matrixFlags); template void CPUMatrix<char>::SetValue(CPUMatrix<char> const&); template bool CPUMatrix<char>::IsEqualTo(const CPUMatrix<char>& a, const char threshold) const; //template void CPUMatrix<char>::SetValue(GPUMatrix<char> const&); //template void CPUMatrix<char>::SetValue(CPUSparseMatrix<char> const&); //template void CPUMatrix<char>::SetValue(GPUSparseMatrix<char> const&); template void CPUMatrix<char>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly); template void CPUMatrix<char>::Resize(const size_t numRows, const size_t numCols, bool growOnly); template char* CPUMatrix<char>::CopyToArray(void) const; template void CPUMatrix<char>::CopySection(size_t numRows, size_t numCols, char* dst, size_t colStride) const; template void CPUMatrix<char>::Reshape(const size_t, const size_t); template void CPUMatrix<char>::SetUniformRandomValue(const char low, const char high, unsigned long seed); template void CPUMatrix<char>::SetUniformRandomValue(RNGHandle& rngHandle, const char low, const char high); template void CPUMatrix<char>::SetGaussianRandomValue(const char mean, const char sigma, unsigned long seed); // Support <short> template CPUMatrix<short>::CPUMatrix(const size_t numRows, const size_t numCols); template CPUMatrix<short>::CPUMatrix(const size_t numRows, const size_t numCols, short* pArray, const size_t matrixFlags); template CPUMatrix<short>::CPUMatrix(); template CPUMatrix<short>::CPUMatrix(CPUMatrix<short> const&); template CPUMatrix<short>::CPUMatrix(CPUMatrix<short>&&); template size_t CPUMatrix<short>::LocateElement(size_t, size_t) const; template CPUMatrix<short> CPUMatrix<short>::ColumnSlice(size_t startColumn, size_t numCols) const; template CPUMatrix<short>& CPUMatrix<short>::operator=(CPUMatrix<short>&&); template void CPUMatrix<short>::SetValue(const short); template void CPUMatrix<short>::SetValue(const size_t numRows, const size_t numCols, short* pArray, size_t matrixFlags); template void CPUMatrix<short>::SetValue(CPUMatrix<short> const&); template bool CPUMatrix<short>::IsEqualTo(const CPUMatrix<short>& a, const short threshold) const; //template void CPUMatrix<short>::SetValue(GPUMatrix<short> const&); //template void CPUMatrix<short>::SetValue(CPUSparseMatrix<short> const&); //template void CPUMatrix<short>::SetValue(GPUSparseMatrix<short> const&); template void CPUMatrix<short>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly); template void 
CPUMatrix<short>::Resize(const size_t numRows, const size_t numCols, bool growOnly); template short* CPUMatrix<short>::CopyToArray(void) const; template void CPUMatrix<short>::CopySection(size_t numRows, size_t numCols, short* dst, size_t colStride) const; template void CPUMatrix<short>::Reshape(const size_t, const size_t); template void CPUMatrix<short>::SetUniformRandomValue(const short low, const short high, unsigned long seed); template void CPUMatrix<short>::SetUniformRandomValue(RNGHandle& rngHandle, const short low, const short high); template void CPUMatrix<short>::SetGaussianRandomValue(const short mean, const short sigma, unsigned long seed); template CPUMatrix<int>::CPUMatrix(const size_t, const size_t, int*, const size_t); }}}
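// ---------------------------------------------------------------------------
// Illustrative usage sketch (added for exposition; not part of the original
// file). It shows how the factory methods and the arg-reductions defined above
// compose. The header name and the Microsoft::MSR::CNTK namespace are
// assumptions inferred from the surrounding code; the #if 0 guard keeps this
// dump inert.
// ---------------------------------------------------------------------------
#if 0 // exposition only
#include <cassert>
#include "CPUMatrix.h"
using namespace Microsoft::MSR::CNTK;

static void CpuMatrixSmokeTest()
{
    // 3x4 matrix with entries drawn uniformly from [-1, 1].
    auto m = CPUMatrix<float>::RandomUniform(3, 4, -1.0f, 1.0f, /*seed=*/42);
    m(1, 2) = 5.0f; // plant a known maximum
    // Storage is column-major, so the linear index of (row=1, col=2) is col*rows + row.
    assert(m.Argmax() == 2 * 3 + 1);
    assert(CPUMatrix<float>::HasElement(m, 5.0f));
}
#endif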
FrankWolfeAssignment.h
#pragma once #include <algorithm> #include <cassert> #include <fstream> #include <iostream> #include <vector> #include "Algorithms/TrafficAssignment/ObjectiveFunctions/SystemOptimum.h" #include "Algorithms/TrafficAssignment/ObjectiveFunctions/UserEquilibrium.h" #include "Algorithms/TrafficAssignment/AllOrNothingAssignment.h" #include "Algorithms/TrafficAssignment/UnivariateMinimization.h" #include "DataStructures/Graph/Attributes/TraversalCostAttribute.h" #include "DataStructures/Graph/Graph.h" #include "DataStructures/Utilities/OriginDestination.h" #include "Stats/TrafficAssignment/FrankWolfeAssignmentStats.h" #include "Tools/Math.h" #include "Tools/Timer.h" // A traffic assignment procedure based on the Frank-Wolfe method (also known as convex combinations // method). At its heart are iterative shortest-paths computations. The algorithm can be parameterized to // compute the user equilibrium or system optimum, and to use different traversal cost functions and // shortest-path algorithms. template < template <typename> class ObjFunctionT, template <typename> class TraversalCostFunctionT, template <typename, typename> class ShortestPathAlgoT, typename GraphT> class FrankWolfeAssignment { public: using Graph = GraphT; // Constructs an assignment procedure based on the Frank-Wolfe method. FrankWolfeAssignment(Graph& graph, const std::vector<ClusteredOriginDestination>& odPairs, const bool verbose = true) : aonAssignment(graph, odPairs, verbose), graph(graph), trafficFlows(graph.numEdges()), pointOfSight(graph.numEdges()), traversalCostFunction(graph), objFunction(traversalCostFunction), verbose(verbose) { assert(graph.isDefrag()); stats.totalRunningTime = aonAssignment.stats.totalRoutingTime; } // Assigns all OD flows onto the graph. void run( std::ofstream& flowFile, std::ofstream& distFile, std::ofstream& statFile, const int numIterations = 0, const bool outputIntermediates = false) { assert(numIterations >= 0); Timer timer; auto prevSkipInterval = 1u; determineInitialSolution(prevSkipInterval); stats.lastRunningTime = timer.elapsed(); stats.lastLineSearchTime = stats.lastRunningTime - aonAssignment.stats.lastRoutingTime; stats.finishIteration(); if (flowFile.is_open()) FORALL_EDGES(graph, e) { const auto vol = trafficFlows[e]; const auto sat = vol / graph.capacity(e); const auto time = graph.travelTime(e); const auto wayId = graph.wayId(e); const auto bprResult = traversalCostFunction(e, trafficFlows[e]); flowFile << aonAssignment.stats.numIterations << ',' << vol << ',' << sat << ',' << time << ',' << wayId << ',' << bprResult << '\n'; } if (distFile.is_open()) for (const auto dist : aonAssignment.stats.lastDistances) distFile << aonAssignment.stats.numIterations << ',' << dist << '\n'; if (statFile.is_open()) { statFile << aonAssignment.stats.numIterations << ","; statFile << aonAssignment.stats.lastCustomizationTime << ","; statFile << aonAssignment.stats.lastQueryTime << ","; statFile << stats.lastLineSearchTime << "," << stats.lastRunningTime << ",nan,nan,"; statFile << aonAssignment.stats.lastChecksum << std::endl; } if (verbose) { std::cout << " Line search: " << stats.lastLineSearchTime << "ms"; std::cout << " Total: " << stats.lastRunningTime << "ms\n"; std::cout << std::flush; } // The target value for stats.prevRelGap was reduced from 1e-4 to 1e-2: // for assignments with many OD pairs (>500k), reaching 1e-4 sometimes requires around 50-500 more iterations, // adding 1 to 30 more minutes of execution. while ((numIterations != 0 || stats.prevRelGap > 1e-2 || prevSkipInterval > 1) &&
(numIterations == 0 || aonAssignment.stats.numIterations < numIterations)) { stats.startIteration(); Timer timer; const unsigned int skip = std::min(std::max(stats.prevRelGap / 1e-2, 1.0), double{-1u}); const auto skipInterval = std::min(roundDownToPowerOfTwo(skip), prevSkipInterval); updateTraversalCosts(); findDescentDirection(skipInterval); const auto tau = findMoveSize(); moveAlongDescentDirection(tau); const auto prevMinPathCost = aonAssignment.stats.prevMinPathCost * prevSkipInterval; assert(prevMinPathCost <= stats.prevTotalPathCost); prevSkipInterval = skipInterval; stats.lastRunningTime = timer.elapsed(); stats.lastLineSearchTime = stats.lastRunningTime - aonAssignment.stats.lastRoutingTime; stats.prevRelGap = 1 - prevMinPathCost / stats.prevTotalPathCost; stats.finishIteration(); if (flowFile.is_open() && outputIntermediates) FORALL_EDGES(graph, e) { const auto vol = trafficFlows[e]; const auto sat = vol / graph.capacity(e); const auto time = graph.travelTime(e); const auto wayId = graph.wayId(e); const auto bprResult = traversalCostFunction(e, trafficFlows[e]); flowFile << aonAssignment.stats.numIterations << ',' << vol << ',' << sat << ',' << time << ',' << wayId << ',' << bprResult << '\n'; } if (distFile.is_open() && outputIntermediates) for (const auto dist : aonAssignment.stats.lastDistances) distFile << aonAssignment.stats.numIterations << ',' << dist << '\n'; if (statFile.is_open()) { statFile << aonAssignment.stats.numIterations << ","; statFile << aonAssignment.stats.lastCustomizationTime << ","; statFile << aonAssignment.stats.lastQueryTime << ","; statFile << stats.lastLineSearchTime << "," << stats.lastRunningTime << ","; statFile << stats.prevTotalTraversalCost << "," << stats.prevRelGap << ","; statFile << aonAssignment.stats.lastChecksum << std::endl; } if (verbose) { std::cout << " Line search: " << stats.lastLineSearchTime << "ms"; std::cout << " Total: " << stats.lastRunningTime << "ms\n"; std::cout << " Prev total traversal cost: " << stats.prevTotalTraversalCost << "\n"; std::cout << " Prev relative gap: " << stats.prevRelGap << "\n"; std::cout << std::flush; } } if (flowFile.is_open() && !outputIntermediates) FORALL_EDGES(graph, e) { const auto vol = trafficFlows[e]; const auto sat = vol / graph.capacity(e); const auto time = graph.travelTime(e); const auto wayId = graph.wayId(e); const auto bprResult = traversalCostFunction(e, trafficFlows[e]); flowFile << aonAssignment.stats.numIterations << ',' << vol << ',' << sat << ',' << time << ',' << wayId << ',' << bprResult << '\n'; } if (distFile.is_open() && !outputIntermediates) for (const auto dist : aonAssignment.stats.lastDistances) distFile << aonAssignment.stats.numIterations << ',' << dist << '\n'; if (verbose) { std::cout << "Total:\n"; std::cout << " Checksum: " << aonAssignment.stats.totalChecksum; std::cout << " Prepro: " << aonAssignment.stats.totalPreprocessingTime << "ms"; std::cout << " Custom: " << aonAssignment.stats.totalCustomizationTime << "ms"; std::cout << " Queries: " << aonAssignment.stats.totalQueryTime << "ms"; std::cout << " Routing: " << aonAssignment.stats.totalRoutingTime << "ms\n"; std::cout << " Line search: " << stats.totalLineSearchTime << "ms"; std::cout << " Total: " << stats.totalRunningTime << "ms\n"; std::cout << std::flush; } } // Returns the traffic flow on edge e. double trafficFlowOn(const int e) const { assert(e >= 0); assert(e < graph.numEdges()); return trafficFlows[e]; } FrankWolfeAssignmentStats stats; // Statistics about the execution. 
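  // Note (added for exposition): the stopping criterion in run() uses the relative gap
  //   prevRelGap = 1 - prevMinPathCost / prevTotalPathCost,
  // where prevMinPathCost is the total cost of routing all demand along the latest
  // shortest paths (the all-or-nothing lower bound) and prevTotalPathCost is the cost
  // incurred by the current flows. The gap is 0 exactly at an equilibrium and shrinks
  // as the assignment converges.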
private: // Determines the initial solution. void determineInitialSolution(const int skipInterval) { #pragma omp parallel for schedule(static) FORALL_EDGES(graph, e) graph.traversalCost(e) = objFunction.derivative(e, 0); aonAssignment.run(skipInterval); #pragma omp parallel for schedule(static) FORALL_EDGES(graph, e) trafficFlows[e] = aonAssignment.trafficFlowOn(e); } // Updates traversal costs. void updateTraversalCosts() { auto totalTraversalCost = 0.0, totalPathCost = 0.0; #pragma omp parallel for reduction(+: totalTraversalCost, totalPathCost) schedule(static) FORALL_EDGES(graph, e) { graph.traversalCost(e) = objFunction.derivative(e, trafficFlows[e]); totalTraversalCost += trafficFlows[e] * traversalCostFunction(e, trafficFlows[e]); totalPathCost += trafficFlows[e] * graph.traversalCost(e); } stats.prevTotalTraversalCost = totalTraversalCost; stats.prevTotalPathCost = totalPathCost; } // Finds the descent direction. void findDescentDirection(const int skipInterval) { aonAssignment.run(skipInterval); #ifndef TA_NO_CFW if (aonAssignment.stats.numIterations == 2) { FORALL_EDGES(graph, e) pointOfSight[e] = aonAssignment.trafficFlowOn(e); return; } auto num = 0.0, den = 0.0; #pragma omp parallel for reduction(+: num, den) schedule(static) FORALL_EDGES(graph, e) { const auto residualDirection = pointOfSight[e] - trafficFlows[e]; const auto secondDerivative = objFunction.secondDerivative(e, trafficFlows[e]); const auto fwDirection = aonAssignment.trafficFlowOn(e) - trafficFlows[e]; num += residualDirection * secondDerivative * fwDirection; den += residualDirection * secondDerivative * (fwDirection - residualDirection); } const auto alpha = std::min(std::max(0.0, num / den), 1 - 1e-15); #pragma omp parallel for schedule(static) FORALL_EDGES(graph, e) pointOfSight[e] = alpha * pointOfSight[e] + (1 - alpha) * aonAssignment.trafficFlowOn(e); #endif } // Find the optimal move size. double findMoveSize() const { return bisectionMethod([this](const double tau) { auto sum = 0.0; #pragma omp parallel for reduction(+: sum) schedule(static) FORALL_EDGES(graph, e) { #ifndef TA_NO_CFW const auto direction = pointOfSight[e] - trafficFlows[e]; #else const auto direction = aonAssignment.trafficFlowOn(e) - trafficFlows[e]; #endif sum += direction * objFunction.derivative(e, trafficFlows[e] + tau * direction); } return sum; }, 0, 1); } // Moves along the descent direction. void moveAlongDescentDirection(const double tau) { #pragma omp parallel for schedule(static) FORALL_EDGES(graph, e) #ifndef TA_NO_CFW trafficFlows[e] += tau * (pointOfSight[e] - trafficFlows[e]); #else trafficFlows[e] += tau * (aonAssignment.trafficFlowOn(e) - trafficFlows[e]); #endif } using AonAssignment = AllOrNothingAssignment<ShortestPathAlgoT<Graph, TraversalCostAttribute>>; using TraversalCostFunction = TraversalCostFunctionT<Graph>; using ObjFunction = ObjFunctionT<TraversalCostFunction>; AonAssignment aonAssignment; // The all-or-nothing assignment subroutine. Graph& graph; // The network of interest. std::vector<double> trafficFlows; // The traffic flows on the edges. std::vector<double> pointOfSight; // The point defining the descent direction d = s - x TraversalCostFunction traversalCostFunction; // A functor returning the traversal cost of an edge. ObjFunction objFunction; // The objective function to be minimized (UE or SO). const bool verbose; // Should informative messages be displayed? }; // An alias template for a user-equilibrium (UE) traffic assignment. 
template < template <typename> class TraversalCostFunctionT, template <typename, typename> class ShortestPathAlgoT, typename GraphT> using UEAssignment = FrankWolfeAssignment<UserEquilibrium, TraversalCostFunctionT, ShortestPathAlgoT, GraphT>; // An alias template for a system-optimum (SO) traffic assignment. template < template <typename> class TraversalCostFunctionT, template <typename, typename> class ShortestPathAlgoT, typename GraphT> using SOAssignment = FrankWolfeAssignment<SystemOptimum, TraversalCostFunctionT, ShortestPathAlgoT, GraphT>;
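// ---------------------------------------------------------------------------
// Self-contained toy sketch (added for exposition; hypothetical, independent of
// the classes above): the Frank-Wolfe update x <- x + tau * (y - x), with y an
// all-or-nothing assignment and tau found by bisection, on a two-edge network
// with BPR-style costs. It mirrors the structure of run(), findMoveSize() and
// moveAlongDescentDirection() without the shortest-path machinery.
// ---------------------------------------------------------------------------
#if 0 // exposition only
#include <cmath>
#include <iostream>

int main() {
  // Two parallel edges carrying a total demand of 10 units; cost(e, x) = t0 * (1 + (x/c)^4).
  const double t0[2] = {1.0, 2.0}, cap[2] = {5.0, 8.0};
  auto cost = [&](int e, double x) { return t0[e] * (1.0 + std::pow(x / cap[e], 4.0)); };
  double x[2] = {10.0, 0.0}; // all-or-nothing initial solution
  for (int it = 0; it < 50; ++it) {
    // Descent direction: all-or-nothing assignment y onto the currently cheaper edge.
    const int best = cost(0, x[0]) <= cost(1, x[1]) ? 0 : 1;
    const double y[2] = {best == 0 ? 10.0 : 0.0, best == 1 ? 10.0 : 0.0};
    // Line search: bisection on g(tau) = sum_e (y_e - x_e) * cost(e, x_e + tau * (y_e - x_e)).
    double lo = 0.0, hi = 1.0;
    for (int k = 0; k < 40; ++k) {
      const double tau = 0.5 * (lo + hi);
      double g = 0.0;
      for (int e = 0; e < 2; ++e) g += (y[e] - x[e]) * cost(e, x[e] + tau * (y[e] - x[e]));
      (g > 0 ? hi : lo) = tau;
    }
    const double tau = 0.5 * (lo + hi);
    for (int e = 0; e < 2; ++e) x[e] += tau * (y[e] - x[e]); // move along the descent direction
  }
  std::cout << x[0] << ' ' << x[1] << '\n'; // approaches the equal-cost (user equilibrium) split
}
#endif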
ParFriends.h
#ifndef _PAR_FRIENDS_H_ #define _PAR_FRIENDS_H_ #include "mpi.h" #include <iostream> #include <cstdarg> #include "SpParMat.h" #include "SpParHelper.h" #include "MPIType.h" #include "Friends.h" #include "OptBuf.h" using namespace std; template <class IT, class NT, class DER> class SpParMat; /*************************************************************************************************/ /**************************** FRIEND FUNCTIONS FOR PARALLEL CLASSES ******************************/ /*************************************************************************************************/ /** ** Concatenate all the FullyDistVec<IT,NT> objects into a single one **/ template <typename IT, typename NT> FullyDistVec<IT,NT> Concatenate ( vector< FullyDistVec<IT,NT> > & vecs) { if(vecs.size() < 1) { SpParHelper::Print("Warning: Nothing to concatenate, returning empty "); return FullyDistVec<IT,NT>(); } else if (vecs.size() < 2) { return vecs[0]; } else { typename vector< FullyDistVec<IT,NT> >::iterator it = vecs.begin(); shared_ptr<CommGrid> commGridPtr = it->getcommgrid(); MPI_Comm World = commGridPtr->GetWorld(); IT nglen = it->TotalLength(); // new global length IT cumloclen = it->MyLocLength(); // existing cumulative local lengths ++it; for(; it != vecs.end(); ++it) { if(*(commGridPtr) != *(it->getcommgrid())) { SpParHelper::Print("Grids are not comparable for Concatenate\n"); MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); } nglen += it->TotalLength(); cumloclen += it->MyLocLength(); } FullyDistVec<IT,NT> ConCat (commGridPtr, nglen, NT()); int nprocs = commGridPtr->GetSize(); vector< vector< NT > > data(nprocs); vector< vector< IT > > inds(nprocs); IT gloffset = 0; for(it = vecs.begin(); it != vecs.end(); ++it) { IT loclen = it->LocArrSize(); for(IT i=0; i < loclen; ++i) { IT locind; IT loffset = it->LengthUntil(); int owner = ConCat.Owner(gloffset+loffset+i, locind); data[owner].push_back(it->arr[i]); inds[owner].push_back(locind); } gloffset += it->TotalLength(); } int * sendcnt = new int[nprocs]; int * sdispls = new int[nprocs]; for(int i=0; i<nprocs; ++i) sendcnt[i] = (int) data[i].size(); int * rdispls = new int[nprocs]; int * recvcnt = new int[nprocs]; MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World); // share the request counts sdispls[0] = 0; rdispls[0] = 0; for(int i=0; i<nprocs-1; ++i) { sdispls[i+1] = sdispls[i] + sendcnt[i]; rdispls[i+1] = rdispls[i] + recvcnt[i]; } IT totrecv = accumulate(recvcnt,recvcnt+nprocs,static_cast<IT>(0)); NT * senddatabuf = new NT[cumloclen]; for(int i=0; i<nprocs; ++i) { copy(data[i].begin(), data[i].end(), senddatabuf+sdispls[i]); vector<NT>().swap(data[i]); // delete data vectors } NT * recvdatabuf = new NT[totrecv]; MPI_Alltoallv(senddatabuf, sendcnt, sdispls, MPIType<NT>(), recvdatabuf, recvcnt, rdispls, MPIType<NT>(), World); // send data delete [] senddatabuf; IT * sendindsbuf = new IT[cumloclen]; for(int i=0; i<nprocs; ++i) { copy(inds[i].begin(), inds[i].end(), sendindsbuf+sdispls[i]); vector<IT>().swap(inds[i]); // delete inds vectors } IT * recvindsbuf = new IT[totrecv]; MPI_Alltoallv(sendindsbuf, sendcnt, sdispls, MPIType<IT>(), recvindsbuf, recvcnt, rdispls, MPIType<IT>(), World); // send new inds DeleteAll(sendindsbuf, sendcnt, sdispls); for(int i=0; i<nprocs; ++i) { for(int j = rdispls[i]; j < rdispls[i] + recvcnt[i]; ++j) { ConCat.arr[recvindsbuf[j]] = recvdatabuf[j]; } } DeleteAll(recvindsbuf, recvcnt, rdispls); return ConCat; } } template <typename MATRIXA, typename MATRIXB> bool CheckSpGEMMCompliance(const
MATRIXA & A, const MATRIXB & B) { if(A.getncol() != B.getnrow()) { ostringstream outs; outs << "Cannot multiply, dimensions do not match"<< endl; outs << A.getncol() << " != " << B.getnrow() << endl; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); return false; } if((void*) &A == (void*) &B) { ostringstream outs; outs << "Cannot multiply, inputs alias (make a temporary copy of one of them first)"<< endl; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, MATRIXALIAS); return false; } return true; } /** * Parallel C = A*B routine that uses a double buffered broadcasting scheme * @pre { Input matrices, A and B, should not alias } * Most memory efficient version available. Total stages: 2*sqrt(p) * Memory requirement during first sqrt(p) stages: <= (3/2)*(nnz(A)+nnz(B))+(1/2)*nnz(C) * Memory requirement during second sqrt(p) stages: <= nnz(A)+nnz(B)+nnz(C) * Final memory requirement: nnz(C) if clearA and clearB are true **/ template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU,NUO,UDERO> Mult_AnXBn_DoubleBuff (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false ) { if(!CheckSpGEMMCompliance(A,B) ) { return SpParMat< IU,NUO,UDERO >(); } int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy); IU C_m = A.spSeq->getnrow(); IU C_n = B.spSeq->getncol(); UDERA * A1seq = new UDERA(); UDERA * A2seq = new UDERA(); UDERB * B1seq = new UDERB(); UDERB * B2seq = new UDERB(); (A.spSeq)->Split( *A1seq, *A2seq); const_cast< UDERB* >(B.spSeq)->Transpose(); (B.spSeq)->Split( *B1seq, *B2seq); MPI_Barrier(GridC->GetWorld()); IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages); IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages); SpParHelper::GetSetSizes( *A1seq, ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( *B1seq, BRecvSizes, (B.commGrid)->GetColWorld()); // Remotely fetched matrices are stored as pointers UDERA * ARecv; UDERB * BRecv; vector< SpTuples<IU,NUO> *> tomerge; int Aself = (A.commGrid)->GetRankInProcRow(); int Bself = (B.commGrid)->GetRankInProcCol(); for(int i = 0; i < stages; ++i) { vector<IU> ess; if(i == Aself) { ARecv = A1seq; // shallow-copy } else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDERA(); // first, create the object } SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements ess.clear(); if(i == Bself) { BRecv = B1seq; // shallow-copy } else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) { ess[j] = BRecvSizes[j][i]; } BRecv = new UDERB(); } SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements SpTuples<IU,NUO> * C_cont = MultiplyReturnTuples<SR, NUO> (*ARecv, *BRecv, // parameters themselves false, true, // transpose information (B is transposed) i != Aself, // 'delete A' condition i != Bself); // 'delete B' condition if(!C_cont->isZero()) tomerge.push_back(C_cont); else delete C_cont; } if(clearA) delete A1seq; if(clearB) delete B1seq; // Set the new dimensions SpParHelper::GetSetSizes( *A2seq, ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( *B2seq, BRecvSizes, (B.commGrid)->GetColWorld()); // Start
the second round for(int i = 0; i < stages; ++i) { vector<IU> ess; if(i == Aself) { ARecv = A2seq; // shallow-copy } else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDERA(); // first, create the object } SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements ess.clear(); if(i == Bself) { BRecv = B2seq; // shallow-copy } else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) { ess[j] = BRecvSizes[j][i]; } BRecv = new UDERB(); } SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements SpTuples<IU,NUO> * C_cont = MultiplyReturnTuples<SR, NUO> (*ARecv, *BRecv, // parameters themselves false, true, // transpose information (B is transposed) i != Aself, // 'delete A' condition i != Bself); // 'delete B' condition if(!C_cont->isZero()) tomerge.push_back(C_cont); else delete C_cont; } SpHelper::deallocate2D(ARecvSizes, UDERA::esscount); SpHelper::deallocate2D(BRecvSizes, UDERB::esscount); if(clearA) { delete A2seq; delete A.spSeq; A.spSeq = NULL; } else { (A.spSeq)->Merge(*A1seq, *A2seq); delete A1seq; delete A2seq; } if(clearB) { delete B2seq; delete B.spSeq; B.spSeq = NULL; } else { (B.spSeq)->Merge(*B1seq, *B2seq); delete B1seq; delete B2seq; const_cast< UDERB* >(B.spSeq)->Transpose(); // transpose back to original } UDERO * C = new UDERO(MergeAll<SR>(tomerge, C_m, C_n,true), false); // First get the result in SpTuples, then convert to UDER return SpParMat<IU,NUO,UDERO> (C, GridC); // return the result object } /** * Parallel C = A*B routine that uses only MPI-1 features * Relies on simple blocking broadcast * @pre { Input matrices, A and B, should not alias } **/ template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU, NUO, UDERO> Mult_AnXBn_Synch (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false ) { if(!CheckSpGEMMCompliance(A,B) ) { return SpParMat< IU,NUO,UDERO >(); } int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy); IU C_m = A.spSeq->getnrow(); IU C_n = B.spSeq->getncol(); const_cast< UDERB* >(B.spSeq)->Transpose(); MPI_Barrier(GridC->GetWorld()); IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages); IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages); SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld()); // Remotely fetched matrices are stored as pointers UDERA * ARecv; UDERB * BRecv; vector< SpTuples<IU,NUO> *> tomerge; int Aself = (A.commGrid)->GetRankInProcRow(); int Bself = (B.commGrid)->GetRankInProcCol(); for(int i = 0; i < stages; ++i) { vector<IU> ess; if(i == Aself) { ARecv = A.spSeq; // shallow-copy } else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDERA(); // first, create the object } SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements ess.clear(); if(i == Bself) { BRecv = B.spSeq; // shallow-copy } else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) { ess[j] = BRecvSizes[j][i]; } BRecv =
new UDERB(); } SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements SpTuples<IU,NUO> * C_cont = MultiplyReturnTuples<SR, NUO> (*ARecv, *BRecv, // parameters themselves false, true, // transpose information (B is transposed) i != Aself, // 'delete A' condition i != Bself); // 'delete B' condition if(!C_cont->isZero()) tomerge.push_back(C_cont); #ifndef NDEBUG ostringstream outs; outs << i << "th SUMMA iteration"<< endl; SpParHelper::Print(outs.str()); #endif } if(clearA && A.spSeq != NULL) { delete A.spSeq; A.spSeq = NULL; } if(clearB && B.spSeq != NULL) { delete B.spSeq; B.spSeq = NULL; } SpHelper::deallocate2D(ARecvSizes, UDERA::esscount); SpHelper::deallocate2D(BRecvSizes, UDERB::esscount); UDERO * C = new UDERO(MergeAll<SR>(tomerge, C_m, C_n,true), false); // First get the result in SpTuples, then convert to UDER // the last parameter to MergeAll deletes tomerge arrays if(!clearB) const_cast< UDERB* >(B.spSeq)->Transpose(); // transpose back to original return SpParMat<IU,NUO,UDERO> (C, GridC); // return the result object } template <typename MATRIX, typename VECTOR> void CheckSpMVCompliance(const MATRIX & A, const VECTOR & x) { if(A.getncol() != x.TotalLength()) { ostringstream outs; outs << "Cannot multiply, dimensions do not match"<< endl; outs << A.getncol() << " != " << x.TotalLength() << endl; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); } if(! ( *(A.getcommgrid()) == *(x.getcommgrid())) ) { cout << "Grids are not comparable for SpMV" << endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); } } template <typename SR, typename IU, typename NUM, typename UDER> FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<int32_t, typename promote_trait<NUM,IU>::T_promote > & optbuf); template <typename SR, typename IU, typename NUM, typename UDER> FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue) { typedef typename promote_trait<NUM,IU>::T_promote T_promote; OptBuf<int32_t, T_promote > optbuf = OptBuf<int32_t, T_promote >(); return SpMV<SR>(A, x, indexisvalue, optbuf); } /** * Step 1 of the sparse SpMV algorithm * @param[in,out] trxlocnz, lenuntil,trxinds,trxnums { set or allocated } * @param[in] indexisvalue **/ template<typename IU, typename NV> void TransposeVector(MPI_Comm & World, const FullyDistSpVec<IU,NV> & x, int32_t & trxlocnz, IU & lenuntil, int32_t * & trxinds, NV * & trxnums, bool indexisvalue) { int32_t xlocnz = (int32_t) x.getlocnnz(); int32_t roffst = (int32_t) x.RowLenUntil(); // since trxinds is int32_t int32_t roffset; IU luntil = x.LengthUntil(); int diagneigh = x.commGrid->GetComplementRank(); MPI_Status status; MPI_Sendrecv(&roffst, 1, MPIType<int32_t>(), diagneigh, TROST, &roffset, 1, MPIType<int32_t>(), diagneigh, TROST, World, &status); MPI_Sendrecv(&xlocnz, 1, MPIType<int32_t>(), diagneigh, TRNNZ, &trxlocnz, 1, MPIType<int32_t>(), diagneigh, TRNNZ, World, &status); MPI_Sendrecv(&luntil, 1, MPIType<IU>(), diagneigh, TRLUT, &lenuntil, 1, MPIType<IU>(), diagneigh, TRLUT, World, &status); // ABAB: Important observation is that local indices (given by x.ind) are 32-bit addressable // Copy them to 32 bit integers and transfer that to save 50% of off-node bandwidth trxinds = new int32_t[trxlocnz]; int32_t * temp_xind = new int32_t[xlocnz]; for(int i=0; i< xlocnz; ++i) temp_xind[i] = (int32_t) x.ind[i];
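	// Note (added for exposition): the Sendrecv below swaps the 32-bit local indices with
	// the diagonal neighbor, which realizes the transpose of the distributed vector; the
	// transform() that follows then shifts the received indices from fully-distributed
	// (p-piece) numbering into the matrix's sqrt(p)-piece numbering using roffset.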
MPI_Sendrecv(temp_xind, xlocnz, MPIType<int32_t>(), diagneigh, TRI, trxinds, trxlocnz, MPIType<int32_t>(), diagneigh, TRI, World, &status); delete [] temp_xind; if(!indexisvalue) { trxnums = new NV[trxlocnz]; MPI_Sendrecv(const_cast<NV*>(SpHelper::p2a(x.num)), xlocnz, MPIType<NV>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<NV>(), diagneigh, TRX, World, &status); } transform(trxinds, trxinds+trxlocnz, trxinds, bind2nd(plus<int32_t>(), roffset)); // fullydist indexing (p pieces) -> matrix indexing (sqrt(p) pieces) } /** * Step 2 of the sparse SpMV algorithm * @param[in,out] trxinds, trxnums { deallocated } * @param[in,out] indacc, numacc { allocated } * @param[in,out] accnz { set } * @param[in] trxlocnz, lenuntil, indexisvalue **/ template<typename IU, typename NV> void AllGatherVector(MPI_Comm & ColWorld, int trxlocnz, IU lenuntil, int32_t * & trxinds, NV * & trxnums, int32_t * & indacc, NV * & numacc, int & accnz, bool indexisvalue) { int colneighs, colrank; MPI_Comm_size(ColWorld, &colneighs); MPI_Comm_rank(ColWorld, &colrank); int * colnz = new int[colneighs]; colnz[colrank] = trxlocnz; MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colnz, 1, MPI_INT, ColWorld); int * dpls = new int[colneighs](); // displacements (zero initialized pid) std::partial_sum(colnz, colnz+colneighs-1, dpls+1); accnz = std::accumulate(colnz, colnz+colneighs, 0); indacc = new int32_t[accnz]; numacc = new NV[accnz]; // ABAB: Future issues here, colnz is of type int (MPI limitation) // What if the aggregate vector size along the processor row/column is not 32-bit addressible? // This will happen when n/sqrt(p) > 2^31 // Currently we can solve a small problem (scale 32) with 4096 processor // For a medium problem (scale 35), we'll need 32K processors which gives sqrt(p) ~ 180 // 2^35 / 180 ~ 2^29 / 3 which is not an issue ! 
#ifdef TIMING double t0=MPI_Wtime(); #endif MPI_Allgatherv(trxinds, trxlocnz, MPIType<int32_t>(), indacc, colnz, dpls, MPIType<int32_t>(), ColWorld); delete [] trxinds; if(indexisvalue) { IU lenuntilcol; if(colrank == 0) lenuntilcol = lenuntil; MPI_Bcast(&lenuntilcol, 1, MPIType<IU>(), 0, ColWorld); for(int i=0; i< accnz; ++i) // fill numerical values from indices { numacc[i] = indacc[i] + lenuntilcol; } } else { MPI_Allgatherv(trxnums, trxlocnz, MPIType<NV>(), numacc, colnz, dpls, MPIType<NV>(), ColWorld); delete [] trxnums; } #ifdef TIMING double t1=MPI_Wtime(); cblas_allgathertime += (t1-t0); #endif DeleteAll(colnz,dpls); } /** * Step 3 of the sparse SpMV algorithm, with the semiring * @param[in,out] optbuf {scratch space for all-to-all (fold) communication} * @param[in,out] indacc, numacc {index and values of the input vector, deleted upon exit} * @param[in,out] sendindbuf, sendnumbuf {index and values of the output vector, created} **/ template<typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER> void LocalSpMV(const SpParMat<IU,NUM,UDER> & A, int rowneighs, OptBuf<int32_t, OVT > & optbuf, int32_t * & indacc, IVT * & numacc, int32_t * & sendindbuf, OVT * & sendnumbuf, int * & sdispls, int * sendcnt, int accnz, bool indexisvalue) { if(optbuf.totmax > 0) // graph500 optimization enabled { if(A.spSeq->getnsplit() > 0) { // optbuf.{inds/nums/dspls} and sendcnt are all pre-allocated and only filled by dcsc_gespmv_threaded dcsc_gespmv_threaded_setbuffers<SR> (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs); } else { dcsc_gespmv<SR> (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs, indexisvalue); } DeleteAll(indacc,numacc); } else { if(A.spSeq->getnsplit() > 0) { // sendindbuf/sendnumbuf/sdispls are all allocated and filled by dcsc_gespmv_threaded int totalsent = dcsc_gespmv_threaded<SR> (*(A.spSeq), indacc, numacc, accnz, sendindbuf, sendnumbuf, sdispls, rowneighs); DeleteAll(indacc, numacc); for(int i=0; i<rowneighs-1; ++i) sendcnt[i] = sdispls[i+1] - sdispls[i]; sendcnt[rowneighs-1] = totalsent - sdispls[rowneighs-1]; } else { // serial SpMV with sparse vector vector< int32_t > indy; vector< OVT > numy; dcsc_gespmv<SR>(*(A.spSeq), indacc, numacc, accnz, indy, numy); // actual multiplication DeleteAll(indacc, numacc); int32_t bufsize = indy.size(); // as compact as possible sendindbuf = new int32_t[bufsize]; sendnumbuf = new OVT[bufsize]; int32_t perproc = A.getlocalrows() / rowneighs; int k = 0; // index to buffer for(int i=0; i<rowneighs; ++i) { int32_t end_this = (i==rowneighs-1) ? 
A.getlocalrows(): (i+1)*perproc; while(k < bufsize && indy[k] < end_this) { sendindbuf[k] = indy[k] - i*perproc; sendnumbuf[k] = numy[k]; ++sendcnt[i]; ++k; } } sdispls = new int[rowneighs](); partial_sum(sendcnt, sendcnt+rowneighs-1, sdispls+1); } } } template <typename SR, typename IU, typename OVT> void MergeContributions(FullyDistSpVec<IU,OVT> & y, int * & recvcnt, int * & rdispls, int32_t * & recvindbuf, OVT * & recvnumbuf, int rowneighs) { // free memory of y, in case it was aliased vector<IU>().swap(y.ind); vector<OVT>().swap(y.num); #ifndef HEAPMERGE IU ysize = y.MyLocLength(); // my local length is only O(n/p) bool * isthere = new bool[ysize]; vector< pair<IU,OVT> > ts_pairs; fill_n(isthere, ysize, false); // We don't need to keep a "merger" because minimum will always come from the processor // with the smallest rank; so a linear sweep over the received buffer is enough for(int i=0; i<rowneighs; ++i) { for(int j=0; j< recvcnt[i]; ++j) { int32_t index = recvindbuf[rdispls[i] + j]; if(!isthere[index]) ts_pairs.push_back(make_pair(index, recvnumbuf[rdispls[i] + j])); } } DeleteAll(recvcnt, rdispls); DeleteAll(isthere, recvindbuf, recvnumbuf); sort(ts_pairs.begin(), ts_pairs.end()); int nnzy = ts_pairs.size(); y.ind.resize(nnzy); y.num.resize(nnzy); for(int i=0; i< nnzy; ++i) { y.ind[i] = ts_pairs[i].first; y.num[i] = ts_pairs[i].second; } #else // Alternative 2: Heap-merge int32_t hsize = 0; int32_t inf = numeric_limits<int32_t>::min(); int32_t sup = numeric_limits<int32_t>::max(); KNHeap< int32_t, int32_t > sHeap(sup, inf); int * processed = new int[rowneighs](); for(int i=0; i<rowneighs; ++i) { if(recvcnt[i] > 0) { // key, proc_id sHeap.insert(recvindbuf[rdispls[i]], i); ++hsize; } } int32_t key, locv; if(hsize > 0) { sHeap.deleteMin(&key, &locv); y.ind.push_back( static_cast<IU>(key)); y.num.push_back(recvnumbuf[rdispls[locv]]); // nothing is processed yet if( (++(processed[locv])) < recvcnt[locv] ) sHeap.insert(recvindbuf[rdispls[locv]+processed[locv]], locv); else --hsize; } while(hsize > 0) { sHeap.deleteMin(&key, &locv); IU deref = rdispls[locv] + processed[locv]; if(y.ind.back() == static_cast<IU>(key)) // y.ind is surely not empty { y.num.back() = SR::add(y.num.back(), recvnumbuf[deref]); // ABAB: Benchmark actually allows us to be non-deterministic in terms of parent selection // We can just skip this addition operator (if it's a max/min select) } else { y.ind.push_back(static_cast<IU>(key)); y.num.push_back(recvnumbuf[deref]); } if( (++(processed[locv])) < recvcnt[locv] ) sHeap.insert(recvindbuf[rdispls[locv]+processed[locv]], locv); else --hsize; } DeleteAll(recvcnt, rdispls,processed); DeleteAll(recvindbuf, recvnumbuf); #endif } /** * This version is the most flexible sparse matrix X sparse vector [Used in KDT] * It accepts different types for the matrix (NUM), the input vector (IVT) and the output vector (OVT) * without relying on automatic type promotion * Input (x) and output (y) vectors can be ALIASED because y is not written until the algorithm is done with x. 
*/ template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER> void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, OptBuf<int32_t, OVT > & optbuf) { CheckSpMVCompliance(A,x); optbuf.MarkEmpty(); MPI_Comm World = x.commGrid->GetWorld(); MPI_Comm ColWorld = x.commGrid->GetColWorld(); MPI_Comm RowWorld = x.commGrid->GetRowWorld(); int accnz; int32_t trxlocnz; IU lenuntil; int32_t *trxinds, *indacc; IVT *trxnums, *numacc; /* char errorstring[PAPI_MAX_STR_LEN+1]; int Events2Add [] = {PAPI_TOT_INS, PAPI_L1_TCM, PAPI_L2_TCM, PAPI_L3_TCM}; string EventNames [] = {"PAPI_TOT_INS", "PAPI_L1_TCM", "PAPI_L2_TCM", "PAPI_L3_TCM"}; int arraysize = sizeof(Events2Add) / sizeof(int); long long ptr2values[arraysize]; int errorcode = PAPI_start_counters(Events2Add, arraysize); if (errorcode != PAPI_OK) { PAPI_perror(errorcode, errorstring, PAPI_MAX_STR_LEN); fprintf(stderr, "PAPI error (%d): %s\n", errorcode, errorstring); } */ TransposeVector(World, x, trxlocnz, lenuntil, trxinds, trxnums, indexisvalue); AllGatherVector(ColWorld, trxlocnz, lenuntil, trxinds, trxnums, indacc, numacc, accnz, indexisvalue); /* errorcode = PAPI_read_counters(ptr2values, arraysize); if (errorcode != PAPI_OK) { PAPI_perror(errorcode, errorstring, PAPI_MAX_STR_LEN); fprintf(stderr, "PAPI error (%d): %s\n", errorcode, errorstring); } errorcode = PAPI_stop_counters(ptr2values, arraysize); */ int rowneighs; MPI_Comm_size(RowWorld, &rowneighs); int * sendcnt = new int[rowneighs](); int32_t * sendindbuf; OVT * sendnumbuf; int * sdispls; LocalSpMV<SR>(A, rowneighs, optbuf, indacc, numacc, sendindbuf, sendnumbuf, sdispls, sendcnt, accnz, indexisvalue); // indacc/numacc deallocated, sendindbuf/sendnumbuf/sdispls allocated int * rdispls = new int[rowneighs]; int * recvcnt = new int[rowneighs]; MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld); // share the request counts // receive displacements are exact whereas send displacements have slack rdispls[0] = 0; for(int i=0; i<rowneighs-1; ++i) { rdispls[i+1] = rdispls[i] + recvcnt[i]; } int totrecv = accumulate(recvcnt,recvcnt+rowneighs,0); int32_t * recvindbuf = new int32_t[totrecv]; OVT * recvnumbuf = new OVT[totrecv]; #ifdef TIMING double t2=MPI_Wtime(); #endif if(optbuf.totmax > 0 ) // graph500 optimization enabled { MPI_Alltoallv(optbuf.inds, sendcnt, optbuf.dspls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld); MPI_Alltoallv(optbuf.nums, sendcnt, optbuf.dspls, MPIType<OVT>(), recvnumbuf, recvcnt, rdispls, MPIType<OVT>(), RowWorld); delete [] sendcnt; } else { /* ofstream oput; x.commGrid->OpenDebugFile("Send", oput); oput << "To displacements: "; copy(sdispls, sdispls+rowneighs, ostream_iterator<int>(oput, " ")); oput << endl; oput << "To counts: "; copy(sendcnt, sendcnt+rowneighs, ostream_iterator<int>(oput, " ")); oput << endl; for(int i=0; i< rowneighs; ++i) { oput << "To neighbor: " << i << endl; copy(sendindbuf+sdispls[i], sendindbuf+sdispls[i]+sendcnt[i], ostream_iterator<int32_t>(oput, " ")); oput << endl; copy(sendnumbuf+sdispls[i], sendnumbuf+sdispls[i]+sendcnt[i], ostream_iterator<OVT>(oput, " ")); oput << endl; } oput.close(); */ MPI_Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld); MPI_Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<OVT>(), recvnumbuf, recvcnt, rdispls, MPIType<OVT>(), RowWorld); DeleteAll(sendindbuf, sendnumbuf); DeleteAll(sendcnt, 
sdispls); } #ifdef TIMING double t3=MPI_Wtime(); cblas_alltoalltime += (t3-t2); #endif // ofstream output; // A.commGrid->OpenDebugFile("Recv", output); // copy(recvindbuf, recvindbuf+totrecv, ostream_iterator<IU>(output," ")); output << endl; // output.close(); MergeContributions<SR>(y,recvcnt, rdispls, recvindbuf, recvnumbuf, rowneighs); } template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER> void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue) { OptBuf< int32_t, OVT > optbuf = OptBuf< int32_t,OVT >(); SpMV<SR>(A, x, y, indexisvalue, optbuf); } /** * Automatic type promotion is ONLY done here, all the callee functions (in Friends.h and below) are initialized with the promoted type * If indexisvalues = true, then we do not need to transfer values for x (happens for BFS iterations with boolean matrices and integer rhs vectors) **/ template <typename SR, typename IU, typename NUM, typename UDER> FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<int32_t, typename promote_trait<NUM,IU>::T_promote > & optbuf) { typedef typename promote_trait<NUM,IU>::T_promote T_promote; FullyDistSpVec<IU, T_promote> y ( x.getcommgrid(), A.getnrow()); // identity doesn't matter for sparse vectors SpMV<SR>(A, x, y, indexisvalue, optbuf); return y; } /** * Parallel dense SpMV **/ template <typename SR, typename IU, typename NUM, typename NUV, typename UDER> FullyDistVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistVec<IU,NUV> & x ) { typedef typename promote_trait<NUM,NUV>::T_promote T_promote; CheckSpMVCompliance(A, x); MPI_Comm World = x.commGrid->GetWorld(); MPI_Comm ColWorld = x.commGrid->GetColWorld(); MPI_Comm RowWorld = x.commGrid->GetRowWorld(); int xsize = (int) x.LocArrSize(); int trxsize = 0; int diagneigh = x.commGrid->GetComplementRank(); MPI_Status status; MPI_Sendrecv(&xsize, 1, MPI_INT, diagneigh, TRX, &trxsize, 1, MPI_INT, diagneigh, TRX, World, &status); NUV * trxnums = new NUV[trxsize]; MPI_Sendrecv(const_cast<NUV*>(SpHelper::p2a(x.arr)), xsize, MPIType<NUV>(), diagneigh, TRX, trxnums, trxsize, MPIType<NUV>(), diagneigh, TRX, World, &status); int colneighs, colrank; MPI_Comm_size(ColWorld, &colneighs); MPI_Comm_rank(ColWorld, &colrank); int * colsize = new int[colneighs]; colsize[colrank] = trxsize; MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colsize, 1, MPI_INT, ColWorld); int * dpls = new int[colneighs](); // displacements (zero initialized pid) std::partial_sum(colsize, colsize+colneighs-1, dpls+1); int accsize = std::accumulate(colsize, colsize+colneighs, 0); NUV * numacc = new NUV[accsize]; MPI_Allgatherv(trxnums, trxsize, MPIType<NUV>(), numacc, colsize, dpls, MPIType<NUV>(), ColWorld); delete [] trxnums; // serial SpMV with dense vector T_promote id = SR::id(); IU ysize = A.getlocalrows(); T_promote * localy = new T_promote[ysize]; fill_n(localy, ysize, id); dcsc_gespmv<SR>(*(A.spSeq), numacc, localy); DeleteAll(numacc,colsize, dpls); // FullyDistVec<IT,NT>(shared_ptr<CommGrid> grid, IT globallen, NT initval, NT id) FullyDistVec<IU, T_promote> y ( x.commGrid, A.getnrow(), id); int rowneighs; MPI_Comm_size(RowWorld, &rowneighs); IU begptr, endptr; for(int i=0; i< rowneighs; ++i) { begptr = y.RowLenUntil(i); if(i == rowneighs-1) { endptr = ysize; } else { endptr = y.RowLenUntil(i+1); } MPI_Reduce(localy+begptr, 
SpHelper::p2a(y.arr), endptr-begptr, MPIType<T_promote>(), SR::mpi_op(), i, RowWorld); } delete [] localy; return y; } /** * Old version that is no longer considered optimal * Kept for legacy purposes * To be removed when other functionals are fully tested. **/ template <typename SR, typename IU, typename NUM, typename NUV, typename UDER> FullyDistSpVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,NUV> & x) { typedef typename promote_trait<NUM,NUV>::T_promote T_promote; CheckSpMVCompliance(A, x); MPI_Comm World = x.commGrid->GetWorld(); MPI_Comm ColWorld = x.commGrid->GetColWorld(); MPI_Comm RowWorld = x.commGrid->GetRowWorld(); int xlocnz = (int) x.getlocnnz(); int trxlocnz = 0; int roffst = x.RowLenUntil(); int offset; int diagneigh = x.commGrid->GetComplementRank(); MPI_Status status; MPI_Sendrecv(&xlocnz, 1, MPI_INT, diagneigh, TRX, &trxlocnz, 1, MPI_INT, diagneigh, TRX, World, &status); MPI_Sendrecv(&roffst, 1, MPI_INT, diagneigh, TROST, &offset, 1, MPI_INT, diagneigh, TROST, World, &status); IU * trxinds = new IU[trxlocnz]; NUV * trxnums = new NUV[trxlocnz]; MPI_Sendrecv(const_cast<IU*>(SpHelper::p2a(x.ind)), xlocnz, MPIType<IU>(), diagneigh, TRX, trxinds, trxlocnz, MPIType<IU>(), diagneigh, TRX, World, &status); MPI_Sendrecv(const_cast<NUV*>(SpHelper::p2a(x.num)), xlocnz, MPIType<NUV>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<NUV>(), diagneigh, TRX, World, &status); transform(trxinds, trxinds+trxlocnz, trxinds, bind2nd(plus<IU>(), offset)); // fullydist indexing (n pieces) -> matrix indexing (sqrt(p) pieces) int colneighs, colrank; MPI_Comm_size(ColWorld, &colneighs); MPI_Comm_rank(ColWorld, &colrank); int * colnz = new int[colneighs]; colnz[colrank] = trxlocnz; MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colnz, 1, MPI_INT, ColWorld); int * dpls = new int[colneighs](); // displacements (zero initialized pid) std::partial_sum(colnz, colnz+colneighs-1, dpls+1); int accnz = std::accumulate(colnz, colnz+colneighs, 0); IU * indacc = new IU[accnz]; NUV * numacc = new NUV[accnz]; // ABAB: Future issues here, colnz is of type int (MPI limitation) // What if the aggregate vector size along the processor row/column is not 32-bit addressible? 
MPI_Allgatherv(trxinds, trxlocnz, MPIType<IU>(), indacc, colnz, dpls, MPIType<IU>(), ColWorld); MPI_Allgatherv(trxnums, trxlocnz, MPIType<NUV>(), numacc, colnz, dpls, MPIType<NUV>(), ColWorld); DeleteAll(trxinds, trxnums); // serial SpMV with sparse vector vector< int32_t > indy; vector< T_promote > numy; int32_t * tmpindacc = new int32_t[accnz]; for(int i=0; i< accnz; ++i) tmpindacc[i] = indacc[i]; delete [] indacc; dcsc_gespmv<SR>(*(A.spSeq), tmpindacc, numacc, accnz, indy, numy); // actual multiplication DeleteAll(tmpindacc, numacc); DeleteAll(colnz, dpls); FullyDistSpVec<IU, T_promote> y ( x.commGrid, A.getnrow()); // identity doesn't matter for sparse vectors IU yintlen = y.MyRowLength(); int rowneighs; MPI_Comm_size(RowWorld,&rowneighs); vector< vector<IU> > sendind(rowneighs); vector< vector<T_promote> > sendnum(rowneighs); typename vector<int32_t>::size_type outnz = indy.size(); for(typename vector<IU>::size_type i=0; i< outnz; ++i) { IU locind; int rown = y.OwnerWithinRow(yintlen, static_cast<IU>(indy[i]), locind); sendind[rown].push_back(locind); sendnum[rown].push_back(numy[i]); } IU * sendindbuf = new IU[outnz]; T_promote * sendnumbuf = new T_promote[outnz]; int * sendcnt = new int[rowneighs]; int * sdispls = new int[rowneighs]; for(int i=0; i<rowneighs; ++i) sendcnt[i] = sendind[i].size(); int * rdispls = new int[rowneighs]; int * recvcnt = new int[rowneighs]; MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld); // share the request counts sdispls[0] = 0; rdispls[0] = 0; for(int i=0; i<rowneighs-1; ++i) { sdispls[i+1] = sdispls[i] + sendcnt[i]; rdispls[i+1] = rdispls[i] + recvcnt[i]; } int totrecv = accumulate(recvcnt,recvcnt+rowneighs,0); IU * recvindbuf = new IU[totrecv]; T_promote * recvnumbuf = new T_promote[totrecv]; for(int i=0; i<rowneighs; ++i) { copy(sendind[i].begin(), sendind[i].end(), sendindbuf+sdispls[i]); vector<IU>().swap(sendind[i]); } for(int i=0; i<rowneighs; ++i) { copy(sendnum[i].begin(), sendnum[i].end(), sendnumbuf+sdispls[i]); vector<T_promote>().swap(sendnum[i]); } MPI_Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<IU>(), recvindbuf, recvcnt, rdispls, MPIType<IU>(), RowWorld); MPI_Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<T_promote>(), recvnumbuf, recvcnt, rdispls, MPIType<T_promote>(), RowWorld); DeleteAll(sendindbuf, sendnumbuf); DeleteAll(sendcnt, recvcnt, sdispls, rdispls); // define a SPA-like data structure IU ysize = y.MyLocLength(); T_promote * localy = new T_promote[ysize]; bool * isthere = new bool[ysize]; vector<IU> nzinds; // nonzero indices fill_n(isthere, ysize, false); for(int i=0; i< totrecv; ++i) { if(!isthere[recvindbuf[i]]) { localy[recvindbuf[i]] = recvnumbuf[i]; // initial assignment nzinds.push_back(recvindbuf[i]); isthere[recvindbuf[i]] = true; } else { localy[recvindbuf[i]] = SR::add(localy[recvindbuf[i]], recvnumbuf[i]); } } DeleteAll(isthere, recvindbuf, recvnumbuf); sort(nzinds.begin(), nzinds.end()); int nnzy = nzinds.size(); y.ind.resize(nnzy); y.num.resize(nnzy); for(int i=0; i< nnzy; ++i) { y.ind[i] = nzinds[i]; y.num[i] = localy[nzinds[i]]; } delete [] localy; return y; } template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU,typename promote_trait<NU1,NU2>::T_promote,typename promote_trait<UDERA,UDERB>::T_promote> EWiseMult (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B , bool exclude) { typedef typename promote_trait<NU1,NU2>::T_promote N_promote; typedef typename promote_trait<UDERA,UDERB>::T_promote DER_promote; if(*(A.commGrid) == 
*(B.commGrid)) { DER_promote * result = new DER_promote( EWiseMult(*(A.spSeq),*(B.spSeq),exclude) ); return SpParMat<IU, N_promote, DER_promote> (result, A.commGrid); } else { cout << "Grids are not comparable elementwise multiplication" << endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return SpParMat< IU,N_promote,DER_promote >(); } } template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation> SpParMat<IU,RETT,RETDER> EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, bool notB, const NU2& defaultBVal) { if(*(A.commGrid) == *(B.commGrid)) { RETDER * result = new RETDER( EWiseApply<RETT>(*(A.spSeq),*(B.spSeq), __binary_op, notB, defaultBVal) ); return SpParMat<IU, RETT, RETDER> (result, A.commGrid); } else { cout << "Grids are not comparable elementwise apply" << endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return SpParMat< IU,RETT,RETDER >(); } } template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation, typename _BinaryPredicate> SpParMat<IU,RETT,RETDER> EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect, const bool useExtendedBinOp) { if(*(A.commGrid) == *(B.commGrid)) { RETDER * result = new RETDER( EWiseApply<RETT>(*(A.spSeq),*(B.spSeq), __binary_op, do_op, allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect) ); return SpParMat<IU, RETT, RETDER> (result, A.commGrid); } else { cout << "Grids are not comparable elementwise apply" << endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return SpParMat< IU,RETT,RETDER >(); } } // plain adapter template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation, typename _BinaryPredicate> SpParMat<IU,RETT,RETDER> EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect = true) { return EWiseApply<RETT, RETDER>(A, B, EWiseExtToPlainAdapter<RETT, NU1, NU2, _BinaryOperation>(__binary_op), EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(do_op), allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect, true); } // end adapter /** * if exclude is true, then we prune all entries W[i] != zero from V * if exclude is false, then we perform a proper elementwise multiplication **/ template <typename IU, typename NU1, typename NU2> SpParVec<IU,typename promote_trait<NU1,NU2>::T_promote> EWiseMult (const SpParVec<IU,NU1> & V, const DenseParVec<IU,NU2> & W , bool exclude, NU2 zero) { typedef typename promote_trait<NU1,NU2>::T_promote T_promote; if(*(V.commGrid) == *(W.commGrid)) { SpParVec< IU, T_promote> Product(V.commGrid); Product.length = V.length; if(Product.diagonal) { if(exclude) { IU size= V.ind.size(); for(IU i=0; i<size; ++i) { if(W.arr.size() <= V.ind[i] || W.arr[V.ind[i]] == zero) // keep only those { Product.ind.push_back(V.ind[i]); Product.num.push_back(V.num[i]); } } } else { IU size= V.ind.size(); for(IU i=0; i<size; ++i) { if(W.arr.size() > V.ind[i] && W.arr[V.ind[i]] != zero) // keep only those { Product.ind.push_back(V.ind[i]); Product.num.push_back(V.num[i] * 
W.arr[V.ind[i]]); } } } } return Product; } else { cout << "Grids are not comparable elementwise multiplication" << endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return SpParVec< IU,T_promote>(); } } /** * if exclude is true, then we prune all entries W[i] != zero from V * if exclude is false, then we perform a proper elementwise multiplication **/ template <typename IU, typename NU1, typename NU2> FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote> EWiseMult (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero) { typedef typename promote_trait<NU1,NU2>::T_promote T_promote; if(*(V.commGrid) == *(W.commGrid)) { FullyDistSpVec< IU, T_promote> Product(V.commGrid); if(V.glen != W.glen) { cerr << "Vector dimensions don't match for EWiseMult\n"; MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); } else { Product.glen = V.glen; IU size= V.getlocnnz(); if(exclude) { #if defined(_OPENMP) && defined(CBLAS_EXPERIMENTAL) // not faster than serial int actual_splits = cblas_splits * 1; // 1 is the parallel slackness vector <IU> tlosizes (actual_splits, 0); vector < vector<IU> > tlinds(actual_splits); vector < vector<T_promote> > tlnums(actual_splits); IU tlsize = size / actual_splits; #pragma omp parallel for //schedule(dynamic, 1) for(IU t = 0; t < actual_splits; ++t) { IU tlbegin = t*tlsize; IU tlend = (t==actual_splits-1)? size : (t+1)*tlsize; for(IU i=tlbegin; i<tlend; ++i) { if(W.arr[V.ind[i]] == zero) // keep only those { tlinds[t].push_back(V.ind[i]); tlnums[t].push_back(V.num[i]); tlosizes[t]++; } } } vector<IU> prefix_sum(actual_splits+1,0); partial_sum(tlosizes.begin(), tlosizes.end(), prefix_sum.begin()+1); Product.ind.resize(prefix_sum[actual_splits]); Product.num.resize(prefix_sum[actual_splits]); #pragma omp parallel for //schedule(dynamic, 1) for(IU t=0; t< actual_splits; ++t) { copy(tlinds[t].begin(), tlinds[t].end(), Product.ind.begin()+prefix_sum[t]); copy(tlnums[t].begin(), tlnums[t].end(), Product.num.begin()+prefix_sum[t]); } #else for(IU i=0; i<size; ++i) { if(W.arr[V.ind[i]] == zero) // keep only those { Product.ind.push_back(V.ind[i]); Product.num.push_back(V.num[i]); } } #endif } else { for(IU i=0; i<size; ++i) { if(W.arr[V.ind[i]] != zero) // keep only those { Product.ind.push_back(V.ind[i]); Product.num.push_back(V.num[i] * W.arr[V.ind[i]]); } } } } return Product; } else { cout << "Grids are not comparable elementwise multiplication" << endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return FullyDistSpVec< IU,T_promote>(); } } /** * Performs an arbitrary binary operation _binary_op on the corresponding elements of two vectors with the result stored in a return vector ret. * The binary operatiation is only performed if the binary predicate _doOp returns true for those elements. Otherwise the binary operation is not * performed and ret does not contain an element at that position. * More formally the operation is defined as: * if (_doOp(V[i], W[i])) * ret[i] = _binary_op(V[i], W[i]) * else * // ret[i] is not set * Hence _doOp can be used to implement a filter on either of the vectors. * * The above is only defined if both V[i] and W[i] exist (i.e. an intersection). To allow a union operation (ex. when V[i] doesn't exist but W[i] does) * the allowVNulls flag is set to true and the Vzero argument is used as the missing V[i] value. 
* * The type of each element of ret must not necessarily be related to the types of V or W, so the return type must be explicitly specified as a template parameter: * FullyDistSpVec<int, double> r = EWiseApply<double>(V, W, plus, retTrue, false, 0) **/ template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate> FullyDistSpVec<IU,RET> EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp) { typedef RET T_promote; //typedef typename promote_trait<NU1,NU2>::T_promote T_promote; if(*(V.commGrid) == *(W.commGrid)) { FullyDistSpVec< IU, T_promote> Product(V.commGrid); FullyDistVec< IU, NU1> DV (V); if(V.TotalLength() != W.TotalLength()) { ostringstream outs; outs << "Vector dimensions don't match (" << V.TotalLength() << " vs " << W.TotalLength() << ") for EWiseApply (short version)\n"; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); } else { Product.glen = V.glen; IU size= W.LocArrSize(); IU spsize = V.getlocnnz(); IU sp_iter = 0; if (allowVNulls) { // iterate over the dense vector for(IU i=0; i<size; ++i) { if(sp_iter < spsize && V.ind[sp_iter] == i) { if (_doOp(V.num[sp_iter], W.arr[i], false, false)) { Product.ind.push_back(i); Product.num.push_back(_binary_op(V.num[sp_iter], W.arr[i], false, false)); } sp_iter++; } else { if (_doOp(Vzero, W.arr[i], true, false)) { Product.ind.push_back(i); Product.num.push_back(_binary_op(Vzero, W.arr[i], true, false)); } } } } else { // iterate over the sparse vector for(sp_iter = 0; sp_iter < spsize; ++sp_iter) { if (_doOp(V.num[sp_iter], W.arr[V.ind[sp_iter]], false, false)) { Product.ind.push_back(V.ind[sp_iter]); Product.num.push_back(_binary_op(V.num[sp_iter], W.arr[V.ind[sp_iter]], false, false)); } } } } return Product; } else { cout << "Grids are not comparable for EWiseApply" << endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return FullyDistSpVec< IU,T_promote>(); } } /** * Performs an arbitrary binary operation _binary_op on the corresponding elements of two vectors with the result stored in a return vector ret. * The binary operatiation is only performed if the binary predicate _doOp returns true for those elements. Otherwise the binary operation is not * performed and ret does not contain an element at that position. * More formally the operation is defined as: * if (_doOp(V[i], W[i])) * ret[i] = _binary_op(V[i], W[i]) * else * // ret[i] is not set * Hence _doOp can be used to implement a filter on either of the vectors. * * The above is only defined if both V[i] and W[i] exist (i.e. an intersection). To allow a union operation (ex. when V[i] doesn't exist but W[i] does) * the allowVNulls flag is set to true and the Vzero argument is used as the missing V[i] value. 
* !allowVNulls && !allowWNulls => intersection * !allowVNulls && allowWNulls => operate on all elements of V * allowVNulls && !allowWNulls => operate on all elements of W * allowVNulls && allowWNulls => union * * The type of each element of ret must not necessarily be related to the types of V or W, so the return type must be explicitly specified as a template parameter: * FullyDistSpVec<int, double> r = EWiseApply<double>(V, W, plus, retTrue, false, 0, false, 0) **/ template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate> FullyDistSpVec<IU,RET> EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect, const bool useExtendedBinOp) { typedef RET T_promote; // typename promote_trait<NU1,NU2>::T_promote T_promote; if(*(V.commGrid) == *(W.commGrid)) { FullyDistSpVec< IU, T_promote> Product(V.commGrid); if(V.glen != W.glen) { ostringstream outs; outs << "Vector dimensions don't match (" << V.glen << " vs " << W.glen << ") for EWiseApply (full version)\n"; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); } else { Product.glen = V.glen; typename vector< IU >::const_iterator indV = V.ind.begin(); typename vector< NU1 >::const_iterator numV = V.num.begin(); typename vector< IU >::const_iterator indW = W.ind.begin(); typename vector< NU2 >::const_iterator numW = W.num.begin(); while (indV < V.ind.end() && indW < W.ind.end()) { if (*indV == *indW) { // overlap if (allowIntersect) { if (_doOp(*numV, *numW, false, false)) { Product.ind.push_back(*indV); Product.num.push_back(_binary_op(*numV, *numW, false, false)); } } indV++; numV++; indW++; numW++; } else if (*indV < *indW) { // V has value but W does not if (allowWNulls) { if (_doOp(*numV, Wzero, false, true)) { Product.ind.push_back(*indV); Product.num.push_back(_binary_op(*numV, Wzero, false, true)); } } indV++; numV++; } else //(*indV > *indW) { // W has value but V does not if (allowVNulls) { if (_doOp(Vzero, *numW, true, false)) { Product.ind.push_back(*indW); Product.num.push_back(_binary_op(Vzero, *numW, true, false)); } } indW++; numW++; } } // clean up while (allowWNulls && indV < V.ind.end()) { if (_doOp(*numV, Wzero, false, true)) { Product.ind.push_back(*indV); Product.num.push_back(_binary_op(*numV, Wzero, false, true)); } indV++; numV++; } while (allowVNulls && indW < W.ind.end()) { if (_doOp(Vzero, *numW, true, false)) { Product.ind.push_back(*indW); Product.num.push_back(_binary_op(Vzero, *numW, true, false)); } indW++; numW++; } } return Product; } else { cout << "Grids are not comparable for EWiseApply" << endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return FullyDistSpVec< IU,T_promote>(); } } // plain callback versions template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate> FullyDistSpVec<IU,RET> EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero) { return EWiseApply<RET>(V, W, EWiseExtToPlainAdapter<RET, NU1, NU2, _BinaryOperation>(_binary_op), EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(_doOp), allowVNulls, Vzero, true); } template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate> FullyDistSpVec<IU,RET> EWiseApply (const FullyDistSpVec<IU,NU1> & 
V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect = true) { return EWiseApply<RET>(V, W, EWiseExtToPlainAdapter<RET, NU1, NU2, _BinaryOperation>(_binary_op), EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(_doOp), allowVNulls, allowWNulls, Vzero, Wzero, allowIntersect, true); } #endif
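/*
 * A minimal usage sketch for the sparse-sparse EWiseApply variant above.
 * This is an illustrative, hedged example only: the vectors V and W and the
 * functors are assumptions for the example, not fixtures from the library.
 *
 *   // Intersect V and W, summing the overlapping entries.
 *   // allowVNulls = allowWNulls = false selects pure intersection,
 *   // so Vzero/Wzero are never consulted.
 *   FullyDistSpVec<int64_t, double> R =
 *       EWiseApply<double>(V, W,
 *                          std::plus<double>(),   // _binary_op(v, w)
 *                          retTrue,               // _doOp: predicate that always returns true
 *                          false, false,          // allowVNulls, allowWNulls
 *                          0.0, 0.0);             // Vzero, Wzero (unused here)
 */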
GB_unaryop__lnot_uint64_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint64_uint32 // op(A') function: GB_tran__lnot_uint64_uint32 // C type: uint64_t // A type: uint32_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ uint32_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, aij) \ uint64_t z = (uint64_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint64_uint32 ( uint64_t *Cx, // Cx and Ax may be aliased uint32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint64_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
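/* A standalone restatement (a sketch, not part of the generated file) of what
   GB_unop__lnot_uint64_uint32 computes once the macros above are expanded;
   the array names mirror the ones in the generated code. */
#if 0
void lnot_uint64_uint32_sketch (uint64_t *Cx, const uint32_t *Ax, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint64_t z = (uint64_t) Ax [p] ;    // GB_CASTING: typecast aij
        Cx [p] = !(z != 0) ;                // GB_OP: logical NOT
    }
}
#endif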
mm.c
#include <stdio.h>
#include <stdlib.h>

/* PUC lab PC
 * Parallelized program
 *
 * Test 1
 * real 0m0.463s
 * user 0m1.800s
 * sys  0m0.004s
 *
 * Test 2
 * real 0m0.463s
 * user 0m1.803s
 * sys  0m0.000s
 *
 * Test 3
 * real 0m0.469s
 * user 0m1.803s
 * sys  0m0.004s
 *
 * Test 4
 * real 0m0.465s
 * user 0m1.808s
 * sys  0m0.000s
 *
 * Sequential program
 *
 * Test 1
 * real 0m1.448s
 * user 0m1.442s
 * sys  0m0.004s
 *
 * Test 2
 * real 0m1.448s
 * user 0m1.434s
 * sys  0m0.012s
 *
 * Test 3
 * real 0m1.466s
 * user 0m1.462s
 * sys  0m0.000s
 *
 * Test 4
 * real 0m1.459s
 * user 0m1.449s
 * sys  0m0.008s
 */

// My PC:
// Linux version 5.13.12-arch1-1 (linux@archlinux) (gcc (GCC) 11.1.0
// CPU: Intel i7-7700 (8) @ 4.200GHz
//
// Parallelized
// 101,36s user 2,77s system 432% cpu 24,073 total
//
// Not parallelized
// 63,21s user 0,03s system 99% cpu 1:03,36 total

void mm(double* a, double* b, double* c, int width) {
    #pragma omp parallel for collapse(2)
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            double sum = 0;
            // reduction(+:sum) keeps this inner loop race-free if nested
            // parallelism is ever enabled; without it, concurrent updates
            // to sum would be a data race
            #pragma omp parallel for reduction(+ : sum)
            for (int k = 0; k < width; k++) {
                double x = a[i * width + k];
                double y = b[k * width + j];
                sum += x * y;
            }
            c[i * width + j] = sum;
        }
    }
}

int main() {
    int width = 500;
    double *a = (double*) malloc(width * width * sizeof(double));
    double *b = (double*) malloc(width * width * sizeof(double));
    double *c = (double*) malloc(width * width * sizeof(double));

    #pragma omp parallel for collapse(2)
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            a[i*width+j] = i;
            b[i*width+j] = j;
            c[i*width+j] = 0;
        }
    }

    mm(a, b, c, width);

    //for(int i = 0; i < width; i++) {
    //    for(int j = 0; j < width; j++) {
    //        printf("\n c[%d][%d] = %f", i, j, c[i*width+j]);
    //    }
    //}

    free(a);
    free(b);
    free(c);
}
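/* A typical way to build and time the program above (a sketch assuming gcc;
   any compiler with OpenMP support works the same way):

     gcc -O2 -fopenmp mm.c -o mm
     time ./mm                      # parallel run
     gcc -O2 mm.c -o mm_seq         # pragmas are ignored without -fopenmp
     time ./mm_seq                  # sequential baseline

   OMP_NUM_THREADS controls the thread count, e.g. OMP_NUM_THREADS=4 ./mm */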
mttkrp_omp.c
/*
    This file is part of ParTI!.

    ParTI! is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as
    published by the Free Software Foundation, either version 3 of
    the License, or (at your option) any later version.

    ParTI! is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with ParTI!.
    If not, see <http://www.gnu.org/licenses/>.
*/

#include <ParTI.h>
#include "sptensor.h"

int sptOmpMTTKRP_3D(sptSparseTensor const * const X,
    sptMatrix * mats[],             // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk);
int sptOmpMTTKRP_3D_Reduce(sptSparseTensor const * const X,
    sptMatrix * mats[],             // mats[nmodes] as temporary space.
    sptMatrix * copy_mats[],        // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk);
int sptOmpMTTKRP_3D_Lock(sptSparseTensor const * const X,
    sptMatrix * mats[],             // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    sptMutexPool * lock_pool);

/**
 * OpenMP-parallelized matricized tensor times Khatri-Rao product (MTTKRP)
 * of a sparse tensor with a sequence of dense matrices, on a specified mode
 * @param[out] mats[nmodes]  the result of MTTKRP, a dense matrix, with size
 * ndims[mode] * R
 * @param[in]  X           the sparse tensor input X
 * @param[in]  mats        (N+1) dense matrices, with mats[nmodes] as temporary
 * @param[in]  mats_order  the order of the Khatri-Rao products
 * @param[in]  mode        the mode on which the MTTKRP is performed
 *
 * This function supports arbitrary-order sparse tensors with Khatri-Rao
 * products of dense factor matrices; the output is the updated dense matrix
 * for the given "mode". In this version, a large internally allocated scratch
 * array (nnz * stride values) is used to maximize parallelism. (To be optimized)
 */
int sptOmpMTTKRP_Init(sptSparseTensor const * const X,
    sptMatrix * mats[],             // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode)
{
    sptIndex const nmodes = X->nmodes;
    sptNnzIndex const nnz = X->nnz;
    sptIndex const * const ndims = X->ndims;
    sptValue const * const vals = X->values.data;
    sptIndex const stride = mats[0]->stride;

    sptValueVector scratch;  // Temporary array
    sptNewValueVector(&scratch, nnz * stride, nnz * stride);
    sptConstantValueVector(&scratch, 0);

    /* Check the mats.
*/ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptIndex const * const mode_ind = X->inds[mode].data; sptMatrix * const M = mats[nmodes]; sptValue * const mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(sptValue)); #pragma omp parallel for for(sptNnzIndex x=0; x<nnz; ++x) { sptIndex times_mat_index = mats_order[1]; sptMatrix * times_mat = mats[times_mat_index]; sptIndex * times_inds = X->inds[times_mat_index].data; sptIndex tmp_i = times_inds[x]; sptValue const entry = vals[x]; for(sptIndex r=0; r<R; ++r) { scratch.data[x * stride + r] = entry * times_mat->values[tmp_i * stride + r]; } for(sptIndex i=2; i<nmodes; ++i) { times_mat_index = mats_order[i]; times_mat = mats[times_mat_index]; times_inds = X->inds[times_mat_index].data; tmp_i = times_inds[x]; for(sptIndex r=0; r<R; ++r) { scratch.data[x * stride + r] *= times_mat->values[tmp_i * stride + r]; } } } for(sptNnzIndex x=0; x<nnz; ++x) { sptIndex const mode_i = mode_ind[x]; for(sptIndex r=0; r<R; ++r) { mvals[mode_i * stride + r] += scratch.data[x * stride + r]; } } sptFreeValueVector(&scratch); return 0; } int sptOmpMTTKRP(sptSparseTensor const * const X, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = X->nmodes; if(nmodes == 3) { sptAssert(sptOmpMTTKRP_3D(X, mats, mats_order, mode, tk) == 0); return 0; } sptNnzIndex const nnz = X->nnz; sptIndex const * const ndims = X->ndims; sptValue const * const vals = X->values.data; sptIndex const stride = mats[0]->stride; /* Check the mats. 
*/ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptIndex const * const mode_ind = X->inds[mode].data; sptValue * const restrict mvals = mats[nmodes]->values; memset(mvals, 0, tmpI*stride*sizeof(sptValue)); #pragma omp parallel for schedule(static) num_threads(tk) for(sptNnzIndex x=0; x<nnz; ++x) { sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); sptConstantValueVector(&scratch, 0); sptIndex times_mat_index = mats_order[1]; sptMatrix * times_mat = mats[times_mat_index]; sptIndex * times_inds = X->inds[times_mat_index].data; sptIndex tmp_i = times_inds[x]; sptValue const entry = vals[x]; #pragma omp simd for(sptIndex r=0; r<R; ++r) { scratch.data[r] = entry * times_mat->values[tmp_i * stride + r]; } for(sptIndex i=2; i<nmodes; ++i) { times_mat_index = mats_order[i]; times_mat = mats[times_mat_index]; times_inds = X->inds[times_mat_index].data; tmp_i = times_inds[x]; #pragma omp simd for(sptIndex r=0; r<R; ++r) { scratch.data[r] *= times_mat->values[tmp_i * stride + r]; } } sptIndex const mode_i = mode_ind[x]; sptValue * const restrict mvals_row = mvals + mode_i * stride; for(sptIndex r=0; r<R; ++r) { #pragma omp atomic update mvals_row[r] += scratch.data[r]; } sptFreeValueVector(&scratch); } // End loop nnzs return 0; } int sptOmpMTTKRP_3D(sptSparseTensor const * const X, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = X->nmodes; sptNnzIndex const nnz = X->nnz; sptIndex const * const ndims = X->ndims; sptValue const * const restrict vals = X->values.data; sptIndex const stride = mats[0]->stride; /* Check the mats. */ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptIndex const * const restrict mode_ind = X->inds[mode].data; sptValue * const restrict mvals = mats[nmodes]->values; memset(mvals, 0, tmpI*stride*sizeof(sptValue)); sptIndex times_mat_index_1 = mats_order[1]; sptMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex * restrict times_inds_1 = X->inds[times_mat_index_1].data; sptIndex times_mat_index_2 = mats_order[2]; sptMatrix * restrict times_mat_2 = mats[times_mat_index_2]; sptIndex * restrict times_inds_2 = X->inds[times_mat_index_2].data; #pragma omp parallel for schedule(static) num_threads(tk) for(sptNnzIndex x=0; x<nnz; ++x) { sptIndex mode_i = mode_ind[x]; sptValue * const restrict mvals_row = mvals + mode_i * stride; sptIndex tmp_i_1 = times_inds_1[x]; sptIndex tmp_i_2 = times_inds_2[x]; sptValue entry = vals[x]; for(sptIndex r=0; r<R; ++r) { #pragma omp atomic update mvals_row[r] += entry * times_mat_1->values[tmp_i_1 * stride + r] * times_mat_2->values[tmp_i_2 * stride + r]; } } return 0; } int sptOmpMTTKRP_Lock(sptSparseTensor const * const X, sptMatrix * mats[], // mats[nmodes] as temporary space. 
sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk, sptMutexPool * lock_pool) { sptIndex const nmodes = X->nmodes; if(nmodes == 3) { sptAssert(sptOmpMTTKRP_3D_Lock(X, mats, mats_order, mode, tk, lock_pool) == 0); return 0; } sptNnzIndex const nnz = X->nnz; sptIndex const * const ndims = X->ndims; sptValue const * const vals = X->values.data; sptIndex const stride = mats[0]->stride; /* Check the mats. */ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptIndex const * const mode_ind = X->inds[mode].data; sptValue * const mvals = mats[nmodes]->values; memset(mvals, 0, tmpI*stride*sizeof(sptValue)); #pragma omp parallel for schedule(static) num_threads(tk) for(sptNnzIndex x=0; x<nnz; ++x) { sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); sptConstantValueVector(&scratch, 0); sptIndex times_mat_index = mats_order[1]; sptMatrix * times_mat = mats[times_mat_index]; sptIndex * times_inds = X->inds[times_mat_index].data; sptIndex tmp_i = times_inds[x]; sptValue const entry = vals[x]; for(sptIndex r=0; r<R; ++r) { scratch.data[r] = entry * times_mat->values[tmp_i * stride + r]; } for(sptIndex i=2; i<nmodes; ++i) { times_mat_index = mats_order[i]; times_mat = mats[times_mat_index]; times_inds = X->inds[times_mat_index].data; tmp_i = times_inds[x]; for(sptIndex r=0; r<R; ++r) { scratch.data[r] *= times_mat->values[tmp_i * stride + r]; } } sptIndex const mode_i = mode_ind[x]; sptValue * const restrict mvals_row = mvals + mode_i * stride; sptMutexSetLock(lock_pool, mode_i); for(sptIndex r=0; r<R; ++r) { mvals_row[r] += scratch.data[r]; } sptMutexUnsetLock(lock_pool, mode_i); sptFreeValueVector(&scratch); } // End loop nnzs return 0; } int sptOmpMTTKRP_3D_Lock(sptSparseTensor const * const X, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk, sptMutexPool * lock_pool) { sptIndex const nmodes = X->nmodes; sptNnzIndex const nnz = X->nnz; sptIndex const * const ndims = X->ndims; sptValue const * const restrict vals = X->values.data; sptIndex const stride = mats[0]->stride; /* Check the mats. 
*/ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptIndex const * const restrict mode_ind = X->inds[mode].data; sptValue * const restrict mvals = mats[nmodes]->values; memset(mvals, 0, tmpI*stride*sizeof(sptValue)); sptIndex times_mat_index_1 = mats_order[1]; sptMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex * restrict times_inds_1 = X->inds[times_mat_index_1].data; sptIndex times_mat_index_2 = mats_order[2]; sptMatrix * restrict times_mat_2 = mats[times_mat_index_2]; sptIndex * restrict times_inds_2 = X->inds[times_mat_index_2].data; #pragma omp parallel for schedule(static) num_threads(tk) for(sptNnzIndex x=0; x<nnz; ++x) { sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); sptConstantValueVector(&scratch, 0); sptIndex mode_i = mode_ind[x]; sptValue * const restrict mvals_row = mvals + mode_i * stride; sptIndex tmp_i_1 = times_inds_1[x]; sptIndex tmp_i_2 = times_inds_2[x]; sptValue entry = vals[x]; for(sptIndex r=0; r<R; ++r) { scratch.data[r] = entry * times_mat_1->values[tmp_i_1 * stride + r] * times_mat_2->values[tmp_i_2 * stride + r]; } sptMutexSetLock(lock_pool, mode_i); for(sptIndex r=0; r<R; ++r) { mvals_row[r] += scratch.data[r]; } sptMutexUnsetLock(lock_pool, mode_i); sptFreeValueVector(&scratch); } return 0; } int sptOmpMTTKRP_Reduce(sptSparseTensor const * const X, sptMatrix * mats[], // mats[nmodes] as temporary space. sptMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = X->nmodes; if(nmodes == 3) { sptAssert(sptOmpMTTKRP_3D_Reduce(X, mats, copy_mats, mats_order, mode, tk) == 0); return 0; } sptNnzIndex const nnz = X->nnz; sptIndex const * const ndims = X->ndims; sptValue const * const vals = X->values.data; sptIndex const stride = mats[0]->stride; /* Check the mats. 
*/ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptIndex const * const mode_ind = X->inds[mode].data; sptMatrix * const M = mats[nmodes]; sptValue * const mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(sptValue)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } #pragma omp parallel for schedule(static) num_threads(tk) for(sptNnzIndex x=0; x<nnz; ++x) { int tid = omp_get_thread_num(); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); sptConstantValueVector(&scratch, 0); sptIndex times_mat_index = mats_order[1]; sptMatrix * times_mat = mats[times_mat_index]; sptIndex * times_inds = X->inds[times_mat_index].data; sptIndex tmp_i = times_inds[x]; sptValue const entry = vals[x]; #pragma omp simd for(sptIndex r=0; r<R; ++r) { scratch.data[r] = entry * times_mat->values[tmp_i * stride + r]; } for(sptIndex i=2; i<nmodes; ++i) { times_mat_index = mats_order[i]; times_mat = mats[times_mat_index]; times_inds = X->inds[times_mat_index].data; tmp_i = times_inds[x]; #pragma omp simd for(sptIndex r=0; r<R; ++r) { scratch.data[r] *= times_mat->values[tmp_i * stride + r]; } } sptIndex const mode_i = mode_ind[x]; #pragma omp simd for(sptIndex r=0; r<R; ++r) { copy_mats[tid]->values[mode_i * stride + r] += scratch.data[r]; } sptFreeValueVector(&scratch); } // End loop nnzs /* Reduction */ #pragma omp parallel for schedule(static) num_threads(tk) for(sptIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk; ++t) { #pragma omp simd for(sptIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } return 0; } int sptOmpMTTKRP_3D_Reduce(sptSparseTensor const * const X, sptMatrix * mats[], // mats[nmodes] as temporary space. sptMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = X->nmodes; sptNnzIndex const nnz = X->nnz; sptIndex const * const ndims = X->ndims; sptValue const * const restrict vals = X->values.data; sptIndex const stride = mats[0]->stride; /* Check the mats. 
*/ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptIndex const * const restrict mode_ind = X->inds[mode].data; sptMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(sptValue)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } sptIndex times_mat_index_1 = mats_order[1]; sptMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex * restrict times_inds_1 = X->inds[times_mat_index_1].data; sptIndex times_mat_index_2 = mats_order[2]; sptMatrix * restrict times_mat_2 = mats[times_mat_index_2]; sptIndex * restrict times_inds_2 = X->inds[times_mat_index_2].data; #pragma omp parallel for schedule(static) num_threads(tk) for(sptNnzIndex x=0; x<nnz; ++x) { int tid = omp_get_thread_num(); sptIndex mode_i = mode_ind[x]; sptIndex tmp_i_1 = times_inds_1[x]; sptIndex tmp_i_2 = times_inds_2[x]; sptValue entry = vals[x]; #pragma omp simd for(sptIndex r=0; r<R; ++r) { copy_mats[tid]->values[mode_i * stride + r] += entry * times_mat_1->values[tmp_i_1 * stride + r] * times_mat_2->values[tmp_i_2 * stride + r]; } } /* Reduction */ #pragma omp parallel for schedule(static) num_threads(tk) for(sptIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk; ++t) { #pragma omp simd for(sptIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } return 0; }
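/* The file above implements three accumulation strategies for the same
   MTTKRP kernel: per-element atomics (sptOmpMTTKRP), a mutex pool keyed by
   output row (sptOmpMTTKRP_Lock), and private per-thread output copies merged
   afterwards (sptOmpMTTKRP_Reduce). A minimal sketch of the reduce-style
   merge, with plain C arrays standing in for sptMatrix (names and types here
   are illustrative): */
#if 0
void merge_thread_copies(float *out, float **copy, int nrows, int R, int tk)
{
    /* Each thread owned copy[tid] during the nnz loop, so that loop was
       race-free; the merge below sums the copies into the shared output. */
    #pragma omp parallel for schedule(static) num_threads(tk)
    for (int i = 0; i < nrows; ++i)
        for (int t = 0; t < tk; ++t)
            for (int r = 0; r < R; ++r)
                out[i * R + r] += copy[t][i * R + r];
}
#endif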
3d7pt.c
/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#ifdef _OPENMP
#include <omp.h>  /* needed for omp_get_max_threads() below */
#endif

#include "print_utils.h"

#define TESTS 2
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* All four sizes are required. The original parsed them only when present,
     leaving Nx/Ny/Nz/Nt uninitialized otherwise, so fail loudly instead. */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1]) + 2;  /* +2 for the halo/boundary layer */
  Ny = atoi(argv[2]) + 2;
  Nz = atoi(argv[3]) + 2;
  Nt = atoi(argv[4]);

  double ****A = (double ****) malloc(sizeof(double***) * 2);
  A[0] = (double ***) malloc(sizeof(double**) * Nz);
  A[1] = (double ***) malloc(sizeof(double**) * Nz);
  for (i = 0; i < Nz; i++) {
    A[0][i] = (double**) malloc(sizeof(double*) * Ny);
    A[1][i] = (double**) malloc(sizeof(double*) * Ny);
    for (j = 0; j < Ny; j++) {
      A[0][i][j] = (double*) malloc(sizeof(double) * Nx);
      A[1][i][j] = (double*) malloc(sizeof(double) * Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int) * 5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 24;
  tile_size[3] = 256;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize variables
  // srand(42);
  /* Initialize both buffers over the full extent, including the halo planes
     at index 0: the original loops started at 1 and never touched A[1], so
     the boundary reads in the stencil below hit uninitialized memory. */
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = A[1][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] =
                alpha * (A[t%2][i][j][k])
              + beta * (A[t%2][i-1][j][k] + A[t%2][i][j-1][k]
                      + A[t%2][i][j][k-1] + A[t%2][i+1][j][k]
                      + A[t%2][i][j+1][k] + A[t%2][i][j][k+1]);
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);  /* was lowercase `min`, which is undefined here */
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Freeing the arrays is deliberately skipped (it caused performance
  // degradation in timing runs):
  /*
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
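/* Editor's addendum, not part of the original benchmark: a minimal,
   self-contained 1D analogue (kept as its own translation unit) of the
   double-buffering scheme above, where time step t reads buffer t%2,
   writes buffer (t+1)%2, and the halo cells are only ever read.
   It assumes nothing beyond the C standard library. */
#include <stdio.h>

int main(void)
{
    double u[2][8] = {{0, 1, 2, 3, 4, 5, 6, 0}};  /* u[1] is zero-initialized */
    int t, k;
    for (t = 0; t < 4; t++)
        for (k = 1; k < 7; k++)  /* interior points only; the halo stays fixed */
            u[(t + 1) % 2][k] = 0.5 * u[t % 2][k]
                              + 0.25 * (u[t % 2][k - 1] + u[t % 2][k + 1]);
    for (k = 0; k < 8; k++)
        printf("%g ", u[4 % 2][k]);  /* after 4 steps the result is in u[0] */
    printf("\n");
    return 0;
}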
omp_ex_14.c
#include <stdio.h>
#include <omp.h>

/* MIT License

   Copyright (c) 2019 NOUREDDINE DAGHBOUDJ

   Permission is hereby granted, free of charge, to any person obtaining a copy
   of this software and associated documentation files (the "Software"), to deal
   in the Software without restriction, including without limitation the rights
   to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
   copies of the Software, and to permit persons to whom the Software is
   furnished to do so, subject to the following conditions:

   The above copyright notice and this permission notice shall be included in
   all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
   OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
   SOFTWARE. */

int main()
{
    unsigned int a = 90;

    printf("Before a = %u\n", a);

    #pragma omp parallel private(a)
    {
        /* With private(a), each thread gets its own copy of `a` that is
           UNINITIALIZED on entry to the region. The original `a += 10 + ...`
           read that indeterminate value, so assign instead of incrementing. */
        a = 10 + omp_get_thread_num();
        printf("Inside a = %u\n", a);
    }

    /* The private copies are discarded; the outer `a` is still 90. */
    printf("After a = %u\n", a);

    return 0;
}
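/* Editor's sketch, not part of the original example: the firstprivate
   alternative, which copies the outer value into each thread's private copy
   instead of leaving it uninitialized. Assumes an OpenMP-capable compiler
   (e.g. built with -fopenmp); a separate translation unit. */
#include <stdio.h>
#include <omp.h>

int main(void)
{
    unsigned int a = 90;

    #pragma omp parallel firstprivate(a)
    {
        a += 10 + omp_get_thread_num();  /* starts from 90 in every thread */
        printf("Inside a = %u\n", a);
    }

    printf("After a = %u\n", a);  /* still 90: private copies are discarded */
    return 0;
}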
valid.mob1.src.h
#pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_32_112_112_32_3_3.h"
#include "gen_ukr_A4B2gemm_1_32_112_112_32_3_3.h"

void testrun(float* A, float* B, float* C, float* oriB)
{
    int tid = omp_get_thread_num();
    int Nx = 112;
    int Ny = 112;
    int Nh = 3;
    long long Astrides[6] = {0, 1, 2, 3, 4, 5};
    int b1 = 0;

    // Pack B: transpose 8x8 blocks into the layout the micro-kernels expect.
    for (int fpck = (tid % 1) * 16; fpck < uNf; fpck += 1 * 16) {
        for (int cwh = (tid / 1) * 8; cwh < uNc * uNw * uNh / 8 * 8; cwh += 8 * 1) {
            transpose8x8_avx(oriB + (fpck + 0) * uNc * uNw * uNh + cwh,
                             B + fpck * uNc * uNw * uNh + cwh * 16 + 0,
                             uNc * uNw * uNh, 16);
            transpose8x8_avx(oriB + (fpck + 8) * uNc * uNw * uNh + cwh,
                             B + fpck * uNc * uNw * uNh + cwh * 16 + 8,
                             uNc * uNw * uNh, 16);
        }
    }

    /* Tile sizes: these declarations were commented out in the original
       generated file, but the loop nest below uses Tc1/Txy3/Tf2 and cannot
       compile without them, so the first candidate value from each original
       comment is used here. (`min` and the uN* dimensions are assumed to come
       from the included generated headers.) */
    int Tc1 = 8;        // candidates: 1, 8, 16, 32, 48
    int Txy3 = 12 * 8;  // candidates: 18, 36, 72, 144
    int Tf2 = 32;       // candidates: 32, 64, 128, 256

    #pragma omp barrier
    // begin push button generated block
    for (int c5 = 0; c5 < 32 + 0; c5 += 32) {
    for (int f5 = 0; f5 < 32 + 0; f5 += 32) {
    for (int xy5 = 0; xy5 < 12544 + 0; xy5 += 12544) {
    for (int c4 = c5; c4 < min(32, 32 + c5); c4 += 32) {
    for (int xy4 = xy5; xy4 < min(12544, 12544 + xy5); xy4 += 12544) {
    for (int f4 = f5; f4 < min(32, 32 + f5); f4 += Tf2) {
    for (int xy3 = xy4; xy3 < min(12544, 12544 + xy4); xy3 += Txy3) {
    for (int f3 = f4; f3 < min(32, Tf2 + f4); f3 += Tf2) {
    for (int c3 = c4; c3 < min(32, 32 + c4); c3 += Tc1) {
    for (int xy2 = xy3; xy2 < min(12544, Txy3 + xy3); xy2 += 6) {
    for (int f2 = f3; f2 < min(32, Tf2 + f3); f2 += 16) {
    for (int c2 = c3; c2 < min(32, Tc1 + c3); c2 += Tc1) {
    for (int c1 = c2; c1 < min(32, Tc1 + c2); c1 += Tc1) {
    for (int xy1 = xy2; xy1 < min(12544, 6 + xy2); xy1 += 6) {
    for (int f1 = f2; f1 < min(32, 16 + f2); f1 += 16) {
        int ctile = min(Tc1, 32 - c1);
        int x1 = xy1 / 112;
        int y1 = xy1 % 112 / 1;
        int c1_1 = c1 / 1;
        int c1_2 = c1 % 1 / 1;
        int kf1_1 = f1 / 16;
        int kf1_2 = f1 % 16 / 1;
        int of1_1 = f1 / 1;
        int of1_2 = f1 % 1 / 1;
        int offsetA = 0 + b1*415872 + c1_1*12996 + 1*x1*114 + 1*y1*1 + c1_2*1;
        int offsetB = 0 + kf1_1*4608 + c1*144 + 0*48 + 0*16 + kf1_2*1;
        int offsetC = 0 + b1*401408 + of1_1*12544 + x1*112 + y1*1 + of1_2*1;
        if (112 - y1 >= 6) {
            cnn_ukr_float_scatter_6x2v_cxycgemm(A + offsetA, B + offsetB, C + offsetC, ctile, Astrides);
        }
        else if (112*112 - xy1 >= 6) {
            // Row wrap: bump the strides of the entries that spill into the next row.
            for (int sti = 112 - y1; sti < 6; sti += 1) { Astrides[sti] += 2; }
            cnn_ukr_float_scatter_6x2v_cxycgemm(A + offsetA, B + offsetB, C + offsetC, ctile, Astrides);
            for (int sti = 112 - y1; sti < 6; sti += 1) { Astrides[sti] -= 2; }
        }
        else {
            cnn_ukr_float_scatter_4x2v_cxycgemm(A + offsetA, B + offsetB, C + offsetC, ctile, Astrides);
        }
    }}}}}}}}}}}}}}}
    // end push button generated block
}
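/* Editor's sketch, not derived from the generated kernel above: the nest it
   emits is plain loop tiling, where every tile loop clamps its upper bound
   with min() so partial tiles at the edge are still covered. The pattern in
   isolation, for one dimension, with a local MIN macro; names N and TILE are
   illustrative only. */
#include <stdio.h>
#define MIN(a,b) ((a) < (b) ? (a) : (b))

int main(void)
{
    enum { N = 10, TILE = 4 };
    int sum = 0;
    for (int ii = 0; ii < N; ii += TILE)              /* tile loop */
        for (int i = ii; i < MIN(N, ii + TILE); i++)  /* intra-tile loop */
            sum += i;                                 /* visits each 0..N-1 once */
    printf("sum = %d\n", sum);                        /* prints 45 */
    return 0;
}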
GB_binop__bor_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bor_int8) // A.*B function (eWiseMult): GB (_AemultB_08__bor_int8) // A.*B function (eWiseMult): GB (_AemultB_02__bor_int8) // A.*B function (eWiseMult): GB (_AemultB_04__bor_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_int8) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bor_int8) // C+=b function (dense accum): GB (_Cdense_accumb__bor_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_int8) // C=scalar+B GB (_bind1st__bor_int8) // C=scalar+B' GB (_bind1st_tran__bor_int8) // C=A+scalar GB (_bind2nd__bor_int8) // C=A'+scalar GB (_bind2nd_tran__bor_int8) // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij) | (bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) | (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BOR || GxB_NO_INT8 || GxB_NO_BOR_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bor_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bor_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bor_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bor_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bor_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bor_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bor_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bor_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bor_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = (x) | (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bor_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij) | (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x) | (aij) ; \ } GrB_Info GB (_bind1st_tran__bor_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) | (y) ; \ } GrB_Info GB (_bind2nd_tran__bor_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
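/* Editor's sketch, standalone and deliberately free of GraphBLAS internals:
   the essential semantics of the _bind1st__bor_int8 kernel above, namely
   Cx[p] = x | Bx[p] over a dense value array, with the Bb bitmap test and
   iso handling omitted. The helper name bind1st_bor_int8 is hypothetical. */
#include <stdio.h>
#include <stdint.h>

static void bind1st_bor_int8(int8_t *Cx, int8_t x, const int8_t *Bx, int64_t bnz)
{
    for (int64_t p = 0; p < bnz; p++)
        Cx[p] = (int8_t)(x | Bx[p]);  /* BOR with the scalar bound as 1st arg */
}

int main(void)
{
    int8_t B[4] = {1, 2, 4, 8}, C[4];
    bind1st_bor_int8(C, 0x10, B, 4);
    for (int p = 0; p < 4; p++)
        printf("%d ", C[p]);          /* prints: 17 18 20 24 */
    printf("\n");
    return 0;
}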
tinyexr.h
/* Copyright (c) 2014 - 2016, Syoyo Fujita All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the <organization> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// ///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------

#ifndef TINYEXR_H_
#define TINYEXR_H_

//
//
// Do this:
//    #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
//
// i.e. it should look like this:
//    #include ...
//    #include ...
//    #include ...
//    #define TINYEXR_IMPLEMENTATION
//    #include "tinyexr.h"
//
//

#include <stddef.h>  // for size_t
#include <stdint.h>  // guess stdint.h is available(C99)

#ifdef __cplusplus
extern "C" {
#endif

// Use embedded miniz or not to decode ZIP format pixel. Linking with zlib is
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif

// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif

#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0)  // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif

#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-5)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-6)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-7)
#define TINYEXR_ERROR_INVALID_HEADER (-8)

// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }

// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)

#define TINYEXR_MAX_ATTRIBUTES (128)

#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128)  // TinyEXR extension

#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)

#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)

#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)

typedef struct _EXRVersion {
  int version;    // this must be 2
  int tiled;      // tile format image
  int long_name;  // long name attribute
  int non_image;  // deep image(EXR 2.0)
  int multipart;  // multi-part(EXR 2.0)
} EXRVersion;

typedef struct _EXRAttribute {
  char name[256];  // name and type are up to 255 chars long.
  char type[256];
  unsigned char *value;  // uint8_t*
  int size;
  int pad0;
} EXRAttribute;

typedef struct _EXRChannelInfo {
  char name[256];  // less than 255 bytes long
  int pixel_type;
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} EXRChannelInfo;

typedef struct _EXRTile {
  int offset_x;
  int offset_y;
  int level_x;
  int level_y;

  int width;   // actual width in a tile.
  int height;  // actual height in a tile.

  unsigned char **images;  // image[channels][pixels]
} EXRTile;

typedef struct _EXRHeader {
  float pixel_aspect_ratio;
  int line_order;
  int data_window[4];
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;

  int chunk_count;

  // Properties for tiled format(`tiledesc`).
  int tiled;
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  int long_name;
  int non_image;
  int multipart;
  unsigned int header_len;

  // Custom attributes(excludes required attributes, e.g. `channels`,
  // `compression`, etc)
  int num_custom_attributes;
  EXRAttribute custom_attributes[TINYEXR_MAX_ATTRIBUTES];

  EXRChannelInfo *channels;  // [num_channels]

  int *pixel_types;  // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
                     // each channel. This is overwritten with
                     // `requested_pixel_types` when loading.
  int num_channels;

  int compression_type;        // compression type(TINYEXR_COMPRESSIONTYPE_*)
  int *requested_pixel_types;  // Filled initially by
                               // ParseEXRHeaderFrom(Memory|File), then users
                               // can edit it(only valid for HALF pixel type
                               // channel)
} EXRHeader;

typedef struct _EXRMultiPartHeader {
  int num_headers;
  EXRHeader *headers;
} EXRMultiPartHeader;

typedef struct _EXRImage {
  EXRTile *tiles;  // Tiled pixel data. The application must reconstruct image
                   // from tiles manually. NULL if scanline format.
  unsigned char **images;  // image[channels][pixels]. NULL if tiled format.

  int width;
  int height;
  int num_channels;

  // Properties for tile format.
  int num_tiles;
} EXRImage;

typedef struct _EXRMultiPartImage {
  int num_images;
  EXRImage *images;
} EXRMultiPartImage;

typedef struct _DeepImage {
  const char **channel_names;
  float ***image;      // image[channels][scanlines][samples]
  int **offset_table;  // offset_table[scanline][offsets]
  int num_channels;
  int width;
  int height;
  int pad0;
} DeepImage;

// @deprecated { to be removed. }
// Loads single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
                   const char *filename, const char **err);

// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 3(RGB) or 4(RGBA).
// Result image format is: float x RGB(A) x width x height
extern int SaveEXR(const float *data, int width, int height, int components,
                   const char *filename);

// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);

// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);

// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);

// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);

// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);

// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
                                     const unsigned char *memory, size_t size);

// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
                                  const char *filename, const char **err);

// Parse single-part OpenEXR header from memory and initialize `EXRHeader`.
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
                                    const EXRVersion *version,
                                    const unsigned char *memory, size_t size,
                                    const char **err);

// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const char *filename, const char **err); // Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*` // array extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Loads single-part OpenEXR image from a file. // Application must setup `ParseEXRHeaderFromFile` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header, const char *filename, const char **err); // Loads single-part OpenEXR image from a memory. // Application must setup `EXRHeader` with // `ParseEXRHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header, const unsigned char *memory, const size_t size, const char **err); // Loads multi-part OpenEXR image from a file. // Application must setup `ParseEXRMultipartHeaderFromFile` before calling this // function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXRMultipartImageFromFile(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const char *filename, const char **err); // Loads multi-part OpenEXR image from a memory. // Application must setup `EXRHeader*` array with // `ParseEXRMultipartHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXRMultipartImageFromMemory(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const unsigned char *memory, const char **err); // Saves multi-channel, single-frame OpenEXR image to a file. // Returns negative value and may set error string in `err` when there's an // error extern int SaveEXRImageToFile(const EXRImage *image, const EXRHeader *exr_header, const char *filename, const char **err); // Saves multi-channel, single-frame OpenEXR image to a memory. // Image is compressed using EXRImage.compression value. // Return the number of bytes if succes. // Returns negative value and may set error string in `err` when there's an // error extern size_t SaveEXRImageToMemory(const EXRImage *image, const EXRHeader *exr_header, unsigned char **memory, const char **err); // Loads single-frame OpenEXR deep image. // Application must free memory of variables in DeepImage(image, offset_table) // Returns negative value and may set error string in `err` when there's an // error extern int LoadDeepEXR(DeepImage *out_image, const char *filename, const char **err); // NOT YET IMPLEMENTED: // Saves single-frame OpenEXR deep image. // Returns negative value and may set error string in `err` when there's an // error // extern int SaveDeepEXR(const DeepImage *in_image, const char *filename, // const char **err); // NOT YET IMPLEMENTED: // Loads multi-part OpenEXR deep image. 
// Application must free memory of variables in DeepImage(image, offset_table) // extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const // char *filename, // const char **err); // For emscripten. // Loads single-frame OpenEXR image from memory. Assume EXR image contains // RGB(A) channels. // `out_rgba` must have enough memory(at least sizeof(float) x 4(RGBA) x width x // hight) // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXRFromMemory(float *out_rgba, const unsigned char *memory, size_t size, const char **err); #ifdef __cplusplus } #endif #ifdef TINYEXR_IMPLEMENTATION #include <algorithm> #include <cassert> #include <cstdio> #include <cstdlib> #include <cstring> #include <sstream> #include <string> #include <vector> // @todo { remove including tinyexr.h } #include "tinyexr.h" #ifdef _OPENMP #include <omp.h> #endif #if TINYEXR_USE_MINIZ #else #include "zlib.h" #endif #if TINYEXR_USE_ZFP #include "zfp.h" #endif #if __cplusplus > 199711L // C++11 #include <cstdint> #endif // __cplusplus > 199711L namespace tinyexr { #if __cplusplus > 199711L // C++11 typedef uint64_t tinyexr_uint64; typedef int64_t tinyexr_int64; #else // Although `long long` is not a standard type pre C++11, assume it is defined // as a compiler's extension. #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #endif typedef unsigned long long tinyexr_uint64; typedef long long tinyexr_int64; #ifdef __clang__ #pragma clang diagnostic pop #endif #endif #if TINYEXR_USE_MINIZ namespace miniz { #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #endif /* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing See "unlicense" statement at the end of this file. Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). * Change History 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major release with Zip64 support (almost there!): - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug (thanks kahmyong.moon@hp.com) which could cause locate files to not find files. This bug would only have occured in earlier versions if you explicitly used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If you can't switch to v1.15 but want to fix this bug, just remove the uses of this flag from both helper funcs (and of course don't use the flag). - Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when pUser_read_buf is not NULL and compressed size is > uncompressed size - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract compressed data from directory entries, to account for weird zipfiles which contain zero-size compressed data on dir entries. 
Hopefully this fix won't cause any issues on weird zip archives, because it assumes the low 16-bits of zip external attributes are DOS attributes (which I believe they always are in practice). - Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the internal attributes, just the filename and external attributes - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed - Added cmake support for Linux builds which builds all the examples, tested with clang v3.3 and gcc v4.6. - Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti - Merged MZ_FORCEINLINE fix from hdeanclark - Fix <time.h> include before config #ifdef, thanks emil.brink - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping (super useful for OpenGL apps), and explicit control over the compression level (so you can set it to 1 for real-time compression). - Merged in some compiler fixes from paulharris's github repro. - Retested this build under Windows (VS 2010, including static analysis), tcc 0.9.26, gcc v4.6 and clang v3.3. - Added example6.c, which dumps an image of the mandelbrot set to a PNG file. - Modified example2 to help test the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more. - In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix possible src file fclose() leak if alignment bytes+local header file write faiiled - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the wrong central dir header offset, appears harmless in this release, but it became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect). 5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit. - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files. - Eliminated a bunch of warnings when compiling with GCC 32-bit/64. - Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning). - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. The codeblocks workspace is compatible with Linux+Win32/x64. - Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test. - Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives. - Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.) - Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's. level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson <bruced@valvesoftware.com> for the feedback/bug report. 5/28/11 v1.11 - Added statement from unlicense.org 5/27/11 v1.10 - Substantial compressor optimizations: - Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a - Core i7 (actual throughput varies depending on the type of data, and x64 vs. 
x86). - Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types. - Refactored the compression code for better readability and maintainability. - Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large drop in throughput on some files). 5/15/11 v1.09 - Initial stable release. * Low-level Deflate/Inflate implementation notes: Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses approximately as well as zlib. Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory block large enough to hold the entire file. The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation. * zlib-style API notes: miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in zlib replacement in many apps: The z_stream struct, optional memory allocation callbacks deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound inflateInit/inflateInit2/inflate/inflateEnd compress, compress2, compressBound, uncompress CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines. Supports raw deflate streams or standard zlib streams with adler-32 checking. Limitations: The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries. I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but there are no guarantees that miniz.c pulls this off perfectly. * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by Alex Evans. Supports 1-4 bytes/pixel images. * ZIP archive API notes: The ZIP archive API's where designed with simplicity and efficiency in mind, with just enough abstraction to get the job done with minimal fuss. There are simple API's to retrieve file information, read files from existing archives, create new archives, append new files to existing archives, or clone archive data from one archive to another. It supports archives located in memory or the heap, on disk (using stdio.h), or you can specify custom file read/write callbacks. - Archive reading: Just call this function to read a single file from a disk archive: void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); For more complex cases, use the "mz_zip_reader" functions. Upon opening an archive, the entire central directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files. - Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file: int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); The locate operation can optionally check file comments too, which (as one example) can be used to identify multiple versions of the same file in an archive. This function uses a simple linear search through the central directory, so it's not very fast. 
Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and retrieve detailed info on each file by calling mz_zip_reader_file_stat(). - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data to disk and builds an exact image of the central directory in memory. The central directory image is written all at once at the end of the archive file when the archive is finalized. The archive writer can optionally align each file's local header and file data to any power of 2 alignment, which can be useful when the archive will be read from optical media. Also, the writer supports placing arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still readable by any ZIP tool. - Archive appending: The simple way to add a single file to an archive is to call this function: mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); The archive will be created if it doesn't already exist, otherwise it'll be appended to. Note the appending is done in-place and is not an atomic operation, so if something goes wrong during the operation it's possible the archive could be left without a central directory (although the local file headers and file data will be fine, so the archive will be recoverable). For more complex archive modification scenarios: 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to preserve into a new archive using using the mz_zip_writer_add_from_zip_reader() function (which compiles the compressed file data as-is). When you're done, delete the old archive and rename the newly written archive, and you're done. This is safe but requires a bunch of temporary disk space or heap memory. 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(), append new files as needed, then finalize the archive which will write an updated central directory to the original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a possibility that the archive's central directory could be lost with this method if anything goes wrong, though. - ZIP archive support limitations: No zip64 or spanning support. Extraction functions can only handle unencrypted, stored or deflated files. Requires streams capable of seeking. * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. * Important: For best perf. be sure to customize the below macros for your target platform: #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_LITTLE_ENDIAN 1 #define MINIZ_HAS_64BIT_REGISTERS 1 * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes). */ #ifndef MINIZ_HEADER_INCLUDED #define MINIZ_HEADER_INCLUDED #include <stdlib.h> // Defines to completely disable specific portions of miniz.c: // If all macros here are defined the only functionality remaining will be // CRC-32, adler-32, tinfl, and tdefl. 
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on // stdio for file I/O. //#define MINIZ_NO_STDIO // If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able // to get the current time, or // get/set file times, and the C run-time funcs that get/set times won't be // called. // The current downside is the times written to your archives will be from 1979. #define MINIZ_NO_TIME // Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's. //#define MINIZ_NO_ARCHIVE_APIS // Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive // API's. //#define MINIZ_NO_ARCHIVE_WRITING_APIS // Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression // API's. //#define MINIZ_NO_ZLIB_APIS // Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent // conflicts against stock zlib. //#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES // Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc. // Note if MINIZ_NO_MALLOC is defined then the user must always provide custom // user alloc/free/realloc // callbacks to the zlib and archive API's, and a few stand-alone helper API's // which don't provide custom user // functions (such as tdefl_compress_mem_to_heap() and // tinfl_decompress_mem_to_heap()) won't work. //#define MINIZ_NO_MALLOC #if defined(__TINYC__) && (defined(__linux) || defined(__linux__)) // TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc // on Linux #define MINIZ_NO_TIME #endif #if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS) #include <time.h> #endif #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ defined(__i386) || defined(__i486__) || defined(__i486) || \ defined(i386) || defined(__ia64__) || defined(__x86_64__) // MINIZ_X86_OR_X64_CPU is only used to help set the below macros. #define MINIZ_X86_OR_X64_CPU 1 #else #define MINIZ_X86_OR_X64_CPU 0 #endif #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. #define MINIZ_LITTLE_ENDIAN 1 #endif #endif #if 1 // MINIZ_X86_OR_X64_CPU // Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient // integer loads and stores from unaligned addresses. //#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \ 0 // disable to suppress compiler warnings #endif #if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \ defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \ defined(__x86_64__) // Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are // reasonably fast (and don't involve compiler generated calls to helper // functions). #define MINIZ_HAS_64BIT_REGISTERS 1 #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API Definitions. // For more compatibility with zlib, miniz.c uses unsigned long for some // parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits! typedef unsigned long mz_ulong; // mz_free() internally uses the MZ_FREE() macro (which by default calls free() // unless you've modified the MZ_MALLOC macro) to release a block allocated from // the heap. void mz_free(void *p); #define MZ_ADLER32_INIT (1) // mz_adler32() returns the initial adler-32 value to use when called with // ptr==NULL. 
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len); #define MZ_CRC32_INIT (0) // mz_crc32() returns the initial CRC-32 value to use when called with // ptr==NULL. mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len); // Compression strategies. enum { MZ_DEFAULT_STRATEGY = 0, MZ_FILTERED = 1, MZ_HUFFMAN_ONLY = 2, MZ_RLE = 3, MZ_FIXED = 4 }; // Method #define MZ_DEFLATED 8 #ifndef MINIZ_NO_ZLIB_APIS // Heap allocation callbacks. // Note that mz_alloc_func parameter types purpsosely differ from zlib's: // items/size is size_t, not unsigned long. typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size); typedef void (*mz_free_func)(void *opaque, void *address); typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items, size_t size); #define MZ_VERSION "9.1.15" #define MZ_VERNUM 0x91F0 #define MZ_VER_MAJOR 9 #define MZ_VER_MINOR 1 #define MZ_VER_REVISION 15 #define MZ_VER_SUBREVISION 0 // Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The // other values are for advanced use (refer to the zlib docs). enum { MZ_NO_FLUSH = 0, MZ_PARTIAL_FLUSH = 1, MZ_SYNC_FLUSH = 2, MZ_FULL_FLUSH = 3, MZ_FINISH = 4, MZ_BLOCK = 5 }; // Return status codes. MZ_PARAM_ERROR is non-standard. enum { MZ_OK = 0, MZ_STREAM_END = 1, MZ_NEED_DICT = 2, MZ_ERRNO = -1, MZ_STREAM_ERROR = -2, MZ_DATA_ERROR = -3, MZ_MEM_ERROR = -4, MZ_BUF_ERROR = -5, MZ_VERSION_ERROR = -6, MZ_PARAM_ERROR = -10000 }; // Compression levels: 0-9 are the standard zlib-style levels, 10 is best // possible compression (not zlib compatible, and may be very slow), // MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL. enum { MZ_NO_COMPRESSION = 0, MZ_BEST_SPEED = 1, MZ_BEST_COMPRESSION = 9, MZ_UBER_COMPRESSION = 10, MZ_DEFAULT_LEVEL = 6, MZ_DEFAULT_COMPRESSION = -1 }; // Window bits #define MZ_DEFAULT_WINDOW_BITS 15 struct mz_internal_state; // Compression/decompression stream struct. typedef struct mz_stream_s { const unsigned char *next_in; // pointer to next byte to read unsigned int avail_in; // number of bytes available at next_in mz_ulong total_in; // total number of bytes consumed so far unsigned char *next_out; // pointer to next byte to write unsigned int avail_out; // number of bytes that can be written to next_out mz_ulong total_out; // total number of bytes produced so far char *msg; // error msg (unused) struct mz_internal_state *state; // internal state, allocated by zalloc/zfree mz_alloc_func zalloc; // optional heap allocation function (defaults to malloc) mz_free_func zfree; // optional heap free function (defaults to free) void *opaque; // heap alloc function user pointer int data_type; // data_type (unused) mz_ulong adler; // adler32 of the source or uncompressed data mz_ulong reserved; // not used } mz_stream; typedef mz_stream *mz_streamp; // Returns the version string of miniz.c. const char *mz_version(void); // mz_deflateInit() initializes a compressor with default options: // Parameters: // pStream must point to an initialized mz_stream struct. // level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. // level 1 enables a specially optimized compression function that's been // optimized purely for performance, not ratio. // (This special func. is currently only enabled when // MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if the input parameters are bogus. // MZ_MEM_ERROR on out of memory. 
int mz_deflateInit(mz_streamp pStream, int level); // mz_deflateInit2() is like mz_deflate(), except with more control: // Additional parameters: // method must be MZ_DEFLATED // window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with // zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no // header or footer) // mem_level must be between [1, 9] (it's checked but ignored by miniz.c) int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy); // Quickly resets a compressor without having to reallocate anything. Same as // calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). int mz_deflateReset(mz_streamp pStream); // mz_deflate() compresses the input to output, consuming as much of the input // and producing as much output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or // MZ_FINISH. // Return values: // MZ_OK on success (when flushing, or if more input is needed but not // available, and/or there's more output to be written but the output buffer // is full). // MZ_STREAM_END if all input has been consumed and all output bytes have been // written. Don't call mz_deflate() on the stream anymore. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input and/or // output buffers are empty. (Fill up the input buffer or free up some output // space and try again.) int mz_deflate(mz_streamp pStream, int flush); // mz_deflateEnd() deinitializes a compressor: // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. int mz_deflateEnd(mz_streamp pStream); // mz_deflateBound() returns a (very) conservative upper bound on the amount of // data that could be generated by deflate(), assuming flush is set to only // MZ_NO_FLUSH or MZ_FINISH. mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len); // Single-call compression functions mz_compress() and mz_compress2(): // Returns MZ_OK on success, or one of the error codes from mz_deflate() on // failure. int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level); // mz_compressBound() returns a (very) conservative upper bound on the amount of // data that could be generated by calling mz_compress(). mz_ulong mz_compressBound(mz_ulong source_len); // Initializes a decompressor. int mz_inflateInit(mz_streamp pStream); // mz_inflateInit2() is like mz_inflateInit() with an additional option that // controls the window size and whether or not the stream has been wrapped with // a zlib header/footer: // window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or // -MZ_DEFAULT_WINDOW_BITS (raw deflate). int mz_inflateInit2(mz_streamp pStream, int window_bits); // Decompresses the input stream to the output, consuming only as much of the // input as needed, and writing as much to the output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. 
// On the first call, if flush is MZ_FINISH it's assumed the input and output // buffers are both sized large enough to decompress the entire stream in a // single call (this is slightly faster). // MZ_FINISH implies that there are no more source bytes available beside // what's already in the input buffer, and that the output buffer is large // enough to hold the rest of the decompressed data. // Return values: // MZ_OK on success. Either more input is needed but not available, and/or // there's more output to be written but the output buffer is full. // MZ_STREAM_END if all needed input has been consumed and all output bytes // have been written. For zlib streams, the adler-32 of the decompressed data // has also been verified. // MZ_STREAM_ERROR if the stream is bogus. // MZ_DATA_ERROR if the deflate stream is invalid. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input buffer is // empty but the inflater needs more input to continue, or if the output // buffer is not large enough. Call mz_inflate() again // with more input data, or with more room in the output buffer (except when // using single call decompression, described above). int mz_inflate(mz_streamp pStream, int flush); // Deinitializes a decompressor. int mz_inflateEnd(mz_streamp pStream); // Single-call decompression. // Returns MZ_OK on success, or one of the error codes from mz_inflate() on // failure. int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); // Returns a string description of the specified error code, or NULL if the // error code is invalid. const char *mz_error(int err); // Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used // as a drop-in replacement for the subset of zlib that miniz.c supports. // Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you // use zlib in the same project. 
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES typedef unsigned char Byte; typedef unsigned int uInt; typedef mz_ulong uLong; typedef Byte Bytef; typedef uInt uIntf; typedef char charf; typedef int intf; typedef void *voidpf; typedef uLong uLongf; typedef void *voidp; typedef void *const voidpc; #define Z_NULL 0 #define Z_NO_FLUSH MZ_NO_FLUSH #define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH #define Z_SYNC_FLUSH MZ_SYNC_FLUSH #define Z_FULL_FLUSH MZ_FULL_FLUSH #define Z_FINISH MZ_FINISH #define Z_BLOCK MZ_BLOCK #define Z_OK MZ_OK #define Z_STREAM_END MZ_STREAM_END #define Z_NEED_DICT MZ_NEED_DICT #define Z_ERRNO MZ_ERRNO #define Z_STREAM_ERROR MZ_STREAM_ERROR #define Z_DATA_ERROR MZ_DATA_ERROR #define Z_MEM_ERROR MZ_MEM_ERROR #define Z_BUF_ERROR MZ_BUF_ERROR #define Z_VERSION_ERROR MZ_VERSION_ERROR #define Z_PARAM_ERROR MZ_PARAM_ERROR #define Z_NO_COMPRESSION MZ_NO_COMPRESSION #define Z_BEST_SPEED MZ_BEST_SPEED #define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION #define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION #define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY #define Z_FILTERED MZ_FILTERED #define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY #define Z_RLE MZ_RLE #define Z_FIXED MZ_FIXED #define Z_DEFLATED MZ_DEFLATED #define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS #define alloc_func mz_alloc_func #define free_func mz_free_func #define internal_state mz_internal_state #define z_stream mz_stream #define deflateInit mz_deflateInit #define deflateInit2 mz_deflateInit2 #define deflateReset mz_deflateReset #define deflate mz_deflate #define deflateEnd mz_deflateEnd #define deflateBound mz_deflateBound #define compress mz_compress #define compress2 mz_compress2 #define compressBound mz_compressBound #define inflateInit mz_inflateInit #define inflateInit2 mz_inflateInit2 #define inflate mz_inflate #define inflateEnd mz_inflateEnd #define uncompress mz_uncompress #define crc32 mz_crc32 #define adler32 mz_adler32 #define MAX_WBITS 15 #define MAX_MEM_LEVEL 9 #define zError mz_error #define ZLIB_VERSION MZ_VERSION #define ZLIB_VERNUM MZ_VERNUM #define ZLIB_VER_MAJOR MZ_VER_MAJOR #define ZLIB_VER_MINOR MZ_VER_MINOR #define ZLIB_VER_REVISION MZ_VER_REVISION #define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION #define zlibVersion mz_version #define zlib_version mz_version() #endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES #endif // MINIZ_NO_ZLIB_APIS // ------------------- Types and macros typedef unsigned char mz_uint8; typedef signed short mz_int16; typedef unsigned short mz_uint16; typedef unsigned int mz_uint32; typedef unsigned int mz_uint; typedef long long mz_int64; typedef unsigned long long mz_uint64; typedef int mz_bool; #define MZ_FALSE (0) #define MZ_TRUE (1) // An attempt to work around MSVC's spammy "warning C4127: conditional // expression is constant" message. 
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif

// ------------------- ZIP archive reading/writing

#ifndef MINIZ_NO_ARCHIVE_APIS

enum {
  MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
  MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
  MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};

typedef struct {
  mz_uint32 m_file_index;
  mz_uint32 m_central_dir_ofs;
  mz_uint16 m_version_made_by;
  mz_uint16 m_version_needed;
  mz_uint16 m_bit_flag;
  mz_uint16 m_method;
#ifndef MINIZ_NO_TIME
  time_t m_time;
#endif
  mz_uint32 m_crc32;
  mz_uint64 m_comp_size;
  mz_uint64 m_uncomp_size;
  mz_uint16 m_internal_attr;
  mz_uint32 m_external_attr;
  mz_uint64 m_local_header_ofs;
  mz_uint32 m_comment_size;
  char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
  char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;

typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n);

struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;

typedef enum {
  MZ_ZIP_MODE_INVALID = 0,
  MZ_ZIP_MODE_READING = 1,
  MZ_ZIP_MODE_WRITING = 2,
  MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;

typedef struct mz_zip_archive_tag {
  mz_uint64 m_archive_size;
  mz_uint64 m_central_directory_file_ofs;
  mz_uint m_total_files;
  mz_zip_mode m_zip_mode;

  mz_uint m_file_offset_alignment;

  mz_alloc_func m_pAlloc;
  mz_free_func m_pFree;
  mz_realloc_func m_pRealloc;
  void *m_pAlloc_opaque;

  mz_file_read_func m_pRead;
  mz_file_write_func m_pWrite;
  void *m_pIO_opaque;

  mz_zip_internal_state *m_pState;
} mz_zip_archive;

typedef enum {
  MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
  MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
  MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
  MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;

// ZIP archive reading

// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags);
#endif

// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);

// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat);

// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index);

// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename, mz_uint filename_buf_size);

// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags);
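// Example (hedged sketch): enumerating every entry in an archive with the
// reader functions above. "archive.zip" is a placeholder filename, error
// handling is reduced to early bail-outs, and mz_zip_reader_end() is declared
// below.
//
//   mz_zip_archive zip;
//   mz_uint i;
//   memset(&zip, 0, sizeof(zip));
//   if (!mz_zip_reader_init_file(&zip, "archive.zip", 0)) return;
//   for (i = 0; i < mz_zip_reader_get_num_files(&zip); i++) {
//     mz_zip_archive_file_stat st;
//     if (mz_zip_reader_file_stat(&zip, i, &st))
//       printf("%s (%u bytes compressed, %u uncompressed)\n", st.m_filename,
//              (mz_uint)st.m_comp_size, (mz_uint)st.m_uncomp_size);
//   }
//   mz_zip_reader_end(&zip);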
// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);

// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags);

// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags);

// Extracts an archive file using a callback function to output the file's
// data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags);

#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags);
#endif

// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);

// ZIP archive writing

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning);
#endif

// Converts a ZIP archive reader object into a writer object, to allow
// efficient in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless
// you've overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user-provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename);
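// Example (hedged sketch): appending one new entry to an existing archive in
// place via the reader-to-writer conversion above. "archive.zip" and the
// entry contents are placeholders; mz_zip_writer_add_mem() and the
// finalize/end functions are declared below.
//
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));
//   if (!mz_zip_reader_init_file(&zip, "archive.zip", 0)) return;
//   if (!mz_zip_writer_init_from_reader(&zip, "archive.zip")) return;
//   mz_zip_writer_add_mem(&zip, "notes/new.txt", "hello", 5,
//                         MZ_DEFAULT_COMPRESSION);
//   mz_zip_writer_finalize_archive(&zip);
//   mz_zip_writer_end(&zip);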
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash and an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32);

#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size,
                               mz_uint level_and_flags);
#endif

// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment
// fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint file_index);

// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize);

// Ends archive writing, freeing all allocations, and closing the output file
// if mz_zip_writer_init_file() was used.
// Note: for the archive to be valid, it must have been finalized before
// ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);

// Misc. high-level helper functions:

// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
    const char *pZip_filename, const char *pArchive_name, const void *pBuf,
    size_t buf_size, const void *pComment, mz_uint16 comment_size,
    mz_uint level_and_flags);

// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
                                          const char *pArchive_name,
                                          size_t *pSize, mz_uint zip_flags);
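// Example (hedged sketch): the one-call way to add a blob to a ZIP file on
// disk, creating the archive if it doesn't exist yet. The filenames are
// placeholders.
//
//   const char *pStr = "sample data";
//   mz_zip_add_mem_to_archive_file_in_place("out.zip", "data.txt", pStr,
//                                           strlen(pStr), NULL, 0,
//                                           MZ_BEST_COMPRESSION);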
#endif  // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

#endif  // #ifndef MINIZ_NO_ARCHIVE_APIS

// ------------------- Low-level Decompression API Definitions

// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is
// large enough to hold the entire decompressed stream. If clear, the output
// buffer is at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
  TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
  TINFL_FLAG_HAS_MORE_INPUT = 2,
  TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
  TINFL_FLAG_COMPUTE_ADLER32 = 8
};

// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap
// block allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size (which will usually
// be larger than src_buf_len).
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags);

// tinfl_decompress_mem_to_mem() decompresses a block in memory to another
// block in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of
// bytes written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                   const void *pSrc_buf, size_t src_buf_len,
                                   int flags);

// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user-provided callback function will be called
// to flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
                                     tinfl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags);

struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;

// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768

// Return status.
typedef enum {
  TINFL_STATUS_BAD_PARAM = -3,
  TINFL_STATUS_ADLER32_MISMATCH = -2,
  TINFL_STATUS_FAILED = -1,
  TINFL_STATUS_DONE = 0,
  TINFL_STATUS_NEEDS_MORE_INPUT = 1,
  TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;

// Initializes the decompressor to its initial state.
#define tinfl_init(r) \
  do {                \
    (r)->m_state = 0; \
  }                   \
  MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32

// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build
// any desired higher level decompression API. In the limit case, it can be
// called once per input or output byte.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
                              const mz_uint8 *pIn_buf_next,
                              size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
                              mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
                              const mz_uint32 decomp_flags);
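// Example (hedged sketch): driving tinfl_decompress() directly with a 32KB
// wrapping dictionary, the same pattern the higher-level helpers use
// internally. The input plumbing (pIn/in_avail) and the consumer of the
// output bytes are placeholders.
//
//   tinfl_decompressor decomp;
//   mz_uint8 dict[TINFL_LZ_DICT_SIZE];
//   size_t dict_ofs = 0;
//   tinfl_init(&decomp);
//   for (;;) {
//     size_t in_size = in_avail, out_size = TINFL_LZ_DICT_SIZE - dict_ofs;
//     tinfl_status status =
//         tinfl_decompress(&decomp, pIn, &in_size, dict, dict + dict_ofs,
//                          &out_size, TINFL_FLAG_HAS_MORE_INPUT);
//     pIn += in_size; in_avail -= in_size;
//     // ... flush out_size decompressed bytes at dict + dict_ofs ...
//     dict_ofs = (dict_ofs + out_size) & (TINFL_LZ_DICT_SIZE - 1);
//     if (status <= TINFL_STATUS_DONE) break;  // done (0) or error (< 0)
//   }

// Internal/private bits follow.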
enum { TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19, TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS }; typedef struct { mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0]; mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2]; } tinfl_huff_table; #ifndef MINIZ_HAS_64BIT_REGISTERS # define MINIZ_HAS_64BIT_REGISTERS 0 #endif #ifndef TINFL_USE_64BIT_BITBUF # if MINIZ_HAS_64BIT_REGISTERS # define TINFL_USE_64BIT_BITBUF 1 # else # define TINFL_USE_64BIT_BITBUF 0 # endif #endif #if TINFL_USE_64BIT_BITBUF typedef mz_uint64 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (64) #else typedef mz_uint32 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (32) #endif struct tinfl_decompressor_tag { mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES]; tinfl_bit_buf_t m_bit_buf; size_t m_dist_from_out_buf_start; tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES]; mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137]; }; // ------------------- Low-level Compression API Definitions // Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly // slower, and raw/dynamic blocks will be output more frequently). #define TDEFL_LESS_MEMORY 0 // tdefl_init() compression flags logically OR'd together (low 12 bits contain // the max. number of probes per dictionary search): // TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes // per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap // compression), 4095=Huffman+LZ (slowest/best compression). enum { TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF }; // TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before // the deflate data, and the Adler-32 of the source data at the end. Otherwise, // you'll get raw deflate data. // TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even // when not writing zlib headers). // TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more // efficient lazy parsing. // TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's // initialization time to the minimum, but the output may vary from run to run // given the same input (depending on the contents of memory). // TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) // TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. // TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. // TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. // The low 12 bits are reserved to control the max # of hash probes per // dictionary lookup (see TDEFL_MAX_PROBES_MASK). enum { TDEFL_WRITE_ZLIB_HEADER = 0x01000, TDEFL_COMPUTE_ADLER32 = 0x02000, TDEFL_GREEDY_PARSING_FLAG = 0x04000, TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, TDEFL_RLE_MATCHES = 0x10000, TDEFL_FILTER_MATCHES = 0x20000, TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000, TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000 }; // High level compression functions: // tdefl_compress_mem_to_heap() compresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of source block to compress. // flags: The max match finder probes (default is 128) logically OR'd against // the above flags. 
// Higher probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags);

// tdefl_compress_mem_to_mem() compresses a block in memory to another block
// in memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags);

// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may
// be 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10]; use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc., or MZ_DEFAULT_LEVEL for a decent default.
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
                                                 int h, int num_chans,
                                                 size_t *pLen_out,
                                                 mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans, size_t *pLen_out);

// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called with TDEFL_OUT_BUF_SIZE bytes at
// a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
                                          void *pUser);

// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags);

enum {
  TDEFL_MAX_HUFF_TABLES = 3,
  TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
  TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
  TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
  TDEFL_LZ_DICT_SIZE = 32768,
  TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
  TDEFL_MIN_MATCH_LEN = 3,
  TDEFL_MAX_MATCH_LEN = 258
};

// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 12,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 15,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
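// Example (hedged sketch): a round trip through the high-level helpers above,
// compressing to a heap block with a zlib header and inflating it back.
// pSrc/src_len are placeholders; 128 is the default probe count.
//
//   size_t comp_len = 0, decomp_len = 0;
//   void *pComp = tdefl_compress_mem_to_heap(pSrc, src_len, &comp_len,
//                                            TDEFL_WRITE_ZLIB_HEADER | 128);
//   void *pDecomp =
//       pComp ? tinfl_decompress_mem_to_heap(pComp, comp_len, &decomp_len,
//                                            TINFL_FLAG_PARSE_ZLIB_HEADER)
//             : NULL;
//   // ... on success decomp_len == src_len and pDecomp matches pSrc ...
//   mz_free(pDecomp);
//   mz_free(pComp);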
// The low-level tdefl functions below may be used directly if the above
// helper functions aren't flexible enough. The low-level functions don't make
// any heap allocations, unlike the above helper functions.
typedef enum {
  TDEFL_STATUS_BAD_PARAM = -2,
  TDEFL_STATUS_PUT_BUF_FAILED = -1,
  TDEFL_STATUS_OKAY = 0,
  TDEFL_STATUS_DONE = 1
} tdefl_status;

// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
  TDEFL_NO_FLUSH = 0,
  TDEFL_SYNC_FLUSH = 2,
  TDEFL_FULL_FLUSH = 3,
  TDEFL_FINISH = 4
} tdefl_flush;

// tdefl's compression state structure.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func;
  void *m_pPut_buf_user;
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos,
      m_bits_in, m_bit_buffer;
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status;
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;

// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, compressed output data will be supplied to the
// specified callback. In this case, the user should call the
// tdefl_compress_buffer() API for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags);

// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush);

// tdefl_compress_buffer() is only usable when tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush);

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);

// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS is
// defined, because it uses some of its macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be // much slower on some files) // window_bits may be -15 (raw deflate) or 15 (zlib) // strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY, // MZ_RLE, or MZ_FIXED mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy); #endif // #ifndef MINIZ_NO_ZLIB_APIS #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_INCLUDED // ------------------- End of Header: Implementation follows. (If you only want // the header, define MINIZ_HEADER_FILE_ONLY.) #ifndef MINIZ_HEADER_FILE_ONLY typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1]; typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1]; typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1]; #include <assert.h> #include <string.h> #define MZ_ASSERT(x) assert(x) #ifdef MINIZ_NO_MALLOC #define MZ_MALLOC(x) NULL #define MZ_FREE(x) (void)x, ((void)0) #define MZ_REALLOC(p, x) NULL #else #define MZ_MALLOC(x) malloc(x) #define MZ_FREE(x) free(x) #define MZ_REALLOC(p, x) realloc(p, x) #endif #define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b)) #define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b)) #define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj)) #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN #define MZ_READ_LE16(p) *((const mz_uint16 *)(p)) #define MZ_READ_LE32(p) *((const mz_uint32 *)(p)) #else #define MZ_READ_LE16(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U)) #define MZ_READ_LE32(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U)) #endif #ifdef _MSC_VER #define MZ_FORCEINLINE __forceinline #elif defined(__GNUC__) #define MZ_FORCEINLINE inline __attribute__((__always_inline__)) #else #define MZ_FORCEINLINE inline #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API's mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) { mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16); size_t block_len = buf_len % 5552; if (!ptr) return MZ_ADLER32_INIT; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } return (s2 << 16) + s1; } // Karl Malbrain's compact CRC-32. 
See "A compact CCITT crc16 and crc32 C // implementation that balances processor cache usage against speed": // http://www.geocities.com/malbrain/ mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) { static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c}; mz_uint32 crcu32 = (mz_uint32)crc; if (!ptr) return MZ_CRC32_INIT; crcu32 = ~crcu32; while (buf_len--) { mz_uint8 b = *ptr++; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)]; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)]; } return ~crcu32; } void mz_free(void *p) { MZ_FREE(p); } #ifndef MINIZ_NO_ZLIB_APIS static void *def_alloc_func(void *opaque, size_t items, size_t size) { (void)opaque, (void)items, (void)size; return MZ_MALLOC(items * size); } static void def_free_func(void *opaque, void *address) { (void)opaque, (void)address; MZ_FREE(address); } static void *def_realloc_func(void *opaque, void *address, size_t items, size_t size) { (void)opaque, (void)address, (void)items, (void)size; return MZ_REALLOC(address, items * size); } const char *mz_version(void) { return MZ_VERSION; } int mz_deflateInit(mz_streamp pStream, int level) { return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY); } int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy) { tdefl_compressor *pComp; mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy); if (!pStream) return MZ_STREAM_ERROR; if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = MZ_ADLER32_INIT; pStream->msg = NULL; pStream->reserved = 0; pStream->total_in = 0; pStream->total_out = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, sizeof(tdefl_compressor)); if (!pComp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pComp; if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) { mz_deflateEnd(pStream); return MZ_PARAM_ERROR; } return MZ_OK; } int mz_deflateReset(mz_streamp pStream) { if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || (!pStream->zfree)) return MZ_STREAM_ERROR; pStream->total_in = pStream->total_out = 0; tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL, ((tdefl_compressor *)pStream->state)->m_flags); return MZ_OK; } int mz_deflate(mz_streamp pStream, int flush) { size_t in_bytes, out_bytes; mz_ulong orig_total_in, orig_total_out; int mz_status = MZ_OK; if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream->next_out)) return MZ_STREAM_ERROR; if (!pStream->avail_out) return MZ_BUF_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if (((tdefl_compressor *)pStream->state)->m_prev_return_status == TDEFL_STATUS_DONE) return (flush == MZ_FINISH) ? 
MZ_STREAM_END : MZ_BUF_ERROR; orig_total_in = pStream->total_in; orig_total_out = pStream->total_out; for (;;) { tdefl_status defl_status; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; defl_status = tdefl_compress((tdefl_compressor *)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush); pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (defl_status < 0) { mz_status = MZ_STREAM_ERROR; break; } else if (defl_status == TDEFL_STATUS_DONE) { mz_status = MZ_STREAM_END; break; } else if (!pStream->avail_out) break; else if ((!pStream->avail_in) && (flush != MZ_FINISH)) { if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out)) break; return MZ_BUF_ERROR; // Can't make forward progress without some input. } } return mz_status; } int mz_deflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) { (void)pStream; // This is really over conservative. (And lame, but it's actually pretty // tricky to compute a true upper bound given the way tdefl's blocking works.) return MZ_MAX(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5); } int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level) { int status; mz_stream stream; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_deflateInit(&stream, level); if (status != MZ_OK) return status; status = mz_deflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_deflateEnd(&stream); return (status == MZ_OK) ? 
MZ_BUF_ERROR : status; } *pDest_len = stream.total_out; return mz_deflateEnd(&stream); } int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION); } mz_ulong mz_compressBound(mz_ulong source_len) { return mz_deflateBound(NULL, source_len); } typedef struct { tinfl_decompressor m_decomp; mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed; int m_window_bits; mz_uint8 m_dict[TINFL_LZ_DICT_SIZE]; tinfl_status m_last_status; } inflate_state; int mz_inflateInit2(mz_streamp pStream, int window_bits) { inflate_state *pDecomp; if (!pStream) return MZ_STREAM_ERROR; if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = 0; pStream->msg = NULL; pStream->total_in = 0; pStream->total_out = 0; pStream->reserved = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1, sizeof(inflate_state)); if (!pDecomp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pDecomp; tinfl_init(&pDecomp->m_decomp); pDecomp->m_dict_ofs = 0; pDecomp->m_dict_avail = 0; pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT; pDecomp->m_first_call = 1; pDecomp->m_has_flushed = 0; pDecomp->m_window_bits = window_bits; return MZ_OK; } int mz_inflateInit(mz_streamp pStream) { return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS); } int mz_inflate(mz_streamp pStream, int flush) { inflate_state *pState; mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32; size_t in_bytes, out_bytes, orig_avail_in; tinfl_status status; if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState = (inflate_state *)pStream->state; if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER; orig_avail_in = pStream->avail_in; first_call = pState->m_first_call; pState->m_first_call = 0; if (pState->m_last_status < 0) return MZ_DATA_ERROR; if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState->m_has_flushed |= (flush == MZ_FINISH); if ((flush == MZ_FINISH) && (first_call)) { // MZ_FINISH on the first call implies that the input and output buffers are // large enough to hold the entire compressed/decompressed file. decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (status < 0) return MZ_DATA_ERROR; else if (status != TINFL_STATUS_DONE) { pState->m_last_status = TINFL_STATUS_FAILED; return MZ_BUF_ERROR; } return MZ_STREAM_END; } // flush != MZ_FINISH then we must assume there's more input. 
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
  if (pState->m_dict_avail) {
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    return ((pState->m_last_status == TINFL_STATUS_DONE) &&
            (!pState->m_dict_avail))
               ? MZ_STREAM_END
               : MZ_OK;
  }
  for (;;) {
    in_bytes = pStream->avail_in;
    out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
    status = tinfl_decompress(
        &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
        pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pState->m_dict_avail = (mz_uint)out_bytes;
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    if (status < 0)
      return MZ_DATA_ERROR;  // Stream is corrupted (there could be some
                             // uncompressed data left in the output dictionary
                             // - oh well).
    else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
      return MZ_BUF_ERROR;  // Signal caller that we can't make forward
                            // progress without supplying more input or by
                            // setting flush to MZ_FINISH.
    else if (flush == MZ_FINISH) {
      // The output buffer MUST be large enough to hold the remaining
      // uncompressed data when flush==MZ_FINISH.
      if (status == TINFL_STATUS_DONE)
        return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
      // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
      // at least 1 more byte on the way. If there's no more room left in the
      // output buffer then something is wrong.
      else if (!pStream->avail_out)
        return MZ_BUF_ERROR;
    } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
               (!pStream->avail_out) || (pState->m_dict_avail))
      break;
  }
  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
             ? MZ_STREAM_END
             : MZ_OK;
}

int mz_inflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}

int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len) {
  mz_stream stream;
  int status;
  memset(&stream, 0, sizeof(stream));

  // In case mz_ulong is 64-bits (argh I hate longs).
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;

  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;

  status = mz_inflateInit(&stream);
  if (status != MZ_OK) return status;

  status = mz_inflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_inflateEnd(&stream);
    return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ?
MZ_DATA_ERROR : status; } *pDest_len = stream.total_out; return mz_inflateEnd(&stream); } const char *mz_error(int err) { static struct { int m_err; const char *m_pDesc; } s_error_descs[] = {{MZ_OK, ""}, {MZ_STREAM_END, "stream end"}, {MZ_NEED_DICT, "need dictionary"}, {MZ_ERRNO, "file error"}, {MZ_STREAM_ERROR, "stream error"}, {MZ_DATA_ERROR, "data error"}, {MZ_MEM_ERROR, "out of memory"}, {MZ_BUF_ERROR, "buf error"}, {MZ_VERSION_ERROR, "version error"}, {MZ_PARAM_ERROR, "parameter error"}}; mz_uint i; for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i) if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc; return NULL; } #endif // MINIZ_NO_ZLIB_APIS // ------------------- Low-level Decompression (completely independent from all // compression API's) #define TINFL_MEMCPY(d, s, l) memcpy(d, s, l) #define TINFL_MEMSET(p, c, l) memset(p, c, l) #define TINFL_CR_BEGIN \ switch (r->m_state) { \ case 0: #define TINFL_CR_RETURN(state_index, result) \ do { \ status = result; \ r->m_state = state_index; \ goto common_exit; \ case state_index:; \ } \ MZ_MACRO_END #define TINFL_CR_RETURN_FOREVER(state_index, result) \ do { \ for (;;) { \ TINFL_CR_RETURN(state_index, result); \ } \ } \ MZ_MACRO_END #define TINFL_CR_FINISH } // TODO: If the caller has indicated that there's no more input, and we attempt // to read beyond the input buf, then something is wrong with the input because // the inflator never // reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of // the stream with 0's in this scenario. #define TINFL_GET_BYTE(state_index, c) \ do { \ if (pIn_buf_cur >= pIn_buf_end) { \ for (;;) { \ if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \ TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \ if (pIn_buf_cur < pIn_buf_end) { \ c = *pIn_buf_cur++; \ break; \ } \ } else { \ c = 0; \ break; \ } \ } \ } else \ c = *pIn_buf_cur++; \ } \ MZ_MACRO_END #define TINFL_NEED_BITS(state_index, n) \ do { \ mz_uint c; \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < (mz_uint)(n)) #define TINFL_SKIP_BITS(state_index, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END #define TINFL_GET_BITS(state_index, b, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ b = bit_buf & ((1 << (n)) - 1); \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END // TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes // remaining in the input buffer falls below 2. // It reads just enough bytes from the input stream that are needed to decode // the next Huffman code (and absolutely no more). It works by trying to fully // decode a // Huffman code by using whatever bits are currently present in the bit buffer. // If this fails, it reads another byte, and tries again until it succeeds or // until the // bit buffer contains >=15 bits (deflate's max. Huffman code size). 
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \ do { \ temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \ if (temp >= 0) { \ code_len = temp >> 9; \ if ((code_len) && (num_bits >= code_len)) break; \ } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while ((temp < 0) && (num_bits >= (code_len + 1))); \ if (temp >= 0) break; \ } \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < 15); // TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex // than you would initially expect because the zlib API expects the decompressor // to never read // beyond the final byte of the deflate stream. (In other words, when this macro // wants to read another byte from the input, it REALLY needs another byte in // order to fully // decode the next Huffman code.) Handling this properly is particularly // important on raw deflate (non-zlib) streams, which aren't followed by a byte // aligned adler-32. // The slow path is only executed at the very end of the input buffer. #define TINFL_HUFF_DECODE(state_index, sym, pHuff) \ do { \ int temp; \ mz_uint code_len, c; \ if (num_bits < 15) { \ if ((pIn_buf_end - pIn_buf_cur) < 2) { \ TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \ } else { \ bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \ (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \ pIn_buf_cur += 2; \ num_bits += 16; \ } \ } \ if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \ 0) \ code_len = temp >> 9, temp &= 511; \ else { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while (temp < 0); \ } \ sym = temp; \ bit_buf >>= code_len; \ num_bits -= code_len; \ } \ MZ_MACRO_END tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags) { static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0}; static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0}; static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; static const int s_min_table_sizes[3] = {257, 1, 4}; tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf; const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size; mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size; size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? 
(size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start; // Ensure the output buffer's size is a power of 2, unless the output buffer // is large enough to hold the entire output file (in which case it doesn't // matter). if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; } num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start; TINFL_CR_BEGIN bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1; if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1); counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8)); if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4))))); if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); } } do { TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1; if (r->m_type == 0) { TINFL_SKIP_BITS(5, num_bits & 7); for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); } if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); } while ((counter) && (num_bits)) { TINFL_GET_BITS(51, dist, 8); while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)dist; counter--; } while (counter) { size_t n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); } while (pIn_buf_cur >= pIn_buf_end) { if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT); } else { TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED); } } n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter); TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n; } } else if (r->m_type == 3) { TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED); } else { if (r->m_type == 1) { mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i; r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32); for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; } else { for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; } MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; } r->m_table_sizes[2] = 19; } for (; (int)r->m_type >= 0; r->m_type--) { int tree_next, tree_cur; tinfl_huff_table *pTable; mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree); for (i = 0; i < 
r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++; used_syms = 0, total = 0; next_code[0] = next_code[1] = 0; for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); } if ((65536 != total) && (used_syms > 1)) { TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED); } for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index) { mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue; cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1); if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; } if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1); for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) { tree_cur -= ((rev_code >>= 1) & 1); if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } else tree_cur = pTable->m_tree[-tree_cur - 1]; } tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index; } if (r->m_type == 2) { for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) { mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; } if ((dist == 16) && (!counter)) { TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED); } num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16]; TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? 
r->m_len_codes[counter - 1] : 0, s); counter += s; } if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) { TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED); } TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]); } } for (;;) { mz_uint8 *pSrc; for (;;) { if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2)) { TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]); if (counter >= 256) break; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)counter; } else { int sym2; mz_uint code_len; #if TINFL_USE_64BIT_BITBUF if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; } #else if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } counter = sym2; bit_buf >>= code_len; num_bits -= code_len; if (counter & 256) break; #if !TINFL_USE_64BIT_BITBUF if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } bit_buf >>= code_len; num_bits -= code_len; pOut_buf_cur[0] = (mz_uint8)counter; if (sym2 & 256) { pOut_buf_cur++; counter = sym2; break; } pOut_buf_cur[1] = (mz_uint8)sym2; pOut_buf_cur += 2; } } if ((counter &= 511) == 256) break; num_extra = s_length_extra[counter - 257]; counter = s_length_base[counter - 257]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; } TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]); num_extra = s_dist_extra[dist]; dist = s_dist_base[dist]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; } dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start; if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) { TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED); } pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask); if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) { while (counter--) { while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask]; } continue; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES else if ((counter >= 9) && (counter <= dist)) { const mz_uint8 *pSrc_end = pSrc + (counter & ~7); do { ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0]; ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1]; pOut_buf_cur += 8; } while ((pSrc += 8) < pSrc_end); if ((counter &= 7) < 3) { if (counter) { pOut_buf_cur[0] = pSrc[0]; if (counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } continue; } } #endif do { pOut_buf_cur[0] = pSrc[0]; pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur[2] = pSrc[2]; pOut_buf_cur += 3; pSrc += 3; } while 
((int)(counter -= 3) > 2); if ((int)counter > 0) { pOut_buf_cur[0] = pSrc[0]; if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } } } } while (!(r->m_final & 1)); if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_SKIP_BITS(32, num_bits & 7); for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; } } TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE); TINFL_CR_FINISH common_exit: r->m_num_bits = num_bits; r->m_bit_buf = bit_buf; r->m_dist = dist; r->m_counter = counter; r->m_num_extra = num_extra; r->m_dist_from_out_buf_start = dist_from_out_buf_start; *pIn_buf_size = pIn_buf_cur - pIn_buf_next; *pOut_buf_size = pOut_buf_cur - pOut_buf_next; if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0)) { const mz_uint8 *ptr = pOut_buf_next; size_t buf_len = *pOut_buf_size; mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16; size_t block_len = buf_len % 5552; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } r->m_check_adler32 = (s2 << 16) + s1; if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32)) status = TINFL_STATUS_ADLER32_MISMATCH; } return status; } // Higher level helper functions. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) { tinfl_decompressor decomp; void *pBuf = NULL, *pNew_buf; size_t src_buf_ofs = 0, out_buf_capacity = 0; *pOut_len = 0; tinfl_init(&decomp); for (;;) { size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity; tinfl_status status = tinfl_decompress( &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL, &dst_buf_size, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } src_buf_ofs += src_buf_size; *pOut_len += dst_buf_size; if (status == TINFL_STATUS_DONE) break; new_out_buf_capacity = out_buf_capacity * 2; if (new_out_buf_capacity < 128) new_out_buf_capacity = 128; pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity); if (!pNew_buf) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } pBuf = pNew_buf; out_buf_capacity = new_out_buf_capacity; } return pBuf; } size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tinfl_decompressor decomp; tinfl_status status; tinfl_init(&decomp); status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len, (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); return (status != TINFL_STATUS_DONE) ? 
TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len; } int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { int result = 0; tinfl_decompressor decomp; mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE); size_t in_buf_ofs = 0, dict_ofs = 0; if (!pDict) return TINFL_STATUS_FAILED; tinfl_init(&decomp); for (;;) { size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs; tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size, (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))); in_buf_ofs += in_buf_size; if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user))) break; if (status != TINFL_STATUS_HAS_MORE_OUTPUT) { result = (status == TINFL_STATUS_DONE); break; } dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1); } MZ_FREE(pDict); *pIn_buf_size = in_buf_ofs; return result; } // ------------------- Low-level Compression (independent from all decompression // API's) // Purposely making these tables static for faster init and thread safety. static const mz_uint16 s_tdefl_len_sym[256] = { 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285}; static const mz_uint8 s_tdefl_len_extra[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0}; static const mz_uint8 s_tdefl_small_dist_sym[512] = { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17}; static const mz_uint8 s_tdefl_small_dist_extra[512] = { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}; static const mz_uint8 
s_tdefl_large_dist_sym[128] = { 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29}; static const mz_uint8 s_tdefl_large_dist_extra[128] = { 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13}; // Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted // values. typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq; static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1) { mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist); for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; } while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) { const mz_uint32 *pHist = &hist[pass << 8]; mz_uint offsets[256], cur_ofs = 0; for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; { tdefl_sym_freq *t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; } } return pCur_syms; } // tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, // alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996. static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) { int root, leaf, next, avbl, used, dpth; if (n == 0) return; else if (n == 1) { A[0].m_key = 1; return; } A[0].m_key += A[1].m_key; root = 0; leaf = 2; for (next = 1; next < n - 1; next++) { if (leaf >= n || A[root].m_key < A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; } else A[next].m_key = A[leaf++].m_key; if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; } else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key); } A[n - 2].m_key = 0; for (next = n - 3; next >= 0; next--) A[next].m_key = A[A[next].m_key].m_key + 1; avbl = 1; used = dpth = 0; root = n - 2; next = n - 1; while (avbl > 0) { while (root >= 0 && (int)A[root].m_key == dpth) { used++; root--; } while (avbl > used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; } avbl = 2 * used; dpth++; used = 0; } } // Limits canonical Huffman code table's max code size. 
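// For illustration, one pass of the rebalancing loop in
// tdefl_huffman_enforce_max_code_size() below: with max_code_size = 3 and
// clamped length counts {len 1: 1, len 2: 1, len 3: 3} (e.g. from lengths
// {1,2,4,4,4}), the weighted total is (1 << 2) + (1 << 1) + (3 << 0) = 9,
// one more than 1 << 3 = 8. The loop decrements the count at the max length
// and replaces one len-2 code with two len-3 codes, giving
// {len 1: 1, len 2: 0, len 3: 4} and total 8, so the Kraft sum
// 1/2 + 4/8 = 1 and the code is complete again.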
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 }; static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size) { int i; mz_uint32 total = 0; if (code_list_len <= 1) return; for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i]; for (i = max_code_size; i > 0; i--) total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i)); while (total != (1UL << max_code_size)) { pNum_codes[max_code_size]--; for (i = max_code_size - 1; i > 0; i--) if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; } total--; } } static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table) { int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE]; mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1]; MZ_CLEAR_OBJ(num_codes); if (static_table) { for (i = 0; i < table_len; i++) num_codes[d->m_huff_code_sizes[table_num][i]]++; } else { tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms; int num_used_syms = 0; const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0]; for (i = 0; i < table_len; i++) if (pSym_count[i]) { syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i]; syms0[num_used_syms++].m_sym_index = (mz_uint16)i; } pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1); tdefl_calculate_minimum_redundancy(pSyms, num_used_syms); for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++; tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit); MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]); MZ_CLEAR_OBJ(d->m_huff_codes[table_num]); for (i = 1, j = num_used_syms; i <= code_size_limit; i++) for (l = num_codes[i]; l > 0; l--) d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i); } next_code[1] = 0; for (j = 0, i = 2; i <= code_size_limit; i++) next_code[i] = j = ((j + num_codes[i - 1]) << 1); for (i = 0; i < table_len; i++) { mz_uint rev_code = 0, code, code_size; if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue; code = next_code[code_size]++; for (l = code_size; l > 0; l--, code >>= 1) rev_code = (rev_code << 1) | (code & 1); d->m_huff_codes[table_num][i] = (mz_uint16)rev_code; } } #define TDEFL_PUT_BITS(b, l) \ do { \ mz_uint bits = b; \ mz_uint len = l; \ MZ_ASSERT(bits <= ((1U << len) - 1U)); \ d->m_bit_buffer |= (bits << d->m_bits_in); \ d->m_bits_in += len; \ while (d->m_bits_in >= 8) { \ if (d->m_pOutput_buf < d->m_pOutput_buf_end) \ *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \ d->m_bit_buffer >>= 8; \ d->m_bits_in -= 8; \ } \ } \ MZ_MACRO_END #define TDEFL_RLE_PREV_CODE_SIZE() \ { \ if (rle_repeat_count) { \ if (rle_repeat_count < 3) { \ d->m_huff_count[2][prev_code_size] = (mz_uint16)( \ d->m_huff_count[2][prev_code_size] + rle_repeat_count); \ while (rle_repeat_count--) \ packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \ } else { \ d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 16; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_repeat_count - 3); \ } \ rle_repeat_count = 0; \ } \ } #define TDEFL_RLE_ZERO_CODE_SIZE() \ { \ if (rle_z_count) { \ if (rle_z_count < 3) { \ d->m_huff_count[2][0] = \ (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \ while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \ } else if (rle_z_count <= 10) { \ d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 
1); \ packed_code_sizes[num_packed_code_sizes++] = 17; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 3); \ } else { \ d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 18; \ packed_code_sizes[num_packed_code_sizes++] = \ (mz_uint8)(rle_z_count - 11); \ } \ rle_z_count = 0; \ } \ } static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; static void tdefl_start_dynamic_block(tdefl_compressor *d) { int num_lit_codes, num_dist_codes, num_bit_lengths; mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index; mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF; d->m_huff_count[0][256] = 1; tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE); tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE); for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break; for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break; memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes); memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes); total_code_sizes_to_pack = num_lit_codes + num_dist_codes; num_packed_code_sizes = 0; rle_z_count = 0; rle_repeat_count = 0; memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2); for (i = 0; i < total_code_sizes_to_pack; i++) { mz_uint8 code_size = code_sizes_to_pack[i]; if (!code_size) { TDEFL_RLE_PREV_CODE_SIZE(); if (++rle_z_count == 138) { TDEFL_RLE_ZERO_CODE_SIZE(); } } else { TDEFL_RLE_ZERO_CODE_SIZE(); if (code_size != prev_code_size) { TDEFL_RLE_PREV_CODE_SIZE(); d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1); packed_code_sizes[num_packed_code_sizes++] = code_size; } else if (++rle_repeat_count == 6) { TDEFL_RLE_PREV_CODE_SIZE(); } } prev_code_size = code_size; } if (rle_repeat_count) { TDEFL_RLE_PREV_CODE_SIZE(); } else { TDEFL_RLE_ZERO_CODE_SIZE(); } tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE); TDEFL_PUT_BITS(2, 2); TDEFL_PUT_BITS(num_lit_codes - 257, 5); TDEFL_PUT_BITS(num_dist_codes - 1, 5); for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) if (d->m_huff_code_sizes [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) break; num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1)); TDEFL_PUT_BITS(num_bit_lengths - 4, 4); for (i = 0; (int)i < num_bit_lengths; i++) TDEFL_PUT_BITS( d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3); for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes;) { mz_uint code = packed_code_sizes[packed_code_sizes_index++]; MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2); TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]); if (code >= 16) TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]); } } static void tdefl_start_static_block(tdefl_compressor *d) { mz_uint i; mz_uint8 *p = &d->m_huff_code_sizes[0][0]; for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; memset(d->m_huff_code_sizes[1], 5, 32); 
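  // These are the fixed Huffman code lengths from RFC 1951 section 3.2.6:
  // literal/length symbols 0-143 use 8 bits, 144-255 use 9, 256-279 use 7,
  // 280-287 use 8, and all 32 distance symbols use 5 bits, so a static block
  // transmits no code lengths at all.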
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE); tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE); TDEFL_PUT_BITS(1, 2); } static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF}; #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \ MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; mz_uint8 *pOutput_buf = d->m_pOutput_buf; mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf; mz_uint64 bit_buffer = d->m_bit_buffer; mz_uint bits_in = d->m_bits_in; #define TDEFL_PUT_BITS_FAST(b, l) \ { \ bit_buffer |= (((mz_uint64)(b)) << bits_in); \ bits_in += (l); \ } flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint s0, s1, n0, n1, sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); // This sequence coaxes MSVC into using cmov's vs. jmp's. s0 = s_tdefl_small_dist_sym[match_dist & 511]; n0 = s_tdefl_small_dist_extra[match_dist & 511]; s1 = s_tdefl_large_dist_sym[match_dist >> 8]; n1 = s_tdefl_large_dist_extra[match_dist >> 8]; sym = (match_dist < 512) ? s0 : s1; num_extra_bits = (match_dist < 512) ? n0 : n1; MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } } if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE; *(mz_uint64 *)pOutput_buf = bit_buffer; pOutput_buf += (bits_in >> 3); bit_buffer >>= (bits_in & ~7); bits_in &= 7; } #undef TDEFL_PUT_BITS_FAST d->m_pOutput_buf = pOutput_buf; d->m_bits_in = 0; d->m_bit_buffer = 0; while (bits_in) { mz_uint32 n = MZ_MIN(bits_in, 16); TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n); bit_buffer >>= n; bits_in -= n; } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #else static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8)); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], 
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); if (match_dist < 512) { sym = s_tdefl_small_dist_sym[match_dist]; num_extra_bits = s_tdefl_small_dist_extra[match_dist]; } else { sym = s_tdefl_large_dist_sym[match_dist >> 8]; num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8]; } MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && // MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) { if (static_block) tdefl_start_static_block(d); else tdefl_start_dynamic_block(d); return tdefl_compress_lz_codes(d); } static int tdefl_flush_block(tdefl_compressor *d, int flush) { mz_uint saved_bit_buf, saved_bits_in; mz_uint8 *pSaved_output_buf; mz_bool comp_block_succeeded = MZ_FALSE; int n, use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size; mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf; d->m_pOutput_buf = pOutput_buf_start; d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16; MZ_ASSERT(!d->m_output_flush_remaining); d->m_output_flush_ofs = 0; d->m_output_flush_remaining = 0; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left); d->m_pLZ_code_buf -= (d->m_num_flags_left == 8); if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) { TDEFL_PUT_BITS(0x78, 8); TDEFL_PUT_BITS(0x01, 8); } TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1); pSaved_output_buf = d->m_pOutput_buf; saved_bit_buf = d->m_bit_buffer; saved_bits_in = d->m_bits_in; if (!use_raw_block) comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48)); // If the block gets expanded, forget the current contents of the output // buffer and send a raw block instead. if (((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) && ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) { mz_uint i; d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; TDEFL_PUT_BITS(0, 2); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) { TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16); } for (i = 0; i < d->m_total_lz_bytes; ++i) { TDEFL_PUT_BITS( d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8); } } // Check for the extremely unlikely (if not impossible) case of the compressed // block not fitting into the output buffer when using dynamic codes. 
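// (If that happens, the dynamic tables are discarded and the block is
// re-emitted below with the static RFC 1951 tables, which carry no table
// payload beyond the 2-bit block-type field.)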
else if (!comp_block_succeeded) { d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; tdefl_compress_block(d, MZ_TRUE); } if (flush) { if (flush == TDEFL_FINISH) { if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) { mz_uint i, a = d->m_adler32; for (i = 0; i < 4; i++) { TDEFL_PUT_BITS((a >> 24) & 0xFF, 8); a <<= 8; } } } else { mz_uint i, z = 0; TDEFL_PUT_BITS(0, 3); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, z ^= 0xFFFF) { TDEFL_PUT_BITS(z & 0xFFFF, 16); } } } MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end); memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0); memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1); d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes; d->m_total_lz_bytes = 0; d->m_block_index++; if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) { if (d->m_pPut_buf_func) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user)) return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED); } else if (pOutput_buf_start == d->m_output_buf) { int bytes_to_copy = (int)MZ_MIN( (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs)); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy); d->m_out_buf_ofs += bytes_to_copy; if ((n -= bytes_to_copy) != 0) { d->m_output_flush_ofs = bytes_to_copy; d->m_output_flush_remaining = n; } } else { d->m_out_buf_ofs += n; } } return d->m_output_flush_remaining; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p) static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q; mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD(s); MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; q = (const mz_uint16 *)(d->m_dict + probe_pos); if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue; p = s; probe_len = 32; do { } while ( (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); if (!probe_len) { *pMatch_dist = dist; *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN); break; } else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8 *)p 
== *(const mz_uint8 *)q)) > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len) break; c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]); } } } #else static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint8 *s = d->m_dict + pos, *p, *q; mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1]; MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if ((d->m_dict[probe_pos + match_len] == c0) && \ (d->m_dict[probe_pos + match_len - 1] == c1)) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; p = s; q = d->m_dict + probe_pos; for (probe_len = 0; probe_len < max_match_len; probe_len++) if (*p++ != *q++) break; if (probe_len > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = probe_len) == max_match_len) return; c0 = d->m_dict[pos + match_len]; c1 = d->m_dict[pos + match_len - 1]; } } } #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static mz_bool tdefl_compress_fast(tdefl_compressor *d) { // Faster, minimally featured LZRW1-style match+parse loop with better // register utilization. Intended for applications where raw throughput is // valued more highly than ratio. 
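  // The parse is greedy with a single probe per position: the next three
  // bytes are hashed into m_hash (a level-1 table masked by
  // TDEFL_LEVEL1_HASH_SIZE_MASK), the one candidate is checked against the
  // trigram, and a match of length >= TDEFL_MIN_MATCH_LEN is accepted unless
  // it is a minimum-length match at distance >= 8K, since such a match
  // usually costs more bits to code than the three literals it replaces.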
mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left; mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags; mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) { const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096; mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size); d->m_src_buf_left -= num_bytes_to_process; lookahead_size += num_bytes_to_process; while (num_bytes_to_process) { mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process); memcpy(d->m_dict + dst_pos, d->m_pSrc, n); if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos)); d->m_pSrc += n; dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK; num_bytes_to_process -= n; } dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size); if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) break; while (lookahead_size >= 4) { mz_uint cur_match_dist, cur_match_len = 1; mz_uint8 *pCur_dict = d->m_dict + cur_pos; mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF; mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK; mz_uint probe_pos = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)lookahead_pos; if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((*(const mz_uint32 *)(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram)) { const mz_uint16 *p = (const mz_uint16 *)pCur_dict; const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos); mz_uint32 probe_len = 32; do { } while ((TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q); if (!probe_len) cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0; if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U))) { cur_match_len = 1; *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } else { mz_uint32 s0, s1; cur_match_len = MZ_MIN(cur_match_len, lookahead_size); MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE)); cur_match_dist--; pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN); *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist; pLZ_code_buf += 3; *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80); s0 = s_tdefl_small_dist_sym[cur_match_dist & 511]; s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8]; d->m_huff_count[1][(cur_match_dist < 512) ? 
s0 : s1]++; d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++; } } else { *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } total_lz_bytes += cur_match_len; lookahead_pos += cur_match_len; dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK; MZ_ASSERT(lookahead_size >= cur_match_len); lookahead_size -= cur_match_len; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } while (lookahead_size) { mz_uint8 lit = d->m_dict[cur_pos]; total_lz_bytes++; *pLZ_code_buf++ = lit; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } d->m_huff_count[0][lit]++; lookahead_pos++; dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; lookahead_size--; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } } d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; return MZ_TRUE; } #endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit) { d->m_total_lz_bytes++; *d->m_pLZ_code_buf++ = lit; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } d->m_huff_count[0][lit]++; } static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist) { mz_uint32 s0, s1; MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE)); d->m_total_lz_bytes += match_len; d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN); match_dist -= 1; d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF); d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8); d->m_pLZ_code_buf += 3; *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } s0 = s_tdefl_small_dist_sym[match_dist & 511]; s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127]; d->m_huff_count[1][(match_dist < 512) ? 
s0 : s1]++; if (match_len >= TDEFL_MIN_MATCH_LEN) d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++; } static mz_bool tdefl_compress_normal(tdefl_compressor *d) { const mz_uint8 *pSrc = d->m_pSrc; size_t src_buf_left = d->m_src_buf_left; tdefl_flush flush = d->m_flush; while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) { mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos; // Update dictionary and hash chains. Keeps the lookahead size equal to // TDEFL_MAX_MATCH_LEN. if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) { mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2; mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size); const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process; src_buf_left -= num_bytes_to_process; d->m_lookahead_size += num_bytes_to_process; while (pSrc != pSrc_end) { mz_uint8 c = *pSrc++; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; ins_pos++; } } else { while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) { mz_uint8 c = *pSrc++; mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; src_buf_left--; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) { mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2; mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); } } } d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size); if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break; // Simple lazy/greedy parsing state machine. len_to_move = 1; cur_match_dist = 0; cur_match_len = d->m_saved_match_len ? 
d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) { if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) { mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK]; cur_match_len = 0; while (cur_match_len < d->m_lookahead_size) { if (d->m_dict[cur_pos + cur_match_len] != c) break; cur_match_len++; } if (cur_match_len < TDEFL_MIN_MATCH_LEN) cur_match_len = 0; else cur_match_dist = 1; } } else { tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len); } if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) { cur_match_dist = cur_match_len = 0; } if (d->m_saved_match_len) { if (cur_match_len > d->m_saved_match_len) { tdefl_record_literal(d, (mz_uint8)d->m_saved_lit); if (cur_match_len >= 128) { tdefl_record_match(d, cur_match_len, cur_match_dist); d->m_saved_match_len = 0; len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[cur_pos]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } } else { tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist); len_to_move = d->m_saved_match_len - 1; d->m_saved_match_len = 0; } } else if (!cur_match_dist) tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]); else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128)) { tdefl_record_match(d, cur_match_len, cur_match_dist); len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } // Move the lookahead forward by len_to_move bytes. d->m_lookahead_pos += len_to_move; MZ_ASSERT(d->m_lookahead_size >= len_to_move); d->m_lookahead_size -= len_to_move; d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE); // Check if it's time to flush the current LZ codes to the internal output // buffer. if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) || ((d->m_total_lz_bytes > 31 * 1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) { int n; d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; } } d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; return MZ_TRUE; } static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) { if (d->m_pIn_buf_size) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; } if (d->m_pOut_buf_size) { size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n); d->m_output_flush_ofs += (mz_uint)n; d->m_output_flush_remaining -= (mz_uint)n; d->m_out_buf_ofs += n; *d->m_pOut_buf_size = d->m_out_buf_ofs; } return (d->m_finished && !d->m_output_flush_remaining) ? 
TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY; } tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush) { if (!d) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return TDEFL_STATUS_BAD_PARAM; } d->m_pIn_buf = pIn_buf; d->m_pIn_buf_size = pIn_buf_size; d->m_pOut_buf = pOut_buf; d->m_pOut_buf_size = pOut_buf_size; d->m_pSrc = (const mz_uint8 *)(pIn_buf); d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0; d->m_out_buf_ofs = 0; d->m_flush = flush; if (((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) || (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf)) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM); } d->m_wants_to_finish |= (flush == TDEFL_FINISH); if ((d->m_output_flush_remaining) || (d->m_finished)) return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) && ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) && ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0)) { if (!tdefl_compress_fast(d)) return d->m_prev_return_status; } else #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN { if (!tdefl_compress_normal(d)) return d->m_prev_return_status; } if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf)) d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf); if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining)) { if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status; d->m_finished = (flush == TDEFL_FINISH); if (flush == TDEFL_FULL_FLUSH) { MZ_CLEAR_OBJ(d->m_hash); MZ_CLEAR_OBJ(d->m_next); d->m_dict_size = 0; } } return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); } tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush) { MZ_ASSERT(d->m_pPut_buf_func); return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush); } tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { d->m_pPut_buf_func = pPut_buf_func; d->m_pPut_buf_user = pPut_buf_user; d->m_flags = (mz_uint)(flags); d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3; d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0; d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3; if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash); d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0; d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0; d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_pOutput_buf = d->m_output_buf; d->m_pOutput_buf_end = d->m_output_buf; d->m_prev_return_status = TDEFL_STATUS_OKAY; d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0; d->m_adler32 = 1; d->m_pIn_buf = NULL; d->m_pOut_buf = NULL; d->m_pIn_buf_size = NULL; 
d->m_pOut_buf_size = NULL; d->m_flush = TDEFL_NO_FLUSH; d->m_pSrc = NULL; d->m_src_buf_left = 0; d->m_out_buf_ofs = 0; memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0); memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1); return TDEFL_STATUS_OKAY; } tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) { return d->m_prev_return_status; } mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; } mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { tdefl_compressor *pComp; mz_bool succeeded; if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE; pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); if (!pComp) return MZ_FALSE; succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) == TDEFL_STATUS_OKAY); succeeded = succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) == TDEFL_STATUS_DONE); MZ_FREE(pComp); return succeeded; } typedef struct { size_t m_size, m_capacity; mz_uint8 *m_pBuf; mz_bool m_expandable; } tdefl_output_buffer; static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len, void *pUser) { tdefl_output_buffer *p = (tdefl_output_buffer *)pUser; size_t new_size = p->m_size + len; if (new_size > p->m_capacity) { size_t new_capacity = p->m_capacity; mz_uint8 *pNew_buf; if (!p->m_expandable) return MZ_FALSE; do { new_capacity = MZ_MAX(128U, new_capacity << 1U); } while (new_size > new_capacity); pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity); if (!pNew_buf) return MZ_FALSE; p->m_pBuf = pNew_buf; p->m_capacity = new_capacity; } memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len); p->m_size = new_size; return MZ_TRUE; } void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) { tdefl_output_buffer out_buf; MZ_CLEAR_OBJ(out_buf); if (!pOut_len) return MZ_FALSE; else *pOut_len = 0; out_buf.m_expandable = MZ_TRUE; if (!tdefl_compress_mem_to_output( pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return NULL; *pOut_len = out_buf.m_size; return out_buf.m_pBuf; } size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tdefl_output_buffer out_buf; MZ_CLEAR_OBJ(out_buf); if (!pOut_buf) return 0; out_buf.m_pBuf = (mz_uint8 *)pOut_buf; out_buf.m_capacity = out_buf_len; if (!tdefl_compress_mem_to_output( pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return 0; return out_buf.m_size; } #ifndef MINIZ_NO_ZLIB_APIS static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; // level may actually range from [0,10] (10 is a "hidden" max level, where we // want a bit more compression and it's fine if throughput to fall off a cliff // on some files). mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy) { mz_uint comp_flags = s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] | ((level <= 3) ? 
TDEFL_GREEDY_PARSING_FLAG : 0); if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER; if (!level) comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; else if (strategy == MZ_FILTERED) comp_flags |= TDEFL_FILTER_MATCHES; else if (strategy == MZ_HUFFMAN_ONLY) comp_flags &= ~TDEFL_MAX_PROBES_MASK; else if (strategy == MZ_FIXED) comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; else if (strategy == MZ_RLE) comp_flags |= TDEFL_RLE_MATCHES; return comp_flags; } #endif // MINIZ_NO_ZLIB_APIS #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) // nonstandard extension used : non-constant // aggregate initializer (also supported by GNU // C and C99, so no big deal) #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to // 'int', possible loss of data #pragma warning( \ disable : 4267) // 'argument': conversion from '__int64' to 'int', // possible loss of data #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is // deprecated. Instead, use the ISO C and C++ // conformant name: _strdup. #endif // Simple PNG writer function by Alex Evans, 2011. Released into the public // domain: https://gist.github.com/908299, more context at // http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/. // This is actually a modification of Alex's original code so PNG files // generated by this function pass pngcheck. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was // defined. static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init( pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH); } if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE) { MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } // write real header *pLen_out = out_buf.m_size - 41; { static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06}; mz_uint8 pnghdr[41] = {0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0, 0, (mz_uint8)(w >> 8), (mz_uint8)w, 0, 0, (mz_uint8)(h >> 8), (mz_uint8)h, 8, chans[num_chans], 0, 0, 0, 0, 0, 0, 0, (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16), (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out, 0x49, 0x44, 0x41, 0x54}; c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17); for (i = 0; i < 4; ++i, c <<= 8) ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24); memcpy(out_buf.m_pBuf, pnghdr, 41); } // write footer (IDAT CRC-32, followed by IEND chunk) if (!tdefl_output_buffer_putter( "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) { *pLen_out = 0; MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, *pLen_out + 4); for (i = 0; i < 4; ++i, c <<= 8) (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24); // compute final size of file, grab compressed data buffer and return *pLen_out += 57; MZ_FREE(pComp); return out_buf.m_pBuf; } void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out) { // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's // where #defined out) return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE); } // ------------------- .ZIP archive reading #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef MINIZ_NO_STDIO #define MZ_FILE void * #else #include <stdio.h> #include <sys/stat.h> #if defined(_MSC_VER) || defined(__MINGW64__) static FILE *mz_fopen(const char *pFilename, const char *pMode) { FILE *pFile = NULL; fopen_s(&pFile, pFilename, pMode); return pFile; } static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) { FILE *pFile = NULL; if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL; return pFile; } #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN mz_fopen #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 _ftelli64 #define MZ_FSEEK64 _fseeki64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN mz_freopen #define MZ_DELETE_FILE remove #elif defined(__MINGW32__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif defined(__TINYC__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftell #define MZ_FSEEK64 fseek #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif defined(__GNUC__) && 
defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen64(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT stat64 #define MZ_FILE_STAT stat64 #define MZ_FFLUSH fflush #define MZ_FREOPEN(p, m, s) freopen64(p, m, s) #define MZ_DELETE_FILE remove #else #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello #define MZ_FSEEK64 fseeko #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #endif // #ifdef _MSC_VER #endif // #ifdef MINIZ_NO_STDIO #define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c)) // Various ZIP archive enums. To completely avoid cross platform compiler // alignment and platform endian issues, miniz.c doesn't use structs for any of // this stuff. enum { // ZIP archive identifiers and record sizes MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50, MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50, MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50, MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22, // Central directory header record offsets MZ_ZIP_CDH_SIG_OFS = 0, MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4, MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6, MZ_ZIP_CDH_BIT_FLAG_OFS = 8, MZ_ZIP_CDH_METHOD_OFS = 10, MZ_ZIP_CDH_FILE_TIME_OFS = 12, MZ_ZIP_CDH_FILE_DATE_OFS = 14, MZ_ZIP_CDH_CRC32_OFS = 16, MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20, MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24, MZ_ZIP_CDH_FILENAME_LEN_OFS = 28, MZ_ZIP_CDH_EXTRA_LEN_OFS = 30, MZ_ZIP_CDH_COMMENT_LEN_OFS = 32, MZ_ZIP_CDH_DISK_START_OFS = 34, MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36, MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38, MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42, // Local directory header offsets MZ_ZIP_LDH_SIG_OFS = 0, MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4, MZ_ZIP_LDH_BIT_FLAG_OFS = 6, MZ_ZIP_LDH_METHOD_OFS = 8, MZ_ZIP_LDH_FILE_TIME_OFS = 10, MZ_ZIP_LDH_FILE_DATE_OFS = 12, MZ_ZIP_LDH_CRC32_OFS = 14, MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18, MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22, MZ_ZIP_LDH_FILENAME_LEN_OFS = 26, MZ_ZIP_LDH_EXTRA_LEN_OFS = 28, // End of central directory offsets MZ_ZIP_ECDH_SIG_OFS = 0, MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4, MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6, MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8, MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10, MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12, MZ_ZIP_ECDH_CDIR_OFS_OFS = 16, MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20, }; typedef struct { void *m_p; size_t m_size, m_capacity; mz_uint m_element_size; } mz_zip_array; struct mz_zip_internal_state_tag { mz_zip_array m_central_dir; mz_zip_array m_central_dir_offsets; mz_zip_array m_sorted_central_dir_offsets; MZ_FILE *m_pFile; void *m_pMem; size_t m_mem_size; size_t m_mem_capacity; }; #define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \ (array_ptr)->m_element_size = element_size #define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \ ((element_type *)((array_ptr)->m_p))[index] static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip, mz_zip_array *pArray) { pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p); memset(pArray, 0, sizeof(mz_zip_array)); } static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip, mz_zip_array *pArray, 
size_t min_new_capacity, mz_uint growing) { void *pNew_p; size_t new_capacity = min_new_capacity; MZ_ASSERT(pArray->m_element_size); if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE; if (growing) { new_capacity = MZ_MAX(1, pArray->m_capacity); while (new_capacity < min_new_capacity) new_capacity *= 2; } if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p, pArray->m_element_size, new_capacity))) return MZ_FALSE; pArray->m_p = pNew_p; pArray->m_capacity = new_capacity; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_capacity, mz_uint growing) { if (new_capacity > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing)) return MZ_FALSE; } return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size, mz_uint growing) { if (new_size > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)) return MZ_FALSE; } pArray->m_size = new_size; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n) { return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE); } static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray, const void *pElements, size_t n) { size_t orig_size = pArray->m_size; if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE)) return MZ_FALSE; memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size, pElements, n * pArray->m_element_size); return MZ_TRUE; } #ifndef MINIZ_NO_TIME static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) { struct tm tm; memset(&tm, 0, sizeof(tm)); tm.tm_isdst = -1; tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900; tm.tm_mon = ((dos_date >> 5) & 15) - 1; tm.tm_mday = dos_date & 31; tm.tm_hour = (dos_time >> 11) & 31; tm.tm_min = (dos_time >> 5) & 63; tm.tm_sec = (dos_time << 1) & 62; return mktime(&tm); } static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef _MSC_VER struct tm tm_struct; struct tm *tm = &tm_struct; errno_t err = localtime_s(tm, &time); if (err) { *pDOS_date = 0; *pDOS_time = 0; return; } #else struct tm *tm = localtime(&time); #endif *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) + ((tm->tm_sec) >> 1)); *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) + ((tm->tm_mon + 1) << 5) + tm->tm_mday); } #endif #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_get_file_modified_time(const char *pFilename, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef MINIZ_NO_TIME (void)pFilename; *pDOS_date = *pDOS_time = 0; #else struct MZ_FILE_STAT_STRUCT file_stat; // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000 // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh. 
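  // (Note: defining _FILE_OFFSET_BITS=64 at build time should also give a
  // 64-bit off_t/stat() on glibc, though this file doesn't configure that
  // for you.)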
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE; mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date); #endif // #ifdef MINIZ_NO_TIME return MZ_TRUE; } #ifndef MINIZ_NO_TIME static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time, time_t modified_time) { struct utimbuf t; t.actime = access_time; t.modtime = modified_time; return !utime(pFilename, &t); } #endif // #ifndef MINIZ_NO_TIME #endif // #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip, mz_uint32 flags) { (void)flags; if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_READING; pZip->m_archive_size = 0; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, mz_uint r_index) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index)); mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS), r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? (l_len < r_len) : (l < r); } #define MZ_SWAP_UINT32(a, b) \ do { \ mz_uint32 t = a; \ a = b; \ b = t; \ } \ MZ_MACRO_END // Heap sort of lowercased filenames, used to help accelerate plain central // directory searches by mz_zip_reader_locate_file(). (Could also use qsort(), // but it could allocate memory.) 
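// The sorted index is what lets mz_zip_reader_locate_file() fall back to an
// O(log N) binary search in the common case (case-insensitive, full path
// compared, no comment filter). Illustrative lookup sketch using functions
// defined later in this file (hypothetical archive/file names, error
// handling mostly omitted):
//
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));
//   if (mz_zip_reader_init_file(&zip, "archive.zip", 0)) {
//     int idx = mz_zip_reader_locate_file(&zip, "dir/file.txt", NULL, 0);
//     if (idx >= 0) {
//       size_t size;
//       void *p = mz_zip_reader_extract_to_heap(&zip, (mz_uint)idx, &size, 0);
//       // ... use p/size, then free with zip.m_pFree(zip.m_pAlloc_opaque, p)
//     }
//     mz_zip_reader_end(&zip);
//   }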
static void mz_zip_reader_sort_central_dir_offsets_by_filename( mz_zip_archive *pZip) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; int start = (size - 2) >> 1, end; while (start >= 0) { int child, root = start; for (;;) { if ((child = (root << 1) + 1) >= size) break; child += (((child + 1) < size) && (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1]))); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } start--; } end = size - 1; while (end > 0) { int child, root = 0; MZ_SWAP_UINT32(pIndices[end], pIndices[0]); for (;;) { if ((child = (root << 1) + 1) >= end) break; child += (((child + 1) < end) && mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1])); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } end--; } } static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint32 flags) { mz_uint cdir_size, num_this_disk, cdir_disk_index; mz_uint64 cdir_ofs; mz_int64 cur_file_ofs; const mz_uint8 *p; mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; mz_uint8 *pBuf = (mz_uint8 *)buf_u32; mz_bool sort_central_dir = ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0); // Basic sanity checks - reject files which are too small, and check the first // 4 bytes of the file to make sure a local header is there. if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; // Find the end of central directory record by scanning the file from the end // towards the beginning. cur_file_ofs = MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0); for (;;) { int i, n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n) return MZ_FALSE; for (i = n - 4; i >= 0; --i) if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break; if (i >= 0) { cur_file_ofs += i; break; } if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >= (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE))) return MZ_FALSE; cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0); } // Read and verify the end of central directory record. 
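  // (The backwards scan above can give up after 0xFFFF + 22 bytes because the
  // EOCD record itself is 22 bytes and its trailing archive comment length is
  // a 16-bit field, so a valid EOCD can never begin farther than that from
  // the end of the file.)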
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) || ((pZip->m_total_files = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) != MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS))) return MZ_FALSE; num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS); cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS); if (((num_this_disk | cdir_disk_index) != 0) && ((num_this_disk != 1) || (cdir_disk_index != 1))) return MZ_FALSE; if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS); if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE; pZip->m_central_directory_file_ofs = cdir_ofs; if (pZip->m_total_files) { mz_uint i, n; // Read the entire central directory into a heap block, and allocate another // heap block to hold the unsorted central dir file record offsets, and // another to hold the sorted indices. if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size, MZ_FALSE)) || (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets, pZip->m_total_files, MZ_FALSE))) return MZ_FALSE; if (sort_central_dir) { if (!mz_zip_array_resize(pZip, &pZip->m_pState->m_sorted_central_dir_offsets, pZip->m_total_files, MZ_FALSE)) return MZ_FALSE; } if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs, pZip->m_pState->m_central_dir.m_p, cdir_size) != cdir_size) return MZ_FALSE; // Now create an index into the central directory file records, do some // basic sanity checking on each record, and check for zip64 entries (which // are not yet supported). 
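    // (Zip64 entries mark the 32-bit size fields with the sentinel value
    // 0xFFFFFFFF and move the real values into an extra-data field; the loop
    // below treats that sentinel as unsupported and fails the whole load.)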
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p; for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) { mz_uint total_header_size, comp_size, decomp_size, disk_index; if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) || (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG)) return MZ_FALSE; MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, i) = (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p); if (sort_central_dir) MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets, mz_uint32, i) = i; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) && (decomp_size != comp_size)) || (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) || (comp_size == 0xFFFFFFFF)) return MZ_FALSE; disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS); if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE; if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n) return MZ_FALSE; n -= total_header_size; p += total_header_size; } } if (sort_central_dir) mz_zip_reader_sort_central_dir_offsets_by_filename(pZip); return MZ_TRUE; } mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags) { if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE; if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; size_t s = (file_ofs >= pZip->m_archive_size) ? 
0 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n); memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s); return s; } mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags) { if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; pZip->m_pRead = mz_zip_mem_read_func; pZip->m_pIO_opaque = pZip; #ifdef __cplusplus pZip->m_pState->m_pMem = const_cast<void *>(pMem); #else pZip->m_pState->m_pMem = (void *)pMem; #endif pZip->m_pState->m_mem_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags) { mz_uint64 file_size; MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb"); if (!pFile) return MZ_FALSE; if (MZ_FSEEK64(pFile, 0, SEEK_END)) { MZ_FCLOSE(pFile); return MZ_FALSE; } file_size = MZ_FTELL64(pFile); if (!mz_zip_reader_init_internal(pZip, flags)) { MZ_FCLOSE(pFile); return MZ_FALSE; } pZip->m_pRead = mz_zip_file_read_func; pZip->m_pIO_opaque = pZip; pZip->m_pState->m_pFile = pFile; pZip->m_archive_size = file_size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) { return pZip ? pZip->m_total_files : 0; } static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh( mz_zip_archive *pZip, mz_uint file_index) { if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return NULL; return &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); } mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index) { mz_uint m_bit_flag; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); return (m_bit_flag & 1); } mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index) { mz_uint filename_len, external_attr; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; // First see if the filename ends with a '/' character. filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_len) { if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/') return MZ_TRUE; } // Bugfix: This code was also checking if the internal attribute was non-zero, // which wasn't correct. // Most/all zip writers (hopefully) set DOS file/directory attributes in the // low 16-bits, so check for the DOS directory flag and ignore the source OS // ID in the created by field. // FIXME: Remove this check? Is it necessary - we already check the filename. 
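  // (0x10 is the DOS/Windows FILE_ATTRIBUTE_DIRECTORY bit, which is what the
  // low 16 bits of the external attributes are checked against below.)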
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); if ((external_attr & 0x10) != 0) return MZ_TRUE; return MZ_FALSE; } mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if ((!p) || (!pStat)) return MZ_FALSE; // Unpack the central directory record. pStat->m_file_index = file_index; pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index); pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS); pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS); pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS); #ifndef MINIZ_NO_TIME pStat->m_time = mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS), MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS)); #endif pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS); pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS); pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS); // Copy as much of the filename and comment as possible. n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1); memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pStat->m_filename[n] = '\0'; n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1); pStat->m_comment_size = n; memcpy(pStat->m_comment, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS), n); pStat->m_comment[n] = '\0'; return MZ_TRUE; } mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) { if (filename_buf_size) pFilename[0] = '\0'; return 0; } n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_buf_size) { n = MZ_MIN(n, filename_buf_size - 1); memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pFilename[n] = '\0'; } return n + 1; } static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA, const char *pB, mz_uint len, mz_uint flags) { mz_uint i; if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len); for (i = 0; i < len; ++i) if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE; return MZ_TRUE; } static MZ_FORCEINLINE int mz_zip_reader_filename_compare( const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR, mz_uint r_len) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? 
(int)(l_len - r_len) : (l - r); } static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; const mz_uint filename_len = (mz_uint)strlen(pFilename); int l = 0, h = size - 1; while (l <= h) { int m = (l + h) >> 1, file_index = pIndices[m], comp = mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets, file_index, pFilename, filename_len); if (!comp) return file_index; else if (comp < 0) l = m + 1; else h = m - 1; } return -1; } int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags) { mz_uint file_index; size_t name_len, comment_len; if ((!pZip) || (!pZip->m_pState) || (!pName) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return -1; if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) && (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size)) return mz_zip_reader_locate_file_binary_search(pZip, pName); name_len = strlen(pName); if (name_len > 0xFFFF) return -1; comment_len = pComment ? strlen(pComment) : 0; if (comment_len > 0xFFFF) return -1; for (file_index = 0; file_index < pZip->m_total_files; file_index++) { const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS); const char *pFilename = (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; if (filename_len < name_len) continue; if (comment_len) { mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS), file_comment_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS); const char *pFile_comment = pFilename + filename_len + file_extra_len; if ((file_comment_len != comment_len) || (!mz_zip_reader_string_equal(pComment, pFile_comment, file_comment_len, flags))) continue; } if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) { int ofs = filename_len - 1; do { if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') || (pFilename[ofs] == ':')) break; } while (--ofs >= 0); ofs++; pFilename += ofs; filename_len -= ofs; } if ((filename_len == name_len) && (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags))) return file_index; } return -1; } mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int status = TINFL_STATUS_DONE; mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail; mz_zip_archive_file_stat file_stat; void *pRead_buf; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; tinfl_decompressor inflator; if ((buf_size) && (!pBuf)) return MZ_FALSE; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old 
zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE; // Encryption and patch files are not supported. if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE; // This function only supports stored and deflate. if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return MZ_FALSE; // Ensure supplied output buffer is large enough. needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size : file_stat.m_uncomp_size; if (buf_size < needed_size) return MZ_FALSE; // Read and parse the local directory entry. cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { // The file is stored or the caller has requested the compressed data. if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, (size_t)needed_size) != needed_size) return MZ_FALSE; return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32); } // Decompress the file either directly from memory or from a file input // buffer. tinfl_init(&inflator); if (pZip->m_pState->m_pMem) { // Read directly from the archive in memory. pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else if (pUser_read_buf) { // Use a user provided read buffer. if (!user_read_buf_size) return MZ_FALSE; pRead_buf = (mz_uint8 *)pUser_read_buf; read_buf_size = user_read_buf_size; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } else { // Temporarily allocate a read buffer. read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) #endif return MZ_FALSE; if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return MZ_FALSE; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } do { size_t in_buf_size, out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | (comp_remaining ? 
TINFL_FLAG_HAS_MORE_INPUT : 0)); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; out_buf_ofs += out_buf_size; } while (status == TINFL_STATUS_NEEDS_MORE_INPUT); if (status == TINFL_STATUS_DONE) { // Make sure the entire file was decompressed, and check its CRC. if ((out_buf_ofs != file_stat.m_uncomp_size) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf)) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size); } mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, NULL, 0); } mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf, buf_size, flags, NULL, 0); } void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags) { mz_uint64 comp_size, uncomp_size, alloc_size; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); void *pBuf; if (pSize) *pSize = 0; if (!p) return NULL; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? 
                   comp_size : uncomp_size;
#ifdef _MSC_VER
  if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
  if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
    return NULL;
  if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                     (size_t)alloc_size)))
    return NULL;
  if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
                                    flags)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
    return NULL;
  }
  if (pSize)
    *pSize = (size_t)alloc_size;
  return pBuf;
}
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0) {
    if (pSize)
      *pSize = 0;
    // Bugfix: this function returns a pointer, so fail with NULL, not
    // MZ_FALSE.
    return NULL;
  }
  return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags) {
  int status = TINFL_STATUS_DONE;
  mz_uint file_crc32 = MZ_CRC32_INIT;
  mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
      out_buf_ofs = 0, cur_file_ofs;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf = NULL;
  void *pWrite_buf = NULL;
  mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                              sizeof(mz_uint32) - 1) /
                             sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  // Empty file, or a directory (but not always a directory - I've seen odd
  // zips with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size)
    return MZ_TRUE;
  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index))
    return MZ_TRUE;
  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32))
    return MZ_FALSE;
  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;
  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;
  // Decompress the file either directly from memory or from a file input
  // buffer.
  if (pZip->m_pState->m_pMem) {
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else {
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }
  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
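    // (In this branch no inflation happens at all: the raw bytes are streamed
    // to the callback as-is, and the CRC is only accumulated when the caller
    // asked for decompressed-data semantics on a stored entry.)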
if (pZip->m_pState->m_pMem) { #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #endif return MZ_FALSE; if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)file_stat.m_comp_size) != file_stat.m_comp_size) status = TINFL_STATUS_FAILED; else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)file_stat.m_comp_size); cur_file_ofs += file_stat.m_comp_size; out_buf_ofs += file_stat.m_comp_size; comp_remaining = 0; } else { while (comp_remaining) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32( file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail); if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; out_buf_ofs += read_buf_avail; comp_remaining -= read_buf_avail; } } } else { tinfl_decompressor inflator; tinfl_init(&inflator); if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, TINFL_LZ_DICT_SIZE))) status = TINFL_STATUS_FAILED; else { do { mz_uint8 *pWrite_buf_cur = (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); size_t in_buf_size, out_buf_size = TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress( &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size, comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; if (out_buf_size) { if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) != out_buf_size) { status = TINFL_STATUS_FAILED; break; } file_crc32 = (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size); if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) { status = TINFL_STATUS_FAILED; break; } } } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) || (status == TINFL_STATUS_HAS_MORE_OUTPUT)); } } if ((status == TINFL_STATUS_DONE) && (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) { // Make sure the entire file was decompressed, and check its CRC. 
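    // (These values come from the central directory via file_stat, so the
    // check stays meaningful even for bit-3 "data descriptor" entries whose
    // local header CRC/size fields are zero.)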
if ((out_buf_ofs != file_stat.m_uncomp_size) || (file_crc32 != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque, flags); } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs, const void *pBuf, size_t n) { (void)ofs; return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque); } mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags) { mz_bool status; mz_zip_archive_file_stat file_stat; MZ_FILE *pFile; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; pFile = MZ_FOPEN(pDst_filename, "wb"); if (!pFile) return MZ_FALSE; status = mz_zip_reader_extract_to_callback( pZip, file_index, mz_zip_file_write_callback, pFile, flags); if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE; #ifndef MINIZ_NO_TIME if (status) mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time); #endif return status; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_end(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; if (pZip->m_pState) { mz_zip_internal_state *pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO pZip->m_pFree(pZip->m_pAlloc_opaque, pState); } pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags); } #endif // ------------------- .ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS static void mz_write_le16(mz_uint8 *p, mz_uint16 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); } static void mz_write_le32(mz_uint8 *p, mz_uint32 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); p[2] = (mz_uint8)(v >> 16); p[3] = (mz_uint8)(v >> 24); } #define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v)) #define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v)) mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) { if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (pZip->m_file_offset_alignment) { // Ensure user specified file offset alignment is a power of 2. 
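    // ((x & (x - 1)) == 0 exactly when x has at most one bit set; a zero
    // alignment never reaches this test because of the enclosing if.)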
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1)) return MZ_FALSE; } if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_archive_size = existing_size; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_zip_internal_state *pState = pZip->m_pState; mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size); #ifdef _MSC_VER if ((!n) || ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) #else if ((!n) || ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))) #endif return 0; if (new_size > pState->m_mem_capacity) { void *pNew_block; size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity); while (new_capacity < new_size) new_capacity *= 2; if (NULL == (pNew_block = pZip->m_pRealloc( pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity))) return 0; pState->m_pMem = pNew_block; pState->m_mem_capacity = new_capacity; } memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n); pState->m_mem_size = (size_t)new_size; return n; } mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size) { pZip->m_pWrite = mz_zip_heap_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size, size_to_reserve_at_beginning))) { if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, initial_allocation_size))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_mem_capacity = initial_allocation_size; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning) { MZ_FILE *pFile; pZip->m_pWrite = mz_zip_file_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_pFile = pFile; if (size_to_reserve_at_beginning) { mz_uint64 cur_ofs = 0; char buf[4096]; MZ_CLEAR_OBJ(buf); do { size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) { mz_zip_writer_end(pZip); return MZ_FALSE; } cur_ofs += n; 
size_to_reserve_at_beginning -= n; } while (size_to_reserve_at_beginning); } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename) { mz_zip_internal_state *pState; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; // No sense in trying to write to an archive that's already at the support max // size if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; pState = pZip->m_pState; if (pState->m_pFile) { #ifdef MINIZ_NO_STDIO pFilename; return MZ_FALSE; #else // Archive is being read from stdio - try to reopen as writable. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; if (!pFilename) return MZ_FALSE; pZip->m_pWrite = mz_zip_file_write_func; if (NULL == (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) { // The mz_zip_archive is now in a bogus state because pState->m_pFile is // NULL, so just close it. mz_zip_reader_end(pZip); return MZ_FALSE; } #endif // #ifdef MINIZ_NO_STDIO } else if (pState->m_pMem) { // Archive lives in a memory block. Assume it's from the heap that we can // resize using the realloc callback. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; pState->m_mem_capacity = pState->m_mem_size; pZip->m_pWrite = mz_zip_heap_write_func; } // Archive is being read via a user provided read function - make sure the // user has specified a write function too. else if (!pZip->m_pWrite) return MZ_FALSE; // Start writing new files at the archive's current central directory // location. pZip->m_archive_size = pZip->m_central_directory_file_ofs; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_central_directory_file_ofs = 0; return MZ_TRUE; } mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags) { return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0, level_and_flags, 0, 0); } typedef struct { mz_zip_archive *m_pZip; mz_uint64 m_cur_archive_file_ofs; mz_uint64 m_comp_size; } mz_zip_writer_add_state; static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len, void *pUser) { mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser; if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque, pState->m_cur_archive_file_ofs, pBuf, len) != len) return MZ_FALSE; pState->m_cur_archive_file_ofs += len; pState->m_comp_size += len; return MZ_TRUE; } static mz_bool mz_zip_writer_create_local_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date) { (void)pZip; memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 
20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size); return MZ_TRUE; } static mz_bool mz_zip_writer_create_central_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { (void)pZip; memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs); return MZ_TRUE; } static mz_bool mz_zip_writer_add_to_central_dir( mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size, const void *pExtra, mz_uint16 extra_size, const void *pComment, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { mz_zip_internal_state *pState = pZip->m_pState; mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size; size_t orig_central_dir_size = pState->m_central_dir.m_size; mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; // No zip64 support yet if ((local_header_ofs > 0xFFFFFFFF) || (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size + comment_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_central_dir_header( pZip, central_dir_header, filename_size, extra_size, comment_size, uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date, local_header_ofs, ext_attributes)) return MZ_FALSE; if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename, filename_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra, extra_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment, comment_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &central_dir_ofs, 1))) { // Try to push the central directory array back into its original state. 
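    // (Only m_central_dir needs rolling back here: m_central_dir_offsets is
    // pushed last in the chain above, so on failure it either never grew or
    // was never touched.)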
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } return MZ_TRUE; } static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) { // Basic ZIP archive filename validity checks: Valid filenames cannot start // with a forward slash, cannot contain a drive letter, and cannot use // DOS-style backward slashes. if (*pArchive_name == '/') return MZ_FALSE; while (*pArchive_name) { if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE; pArchive_name++; } return MZ_TRUE; } static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment( mz_zip_archive *pZip) { mz_uint32 n; if (!pZip->m_file_offset_alignment) return 0; n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1)); return (pZip->m_file_offset_alignment - n) & (pZip->m_file_offset_alignment - 1); } static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip, mz_uint64 cur_file_ofs, mz_uint32 n) { char buf[4096]; memset(buf, 0, MZ_MIN(sizeof(buf), n)); while (n) { mz_uint32 s = MZ_MIN(sizeof(buf), n); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s) return MZ_FALSE; cur_file_ofs += s; n -= s; } return MZ_TRUE; } mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32) { mz_uint16 method = 0, dos_time = 0, dos_date = 0; mz_uint level, ext_attributes = 0, num_alignment_padding_bytes; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; tdefl_compressor *pComp = NULL; mz_bool store_data_uncompressed; mz_zip_internal_state *pState; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; store_data_uncompressed = ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)); if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) || (!pArchive_name) || ((comment_size) && (!pComment)) || (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; pState = pZip->m_pState; if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size)) return MZ_FALSE; // No zip64 support yet if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; #ifndef MINIZ_NO_TIME { time_t cur_time; time(&cur_time); mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date); } #endif // #ifndef MINIZ_NO_TIME archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) { // Set DOS Subdirectory attribute bit. ext_attributes |= 0x10; // Subdirectories cannot contain data. if ((buf_size) || (uncomp_size)) return MZ_FALSE; } // Try to do any allocations before writing to the archive, so if an // allocation fails the file remains unmodified. 
(A good idea if we're doing // an in-place modification.) if ((!mz_zip_array_ensure_room( pZip, &pState->m_central_dir, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) || (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1))) return MZ_FALSE; if ((!store_data_uncompressed) && (buf_size)) { if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)))) return MZ_FALSE; } if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { uncomp_crc32 = (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size); uncomp_size = buf_size; if (uncomp_size <= 3) { level = 0; store_data_uncompressed = MZ_TRUE; } } if (store_data_uncompressed) { if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, buf_size) != buf_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += buf_size; comp_size = buf_size; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED; } else if (buf_size) { mz_zip_writer_add_state state; state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) || (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) != TDEFL_STATUS_DONE)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pComp = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes; mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = 
pZip->m_archive_size, uncomp_size = 0, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; MZ_FILE *pSrc_file = NULL; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date)) return MZ_FALSE; pSrc_file = MZ_FOPEN(pSrc_filename, "rb"); if (!pSrc_file) return MZ_FALSE; MZ_FSEEK64(pSrc_file, 0, SEEK_END); uncomp_size = MZ_FTELL64(pSrc_file); MZ_FSEEK64(pSrc_file, 0, SEEK_SET); if (uncomp_size > 0xFFFFFFFF) { // No zip64 support yet MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (uncomp_size <= 3) level = 0; if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (uncomp_size) { mz_uint64 uncomp_remaining = uncomp_size; void *pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE); if (!pRead_buf) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (!level) { while (uncomp_remaining) { mz_uint n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining); if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) || (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf, n) != n)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n); uncomp_remaining -= n; cur_archive_file_ofs += n; } comp_size = uncomp_size; } else { mz_bool result = MZ_FALSE; mz_zip_writer_add_state state; tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)); if (!pComp) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } for (;;) { size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining, 
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); tdefl_status status; if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size) break; uncomp_crc32 = (mz_uint32)mz_crc32( uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size); uncomp_remaining -= in_buf_size; status = tdefl_compress_buffer( pComp, pRead_buf, in_buf_size, uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH); if (status == TDEFL_STATUS_DONE) { result = MZ_TRUE; break; } else if (status != TDEFL_STATUS_OKAY) break; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); if (!result) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); } MZ_FCLOSE(pSrc_file); pSrc_file = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index) { mz_uint n, bit_flags, num_alignment_padding_bytes; mz_uint64 comp_bytes_remaining, local_dir_header_ofs; mz_uint64 cur_src_file_ofs, cur_dst_file_ofs; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; size_t orig_central_dir_size; mz_zip_internal_state *pState; void *pBuf; const mz_uint8 *pSrc_central_header; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; if (NULL == (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index))) return MZ_FALSE; pState = pZip->m_pState; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; cur_src_file_ofs = MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS); cur_dst_file_ofs = pZip->m_archive_size; if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs, num_alignment_padding_bytes)) return MZ_FALSE; cur_dst_file_ofs += num_alignment_padding_bytes; local_dir_header_ofs = cur_dst_file_ofs; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, 
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); comp_bytes_remaining = n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); if (NULL == (pBuf = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, (size_t)MZ_MAX(sizeof(mz_uint32) * 4, MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining))))) return MZ_FALSE; while (comp_bytes_remaining) { n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining); if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_dst_file_ofs += n; comp_bytes_remaining -= n; } bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); if (bit_flags & 8) { // Copy data descriptor if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; cur_dst_file_ofs += n; } pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); // no zip64 support yet if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE; orig_central_dir_size = pState->m_central_dir.m_size; memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_dir_header_ofs); if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) return MZ_FALSE; n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS); if (!mz_zip_array_push_back( pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE; n = (mz_uint32)orig_central_dir_size; if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } pZip->m_total_files++; pZip->m_archive_size = cur_dst_file_ofs; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_uint64 central_dir_ofs, central_dir_size; mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE]; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; pState = pZip->m_pState; // no zip64 support yet if ((pZip->m_total_files > 0xFFFF) || ((pZip->m_archive_size + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; central_dir_ofs = 0; central_dir_size = 0; if (pZip->m_total_files) { // Write central directory central_dir_ofs = pZip->m_archive_size; central_dir_size = pState->m_central_dir.m_size; pZip->m_central_directory_file_ofs = central_dir_ofs; if 
(pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs, pState->m_central_dir.m_p, (size_t)central_dir_size) != central_dir_size) return MZ_FALSE; pZip->m_archive_size += central_dir_size; } // Write end of central directory record MZ_CLEAR_OBJ(hdr); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, pZip->m_total_files); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs); if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, sizeof(hdr)) != sizeof(hdr)) return MZ_FALSE; #ifndef MINIZ_NO_STDIO if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE; #endif // #ifndef MINIZ_NO_STDIO pZip->m_archive_size += sizeof(hdr); pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize) { if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE; if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE; if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE; *pBuf = pZip->m_pState->m_pMem; *pSize = pZip->m_pState->m_mem_size; pZip->m_pState->m_pMem = NULL; pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; return MZ_TRUE; } mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_bool status = MZ_TRUE; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) return MZ_FALSE; pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); pState->m_pMem = NULL; } pZip->m_pFree(pZip->m_pAlloc_opaque, pState); pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_bool status, created_new_archive = MZ_FALSE; mz_zip_archive zip_archive; struct MZ_FILE_STAT_STRUCT file_stat; MZ_CLEAR_OBJ(zip_archive); if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { // Create a new archive. if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0)) return MZ_FALSE; created_new_archive = MZ_TRUE; } else { // Append to an existing archive. 
if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return MZ_FALSE; if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) { mz_zip_reader_end(&zip_archive); return MZ_FALSE; } } status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0); // Always finalize, even if adding failed for some reason, so we have a valid // central directory. (This may not always succeed, but we can try.) if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE; if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE; if ((!status) && (created_new_archive)) { // It's a new archive and something went wrong, so just delete it. int ignoredStatus = MZ_DELETE_FILE(pZip_filename); (void)ignoredStatus; } return status; } void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags) { int file_index; mz_zip_archive zip_archive; void *p = NULL; if (pSize) *pSize = 0; if ((!pZip_filename) || (!pArchive_name)) return NULL; MZ_CLEAR_OBJ(zip_archive); if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return NULL; if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, flags)) >= 0) p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags); mz_zip_reader_end(&zip_archive); return p; } #endif // #ifndef MINIZ_NO_STDIO #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_FILE_ONLY /* This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <http://unlicense.org/> */ // ---------------------- end of miniz ---------------------------------------- #ifdef __clang__ #pragma clang diagnostic pop #endif } #else // Reuse MINIZ_LITTE_ENDIAN macro #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. 
#define MINIZ_LITTLE_ENDIAN 1 #endif #endif #endif // TINYEXR_USE_MINIZ // static bool IsBigEndian(void) { // union { // unsigned int i; // char c[4]; // } bint = {0x01020304}; // // return bint.c[0] == 1; //} static const int kEXRVersionSize = 8; static void swap2(unsigned short *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned short tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[1]; dst[1] = src[0]; #endif } static void swap4(unsigned int *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } static void swap8(tinyexr::tinyexr_uint64 *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else tinyexr::tinyexr_uint64 tmp = (*val); unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; #endif } // https://gist.github.com/rygorous/2156668 // Reuse MINIZ_LITTLE_ENDIAN flag from miniz. union FP32 { unsigned int u; float f; struct { #if MINIZ_LITTLE_ENDIAN unsigned int Mantissa : 23; unsigned int Exponent : 8; unsigned int Sign : 1; #else unsigned int Sign : 1; unsigned int Exponent : 8; unsigned int Mantissa : 23; #endif } s; }; #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpadded" #endif union FP16 { unsigned short u; struct { #if MINIZ_LITTLE_ENDIAN unsigned int Mantissa : 10; unsigned int Exponent : 5; unsigned int Sign : 1; #else unsigned int Sign : 1; unsigned int Exponent : 5; unsigned int Mantissa : 10; #endif } s; }; #ifdef __clang__ #pragma clang diagnostic pop #endif static FP32 half_to_float(FP16 h) { static const FP32 magic = {113 << 23}; static const unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift FP32 o; o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits unsigned int exp_ = shifted_exp & o.u; // just the exponent o.u += (127 - 15) << 23; // exponent adjust // handle exponent special cases if (exp_ == shifted_exp) // Inf/NaN? o.u += (128 - 16) << 23; // extra exp adjust else if (exp_ == 0) // Zero/Denormal? { o.u += 1 << 23; // extra exp adjust o.f -= magic.f; // renormalize } o.u |= (h.u & 0x8000U) << 16U; // sign bit return o; } static FP16 float_to_half_full(FP32 f) { FP16 o = {0}; // Based on ISPC reference code (with minor modifications) if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow) o.s.Exponent = 0; else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set) { o.s.Exponent = 31; o.s.Mantissa = f.s.Mantissa ? 
0x200 : 0; // NaN->qNaN and Inf->Inf } else // Normalized number { // Exponent unbias the single, then bias the halfp int newexp = f.s.Exponent - 127 + 15; if (newexp >= 31) // Overflow, return signed infinity o.s.Exponent = 31; else if (newexp <= 0) // Underflow { if ((14 - newexp) <= 24) // Mantissa might be non-zero { unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit o.s.Mantissa = mant >> (14 - newexp); if ((mant >> (13 - newexp)) & 1) // Check for rounding o.u++; // Round, might overflow into exp bit, but this is OK } } else { o.s.Exponent = static_cast<unsigned int>(newexp); o.s.Mantissa = f.s.Mantissa >> 13; if (f.s.Mantissa & 0x1000) // Check for rounding o.u++; // Round, might overflow to inf, this is OK } } o.s.Sign = f.s.Sign; return o; } // NOTE: From OpenEXR code // #define IMF_INCREASING_Y 0 // #define IMF_DECREASING_Y 1 // #define IMF_RAMDOM_Y 2 // // #define IMF_NO_COMPRESSION 0 // #define IMF_RLE_COMPRESSION 1 // #define IMF_ZIPS_COMPRESSION 2 // #define IMF_ZIP_COMPRESSION 3 // #define IMF_PIZ_COMPRESSION 4 // #define IMF_PXR24_COMPRESSION 5 // #define IMF_B44_COMPRESSION 6 // #define IMF_B44A_COMPRESSION 7 static const char *ReadString(std::string *s, const char *ptr) { // Read untile NULL(\0). const char *p = ptr; const char *q = ptr; while ((*q) != 0) q++; (*s) = std::string(p, q); return q + 1; // skip '\0' } static bool ReadAttribute(std::string *name, std::string *type, std::vector<unsigned char> *data, size_t *marker_size, const char *marker, size_t size) { size_t name_len = strnlen(marker, size); if (name_len == size) { // String does not have a terminating character. return false; } *name = std::string(marker, name_len); marker += name_len + 1; size -= name_len + 1; size_t type_len = strnlen(marker, size); if (type_len == size) { return false; } *type = std::string(marker, type_len); marker += type_len + 1; size -= type_len + 1; if (size < sizeof(uint32_t)) { return false; } uint32_t data_len; memcpy(&data_len, marker, sizeof(uint32_t)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); marker += sizeof(uint32_t); size -= sizeof(uint32_t); if (size < data_len) { return false; } data->resize(static_cast<size_t>(data_len)); memcpy(&data->at(0), marker, static_cast<size_t>(data_len)); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len; return true; } static void WriteAttributeToMemory(std::vector<unsigned char> *out, const char *name, const char *type, const unsigned char *data, int len) { out->insert(out->end(), name, name + strlen(name) + 1); out->insert(out->end(), type, type + strlen(type) + 1); int outLen = len; tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen)); out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen), reinterpret_cast<unsigned char *>(&outLen) + sizeof(int)); out->insert(out->end(), data, data + len); } typedef struct { std::string name; // less than 255 bytes long int pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } ChannelInfo; typedef struct { std::vector<tinyexr::ChannelInfo> channels; std::vector<EXRAttribute> attributes; int data_window[4]; int line_order; int display_window[4]; float screen_window_center[2]; float screen_window_width; float pixel_aspect_ratio; int chunk_count; // Tiled format int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; unsigned int header_len; int compression_type; void clear() { channels.clear(); attributes.clear(); data_window[0] = 0; data_window[1] = 0; data_window[2] = 0; 
data_window[3] = 0; line_order = 0; display_window[0] = 0; display_window[1] = 0; display_window[2] = 0; display_window[3] = 0; screen_window_center[0] = 0.0f; screen_window_center[1] = 0.0f; screen_window_width = 0.0f; pixel_aspect_ratio = 0.0f; chunk_count = 0; // Tiled format tile_size_x = 0; tile_size_y = 0; tile_level_mode = 0; tile_rounding_mode = 0; header_len = 0; compression_type = 0; } } HeaderInfo; static void ReadChannelInfo(std::vector<ChannelInfo> &channels, const std::vector<unsigned char> &data) { const char *p = reinterpret_cast<const char *>(&data.at(0)); for (;;) { if ((*p) == 0) { break; } ChannelInfo info; p = ReadString(&info.name, p); memcpy(&info.pixel_type, p, sizeof(int)); p += 4; info.p_linear = static_cast<unsigned char>(p[0]); // uchar p += 1 + 3; // reserved: uchar[3] memcpy(&info.x_sampling, p, sizeof(int)); // int p += 4; memcpy(&info.y_sampling, p, sizeof(int)); // int p += 4; tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling)); channels.push_back(info); } } static void WriteChannelInfo(std::vector<unsigned char> &data, const std::vector<ChannelInfo> &channels) { size_t sz = 0; // Calculate total size. for (size_t c = 0; c < channels.size(); c++) { sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0 sz += 16; // 4 * int } data.resize(sz + 1); unsigned char *p = &data.at(0); for (size_t c = 0; c < channels.size(); c++) { memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str())); p += strlen(channels[c].name.c_str()); (*p) = '\0'; p++; int pixel_type = channels[c].pixel_type; int x_sampling = channels[c].x_sampling; int y_sampling = channels[c].y_sampling; tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling)); memcpy(p, &pixel_type, sizeof(int)); p += sizeof(int); (*p) = channels[c].p_linear; p += 4; memcpy(p, &x_sampling, sizeof(int)); p += sizeof(int); memcpy(p, &y_sampling, sizeof(int)); p += sizeof(int); } (*p) = '\0'; } static void CompressZip(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. 
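// Worked example of the delta filter below: input bytes {10, 13, 11}
// become {10, 131, 126}, since 13 - 10 + 384 = 387 -> 131 (mod 256) and
// 11 - 13 + 384 = 382 -> 126. The inverse step in DecompressZip()
// (t[0] = t[-1] + t[0] - 128) restores {10, 13, 11}.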
//
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

#if TINYEXR_USE_MINIZ
  //
  // Compress the data using miniz
  //
  miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
  int ret = miniz::mz_compress(
      dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
      src_size);
  assert(ret == miniz::MZ_OK);
  (void)ret;

  compressedSize = outSize;
#else
  uLong outSize = compressBound(static_cast<uLong>(src_size));
  int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
                     src_size);
  assert(ret == Z_OK);
  (void)ret;

  compressedSize = outSize;
#endif
}

static void DecompressZip(unsigned char *dst,
                          unsigned long *uncompressed_size /* inout */,
                          const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(*uncompressed_size);

#if TINYEXR_USE_MINIZ
  int ret =
      miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  assert(ret == miniz::MZ_OK);
  (void)ret;
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  assert(ret == Z_OK);
  (void)ret;
#endif

  //
  // Apply EXR-specific postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  // Predictor.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
}

// RLE code from OpenEXR --------------------------------------

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#endif

const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;

//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//

static int rleCompress(int inLength, const char in[], signed char out[]) {
  const char *inEnd = in + inLength;
  const char *runStart = in;
  const char *runEnd = in + 1;
  signed char *outWrite = out;

  while (runStart < inEnd) {
    while (runEnd < inEnd && *runStart == *runEnd &&
           runEnd - runStart - 1 < MAX_RUN_LENGTH) {
      ++runEnd;
    }

    if (runEnd - runStart >= MIN_RUN_LENGTH) {
      //
      // Compressible run
      //

      *outWrite++ = static_cast<char>(runEnd - runStart) - 1;
      *outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
      runStart = runEnd;
    } else {
      //
      // Uncompressible run
      //

      while (runEnd < inEnd &&
             ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
              (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
             runEnd - runStart < MAX_RUN_LENGTH) {
        ++runEnd;
      }

      *outWrite++ = static_cast<char>(runStart - runEnd);

      while (runStart < runEnd) {
        *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
      }
    }

    ++runEnd;
  }

  return static_cast<int>(outWrite - out);
}

//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
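// The compressed stream interleaves two token kinds: a non-negative
// count n followed by one byte means "repeat that byte n + 1 times",
// while a negative count -n followed by n bytes means "copy those n
// bytes verbatim". For example, rleCompress() encodes "AAAABC" as
// {3, 'A', -2, 'B', 'C'}.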
//

static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *outStart = out;

  while (inLength > 0) {
    if (*in < 0) {
      int count = -(static_cast<int>(*in++));
      if (count > inLength - 1) return 0;  // would read past the input
      inLength -= count + 1;

      if (0 > (maxLength -= count)) return 0;

      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      int count = *in++;
      if (inLength < 2) return 0;  // the byte to repeat is missing
      inLength -= 2;

      if (0 > (maxLength -= count + 1)) return 0;

      memset(out, *reinterpret_cast<const char *>(in), count + 1);
      out += count + 1;
      in++;
    }
  }

  return static_cast<int>(out - outStart);
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// End of RLE code from OpenEXR -----------------------------------

static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  //
  // Reorder the pixel data.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // outSize will be (src_size * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);

  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
}

static void DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(uncompressed_size);

  int ret = rleUncompress(static_cast<int>(src_size),
                          static_cast<int>(uncompressed_size),
                          reinterpret_cast<const signed char *>(src),
                          reinterpret_cast<char *>(&tmpBuf.at(0)));
  assert(ret == static_cast<int>(uncompressed_size));
  (void)ret;

  //
  // Apply EXR-specific postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  // Predictor.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + uncompressed_size;

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
}

#if TINYEXR_USE_PIZ

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#endif

//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd.
LLC) // (3 clause BSD license) // struct PIZChannelData { unsigned short *start; unsigned short *end; int nx; int ny; int ys; int size; }; //----------------------------------------------------------------------------- // // 16-bit Haar Wavelet encoding and decoding // // The source code in this file is derived from the encoding // and decoding routines written by Christian Rouet for his // PIZ image file format. // //----------------------------------------------------------------------------- // // Wavelet basis functions without modulo arithmetic; they produce // the best compression ratios when the wavelet-transformed data are // Huffman-encoded, but the wavelet transform works only for 14-bit // data (untransformed data values must be less than (1 << 14)). // inline void wenc14(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { short as = static_cast<short>(a); short bs = static_cast<short>(b); short ms = (as + bs) >> 1; short ds = as - bs; l = static_cast<unsigned short>(ms); h = static_cast<unsigned short>(ds); } inline void wdec14(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { short ls = static_cast<short>(l); short hs = static_cast<short>(h); int hi = hs; int ai = ls + (hi & 1) + (hi >> 1); short as = static_cast<short>(ai); short bs = static_cast<short>(ai - hi); a = static_cast<unsigned short>(as); b = static_cast<unsigned short>(bs); } // // Wavelet basis functions with modulo arithmetic; they work with full // 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't // compress the data quite as well. // const int NBITS = 16; const int A_OFFSET = 1 << (NBITS - 1); const int M_OFFSET = 1 << (NBITS - 1); const int MOD_MASK = (1 << NBITS) - 1; inline void wenc16(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { int ao = (a + A_OFFSET) & MOD_MASK; int m = ((ao + b) >> 1); int d = ao - b; if (d < 0) m = (m + M_OFFSET) & MOD_MASK; d &= MOD_MASK; l = static_cast<unsigned short>(m); h = static_cast<unsigned short>(d); } inline void wdec16(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { int m = l; int d = h; int bb = (m - (d >> 1)) & MOD_MASK; int aa = (d + bb - A_OFFSET) & MOD_MASK; b = static_cast<unsigned short>(bb); a = static_cast<unsigned short>(aa); } // // 2D Wavelet encoding: // static void wav2Encode( unsigned short *in, // io: values are transformed in place int nx, // i : x size int ox, // i : x offset int ny, // i : y size int oy, // i : y offset unsigned short mx) // i : maximum in[x][y] value { bool w14 = (mx < (1 << 14)); int n = (nx > ny) ? 
ny : nx; int p = 1; // == 1 << level int p2 = 2; // == 1 << (level+1) // // Hierachical loop on smaller dimension n // while (p2 <= n) { unsigned short *py = in; unsigned short *ey = in + oy * (ny - p2); int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; unsigned short i00, i01, i10, i11; // // Y loop // for (; py <= ey; py += oy2) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); // // X loop // for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; unsigned short *p10 = px + oy1; unsigned short *p11 = p10 + ox1; // // 2D wavelet encoding // if (w14) { wenc14(*px, *p01, i00, i01); wenc14(*p10, *p11, i10, i11); wenc14(i00, i10, *px, *p10); wenc14(i01, i11, *p01, *p11); } else { wenc16(*px, *p01, i00, i01); wenc16(*p10, *p11, i10, i11); wenc16(i00, i10, *px, *p10); wenc16(i01, i11, *p01, *p11); } } // // Encode (1D) odd column (still in Y loop) // if (nx & p) { unsigned short *p10 = px + oy1; if (w14) wenc14(*px, *p10, i00, *p10); else wenc16(*px, *p10, i00, *p10); *px = i00; } } // // Encode (1D) odd line (must loop in X) // if (ny & p) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; if (w14) wenc14(*px, *p01, i00, *p01); else wenc16(*px, *p01, i00, *p01); *px = i00; } } // // Next level // p = p2; p2 <<= 1; } } // // 2D Wavelet decoding: // static void wav2Decode( unsigned short *in, // io: values are transformed in place int nx, // i : x size int ox, // i : x offset int ny, // i : y size int oy, // i : y offset unsigned short mx) // i : maximum in[x][y] value { bool w14 = (mx < (1 << 14)); int n = (nx > ny) ? ny : nx; int p = 1; int p2; // // Search max level // while (p <= n) p <<= 1; p >>= 1; p2 = p; p >>= 1; // // Hierarchical loop on smaller dimension n // while (p >= 1) { unsigned short *py = in; unsigned short *ey = in + oy * (ny - p2); int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; unsigned short i00, i01, i10, i11; // // Y loop // for (; py <= ey; py += oy2) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); // // X loop // for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; unsigned short *p10 = px + oy1; unsigned short *p11 = p10 + ox1; // // 2D wavelet decoding // if (w14) { wdec14(*px, *p10, i00, i10); wdec14(*p01, *p11, i01, i11); wdec14(i00, i01, *px, *p01); wdec14(i10, i11, *p10, *p11); } else { wdec16(*px, *p10, i00, i10); wdec16(*p01, *p11, i01, i11); wdec16(i00, i01, *px, *p01); wdec16(i10, i11, *p10, *p11); } } // // Decode (1D) odd column (still in Y loop) // if (nx & p) { unsigned short *p10 = px + oy1; if (w14) wdec14(*px, *p10, i00, *p10); else wdec16(*px, *p10, i00, *p10); *px = i00; } } // // Decode (1D) odd line (must loop in X) // if (ny & p) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; if (w14) wdec14(*px, *p01, i00, *p01); else wdec16(*px, *p01, i00, *p01); *px = i00; } } // // Next level // p2 = p; p >>= 1; } } //----------------------------------------------------------------------------- // // 16-bit Huffman compression and decompression. // // The source code in this file is derived from the 8-bit // Huffman compression and decompression routines written // by Christian Rouet for his PIZ image file format. // //----------------------------------------------------------------------------- // Adds some modification for tinyexr. 
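// Note on the table sizes below: HUF_ENCSIZE is (1 << 16) + 1 rather than
// 1 << 16 because hufBuildEncTable() appends one extra run-length
// pseudo-symbol after the largest real symbol. The decoding table has only
// 1 << HUF_DECBITS entries: codes of at most HUF_DECBITS bits are resolved
// with a single indexed lookup, while longer codes go through the small
// secondary lists hanging off HufDec::p.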
const int HUF_ENCBITS = 16; // literal (value) bit length const int HUF_DECBITS = 14; // decoding bit size (>= 8) const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size const int HUF_DECMASK = HUF_DECSIZE - 1; struct HufDec { // short code long code //------------------------------- int len : 8; // code length 0 int lit : 24; // lit p size int *p; // 0 lits }; inline long long hufLength(long long code) { return code & 63; } inline long long hufCode(long long code) { return code >> 6; } inline void outputBits(int nBits, long long bits, long long &c, int &lc, char *&out) { c <<= nBits; lc += nBits; c |= bits; while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8))); } inline long long getBits(int nBits, long long &c, int &lc, const char *&in) { while (lc < nBits) { c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++)); lc += 8; } lc -= nBits; return (c >> lc) & ((1 << nBits) - 1); } // // ENCODING TABLE BUILDING & (UN)PACKING // // // Build a "canonical" Huffman code table: // - for each (uncompressed) symbol, hcode contains the length // of the corresponding code (in the compressed data) // - canonical codes are computed and stored in hcode // - the rules for constructing canonical codes are as follows: // * shorter codes (if filled with zeroes to the right) // have a numerically higher value than longer codes // * for codes with the same length, numerical values // increase with numerical symbol values // - because the canonical code table can be constructed from // symbol lengths alone, the code table can be transmitted // without sending the actual code values // - see http://www.compressconsult.com/huffman/ // static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) { long long n[59]; // // For each i from 0 through 58, count the // number of different codes of length i, and // store the count in n[i]. // for (int i = 0; i <= 58; ++i) n[i] = 0; for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1; // // For each i from 58 through 1, compute the // numerically lowest code with length i, and // store that code in n[i]. // long long c = 0; for (int i = 58; i > 0; --i) { long long nc = ((c + n[i]) >> 1); n[i] = c; c = nc; } // // hcode[i] contains the length, l, of the // code for symbol i. Assign the next available // code of length l to the symbol and store both // l and the code in hcode[i]. // for (int i = 0; i < HUF_ENCSIZE; ++i) { int l = static_cast<int>(hcode[i]); if (l > 0) hcode[i] = l | (n[l]++ << 6); } } // // Compute Huffman codes (based on frq input) and store them in frq: // - code structure is : [63:lsb - 6:msb] | [5-0: bit length]; // - max code length is 58 bits; // - codes outside the range [im-iM] have a null length (unused values); // - original frequencies are destroyed; // - encoding tables are used by hufEncode() and hufBuildDecTable(); // struct FHeapCompare { bool operator()(long long *a, long long *b) { return *a > *b; } }; static void hufBuildEncTable( long long *frq, // io: input frequencies [HUF_ENCSIZE], output table int *im, // o: min frq index int *iM) // o: max frq index { // // This function assumes that when it is called, array frq // indicates the frequency of all possible symbols in the data // that are to be Huffman-encoded. (frq[i] contains the number // of occurrences of symbol i in the data.) 
// // The loop below does three things: // // 1) Finds the minimum and maximum indices that point // to non-zero entries in frq: // // frq[im] != 0, and frq[i] == 0 for all i < im // frq[iM] != 0, and frq[i] == 0 for all i > iM // // 2) Fills array fHeap with pointers to all non-zero // entries in frq. // // 3) Initializes array hlink such that hlink[i] == i // for all array entries. // int hlink[HUF_ENCSIZE]; long long *fHeap[HUF_ENCSIZE]; *im = 0; while (!frq[*im]) (*im)++; int nf = 0; for (int i = *im; i < HUF_ENCSIZE; i++) { hlink[i] = i; if (frq[i]) { fHeap[nf] = &frq[i]; nf++; *iM = i; } } // // Add a pseudo-symbol, with a frequency count of 1, to frq; // adjust the fHeap and hlink array accordingly. Function // hufEncode() uses the pseudo-symbol for run-length encoding. // (*iM)++; frq[*iM] = 1; fHeap[nf] = &frq[*iM]; nf++; // // Build an array, scode, such that scode[i] contains the number // of bits assigned to symbol i. Conceptually this is done by // constructing a tree whose leaves are the symbols with non-zero // frequency: // // Make a heap that contains all symbols with a non-zero frequency, // with the least frequent symbol on top. // // Repeat until only one symbol is left on the heap: // // Take the two least frequent symbols off the top of the heap. // Create a new node that has first two nodes as children, and // whose frequency is the sum of the frequencies of the first // two nodes. Put the new node back into the heap. // // The last node left on the heap is the root of the tree. For each // leaf node, the distance between the root and the leaf is the length // of the code for the corresponding symbol. // // The loop below doesn't actually build the tree; instead we compute // the distances of the leaves from the root on the fly. When a new // node is added to the heap, then that node's descendants are linked // into a single linear list that starts at the new node, and the code // lengths of the descendants (that is, their distance from the root // of the tree) are incremented by one. // std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); long long scode[HUF_ENCSIZE]; memset(scode, 0, sizeof(long long) * HUF_ENCSIZE); while (nf > 1) { // // Find the indices, mm and m, of the two smallest non-zero frq // values in fHeap, add the smallest frq to the second-smallest // frq, and remove the smallest frq value from fHeap. // int mm = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); --nf; int m = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); frq[m] += frq[mm]; std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); // // The entries in scode are linked into lists with the // entries in hlink serving as "next" pointers and with // the end of a list marked by hlink[j] == j. // // Traverse the lists that start at scode[m] and scode[mm]. // For each element visited, increment the length of the // corresponding code by one bit. (If we visit scode[j] // during the traversal, then the code for symbol j becomes // one bit longer.) // // Merge the lists that start at scode[m] and scode[mm] // into a single list that starts at scode[m]. // // // Add a bit to all codes in the first list. // for (int j = m;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) { // // Merge the two lists. 
// hlink[j] = mm; break; } } // // Add a bit to all codes in the second list // for (int j = mm;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) break; } } // // Build a canonical Huffman code table, replacing the code // lengths in scode with (code, code length) pairs. Copy the // code table from scode into frq. // hufCanonicalCodeTable(scode); memcpy(frq, scode, sizeof(long long) * HUF_ENCSIZE); } // // Pack an encoding table: // - only code lengths, not actual codes, are stored // - runs of zeroes are compressed as follows: // // unpacked packed // -------------------------------- // 1 zero 0 (6 bits) // 2 zeroes 59 // 3 zeroes 60 // 4 zeroes 61 // 5 zeroes 62 // n zeroes (6 or more) 63 n-6 (6 + 8 bits) // const int SHORT_ZEROCODE_RUN = 59; const int LONG_ZEROCODE_RUN = 63; const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN; const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN; static void hufPackEncTable( const long long *hcode, // i : encoding table [HUF_ENCSIZE] int im, // i : min hcode index int iM, // i : max hcode index char **pcode) // o: ptr to packed table (updated) { char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { int l = hufLength(hcode[im]); if (l == 0) { int zerun = 1; while ((im < iM) && (zerun < LONGEST_LONG_RUN)) { if (hufLength(hcode[im + 1]) > 0) break; im++; zerun++; } if (zerun >= 2) { if (zerun >= SHORTEST_LONG_RUN) { outputBits(6, LONG_ZEROCODE_RUN, c, lc, p); outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p); } else { outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p); } continue; } } outputBits(6, l, c, lc, p); } if (lc > 0) *p++ = (unsigned char)(c << (8 - lc)); *pcode = p; } // // Unpack an encoding table packed by hufPackEncTable(): // static bool hufUnpackEncTable( const char **pcode, // io: ptr to packed table (updated) int ni, // i : input size (in bytes) int im, // i : min hcode index int iM, // i : max hcode index long long *hcode) // o: encoding table [HUF_ENCSIZE] { memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE); const char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { if (p - *pcode > ni) { return false; } long long l = hcode[im] = getBits(6, c, lc, p); // code length if (l == (long long)LONG_ZEROCODE_RUN) { if (p - *pcode > ni) { return false; } int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } else if (l >= (long long)SHORT_ZEROCODE_RUN) { int zerun = l - SHORT_ZEROCODE_RUN + 2; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } } *pcode = const_cast<char *>(p); hufCanonicalCodeTable(hcode); return true; } // // DECODING TABLE BUILDING // // // Clear a newly allocated decoding table so that it contains only zeroes. 
//

static void hufClearDecTable(HufDec *hdecod)
// io: (allocated by caller) decoding table [HUF_DECSIZE]
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    hdecod[i].len = 0;
    hdecod[i].lit = 0;
    hdecod[i].p = NULL;
  }
  // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}

//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
//   infrequent;
// - decoding tables are used by hufDecode();
//

static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  // o: (allocated by caller)
                                              //    decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //

  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //

      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //

        // invalidTableEntry();
        return false;
      }

      pl->lit++;

      if (pl->p) {
        int *p = pl->p;
        pl->p = new int[pl->lit];

        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new int[1];
      }

      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //

      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //

          // invalidTableEntry();
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }

  return true;
}

//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//

static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    if (hdecod[i].p) {
      delete[] hdecod[i].p;
      hdecod[i].p = 0;
    }
  }
}

//
// ENCODING
//

inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}

inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCode.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
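// For example, if sCode is 10 bits long, runCode is 8 bits long and
// runCount is 30, the explicit form costs about 10 * 30 = 300 bits while
// the compact form costs 10 + 8 + 8 = 26 bits, so the run is emitted as
// sCode, runCode, 30 and the decoder replays the previous symbol from
// the run count.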
// if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) { outputCode(sCode, c, lc, out); outputCode(runCode, c, lc, out); outputBits(8, runCount, c, lc, out); } else { while (runCount-- >= 0) outputCode(sCode, c, lc, out); } } // // Encode (compress) ni values based on the Huffman encoding table hcode: // static int hufEncode // return: output size (in bits) (const long long *hcode, // i : encoding table const unsigned short *in, // i : uncompressed input buffer const int ni, // i : input buffer size (in bytes) int rlc, // i : rl code char *out) // o: compressed output buffer { char *outStart = out; long long c = 0; // bits not yet written to out int lc = 0; // number of valid bits in c (LSB) int s = in[0]; int cs = 0; // // Loop on input values // for (int i = 1; i < ni; i++) { // // Count same values or send code // if (s == in[i] && cs < 255) { cs++; } else { sendCode(hcode[s], cs, hcode[rlc], c, lc, out); cs = 0; } s = in[i]; } // // Send remaining code // sendCode(hcode[s], cs, hcode[rlc], c, lc, out); if (lc) *out = (c << (8 - lc)) & 0xff; return (out - outStart) * 8 + lc; } // // DECODING // // // In order to force the compiler to inline them, // getChar() and getCode() are implemented as macros // instead of "inline" functions. // #define getChar(c, lc, in) \ { \ c = (c << 8) | *(unsigned char *)(in++); \ lc += 8; \ } #define getCode(po, rlc, c, lc, in, out, oe) \ { \ if (po == rlc) { \ if (lc < 8) getChar(c, lc, in); \ \ lc -= 8; \ \ unsigned char cs = (c >> lc); \ \ if (out + cs > oe) return false; \ \ unsigned short s = out[-1]; \ \ while (cs-- > 0) *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return false; \ } \ } // // Decode (uncompress) ni bits based on encoding & decoding tables: // static bool hufDecode(const long long *hcode, // i : encoding table const HufDec *hdecod, // i : decoding table const char *in, // i : compressed input buffer int ni, // i : input size (in bits) int rlc, // i : run-length code int no, // i : expected output size (in bytes) unsigned short *out) // o: uncompressed output buffer { long long c = 0; int lc = 0; unsigned short *outb = out; unsigned short *oe = out + no; const char *ie = in + (ni + 7) / 8; // input byte size // // Loop on input bytes // while (in < ie) { getChar(c, lc, in); // // Access decoding table // while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { // // Get short code // lc -= pl.len; getCode(pl.lit, rlc, c, lc, in, out, oe); } else { if (!pl.p) { return false; } // invalidCode(); // wrong code // // Search long code // int j; for (j = 0; j < pl.lit; j++) { int l = hufLength(hcode[pl.p[j]]); while (lc < l && in < ie) // get more bits getChar(c, lc, in); if (lc >= l) { if (hufCode(hcode[pl.p[j]]) == ((c >> (lc - l)) & (((long long)(1) << l) - 1))) { // // Found : get long code // lc -= l; getCode(pl.p[j], rlc, c, lc, in, out, oe); break; } } } if (j == pl.lit) { return false; // invalidCode(); // Not found } } } } // // Get remaining (short) codes // int i = (8 - ni) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; getCode(pl.lit, rlc, c, lc, in, out, oe); } else { return false; // invalidCode(); // wrong (long) code } } if (out - outb != no) { return false; } // notEnoughData (); return true; } static void countFrequencies(long long freq[HUF_ENCSIZE], const unsigned short data[/*n*/], int n) { for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0; 
for (int i = 0; i < n; ++i) ++freq[data[i]];
}

static void writeUInt(char buf[4], unsigned int i) {
  unsigned char *b = (unsigned char *)buf;

  b[0] = i;
  b[1] = i >> 8;
  b[2] = i >> 16;
  b[3] = i >> 24;
}

static unsigned int readUInt(const char buf[4]) {
  const unsigned char *b = (const unsigned char *)buf;

  return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) |
         ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000);
}

//
// EXTERNAL INTERFACE
//

static int hufCompress(const unsigned short raw[], int nRaw,
                       char compressed[]) {
  if (nRaw == 0) return 0;

  long long freq[HUF_ENCSIZE];

  countFrequencies(freq, raw, nRaw);

  int im = 0;
  int iM = 0;
  hufBuildEncTable(freq, &im, &iM);

  char *tableStart = compressed + 20;
  char *tableEnd = tableStart;
  hufPackEncTable(freq, im, iM, &tableEnd);
  int tableLength = tableEnd - tableStart;
  char *dataStart = tableEnd;

  int nBits = hufEncode(freq, raw, nRaw, iM, dataStart);
  int data_length = (nBits + 7) / 8;

  writeUInt(compressed, im);
  writeUInt(compressed + 4, iM);
  writeUInt(compressed + 8, tableLength);
  writeUInt(compressed + 12, nBits);
  writeUInt(compressed + 16, 0);  // room for future extensions

  return dataStart + data_length - compressed;
}

static bool hufUncompress(const char compressed[], int nCompressed,
                          unsigned short raw[], int nRaw) {
  if (nCompressed == 0) {
    if (nRaw != 0) return false;

    return true;  // nothing to decode
  }

  int im = readUInt(compressed);
  int iM = readUInt(compressed + 4);
  // int tableLength = readUInt (compressed + 8);
  int nBits = readUInt(compressed + 12);

  if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;

  const char *ptr = compressed + 20;

  //
  // Fast decoder needs at least 2x64-bits of compressed data, and
  // needs to be runnable on this platform. Otherwise, fall back
  // to the original decoder
  //

  // if (FastHufDecoder::enabled() && nBits > 128)
  //{
  //  FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
  //  fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
  //}
  // else
  {
    std::vector<long long> freq(HUF_ENCSIZE);
    std::vector<HufDec> hdec(HUF_DECSIZE);

    hufClearDecTable(&hdec.at(0));

    if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                           &freq.at(0))) {
      return false;
    }

    {
      if (nBits > 8 * (nCompressed - (ptr - compressed))) {
        return false;
      }

      if (!hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0))) {
        hufFreeDecTable(&hdec.at(0));
        return false;
      }

      if (!hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, nRaw, raw)) {
        hufFreeDecTable(&hdec.at(0));
        return false;
      }
    }
    // catch (...)
    //{
    //  hufFreeDecTable (hdec);
    //  throw;
    //}

    hufFreeDecTable(&hdec.at(0));
  }

  return true;
}

//
// Functions to compress the range of values in the pixel data
//

const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);

static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
                           unsigned char bitmap[BITMAP_SIZE],
                           unsigned short &minNonZero,
                           unsigned short &maxNonZero) {
  for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;

  for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));

  bitmap[0] &= ~1;  // zero is not explicitly stored in
                    // the bitmap; we assume that the
                    // data always contain zeroes

  minNonZero = BITMAP_SIZE - 1;
  maxNonZero = 0;

  for (int i = 0; i < BITMAP_SIZE; ++i) {
    if (bitmap[i]) {
      if (minNonZero > i) minNonZero = i;
      if (maxNonZero < i) maxNonZero = i;
    }
  }
}

static unsigned short forwardLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int k = 0;

  for (int i = 0; i < USHORT_RANGE; ++i) {
    if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
      lut[i] = k++;
    else
      lut[i] = 0;
  }

  return k - 1;  // maximum value stored in lut[],
}                // i.e.
number of ones in bitmap minus 1 static unsigned short reverseLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i; } int n = k - 1; while (k < USHORT_RANGE) lut[k++] = 0; return n; // maximum k where lut[k] is non-zero, } // i.e. number of ones in bitmap minus 1 static void applyLut(const unsigned short lut[USHORT_RANGE], unsigned short data[/*nData*/], int nData) { for (int i = 0; i < nData; ++i) data[i] = lut[data[i]]; } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ static bool CompressPiz(unsigned char *outPtr, unsigned int &outSize, const unsigned char *inPtr, size_t inSize, const std::vector<ChannelInfo> &channelInfo, int data_width, int num_lines) { unsigned char bitmap[BITMAP_SIZE]; unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif // Assume `inSize` is multiple of 2 or 4. std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short)); std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t c = 0; c < channelData.size(); c++) { PIZChannelData &cd = channelData[c]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = data_width; cd.ny = num_lines; // cd.ys = c.channel().ySampling; size_t pixelSize = sizeof(int); // UINT and FLOAT if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } cd.size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += cd.nx * cd.ny * cd.size; } const unsigned char *ptr = inPtr; for (int y = 0; y < num_lines; ++y) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(cd.end, ptr, n * sizeof(unsigned short)); ptr += n * sizeof(unsigned short); cd.end += n; } } bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap, minNonZero, maxNonZero); unsigned short lut[USHORT_RANGE]; unsigned short maxValue = forwardLutFromBitmap(bitmap, lut); applyLut(lut, &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size())); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero), maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff data. Initialize length header with zero, // then later fill it by `length`. 
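// The complete PIZ block written by this function is laid out as:
//   [minNonZero:2][maxNonZero:2][bitmap slice][huf length:4][huf data]
// DecompressPiz() reads the fields back in the same order.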
char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf); memcpy(lengthPtr, &length, sizeof(int)); outSize = static_cast<unsigned int>( (reinterpret_cast<unsigned char *>(buf) - outPtr) + static_cast<unsigned int>(length)); return true; } static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSize, int num_channels, const EXRChannelInfo *channels, int data_width, int num_lines) { unsigned char bitmap[BITMAP_SIZE]; unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif memset(bitmap, 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } unsigned short lut[USHORT_RANGE]; memset(lut, 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap, lut); // // Huffman decoding // int length; length = *(reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); std::vector<unsigned short> tmpBuffer(tmpBufSize); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer.at(0), static_cast<int>(tmpBufSize)); // // Wavelet decoding // std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels)); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) { const EXRChannelInfo &chan = channels[i]; size_t pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = data_width; channelData[i].ny = num_lines; // channelData[i].ys = 1; channelData[i].size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Expand the pixel data to their original range // applyLut(lut, &tmpBuffer.at(0), static_cast<int>(tmpBufSize)); for (int y = 0; y < num_lines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; int precision; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; tolerance = 0.0f; } }; bool FindZFPCompressionParam(ZFPCompressionParam *param, const EXRAttribute *attributes, int num_attributes) { bool foundType = false; for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) && (attributes[i].size == 1)) { param->type = 
static_cast<int>(attributes[i].value[0]);
      foundType = true;
    }
  }

  if (!foundType) {
    return false;
  }

  if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
          (attributes[i].size == 8)) {
        param->rate = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
          (attributes[i].size == 4)) {
        // Store into `precision` (not `rate`); this is the value consumed by
        // zfp_stream_set_precision() below.
        param->precision = *(reinterpret_cast<int *>(attributes[i].value));
        return true;
      }
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
          (attributes[i].size == 8)) {
        param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
  } else {
    assert(0);
  }

  return false;
}

// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
                          int num_channels, const unsigned char *src,
                          unsigned long src_size,
                          const ZFPCompressionParam &param) {
  size_t uncompressed_size = dst_width * dst_num_lines * num_channels;
  (void)uncompressed_size;  // currently unused.

  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  assert((dst_width % 4) == 0);
  assert((dst_num_lines % 4) == 0);

  if ((dst_width & 3U) || (dst_num_lines & 3U)) {
    return false;
  }

  field =
      zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
                   zfp_type_float, dst_width, dst_num_lines * num_channels);
  zfp = zfp_stream_open(NULL);

  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
                        /* write random access */ 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
  } else {
    assert(0);
  }

  size_t buf_size = zfp_stream_maximum_size(zfp, field);

  std::vector<unsigned char> buf(buf_size);
  memcpy(&buf.at(0), src, src_size);

  bitstream *stream = stream_open(&buf.at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);
  zfp_stream_rewind(zfp);

  size_t image_size = dst_width * dst_num_lines;

  for (int c = 0; c < num_channels; c++) {
    // decompress 4x4 pixel block.
    for (int y = 0; y < dst_num_lines; y += 4) {
      for (int x = 0; x < dst_width; x += 4) {
        float fblock[16];
        zfp_decode_block_float_2(zfp, fblock);
        for (int j = 0; j < 4; j++) {
          for (int i = 0; i < 4; i++) {
            dst[c * image_size + ((y + j) * dst_width + (x + i))] =
                fblock[j * 4 + i];
          }
        }
      }
    }
  }

  zfp_field_free(field);
  zfp_stream_close(zfp);
  stream_close(stream);

  return true;
}

// Assume pixel format is FLOAT for all channels.
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
                 const float *inPtr, int width, int num_lines,
                 int num_channels, const ZFPCompressionParam &param) {
  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  assert((width % 4) == 0);
  assert((num_lines % 4) == 0);

  if ((width & 3U) || (num_lines & 3U)) {
    return false;
  }

  // create input array.
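  // The zfp_field below views the input as a single 2D float array of
  // width x (num_lines * num_channels): channel planes are simply stacked
  // along Y. The multiple-of-4 requirement above exists because the encode
  // loop walks the data in fixed 4x4 blocks.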
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)), zfp_type_float, width, num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); outBuf->resize(buf_size); bitstream *stream = stream_open(&outBuf->at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_field_free(field); size_t image_size = width * num_lines; for (int c = 0; c < num_channels; c++) { // compress 4x4 pixel block. for (int y = 0; y < num_lines; y += 4) { for (int x = 0; x < width; x += 4) { float fblock[16]; for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { fblock[j * 4 + i] = inPtr[c * image_size + ((y + j) * width + (x + i))]; } } zfp_encode_block_float_2(zfp, fblock); } } } zfp_stream_flush(zfp); (*outSize) = zfp_stream_compressed_size(zfp); zfp_stream_close(zfp); return true; } #endif // // ----------------------------------------------------------------- // static void DecodePixelData(/* out */ unsigned char **out_images, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int width, int height, int x_stride, int y, int line_no, int num_lines, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ #if TINYEXR_USE_PIZ // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>( static_cast<size_t>(width * num_lines) * pixel_data_size)); size_t tmpBufLen = static_cast<size_t>( static_cast<size_t>(width * num_lines) * pixel_data_size); bool ret = tinyexr::DecompressPiz( reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen, static_cast<int>(num_channels), channels, width, num_lines); assert(ret); (void)ret; // For PIZ_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... 
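    // Reshape the decompressed, channel-interleaved scanlines in outBuf into
    // the caller's per-channel images, passing each sample through
    // swap2()/swap4() and widening HALF -> FLOAT where
    // requested_pixel_types[] asks for it.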
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            FP16 hf;

            hf.u = line_ptr[u];

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += static_cast<size_t>(
                             (height - 1 - (line_no + static_cast<int>(v)))) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += static_cast<size_t>(
                             (height - 1 - (line_no + static_cast<int>(v)))) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);

        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val = line_ptr[u];

            tinyexr::swap4(&val);

            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);

        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          // Rows in outBuf are `width` samples wide (outBuf was sized from
          // `width`), so index with `width` here, not `x_stride`; using the
          // stride would overrun the buffer for clipped edge tiles.
          const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
              v * pixel_data_size * static_cast<size_t>(width) +
              channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val = line_ptr[u];

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
      }
    }
#else
    // This branch is only reached when PIZ support is compiled out.
    assert(0 && "PIZ is not enabled in this build");
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
             compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    // Allocate original data size.
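    // ZIPS and ZIP share the same per-block deflate encoding; they differ
    // only in how many scanlines one block covers (1 for ZIPS, 16 for ZIP),
    // so a single branch handles both.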
std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZip(reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr, static_cast<unsigned long>(data_len)); // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val = line_ptr[u]; tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - 
(static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len)); // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val = line_ptr[u]; tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float 
*image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; if (!FindZFPCompressionParam(&zfp_compression_param, attributes, num_attributes)) { assert(0); return; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast<unsigned long>(data_len), zfp_compression_param); // For ZFP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else (void)attributes; (void)num_attributes; (void)num_channels; assert(0); #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { for (size_t c = 0; c < num_channels; c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { const unsigned short *line_ptr = reinterpret_cast<const unsigned short *>( data_ptr + c * static_cast<size_t>(width) * sizeof(unsigned short)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(out_images[c]); if (line_order == 0) { outLine += y * x_stride; } else { outLine += (height - 1 - y) * x_stride; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); outLine[u] = hf.u; } } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += y * x_stride; } else { outLine += (height - 1 - y) * x_stride; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); tinyexr::FP32 f32 = half_to_float(hf); outLine[u] = f32.f; } } else { assert(0); } } else if 
(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { const float *line_ptr = reinterpret_cast<const float *>( data_ptr + c * static_cast<size_t>(width) * sizeof(float)); float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += y * x_stride; } else { outLine += (height - 1 - y) * x_stride; } for (int u = 0; u < width; u++) { float val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>( data_ptr + c * static_cast<size_t>(width) * sizeof(unsigned int)); unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]); if (line_order == 0) { outLine += y * x_stride; } else { outLine += (height - 1 - y) * x_stride; } for (int u = 0; u < width; u++) { unsigned int val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } } } } static void DecodeTiledPixelData( unsigned char **out_images, int *width, int *height, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int data_width, int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x, int tile_size_y, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { assert(tile_offset_x * tile_size_x < data_width); assert(tile_offset_y * tile_size_y < data_height); // Compute actual image size in a tile. if ((tile_offset_x + 1) * tile_size_x >= data_width) { (*width) = data_width - (tile_offset_x * tile_size_x); } else { (*width) = tile_size_x; } if ((tile_offset_y + 1) * tile_size_y >= data_height) { (*height) = data_height - (tile_offset_y * tile_size_y); } else { (*height) = tile_size_y; } // Image size = tile size. 
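  // Decode the tile as a small scanline image: the clipped extents computed
  // above are passed as the width/line count, while tile_size_x stays the
  // row stride used when writing into the tile's output images.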
DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len, compression_type, line_order, (*width), tile_size_y, /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0, (*height), pixel_data_size, num_attributes, attributes, num_channels, channels, channel_offset_list); } static void ComputeChannelLayout(std::vector<size_t> *channel_offset_list, int *pixel_data_size, size_t *channel_offset, int num_channels, const EXRChannelInfo *channels) { channel_offset_list->resize(static_cast<size_t>(num_channels)); (*pixel_data_size) = 0; (*channel_offset) = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { (*channel_offset_list)[c] = (*channel_offset); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { (*pixel_data_size) += sizeof(unsigned short); (*channel_offset) += sizeof(unsigned short); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { (*pixel_data_size) += sizeof(float); (*channel_offset) += sizeof(float); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { (*pixel_data_size) += sizeof(unsigned int); (*channel_offset) += sizeof(unsigned int); } else { assert(0); } } } static unsigned char **AllocateImage(int num_channels, const EXRChannelInfo *channels, const int *requested_pixel_types, int data_width, int data_height) { unsigned char **images = reinterpret_cast<unsigned char **>(static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(num_channels)))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { size_t data_len = static_cast<size_t>(data_width) * static_cast<size_t>(data_height); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { // pixel_data_size += sizeof(unsigned short); // channel_offset += sizeof(unsigned short); // Alloc internal image for half type. if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { images[c] = reinterpret_cast<unsigned char *>(static_cast<unsigned short *>( malloc(sizeof(unsigned short) * data_len))); } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // pixel_data_size += sizeof(float); // channel_offset += sizeof(float); images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { // pixel_data_size += sizeof(unsigned int); // channel_offset += sizeof(unsigned int); images[c] = reinterpret_cast<unsigned char *>( static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len))); } else { assert(0); } } return images; } static int ParseEXRHeader(HeaderInfo *info, bool *empty_header, const EXRVersion *version, std::string *err, const unsigned char *buf, size_t size) { const char *marker = reinterpret_cast<const char *>(&buf[0]); if (empty_header) { (*empty_header) = false; } if (version->multipart) { if (size > 0 && marker[0] == '\0') { // End of header list. 
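      // A multipart file stores its headers back to back and terminates the
      // list with a single null byte where the next header would begin.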
if (empty_header) { (*empty_header) = true; } return TINYEXR_SUCCESS; } } // According to the spec, the header of every OpenEXR file must contain at // least the following attributes: // // channels chlist // compression compression // dataWindow box2i // displayWindow box2i // lineOrder lineOrder // pixelAspectRatio float // screenWindowCenter v2f // screenWindowWidth float bool has_channels = false; bool has_compression = false; bool has_data_window = false; bool has_display_window = false; bool has_line_order = false; bool has_pixel_aspect_ratio = false; bool has_screen_window_center = false; bool has_screen_window_width = false; info->data_window[0] = 0; info->data_window[1] = 0; info->data_window[2] = 0; info->data_window[3] = 0; info->line_order = 0; // @fixme info->display_window[0] = 0; info->display_window[1] = 0; info->display_window[2] = 0; info->display_window[3] = 0; info->screen_window_center[0] = 0.0f; info->screen_window_center[1] = 0.0f; info->screen_window_width = -1.0f; info->pixel_aspect_ratio = -1.0f; info->tile_size_x = -1; info->tile_size_y = -1; info->tile_level_mode = -1; info->tile_rounding_mode = -1; info->attributes.clear(); // Read attributes size_t orig_size = size; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (version->tiled && attr_name.compare("tiles") == 0) { unsigned int x_size, y_size; unsigned char tile_mode; assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, &data.at(4), sizeof(int)); tile_mode = data[8]; tinyexr::swap4(&x_size); tinyexr::swap4(&y_size); info->tile_size_x = static_cast<int>(x_size); info->tile_size_y = static_cast<int>(y_size); // mode = levelMode + roundingMode * 16 info->tile_level_mode = tile_mode & 0x3; info->tile_rounding_mode = (tile_mode >> 4) & 0x1; } else if (attr_name.compare("compression") == 0) { bool ok = false; if ((data[0] >= TINYEXR_COMPRESSIONTYPE_NONE) && (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ)) { ok = true; } if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ ok = true; #else if (err) { (*err) = "PIZ compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP ok = true; #else if (err) { (*err) = "ZFP compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (!ok) { if (err) { (*err) = "Unknown compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->compression_type = static_cast<int>(data[0]); has_compression = true; } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int ReadChannelInfo(info->channels, data); if (info->channels.size() < 1) { if (err) { (*err) = "# of channels is zero."; } return TINYEXR_ERROR_INVALID_DATA; } has_channels = true; } else if (attr_name.compare("dataWindow") == 0) { memcpy(&info->data_window[0], &data.at(0), sizeof(int)); memcpy(&info->data_window[1], &data.at(4), sizeof(int)); memcpy(&info->data_window[2], &data.at(8), 
sizeof(int)); memcpy(&info->data_window[3], &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3])); has_data_window = true; } else if (attr_name.compare("displayWindow") == 0) { memcpy(&info->display_window[0], &data.at(0), sizeof(int)); memcpy(&info->display_window[1], &data.at(4), sizeof(int)); memcpy(&info->display_window[2], &data.at(8), sizeof(int)); memcpy(&info->display_window[3], &data.at(12), sizeof(int)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[1])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[2])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[3])); has_display_window = true; } else if (attr_name.compare("lineOrder") == 0) { info->line_order = static_cast<int>(data[0]); has_line_order = true; } else if (attr_name.compare("pixelAspectRatio") == 0) { memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio)); has_pixel_aspect_ratio = true; } else if (attr_name.compare("screenWindowCenter") == 0) { memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float)); memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[1])); has_screen_window_center = true; } else if (attr_name.compare("screenWindowWidth") == 0) { memcpy(&info->screen_window_width, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_width)); has_screen_window_width = true; } else if (attr_name.compare("chunkCount") == 0) { memcpy(&info->chunk_count, &data.at(0), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count)); } else { // Custom attribute(up to TINYEXR_MAX_ATTRIBUTES) if (info->attributes.size() < TINYEXR_MAX_ATTRIBUTES) { EXRAttribute attrib; strncpy(attrib.name, attr_name.c_str(), 255); attrib.name[255] = '\0'; strncpy(attrib.type, attr_type.c_str(), 255); attrib.type[255] = '\0'; attrib.size = static_cast<int>(data.size()); attrib.value = static_cast<unsigned char *>(malloc(data.size())); memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0), data.size()); info->attributes.push_back(attrib); } } } // Check if required attributes exist { std::stringstream ss_err; if (!has_compression) { ss_err << "\"compression\" attribute not found in the header." << std::endl; } if (!has_channels) { ss_err << "\"channels\" attribute not found in the header." << std::endl; } if (!has_line_order) { ss_err << "\"lineOrder\" attribute not found in the header." << std::endl; } if (!has_display_window) { ss_err << "\"displayWindow\" attribute not found in the header." << std::endl; } if (!has_data_window) { ss_err << "\"dataWindow\" attribute not found in the header." << std::endl; } if (!has_pixel_aspect_ratio) { ss_err << "\"pixelAspectRatio\" attribute not found in the header." << std::endl; } if (!has_screen_window_width) { ss_err << "\"screenWindowWidth\" attribute not found in the header." 
<< std::endl; } if (!has_screen_window_center) { ss_err << "\"screenWindowCenter\" attribute not found in the header." << std::endl; } if (!(ss_err.str().empty())) { if (err) { (*err) += ss_err.str(); } return TINYEXR_ERROR_INVALID_HEADER; } } info->header_len = static_cast<unsigned int>(orig_size - size); return TINYEXR_SUCCESS; } // C++ HeaderInfo to C EXRHeader conversion. static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) { exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio; exr_header->screen_window_center[0] = info.screen_window_center[0]; exr_header->screen_window_center[1] = info.screen_window_center[1]; exr_header->screen_window_width = info.screen_window_width; exr_header->chunk_count = info.chunk_count; exr_header->display_window[0] = info.display_window[0]; exr_header->display_window[1] = info.display_window[1]; exr_header->display_window[2] = info.display_window[2]; exr_header->display_window[3] = info.display_window[3]; exr_header->data_window[0] = info.data_window[0]; exr_header->data_window[1] = info.data_window[1]; exr_header->data_window[2] = info.data_window[2]; exr_header->data_window[3] = info.data_window[3]; exr_header->line_order = info.line_order; exr_header->compression_type = info.compression_type; exr_header->tile_size_x = info.tile_size_x; exr_header->tile_size_y = info.tile_size_y; exr_header->tile_level_mode = info.tile_level_mode; exr_header->tile_rounding_mode = info.tile_rounding_mode; exr_header->num_channels = static_cast<int>(info.channels.size()); exr_header->channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); // manually add '\0' for safety. 
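    // strncpy() does not NUL-terminate when the source string fills the
    // whole buffer, so the last byte is set explicitly below.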
exr_header->channels[c].name[255] = '\0';
    exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
    exr_header->channels[c].p_linear = info.channels[c].p_linear;
    exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
    exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
  }

  exr_header->pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->pixel_types[c] = info.channels[c].pixel_type;
  }

  // Initially fill with values of `pixel_types`
  exr_header->requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
  }

  // The parser stops adding custom attributes once the cap is reached, so
  // the size may legitimately equal TINYEXR_MAX_ATTRIBUTES.
  assert(info.attributes.size() <= TINYEXR_MAX_ATTRIBUTES);
  exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());

  for (size_t i = 0; i < info.attributes.size(); i++) {
    memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
           256);
    memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
           256);
    exr_header->custom_attributes[i].size = info.attributes[i].size;
    // Just copy pointer
    exr_header->custom_attributes[i].value = info.attributes[i].value;
  }

  exr_header->header_len = info.header_len;
}

static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
                       const std::vector<tinyexr::tinyexr_uint64> &offsets,
                       const unsigned char *head) {
  int num_channels = exr_header->num_channels;

  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
  int data_height =
      exr_header->data_window[3] - exr_header->data_window[1] + 1;

  size_t num_blocks = offsets.size();

  std::vector<size_t> channel_offset_list;
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
                                &channel_offset, num_channels,
                                exr_header->channels);

  if (exr_header->tiled) {
    size_t num_tiles = offsets.size();  // = # of blocks

    exr_image->tiles = static_cast<EXRTile *>(
        malloc(sizeof(EXRTile) * static_cast<size_t>(num_tiles)));

    for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
      // Allocate memory for each tile.
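      // Note: each tile image is allocated at the full data-window size
      // (data_width x data_height), even though only the decoded tile region
      // is written into it.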
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, data_width, data_height); // 16 byte: tile coordinates // 4 byte : data size // ~ : data(uncompressed or compressed) const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]); int tile_coordinates[4]; memcpy(tile_coordinates, data_ptr, sizeof(int) * 4); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3])); // @todo{ LoD } assert(tile_coordinates[2] == 0); assert(tile_coordinates[3] == 0); int data_len; memcpy(&data_len, data_ptr + 16, sizeof(int)); // 16 = sizeof(tile_coordinates) tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); assert(data_len >= 4); // Move to data addr: 20 = 16 + 4; data_ptr += 20; tinyexr::DecodeTiledPixelData( exr_image->tiles[tile_idx].images, &(exr_image->tiles[tile_idx].width), &(exr_image->tiles[tile_idx].height), exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list); exr_image->tiles[tile_idx].offset_x = tile_coordinates[0]; exr_image->tiles[tile_idx].offset_y = tile_coordinates[1]; exr_image->tiles[tile_idx].level_x = tile_coordinates[2]; exr_image->tiles[tile_idx].level_y = tile_coordinates[3]; exr_image->num_tiles = static_cast<int>(num_tiles); } } else { // scanline format exr_image->images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, data_width, data_height); #ifdef _OPENMP #pragma omp parallel for #endif for (int y = 0; y < static_cast<int>(num_blocks); y++) { size_t y_idx = static_cast<size_t>(y); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y_idx]); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed or compressed) int line_no; memcpy(&line_no, data_ptr, sizeof(int)); int data_len; memcpy(&data_len, data_ptr + 4, sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); int end_line_no = (std::min)(line_no + num_scanline_blocks, (exr_header->data_window[3] + 1)); int num_lines = end_line_no - line_no; assert(num_lines > 0); // Move to data addr: 8 = 4 + 4; data_ptr += 8; // Adjust line_no with data_window.bmin.y line_no -= exr_header->data_window[1]; tinyexr::DecodePixelData( exr_image->images, exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, data_width, y, line_no, num_lines, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list); } // omp parallel } // Overwrite `pixel_type` with `requested_pixel_type`. 
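  // The image buffers now hold samples in the requested types, so update the
  // header's `pixel_types` to reflect what is actually stored.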
{ for (int c = 0; c < exr_header->num_channels; c++) { exr_header->pixel_types[c] = exr_header->requested_pixel_types[c]; } } { exr_image->num_channels = num_channels; exr_image->width = data_width; exr_image->height = data_height; } return TINYEXR_SUCCESS; } static void ReconstructLineOffsets(std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n, const unsigned char *head, const unsigned char *marker, const size_t size) { assert(head < marker); assert(offsets->size() == n); for (size_t i = 0; i < n; i++) { size_t offset = static_cast<size_t>(marker - head); assert(offset < size); // Offset should not exceed whole EXR file/data size. int y; int data_len; memcpy(&y, marker, sizeof(int)); memcpy(&data_len, marker + 4, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); (*offsets)[i] = offset; marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len) } } static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *head, const unsigned char *marker, const size_t size, const char **err) { if (exr_image == NULL || exr_header == NULL || head == NULL || marker == NULL || (size <= tinyexr::kEXRVersionSize)) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1; int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1; // Read offset tables. size_t num_blocks; if (exr_header->chunk_count > 0) { // Use `chunkCount` attribute. num_blocks = static_cast<size_t>(exr_header->chunk_count); } else if (exr_header->tiled) { // @todo { LoD } size_t num_x_tiles = static_cast<size_t>(data_width) / static_cast<size_t>(exr_header->tile_size_x); if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) < static_cast<size_t>(data_width)) { num_x_tiles++; } size_t num_y_tiles = static_cast<size_t>(data_height) / static_cast<size_t>(exr_header->tile_size_y); if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) < static_cast<size_t>(data_height)) { num_y_tiles++; } num_blocks = num_x_tiles * num_y_tiles; } else { num_blocks = static_cast<size_t>(data_height) / static_cast<size_t>(num_scanline_blocks); if (num_blocks * static_cast<size_t>(num_scanline_blocks) < static_cast<size_t>(data_height)) { num_blocks++; } } std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks); for (size_t y = 0; y < num_blocks; y++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); marker += sizeof(tinyexr::tinyexr_uint64); // = 8 offsets[y] = offset; } // If line offsets are invalid, we try to reconstruct it. // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details. for (size_t y = 0; y < num_blocks; y++) { if (offsets[y] <= 0) { // TODO(syoyo) Report as warning. //if (err) { // stringstream ss; // ss << "Incomplete lineOffsets." 
<< std::endl; // (*err) += ss.str(); //} ReconstructLineOffsets(&offsets, num_blocks, head, marker, size); break; } } return DecodeChunk(exr_image, exr_header, offsets, head); } } // namespace tinyexr int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err) { if (out_rgba == NULL) { if (err) { (*err) = "Invalid argument.\n"; } return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); InitEXRImage(&exr_image); { int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { return ret; } if (exr_version.multipart || exr_version.non_image) { if (err) { (*err) = "Loading multipart or DeepImage is not supported yet.\n"; } return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } { int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { return ret; } } // Read HALF channel as FLOAT. for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } { int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err); if (ret != TINYEXR_SUCCESS) { return ret; } } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } if (idxR == -1) { if (err) { (*err) = "R channel not found\n"; } // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { if (err) { (*err) = "G channel not found\n"; } // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { if (err) { (*err) = "B channel not found\n"; } // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_header == NULL) { if (err) { (*err) = "Invalid argument.\n"; } // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; tinyexr::HeaderInfo info; info.clear(); std::string err_str; int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err && !err_str.empty()) { (*err) = strdup(err_str.c_str()); // May leak } } ConvertHeader(exr_header, 
info);

  // transfer `tiled` from version.
  exr_header->tiled = version->tiled;

  return ret;
}

int LoadEXRFromMemory(float *out_rgba, const unsigned char *memory,
                      size_t size, const char **err) {
  if (out_rgba == NULL || memory == NULL) {
    if (err) {
      (*err) = "Invalid argument.\n";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;

  InitEXRHeader(&exr_header);

  int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
  if (ret != TINYEXR_SUCCESS) {
    return ret;
  }

  ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    return ret;
  }

  InitEXRImage(&exr_image);
  ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    return ret;
  }

  // RGBA
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exr_header.num_channels; c++) {
    if (strcmp(exr_header.channels[c].name, "R") == 0) {
      idxR = c;
    } else if (strcmp(exr_header.channels[c].name, "G") == 0) {
      idxG = c;
    } else if (strcmp(exr_header.channels[c].name, "B") == 0) {
      idxB = c;
    } else if (strcmp(exr_header.channels[c].name, "A") == 0) {
      idxA = c;
    }
  }

  if (idxR == -1) {
    if (err) {
      (*err) = "R channel not found\n";
    }
    // @todo { free exr_image }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  if (idxG == -1) {
    if (err) {
      (*err) = "G channel not found\n";
    }
    // @todo { free exr_image }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  if (idxB == -1) {
    if (err) {
      (*err) = "B channel not found\n";
    }
    // @todo { free exr_image }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Assume `out_rgba` has enough memory allocated.
  for (int i = 0; i < exr_image.width * exr_image.height; i++) {
    out_rgba[4 * i + 0] =
        reinterpret_cast<float **>(exr_image.images)[idxR][i];
    out_rgba[4 * i + 1] =
        reinterpret_cast<float **>(exr_image.images)[idxG][i];
    out_rgba[4 * i + 2] =
        reinterpret_cast<float **>(exr_image.images)[idxB][i];
    // EXR stores channels alphabetically, so "A" is often index 0; test
    // against -1 rather than > 0 (this matches LoadEXR above).
    if (idxA != -1) {
      out_rgba[4 * i + 3] =
          reinterpret_cast<float **>(exr_image.images)[idxA][i];
    } else {
      out_rgba[4 * i + 3] = 1.0;
    }
  }

  return TINYEXR_SUCCESS;
}

int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
                         const char *filename, const char **err) {
  if (exr_image == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#if 0 //def _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    if (err) {
      (*err) = "Cannot read file.";
    }
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    (void)ret;
  }

  return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
                                err);
}

int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
                           const unsigned char *memory, const size_t size,
                           const char **err) {
  if (exr_image == NULL || memory == NULL ||
      (size < tinyexr::kEXRVersionSize)) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (exr_header->header_len == 0) {
    if (err) {
      (*err) = "EXRHeader is not initialized.";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  const unsigned char *head = memory;
  const unsigned char *marker = reinterpret_cast<const unsigned char *>(
      memory + exr_header->header_len +
      8);  // +8 for magic number + version header.
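  // (8 = 4-byte magic number 0x76 0x2f 0x31 0x01 + 4-byte version field;
  // `header_len` counts only the attribute block that follows them.)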
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size, err); } size_t SaveEXRImageToMemory(const EXRImage *exr_image, const EXRHeader *exr_header, unsigned char **memory_out, const char **err) { if (exr_image == NULL || memory_out == NULL || exr_header->compression_type < 0) { if (err) { (*err) = "Invalid argument."; } return 0; // @fixme } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { if (err) { (*err) = "PIZ compression is not supported in this build."; } return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { if (err) { (*err) = "ZFP compression is not supported in this build."; } return 0; } #endif #if TINYEXR_USE_ZFP for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) { if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) { if (err) { (*err) = "Pixel type must be FLOAT for ZFP compression."; } return 0; } } #endif std::vector<unsigned char> memory; // Header { const char header[] = {0x76, 0x2f, 0x31, 0x01}; memory.insert(memory.end(), header, header + 4); } // Version, scanline. { char marker[] = {2, 0, 0, 0}; /* @todo if (exr_header->tiled) { marker[1] |= 0x2; } if (exr_header->long_name) { marker[1] |= 0x4; } if (exr_header->non_image) { marker[1] |= 0x8; } if (exr_header->multipart) { marker[1] |= 0x10; } */ memory.insert(memory.end(), marker, marker + 4); } int num_scanlines = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanlines = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanlines = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanlines = 16; } // Write attributes. std::vector<tinyexr::ChannelInfo> channels; { std::vector<unsigned char> data; for (int c = 0; c < exr_header->num_channels; c++) { tinyexr::ChannelInfo info; info.p_linear = 0; info.pixel_type = exr_header->requested_pixel_types[c]; info.x_sampling = 1; info.y_sampling = 1; info.name = std::string(exr_header->channels[c].name); channels.push_back(info); } tinyexr::WriteChannelInfo(data, channels); tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0), static_cast<int>(data.size())); } { int comp = exr_header->compression_type; tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp)); tinyexr::WriteAttributeToMemory( &memory, "compression", "compression", reinterpret_cast<const unsigned char *>(&comp), 1); } { int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3])); tinyexr::WriteAttributeToMemory( &memory, "dataWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); tinyexr::WriteAttributeToMemory( &memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); } { unsigned char line_order = 0; // @fixme { read line_order from EXRHeader } tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder", &line_order, 1); } { float aspectRatio = 1.0f; tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio)); tinyexr::WriteAttributeToMemory( &memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float)); } { float center[2] = {0.0f, 0.0f}; tinyexr::swap4(reinterpret_cast<unsigned int 
*>(&center[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1])); tinyexr::WriteAttributeToMemory( &memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float)); } { float w = static_cast<float>(exr_image->width); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char *>(&w), sizeof(float)); } // Custom attributes if (exr_header->num_custom_attributes > 0) { for (int i = 0; i < exr_header->num_custom_attributes; i++) { tinyexr::WriteAttributeToMemory( &memory, exr_header->custom_attributes[i].name, exr_header->custom_attributes[i].type, reinterpret_cast<const unsigned char *>( exr_header->custom_attributes[i].value), exr_header->custom_attributes[i].size); } } { // end of header unsigned char e = 0; memory.push_back(e); } int num_blocks = exr_image->height / num_scanlines; if (num_blocks * num_scanlines < exr_image->height) { num_blocks++; } std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks)); size_t headerSize = memory.size(); tinyexr::tinyexr_uint64 offset = headerSize + static_cast<size_t>(num_blocks) * sizeof( tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable) std::vector<unsigned char> data; std::vector<std::vector<unsigned char> > data_list( static_cast<size_t>(num_blocks)); std::vector<size_t> channel_offset_list( static_cast<size_t>(exr_header->num_channels)); int pixel_data_size = 0; size_t channel_offset = 0; for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { channel_offset_list[c] = channel_offset; if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { pixel_data_size += sizeof(unsigned short); channel_offset += sizeof(unsigned short); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { pixel_data_size += sizeof(float); channel_offset += sizeof(float); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { assert(0); } } #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; // Use ZFP compression parameter from custom attributes(if such a parameter // exists) { bool ret = tinyexr::FindZFPCompressionParam( &zfp_compression_param, exr_header->custom_attributes, exr_header->num_custom_attributes); if (!ret) { // Use predefined compression parameter. 
zfp_compression_param.type = 0; zfp_compression_param.rate = 2; } } #endif // Use signed int since some OpenMP compiler doesn't allow unsigned type for // `parallel for` #ifdef _OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_blocks; i++) { size_t ii = static_cast<size_t>(i); int start_y = num_scanlines * i; int endY = (std::min)(num_scanlines * (i + 1), exr_image->height); int h = endY - start_y; std::vector<unsigned char> buf( static_cast<size_t>(exr_image->width * h * pixel_data_size)); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { tinyexr::FP16 h16; h16.u = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP32 f32 = half_to_float(h16); tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f)); // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); line_ptr[x] = f32.f; } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { unsigned short val = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap2(&val); // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); line_ptr[x] = val; } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { tinyexr::FP32 f32; f32.f = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP16 h16; h16 = float_to_half_full(f32); tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u)); // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); line_ptr[x] = h16.u; } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { float val = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); line_ptr[x] = val; } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { unsigned int val = reinterpret_cast<unsigned int **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(&val); // Assume increasing Y unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); 
line_ptr[x] = val; } } } } if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(buf.size()); memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), buf.begin(), buf.begin() + data_len); } else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #if TINYEXR_USE_MINIZ std::vector<unsigned char> block( tinyexr::miniz::mz_compressBound(buf.size())); #else std::vector<unsigned char> block(compressBound(buf.size())); #endif tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size()); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // (buf.size() * 3) / 2 would be enough. std::vector<unsigned char> block((buf.size() * 3) / 2); tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size()); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ unsigned int bufLen = 1024 + static_cast<unsigned int>( 1.2 * static_cast<unsigned int>( buf.size())); // @fixme { compute good bound. 
} std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, exr_image->width, h); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP std::vector<unsigned char> block; unsigned int outSize; tinyexr::CompressZfp( &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), exr_image->width, h, exr_header->num_channels, zfp_compression_param); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else { assert(0); } } // omp parallel for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { data.insert(data.end(), data_list[i].begin(), data_list[i].end()); offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size(); } { memory.insert( memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)), reinterpret_cast<unsigned char *>(&offsets.at(0)) + sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks)); } { memory.insert(memory.end(), data.begin(), data.end()); } assert(memory.size() > 0); (*memory_out) = static_cast<unsigned char *>(malloc(memory.size())); memcpy((*memory_out), &memory.at(0), memory.size()); return memory.size(); // OK } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL || exr_header == NULL || filename == NULL || exr_header->compression_type < 0) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { if (err) { (*err) = "PIZ compression is not supported in this build."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; /* returning 0 here would read as TINYEXR_SUCCESS */ } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { if (err) { (*err) = "ZFP compression is not supported in this build."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } #endif #if 0 //def _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "wb"); #else FILE *fp = fopen(filename, "wb"); #endif if (!fp) { if (err) { (*err) = "Cannot write a file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err); if ((mem_size > 0) && mem) { fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); return TINYEXR_SUCCESS; } int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { if
(deep_image == NULL) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = fopen(filename, "rb"); if (!fp) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize == 0) { fclose(fp); if (err) { (*err) = "File size is zero."; } return TINYEXR_ERROR_INVALID_FILE; } std::vector<char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); (void)ret; } fclose(fp); const char *head = &buf[0]; const char *marker = &buf[0]; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { if (err) { (*err) = "Invalid magic number."; } return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } // Version, scanline. { // ver 2.0, scanline, deep bit on (0x800) // must be [2, 8, 0, 0] if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) { if (err) { (*err) = "Unsupported version or scanline."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int num_scanline_blocks = 1; // 16 for ZIP compression. int compression_type = -1; int num_channels = -1; std::vector<tinyexr::ChannelInfo> channels; // Read attributes size_t size = filesize - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (attr_name.compare("compression") == 0) { compression_type = data[0]; if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) { if (err) { (*err) = "Unsupported compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int tinyexr::ReadChannelInfo(channels, data); num_channels = static_cast<int>(channels.size()); if (num_channels < 1) { if (err) { (*err) = "Invalid channels format."; } return TINYEXR_ERROR_INVALID_DATA; } } else if (attr_name.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh)); } else if (attr_name.compare("displayWindow") == 0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&h)); } } assert(dx >= 0);
assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(num_channels >= 1); int data_width = dw - dx + 1; int data_height = dh - dy + 1; std::vector<float> image( static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA // Read offset tables. int num_blocks = data_height / num_scanline_blocks; if (num_blocks * num_scanline_blocks < data_height) { num_blocks++; } std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks)); for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { tinyexr::tinyexr_int64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset)); marker += sizeof(tinyexr::tinyexr_int64); // = 8 offsets[y] = offset; } #if TINYEXR_USE_PIZ if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) || (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) { #else if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #endif // OK } else { if (err) { (*err) = "Unsupported format."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } deep_image->image = static_cast<float ***>( malloc(sizeof(float **) * static_cast<size_t>(num_channels))); for (int c = 0; c < num_channels; c++) { deep_image->image[c] = static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { } } deep_image->offset_table = static_cast<int **>( malloc(sizeof(int *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { deep_image->offset_table[y] = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(data_width))); } for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int line_no; tinyexr::tinyexr_int64 packedOffsetTableSize; tinyexr::tinyexr_int64 packedSampleDataSize; tinyexr::tinyexr_int64 unpackedSampleDataSize; memcpy(&line_no, data_ptr, sizeof(int)); memcpy(&packedOffsetTableSize, data_ptr + 4, sizeof(tinyexr::tinyexr_int64)); memcpy(&packedSampleDataSize, data_ptr + 12, sizeof(tinyexr::tinyexr_int64)); memcpy(&unpackedSampleDataSize, data_ptr + 20, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize)); std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width)); // decode pixel offset table. 
{ unsigned long dstLen = pixelOffsetTable.size() * sizeof(int); tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen, data_ptr + 28, static_cast<size_t>(packedOffsetTableSize)); assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (size_t i = 0; i < static_cast<size_t>(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sample_data( static_cast<size_t>(unpackedSampleDataSize)); // decode sample data. { unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize); tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen, data_ptr + 28 + packedOffsetTableSize, static_cast<size_t>(packedSampleDataSize)); assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize)); } // decode sample int sampleSize = -1; std::vector<int> channel_offset_list(static_cast<size_t>(num_channels)); { int channel_offset = 0; for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) { channel_offset_list[i] = channel_offset; if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT channel_offset += 4; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half channel_offset += 2; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { assert(0); } } sampleSize = channel_offset; } assert(sampleSize >= 2); assert(static_cast<size_t>( pixelOffsetTable[static_cast<size_t>(data_width - 1)] * sampleSize) == sample_data.size()); int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { tinyexr::tinyexr_uint64 data_offset = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { deep_image->image[c][y] = static_cast<float *>( malloc(sizeof(float) * static_cast<size_t>(samples_per_line))); if (channels[c].pixel_type == 0) { // UINT for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { unsigned int ui = *reinterpret_cast<unsigned int *>( &sample_data.at(data_offset + x * sizeof(int))); deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme } data_offset += sizeof(unsigned int) * static_cast<size_t>(samples_per_line); } else if (channels[c].pixel_type == 1) { // half for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { tinyexr::FP16 f16; f16.u = *reinterpret_cast<unsigned short *>( &sample_data.at(data_offset + x * sizeof(short))); tinyexr::FP32 f32 = half_to_float(f16); deep_image->image[c][y][x] = f32.f; } data_offset += sizeof(short) * static_cast<size_t>(samples_per_line); } else { // float for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { float f = *reinterpret_cast<float *>( &sample_data.at(data_offset + x * sizeof(float))); deep_image->image[c][y][x] = f; } data_offset += sizeof(float) * static_cast<size_t>(samples_per_line); } } } } // y deep_image->width = data_width; deep_image->height = data_height; deep_image->channel_names = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(num_channels))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { #ifdef _WIN32 deep_image->channel_names[c] = _strdup(channels[c].name.c_str()); #else deep_image->channel_names[c] = strdup(channels[c].name.c_str()); #endif } deep_image->num_channels = num_channels; return TINYEXR_SUCCESS; } void InitEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return; } exr_image->width = 0; exr_image->height = 0; 
exr_image->num_channels = 0; exr_image->images = NULL; exr_image->tiles = NULL; } void InitEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return; } memset(exr_header, 0, sizeof(EXRHeader)); } int FreeEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->channels) { free(exr_header->channels); } if (exr_header->pixel_types) { free(exr_header->pixel_types); } if (exr_header->requested_pixel_types) { free(exr_header->requested_pixel_types); } for (int i = 0; i < exr_header->num_custom_attributes; i++) { if (exr_header->custom_attributes[i].value) { free(exr_header->custom_attributes[i].value); } } return TINYEXR_SUCCESS; } int FreeEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->images && exr_image->images[i]) { free(exr_image->images[i]); } } if (exr_image->images) { free(exr_image->images); } if (exr_image->tiles) { for (int tid = 0; tid < exr_image->num_tiles; tid++) { for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) { free(exr_image->tiles[tid].images[i]); } } if (exr_image->tiles[tid].images) { free(exr_image->tiles[tid].images); } } } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_header == NULL || exr_version == NULL || filename == NULL) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #if 0 //def _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { if (err) { (*err) = "fread error."; } return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err) { (*err) = strdup(err_str.c_str()); // may leak } return ret; } if (empty_header) { marker += 1; // skip '\0' break; } // `chunkCount` must exist in the header. if (info.chunk_count == 0) { if (err) { (*err) = "`chunkCount' attribute is not found in the header."; } return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // move to next header. 
marker += info.header_len; marker_size -= info.header_len; /* shrink the remaining size along with the marker */ } // allocate memory for EXRHeader and create array of EXRHeader pointers. (*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); ConvertHeader(exr_header, infos[i]); // transfer `tiled` from version. exr_header->tiled = exr_version->tiled; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return TINYEXR_SUCCESS; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #if 0 //def _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { if (err) { (*err) = "fread error."; } return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRMultipartHeaderFromMemory( exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size) { if (version == NULL || memory == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } version->tiled = false; version->long_name = false; version->non_image = false; version->multipart = false; // Parse version header.
{ // must be 2 if (marker[0] != 2) { return TINYEXR_ERROR_INVALID_EXR_VERSION; } version->version = 2; if (marker[1] & 0x2) { // 9th bit version->tiled = true; } if (marker[1] & 0x4) { // 10th bit version->long_name = true; } if (marker[1] & 0x8) { // 11th bit version->non_image = true; // (deep image) } if (marker[1] & 0x10) { // 12th bit version->multipart = true; } } return TINYEXR_SUCCESS; } int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) { if (filename == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } #if 0 //def _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t file_size; // Compute size fseek(fp, 0, SEEK_END); file_size = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (file_size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } unsigned char buf[tinyexr::kEXRVersionSize]; size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp); fclose(fp); if (ret != tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize); } int LoadEXRMultipartImageFromMemory(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const unsigned char *memory, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory == NULL) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } // compute total header size. size_t total_header_size = 0; for (unsigned int i = 0; i < num_parts; i++) { if (exr_headers[i]->header_len == 0) { if (err) { (*err) = "EXRHeader is not initialized."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } total_header_size += exr_headers[i]->header_len; } const char *marker = reinterpret_cast<const char *>( memory + total_header_size + 4 + 4); // +8 for magic number and version header. marker += 1; // Skip empty header. // NOTE 1: // In a multipart image, there is a 'part number' before each chunk's data. // 4 byte : part number // 4+ : chunk // // NOTE 2: // The EXR spec says 'part number' is 'unsigned long', but it is actually an // 'unsigned int' (4 bytes) in the OpenEXR implementation... // http://www.openexr.com/openexrfilelayout.pdf // Load chunk offset table. std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list; for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> offset_table( static_cast<size_t>(exr_headers[i]->chunk_count)); for (size_t c = 0; c < offset_table.size(); c++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, 8); tinyexr::swap8(&offset); offset_table[c] = offset + 4; // +4 to skip 'part number' marker += 8; } chunk_offset_table_list.push_back(offset_table); } // Decode image. for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> &offset_table = chunk_offset_table_list[i]; // First check that the 'part number' is identical to 'i' for (size_t c = 0; c < offset_table.size(); c++) { const unsigned char *part_number_addr = memory + offset_table[c] - 4; // -4 to move to 'part number' field.
unsigned int part_no; memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4 tinyexr::swap4(&part_no); if (part_no != i) { assert(0); return TINYEXR_ERROR_INVALID_DATA; } } int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table, memory); if (ret != TINYEXR_SUCCESS) { return ret; } } return TINYEXR_SUCCESS; } int LoadEXRMultipartImageFromFile(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #if 0 //def _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, &buf.at(0), err); } int SaveEXR(const float *data, int width, int height, int components, const char *outfilename) { if (components == 3 || components == 4) { // OK } else { return TINYEXR_ERROR_INVALID_ARGUMENT; } // Assume at least 16x16 pixels. if (width < 16) return TINYEXR_ERROR_INVALID_ARGUMENT; if (height < 16) return TINYEXR_ERROR_INVALID_ARGUMENT; EXRHeader header; InitEXRHeader(&header); EXRImage image; InitEXRImage(&image); image.num_channels = components; std::vector<float> images[4]; images[0].resize(static_cast<size_t>(width * height)); images[1].resize(static_cast<size_t>(width * height)); images[2].resize(static_cast<size_t>(width * height)); images[3].resize(static_cast<size_t>(width * height)); // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers for (size_t i = 0; i < static_cast<size_t>(width * height); i++) { images[0][i] = data[static_cast<size_t>(components) * i + 0]; images[1][i] = data[static_cast<size_t>(components) * i + 1]; images[2][i] = data[static_cast<size_t>(components) * i + 2]; if (components == 4) { images[3][i] = data[static_cast<size_t>(components) * i + 3]; } } float *image_ptr[4] = {0, 0, 0, 0}; if (components == 4) { image_ptr[0] = &(images[3].at(0)); // A image_ptr[1] = &(images[2].at(0)); // B image_ptr[2] = &(images[1].at(0)); // G image_ptr[3] = &(images[0].at(0)); // R } else { image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R } image.images = reinterpret_cast<unsigned char **>(image_ptr); image.width = width; image.height = height; header.num_channels = components; header.channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels))); // Must be (A)BGR order, since most of EXR viewers expect this channel order. 
if (components == 4) { strncpy(header.channels[0].name, "A", 255); header.channels[0].name[strlen("A")] = '\0'; strncpy(header.channels[1].name, "B", 255); header.channels[1].name[strlen("B")] = '\0'; strncpy(header.channels[2].name, "G", 255); header.channels[2].name[strlen("G")] = '\0'; strncpy(header.channels[3].name, "R", 255); header.channels[3].name[strlen("R")] = '\0'; } else { strncpy(header.channels[0].name, "B", 255); header.channels[0].name[strlen("B")] = '\0'; strncpy(header.channels[1].name, "G", 255); header.channels[1].name[strlen("G")] = '\0'; strncpy(header.channels[2].name, "R", 255); header.channels[2].name[strlen("R")] = '\0'; } header.pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); header.requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // pixel type of output image to be stored in // .EXR } const char *err; int ret = SaveEXRImageToFile(&image, &header, outfilename, &err); if (ret != TINYEXR_SUCCESS) { return ret; } free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); return ret; } #ifdef _MSC_VER #pragma warning(pop) #endif #endif #endif // TINYEXR_H_
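/*
 * A minimal usage sketch for the file-based API defined above, assuming
 * TINYEXR_IMPLEMENTATION is defined in exactly one translation unit; `path`
 * is a hypothetical argument. Guarded with #if 0 since it is illustration
 * only, not part of the library.
 */
#if 0
static int load_exr_planes(const char *path) {
  const char *err = NULL;
  EXRVersion version;
  if (ParseEXRVersionFromFile(&version, path) != TINYEXR_SUCCESS) return -1;
  EXRHeader header;
  InitEXRHeader(&header);
  if (ParseEXRHeaderFromFile(&header, &version, path, &err) != TINYEXR_SUCCESS)
    return -1;
  EXRImage image;
  InitEXRImage(&image);
  if (LoadEXRImageFromFile(&image, &header, path, &err) != TINYEXR_SUCCESS) {
    FreeEXRHeader(&header);
    return -1;
  }
  /* image.images[c] now holds image.width * image.height samples of channel
     header.channels[c].name, decoded to header.requested_pixel_types[c]. */
  FreeEXRImage(&image);
  FreeEXRHeader(&header);
  return 0;
}
#endif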
maxpool_layer.c
#include "maxpool_layer.h" #include "convolutional_layer.h" #include "dark_cuda.h" #include "gemm.h" #include <stdio.h> image get_maxpool_image(maxpool_layer l) { int h = l.out_h; int w = l.out_w; int c = l.c; return float_to_image(w,h,c,l.output); } image get_maxpool_delta(maxpool_layer l) { int h = l.out_h; int w = l.out_w; int c = l.c; return float_to_image(w,h,c,l.delta); } void cudnn_maxpool_setup(layer *l) { #ifdef CUDNN cudnnStatus_t maxpool_status; maxpool_status = cudnnCreatePoolingDescriptor(&l->poolingDesc); maxpool_status = cudnnSetPooling2dDescriptor( l->poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN, // CUDNN_PROPAGATE_NAN, CUDNN_NOT_PROPAGATE_NAN l->size, l->size, l->pad/2, //0, //l.pad, l->pad/2, //0, //l.pad, l->stride_x, l->stride_y); cudnnCreateTensorDescriptor(&l->srcTensorDesc); cudnnCreateTensorDescriptor(&l->dstTensorDesc); cudnnSetTensor4dDescriptor(l->srcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->c, l->h, l->w); cudnnSetTensor4dDescriptor(l->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w); #endif // CUDNN } maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride_x, int stride_y, int padding, int maxpool_depth, int out_channels, int antialiasing) { maxpool_layer l = { (LAYER_TYPE)0 }; l.type = MAXPOOL; const int blur_stride_x = stride_x; const int blur_stride_y = stride_y; l.antialiasing = antialiasing; if (antialiasing) { stride_x = stride_y = l.stride = l.stride_x = l.stride_y = 1; // use stride=1 in host-layer } l.batch = batch; l.h = h; l.w = w; l.c = c; l.pad = padding; l.maxpool_depth = maxpool_depth; l.out_channels = out_channels; if (maxpool_depth) { l.out_c = out_channels; l.out_w = l.w; l.out_h = l.h; } else { l.out_w = (w + padding - size) / stride_x + 1; l.out_h = (h + padding - size) / stride_y + 1; l.out_c = c; } l.outputs = l.out_h * l.out_w * l.out_c; l.inputs = h*w*c; l.size = size; l.stride = stride_x; l.stride_x = stride_x; l.stride_y = stride_y; int output_size = l.out_h * l.out_w * l.out_c * batch; l.indexes = (int*)calloc(output_size, sizeof(int)); l.output = (float*)calloc(output_size, sizeof(float)); l.delta = (float*)calloc(output_size, sizeof(float)); l.forward = forward_maxpool_layer; l.backward = backward_maxpool_layer; #ifdef GPU l.forward_gpu = forward_maxpool_layer_gpu; l.backward_gpu = backward_maxpool_layer_gpu; l.indexes_gpu = cuda_make_int_array(output_size); l.output_gpu = cuda_make_array(l.output, output_size); l.delta_gpu = cuda_make_array(l.delta, output_size); cudnn_maxpool_setup(&l); #endif // GPU l.bflops = (l.size*l.size*l.c * l.out_h*l.out_w) / 1000000000.; if (maxpool_depth) fprintf(stderr, "max-depth %2dx%2d/%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops); else if(stride_x == stride_y) fprintf(stderr, "max %2dx%2d/%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops); else fprintf(stderr, "max %2dx%2d/%2dx%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, stride_y, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops); if (l.antialiasing) { printf("AA: "); l.input_layer = (layer*)calloc(1, sizeof(layer)); int blur_size = 3; int blur_pad = blur_size / 2; if (l.antialiasing == 2) { blur_size = 2; blur_pad = 0; } *(l.input_layer) = make_convolutional_layer(batch, 1, l.out_h, l.out_w, l.out_c, l.out_c, l.out_c, blur_size, blur_stride_x, blur_stride_y, 1, blur_pad, LINEAR, 0, 0, 0, 0, 0, 
1, 0, NULL, 0); const int blur_nweights = l.out_c * blur_size * blur_size; // (n / n) * n * blur_size * blur_size; int i; if (blur_size == 2) { for (i = 0; i < blur_nweights; i += (blur_size*blur_size)) { l.input_layer->weights[i + 0] = 1 / 4.f; l.input_layer->weights[i + 1] = 1 / 4.f; l.input_layer->weights[i + 2] = 1 / 4.f; l.input_layer->weights[i + 3] = 1 / 4.f; } } else { for (i = 0; i < blur_nweights; i += (blur_size*blur_size)) { /* l.input_layer->weights[i + 0] = 0; l.input_layer->weights[i + 1] = 0; l.input_layer->weights[i + 2] = 0; l.input_layer->weights[i + 3] = 0; l.input_layer->weights[i + 4] = 1; l.input_layer->weights[i + 5] = 0; l.input_layer->weights[i + 6] = 0; l.input_layer->weights[i + 7] = 0; l.input_layer->weights[i + 8] = 0; */ l.input_layer->weights[i + 0] = 1 / 16.f; l.input_layer->weights[i + 1] = 2 / 16.f; l.input_layer->weights[i + 2] = 1 / 16.f; l.input_layer->weights[i + 3] = 2 / 16.f; l.input_layer->weights[i + 4] = 4 / 16.f; l.input_layer->weights[i + 5] = 2 / 16.f; l.input_layer->weights[i + 6] = 1 / 16.f; l.input_layer->weights[i + 7] = 2 / 16.f; l.input_layer->weights[i + 8] = 1 / 16.f; } } for (i = 0; i < l.out_c; ++i) l.input_layer->biases[i] = 0; #ifdef GPU l.input_antialiasing_gpu = cuda_make_array(NULL, l.batch*l.outputs); push_convolutional_layer(*(l.input_layer)); #endif // GPU } return l; } void resize_maxpool_layer(maxpool_layer *l, int w, int h) { l->h = h; l->w = w; l->inputs = h*w*l->c; l->out_w = (w + l->pad - l->size) / l->stride_x + 1; l->out_h = (h + l->pad - l->size) / l->stride_y + 1; l->outputs = l->out_w * l->out_h * l->out_c; int output_size = l->outputs * l->batch; l->indexes = (int*)realloc(l->indexes, output_size * sizeof(int)); l->output = (float*)realloc(l->output, output_size * sizeof(float)); l->delta = (float*)realloc(l->delta, output_size * sizeof(float)); #ifdef GPU CHECK_CUDA(cudaFree((float *)l->indexes_gpu)); CHECK_CUDA(cudaFree(l->output_gpu)); CHECK_CUDA(cudaFree(l->delta_gpu)); l->indexes_gpu = cuda_make_int_array(output_size); l->output_gpu = cuda_make_array(l->output, output_size); l->delta_gpu = cuda_make_array(l->delta, output_size); cudnn_maxpool_setup(l); #endif } void forward_maxpool_layer(const maxpool_layer l, network_state state) { if (l.maxpool_depth) { int b, i, j, k, g; for (b = 0; b < l.batch; ++b) { #pragma omp parallel for for (i = 0; i < l.h; ++i) { for (j = 0; j < l.w; ++j) { for (g = 0; g < l.out_c; ++g) { int out_index = j + l.w*(i + l.h*(g + l.out_c*b)); float max = -FLT_MAX; int max_i = -1; for (k = g; k < l.c; k += l.out_c) { int in_index = j + l.w*(i + l.h*(k + l.c*b)); float val = state.input[in_index]; max_i = (val > max) ? in_index : max_i; max = (val > max) ? 
val : max; } l.output[out_index] = max; l.indexes[out_index] = max_i; } } } } return; } if (!state.train && l.stride_x == l.stride_y) { forward_maxpool_layer_avx(state.input, l.output, l.indexes, l.size, l.w, l.h, l.out_w, l.out_h, l.c, l.pad, l.stride, l.batch); } else { int b, i, j, k, m, n; int w_offset = -l.pad / 2; int h_offset = -l.pad / 2; int h = l.out_h; int w = l.out_w; int c = l.c; for (b = 0; b < l.batch; ++b) { for (k = 0; k < c; ++k) { for (i = 0; i < h; ++i) { for (j = 0; j < w; ++j) { int out_index = j + w*(i + h*(k + c*b)); float max = -FLT_MAX; int max_i = -1; for (n = 0; n < l.size; ++n) { for (m = 0; m < l.size; ++m) { int cur_h = h_offset + i*l.stride_y + n; int cur_w = w_offset + j*l.stride_x + m; int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c)); int valid = (cur_h >= 0 && cur_h < l.h && cur_w >= 0 && cur_w < l.w); float val = (valid != 0) ? state.input[index] : -FLT_MAX; max_i = (val > max) ? index : max_i; max = (val > max) ? val : max; } } l.output[out_index] = max; l.indexes[out_index] = max_i; } } } } } if (l.antialiasing) { network_state s = { 0 }; s.train = state.train; s.workspace = state.workspace; s.net = state.net; s.input = l.output; forward_convolutional_layer(*(l.input_layer), s); //simple_copy_ongpu(l.outputs*l.batch, l.output, l.input_antialiasing); memcpy(l.output, l.input_layer->output, l.input_layer->outputs * l.input_layer->batch * sizeof(float)); } } void backward_maxpool_layer(const maxpool_layer l, network_state state) { int i; int h = l.out_h; int w = l.out_w; int c = l.out_c; #pragma omp parallel for for(i = 0; i < h*w*c*l.batch; ++i){ int index = l.indexes[i]; state.delta[index] += l.delta[i]; } }
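/*
 * A small illustration-only sketch of the output geometry computed by
 * make_maxpool_layer() above: out_w = (w + padding - size) / stride_x + 1.
 * With size = 2, stride = 2, padding = 0 on a 416 x 416 x 32 input,
 * out_w = (416 + 0 - 2) / 2 + 1 = 208, and likewise out_h.
 */
#if 0
#include <assert.h>
void maxpool_geometry_example(void)
{
    /* batch, h, w, c, size, stride_x, stride_y, padding,
       maxpool_depth, out_channels, antialiasing */
    maxpool_layer l = make_maxpool_layer(1, 416, 416, 32, 2, 2, 2, 0, 0, 0, 0);
    assert(l.out_w == 208 && l.out_h == 208 && l.out_c == 32);
}
#endif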
axpy_cpu_omp_kernel.c
// // Created by Yonghong Yan on 1/7/16. // #include "axpy.h" #include <homp.h> #ifdef USE_INTEL_MKL #include <mkl.h> #endif void axpy_cpu_omp_wrapper(omp_offloading_t *off, long start_n, long length_n, REAL a, REAL *x, REAL *y) { int num_omp_threads = off->dev->num_cores; int i; #ifdef USE_INTEL_MKL mkl_mic_disable(); /* MKL is internally threaded, so the call must not sit inside an `omp parallel` region (every thread would repeat the full saxpy and race on y). Operate on the [start_n, start_n + length_n) subrange, matching the OpenMP loop below. */ cblas_saxpy(length_n, a, x + start_n, 1, y + start_n, 1); //mkl_mic_enable(); (void)num_omp_threads; (void)i; #else #pragma omp parallel for simd shared(y, x, a, start_n, length_n) private(i) num_threads(num_omp_threads) for (i=start_n; i<start_n + length_n; i++) { y[i] += a*x[i]; // printf("x[%d]: %f, y[%d]: %f\n", i, x[i], i, y[i]); } #endif }
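/*
 * An illustration-only sketch of the subrange semantics the wrapper above
 * implements: y[i] += a * x[i] for i in [start_n, start_n + length_n).
 * REAL is assumed to come from "axpy.h".
 */
#if 0
void saxpy_range(long start_n, long length_n, REAL a, const REAL *x, REAL *y)
{
    long i;
    #pragma omp parallel for
    for (i = start_n; i < start_n + length_n; i++)
        y[i] += a * x[i];
    /* BLAS equivalent on offset pointers:
       cblas_saxpy(length_n, a, x + start_n, 1, y + start_n, 1); */
}
#endif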
convolutiondepthwise_3x3.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw3x3s1_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); const float bias0 = bias ? bias[g] : 0.f; const float* kernel0 = kernel + g * 9; float* outptr0 = out; float* outptr1 = outptr0 + outw; const float* img0 = bottom_blob.channel(g); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* r3 = img0 + w * 3; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; int i = 0; for (; i + 1 < outh; i += 2) { for (int j = 0; j < outw; j++) { float sum = bias0; float sum2 = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; *outptr0 = sum; *outptr1 = sum2; r0++; r1++; r2++; r3++; outptr0++; outptr1++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr0 += outw; outptr1 += outw; } for (; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr0 = sum; r0++; r1++; r2++; outptr0++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const int tailstep = w - 2 * outw + w; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); const float bias0 = bias ? 
bias[g] : 0.f; const float* kernel0 = kernel + g * 9; float* outptr = out; const float* img0 = bottom_blob.channel(g); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; int i = 0; for (; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr = sum; r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
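/*
 * An illustration-only plain-C reference (not part of ncnn) for the per-group
 * scalar semantics of convdw3x3s1_rvv above, assuming the caller sized the
 * output for a "valid" convolution: outw = w - 2, outh = h - 2 (for the
 * stride-2 variant, outw = (w - 3) / 2 + 1).
 */
#if 0
static void convdw3x3s1_ref(const float* in, float* out, const float* k, float bias, int w, int h)
{
    const int outw = w - 2;
    const int outh = h - 2;
    for (int i = 0; i < outh; i++)
    {
        for (int j = 0; j < outw; j++)
        {
            float sum = bias;
            for (int dy = 0; dy < 3; dy++)
                for (int dx = 0; dx < 3; dx++)
                    sum += in[(i + dy) * w + (j + dx)] * k[dy * 3 + dx];
            out[i * outw + j] = sum;
        }
    }
}
#endif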
critical.c
/* PMSIS includes */ #include "pmsis.h" #include "omp.h" #define ARRAY_SIZE 512 uint32_t a[ARRAY_SIZE] = {0}; uint32_t b[ARRAY_SIZE] = {0}; uint32_t c[ARRAY_SIZE] = {0}; /* Cluster main entry, executed by core 0. */ void cluster_delegate(void *arg) { printf("Cluster master core entry\n"); #pragma omp parallel { printf("[%d %d] Fork entry\n", pi_cluster_id(), omp_get_thread_num() ); #pragma omp for for (int i=0; i<ARRAY_SIZE; i++) { a[i] = 2 * i; b[i] = 3 * i; } for(volatile int i = 0; i < (10000 << omp_get_thread_num()); i++); #pragma omp barrier #pragma omp for for (int i=0; i<ARRAY_SIZE; i++) { c[i] = a[i] + b[i]; printf("[%d %d] c[%d]: %d\n", pi_cluster_id(), omp_get_thread_num(), i, c[i]); } #pragma omp barrier #pragma omp critical { uint32_t sum = 0; for (int i=0; i<ARRAY_SIZE; i++) { sum += c[i]; c[i] += i; } printf("Core sum %d: %d\n", pi_core_id(), sum); } } printf("Cluster master core exit\n"); } void helloworld(void) { printf("Entering main controller\n"); uint32_t errors = 0; uint32_t core_id = pi_core_id(), cluster_id = pi_cluster_id(); printf("[%d %d] Hello World!\n", cluster_id, core_id); struct pi_device cluster_dev = {0}; struct pi_cluster_conf cl_conf = {0}; /* Init cluster configuration structure. */ pi_cluster_conf_init(&cl_conf); cl_conf.id = 0; /* Set cluster ID. */ /* Configure & open cluster. */ pi_open_from_conf(&cluster_dev, &cl_conf); if (pi_cluster_open(&cluster_dev)) { printf("Cluster open failed !\n"); pmsis_exit(-1); } /* Prepare cluster task and send it to cluster. */ struct pi_cluster_task cl_task = {0}; cl_task.entry = cluster_delegate; cl_task.arg = NULL; pi_cluster_send_task_to_cl(&cluster_dev, &cl_task); pi_cluster_close(&cluster_dev); printf("Test success !\n"); pmsis_exit(errors); } /* Program Entry. */ int main(void) { printf("\n\n\t *** PMSIS HelloWorld ***\n\n"); return pmsis_kickoff((void *) helloworld); }
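/*
 * An illustration-only alternative to the pattern above: the critical section
 * serializes the threads so each gets a consistent snapshot while c[] keeps
 * mutating; if only the grand total were wanted, an OpenMP reduction would
 * avoid the serialization entirely.
 */
#if 0
static uint32_t sum_array(const uint32_t *v, int n)
{
    uint32_t total = 0;
    #pragma omp parallel for reduction(+ : total)
    for (int i = 0; i < n; i++)
    {
        total += v[i];
    }
    return total;
}
#endif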
9569.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "atax.h" /* Array initialization. */ static void init_array (int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny)) { int i, j; for (i = 0; i < ny; i++) x[i] = i * M_PI; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) A[i][j] = ((DATA_TYPE) i*(j+1)) / nx; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int nx, DATA_TYPE POLYBENCH_1D(y,NX,nx)) { int i; for (i = 0; i < nx; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_atax(int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny), DATA_TYPE POLYBENCH_1D(y,NY,ny), DATA_TYPE POLYBENCH_1D(tmp,NX,nx)) { int i, j; #pragma scop #pragma omp parallel for schedule(static, 8) num_threads(2) for (i = 0; i < _PB_NY; i++) y[i] = 0; #pragma omp parallel for private (j) schedule(static, 8) num_threads(2) for (i = 0; i < _PB_NX; i++) { tmp[i] = 0; for (j = 0; j < _PB_NY; j++) tmp[i] = tmp[i] + A[i][j] * x[j]; for (j = 0; j < _PB_NY; j++) { /* y[j] is shared across i iterations, so the accumulation must be atomic */ #pragma omp atomic y[j] = y[j] + A[i][j] * tmp[i]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int nx = NX; int ny = NY; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny); POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx); /* Initialize array(s). */ init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_atax (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(x); POLYBENCH_FREE_ARRAY(y); POLYBENCH_FREE_ARRAY(tmp); return 0; }
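/*
 * An illustration-only, race-free alternative to the atomic update in
 * kernel_atax above: interchanging the loops of the y accumulation gives
 * each thread exclusive ownership of a distinct y[j], at the cost of
 * column-wise accesses to A. DATA_TYPE is assumed to come from "atax.h".
 */
#if 0
static void atax_y_update(int nx, int ny, DATA_TYPE A[nx][ny],
                          const DATA_TYPE *tmp, DATA_TYPE *y)
{
  int i, j;
#pragma omp parallel for private(i)
  for (j = 0; j < ny; j++)
    for (i = 0; i < nx; i++)
      y[j] += A[i][j] * tmp[i];
}
#endif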
matmul.c
/* * Matrix multiplication algorithms: simple, Strassen, and Intel BLAS (MKL) * * This file is part of a solution to an Intel Winter Summer School problem * Copyright (c) 2010 Roman Tsisyk <roman@tsisyk.com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include "matmul.h" #include <stddef.h> #include <stdlib.h> #include <stdio.h> #include <omp.h> #ifdef WITH_MKL #include <mkl.h> #endif /* * TODO: malloc checks */ /* * We use the simple algorithm when the product size is less than * kMinStrassen, and the Strassen algorithm otherwise */ static const size_t kMinStrassen = 64L * 64L; // best on the tested 2 x Xeon E5440 /* * TODO: use col-major arrays for B matrix, it may improve prefetching * TODO: replace at_r with inline functions * _r functions for row-major arrays */ #define at_r(M, i, j) (M + (i * rstride##M + j)) #define at_r_ref(M, i, j) (*at_r(M, i, j)) void matmul_init() { #ifdef WITH_MKL mkl_set_dynamic(1); #endif omp_set_dynamic(1); } void matmul_set_num_threads(size_t count) { #ifdef WITH_MKL mkl_set_num_threads(count); #endif omp_set_num_threads(count); } void matmul_fini() { #ifdef WITH_MKL mkl_free_buffers(); #endif } /* void matmul_debug_print(data_t *A, size_t rstrideA, size_t height, size_t width) { const data_t *end0 = A + height * rstrideA; for (; A < end0; A += rstrideA) { const data_t *end1 = A + width; data_t *a = A; for (; a < end1; a++) { printf("%d ", *a); } printf("\n"); } } */ inline void matmul_matmul(size_t heightA, size_t widthA, size_t widthB, data_t *A, size_t rstrideA, data_t *B, size_t rstrideB, data_t *C, size_t rstrideC) { // We don't use MKL here; call matmul_mkl directly if you want it if(heightA * widthB < kMinStrassen) matmul_simple(heightA, widthA, widthB, A, rstrideA, B, rstrideB, C, rstrideC); else matmul_strassen(heightA, widthA, widthB, A, rstrideA, B, rstrideB, C, rstrideC); } /* * Sum of matrices */ static void matmul_add_r(data_t *A, size_t rstrideA, data_t *B, size_t rstrideB, data_t *R, size_t rstrideR, size_t height, size_t width) { const data_t *end0 = A + height * rstrideA; // it makes no sense to parallelize here for (; A < end0; A += rstrideA, B += rstrideB, R += rstrideR) { const data_t *end1 = A + width; data_t *a = A; data_t *b = B; data_t *r = R; for (; a < end1; a++, b++, r++) { *r = *a + *b; } } } /* * Subtraction of matrices */ static void
matmul_sub_r(data_t *A, size_t rstrideA, data_t *B, size_t rstrideB, data_t *R, size_t rstrideR, size_t height, size_t width) { const data_t *end0 = A + height * rstrideA; // it makes no sense to parallelize here for (; A < end0; A += rstrideA, B += rstrideB, R += rstrideR) { const data_t *end1 = A + width; data_t *a = A; data_t *b = B; data_t *r = R; for (; a < end1; a++, b++, r++) { *r = *a - *b; } } } /* * Strassen P1 helper * P = (A11 + A22) * (B11 + B22) */ static void matmul_strassen_P1 (size_t heightA, size_t widthA, size_t widthB, data_t *A1, data_t *A2, size_t rstrideA, data_t *B1, data_t *B2, size_t rstrideB, data_t *P, size_t rstrideP) { const size_t heightB = widthA; data_t *F = (data_t *) malloc(sizeof(data_t) * heightA * widthA); const size_t rstrideF = widthA; data_t *S = (data_t *) malloc(sizeof(data_t) * heightB * widthB); const size_t rstrideS = widthB; #pragma omp parallel sections { #pragma omp section matmul_add_r(A1, rstrideA, A2, rstrideA, F, rstrideF, heightA, widthA); #pragma omp section matmul_add_r(B1, rstrideB, B2, rstrideB, S, rstrideS, heightB, widthB); } // start recursion here matmul_matmul(heightA, widthA, widthB, F, rstrideF, S, rstrideS, P, rstrideP); free(S); free(F); } /* * Strassen P2 and P5 helper * P = (A1 + A2) * B */ static void matmul_strassen_P2_P5 (size_t heightA, size_t widthA, size_t widthB, data_t *A1, data_t *A2, size_t rstrideA, data_t *B, size_t rstrideB, data_t *P, size_t rstrideP) { data_t *F = (data_t *) malloc(sizeof(data_t) * heightA * widthA); const size_t rstrideF = widthA; matmul_add_r(A1, rstrideA, A2, rstrideA, F, rstrideF, heightA, widthA); // start recursion here matmul_matmul(heightA, widthA, widthB, F, rstrideF, B, rstrideB, P, rstrideP); free(F); } /* * Strassen P3 and P4 helper * P = A * (B1 - B2) */ static void matmul_strassen_P3_P4 (size_t heightA, size_t widthA, size_t widthB, data_t *A, size_t rstrideA, data_t *B1, data_t *B2, size_t rstrideB, data_t *P, size_t rstrideP) { const size_t heightB = widthA; data_t *S = (data_t *) malloc(sizeof(data_t) * heightB * widthB); const size_t rstrideS = widthB; matmul_sub_r(B1, rstrideB, B2, rstrideB, S, rstrideS, heightB, widthB); // start recursion here matmul_matmul(heightA, widthA, widthB, A, rstrideA, S, rstrideS, P, rstrideP); free(S); } /* * Strassen P6 and P7 helper * P = (A1 - A2) * (B1 + B2) */ static void matmul_strassen_P6_P7 (size_t heightA, size_t widthA, size_t widthB, data_t *A1, data_t *A2, size_t rstrideA, data_t *B1, data_t *B2, size_t rstrideB, data_t *P, size_t rstrideP) { const size_t heightB = widthA; // Linux malloc is fast enough => no need to parallelize the allocations below data_t *F = (data_t *) malloc(sizeof(data_t) * heightA * widthA); const size_t rstrideF = widthA; data_t *S = (data_t *) malloc(sizeof(data_t) * heightB * widthB); const size_t rstrideS = widthB; #pragma omp parallel sections { #pragma omp section matmul_sub_r(A1, rstrideA, A2, rstrideA, F, rstrideF, heightA, widthA); #pragma omp section matmul_add_r(B1, rstrideB, B2, rstrideB, S, rstrideS, heightB, widthB); } // omp parallel sections // start recursion here matmul_matmul(heightA, widthA, widthB, F, rstrideF, S, rstrideS, P, rstrideP); free(S); free(F); } /* * Strassen C11 and C22 helper * C = ([P1=C] + P2) + (P3 - P4) */ static void matmul_strassen_C11_C22 ( data_t *P1, size_t rstrideP1, data_t *P2, size_t rstrideP2, data_t *P3, size_t rstrideP3, data_t *P4, size_t rstrideP4, size_t heightP, size_t widthP) { data_t *S = (data_t *) malloc(sizeof(data_t) * heightP * widthP); const size_t rstrideS =
widthP; #pragma omp parallel sections { #pragma omp section matmul_add_r(P1, rstrideP1, P2, rstrideP2, P1, rstrideP1, heightP, widthP); #pragma omp section matmul_sub_r(P3, rstrideP3, P4, rstrideP4, S, rstrideS, heightP, widthP); } // sync matmul_add_r(P1, rstrideP1, S, rstrideS, P1, rstrideP1, heightP, widthP); free(S); } /* * Strassen C12 and C21 helper * C = [P1=C] + P2 */ static void matmul_strassen_C12_C21 ( data_t *P1, size_t rstrideP1, data_t *P2, size_t rstrideP2, size_t heightP, size_t widthP) { matmul_add_r(P1, rstrideP1, P2, rstrideP2, P1, rstrideP1, heightP, widthP); } static void matmul_strassen_fix_heightA_odd(size_t heightA, size_t widthA, size_t widthB, data_t *A, size_t rstrideA, data_t *B, size_t rstrideB, data_t *C, size_t rstrideC) { A += (heightA - 1) * rstrideA; C += (heightA - 1) * rstrideC; matmul_simple(1, widthA, widthB, A, rstrideA, B, rstrideB, C, rstrideC); } static void matmul_strassen_fix_widthB_odd(size_t heightA, size_t widthA, size_t widthB, data_t *A, size_t rstrideA, data_t *B, size_t rstrideB, data_t *C, size_t rstrideC) { B += (widthB - 1); C += (widthB - 1); // edge was fixed by fix_heightA heightA &= ~1; matmul_simple(heightA, widthA, 1, A, rstrideA, B, rstrideB, C, rstrideC); } static void matmul_strassen_fix_widthA_odd(size_t heightA, size_t widthA, size_t widthB, data_t *A, size_t rstrideA, data_t *B, size_t rstrideB, data_t *C, size_t rstrideC) { size_t i; size_t j; size_t k = widthA - 1; // edges were fixed by fix_heightA and fix_widthB heightA &= ~1; widthB &= ~1; // FIXME: replace at_h with pointers #pragma omp parallel for for (i = 0; i < heightA; i++) { for (j = 0; j < widthB; j++) { at_r_ref(C, i, j) += at_r_ref(A, i, k) * at_r_ref(B, k, j); } } } /* * Strassen algorithm for multiplication * @see http://en.wikipedia.org/wiki/Strassen_algorithm */ void matmul_strassen(size_t heightA, size_t widthA, size_t widthB, data_t *A, size_t rstrideA, data_t *B, size_t rstrideB, data_t *C, size_t rstrideC) { /* * divide matrices first */ const size_t heightAh = heightA >> 1; const size_t widthAh = widthA >> 1; const size_t heightBh = widthAh; const size_t widthBh = widthB >> 1; // A data_t *A11 = at_r(A, 0, 0); data_t *A12 = at_r(A, 0, widthAh); data_t *A21 = at_r(A, heightAh, 0); data_t *A22 = at_r(A, heightAh, widthAh); // B data_t *B11 = at_r(B, 0, 0); data_t *B12 = at_r(B, 0, widthBh); data_t *B21 = at_r(B, heightBh, 0); data_t *B22 = at_r(B, heightBh, widthBh); // C data_t *C11 = at_r(C, 0, 0); data_t *C12 = at_r(C, 0, widthBh); data_t *C21 = at_r(C, heightAh, 0); data_t *C22 = at_r(C, heightAh, widthBh); const size_t heightP = heightAh; const size_t widthP = widthBh; data_t *P1 = (data_t *) malloc(sizeof(data_t) * heightP * widthP); const size_t rstrideP1 = widthP; data_t *P2 = C21; const size_t rstrideP2 = rstrideC; data_t *P3 = (data_t *) malloc(sizeof(data_t) * heightP * widthP); const size_t rstrideP3 = widthP; data_t *P4 = (data_t *) malloc(sizeof(data_t) * heightP * widthP); const size_t rstrideP4 = widthP; data_t *P5 = C12; const size_t rstrideP5 = rstrideC; data_t *P6 = C22; const size_t rstrideP6 = rstrideC; data_t *P7 = C11; const size_t rstrideP7 = rstrideC; #pragma omp parallel sections { // P1 #pragma omp section matmul_strassen_P1(heightAh, widthAh, widthBh, A11, A22, rstrideA, B11, B22, rstrideB, P1, rstrideP1); // P2, P5 #pragma omp section matmul_strassen_P2_P5(heightAh, widthAh, widthBh, A21, A22, rstrideA, B11, rstrideB, P2, rstrideP2); #pragma omp section matmul_strassen_P2_P5(heightAh, widthAh, widthBh, A11, A12, rstrideA, 
B22, rstrideB, P5, rstrideP5); // P3, P4 #pragma omp section matmul_strassen_P3_P4(heightAh, widthAh, widthBh, A11, rstrideA, B12, B22, rstrideB, P3, rstrideP3); #pragma omp section matmul_strassen_P3_P4(heightAh, widthAh, widthBh, A22, rstrideA, B21, B11, rstrideB, P4, rstrideP4); // P6, P7 #pragma omp section matmul_strassen_P6_P7(heightAh, widthAh, widthBh, A21, A11, rstrideA, B11, B12, rstrideB, P6, rstrideP6); #pragma omp section matmul_strassen_P6_P7(heightAh, widthAh, widthBh, A12, A22, rstrideA, B21, B22, rstrideB, P7, rstrideP7); } // omp paralell sections #pragma omp parallel sections { #pragma omp section matmul_strassen_C11_C22( P7, rstrideP7, P1, rstrideP1, P4, rstrideP4, P5, rstrideP5, heightP, widthP); #pragma omp section matmul_strassen_C11_C22( P6, rstrideP6, P1, rstrideP1, P3, rstrideP3, P2, rstrideP2, heightP, widthP); } // omp paralell sections #pragma omp parallel sections { #pragma omp section matmul_strassen_C12_C21(P5, rstrideP5, P3, rstrideP3, heightP, widthP); #pragma omp section matmul_strassen_C12_C21(P2, rstrideP2, P4, rstrideP4, heightP, widthP); } // omp paralell sections /* * Fix odd */ #pragma omp parallel sections { #pragma omp section if (heightA & 1) // heightA is odd matmul_strassen_fix_heightA_odd(heightA, widthA, widthB, A, rstrideA, B, rstrideB, C, rstrideC); #pragma omp section if (widthB & 1) // widthB is odd matmul_strassen_fix_widthB_odd(heightA, widthA, widthB, A, rstrideA, B, rstrideB, C, rstrideC); #pragma omp section if (widthA & 1) // widthA is odd matmul_strassen_fix_widthA_odd(heightA, widthA, widthB, A, rstrideA, B, rstrideB, C, rstrideC); } // omp paralell sections // sync free(P4); free(P3); free(P1); } /* * Simple, three-loop multiplication */ void matmul_simple(size_t heightA, size_t widthA, size_t widthB, data_t *A, size_t rstrideA, data_t *B, size_t rstrideB, data_t *C, size_t rstrideC) { size_t i; size_t j; size_t k; #pragma omp parallel for for (i = 0; i < heightA; i++) { for (j = 0; j < widthB; j++) { data_t sum = 0; /* unsafe, slowly, no sense #pragma omp parallel for reduction(+:sum) */ for(k = 0; k < widthA; k++) { sum += at_r_ref(A, i, k) * at_r_ref(B, k, j); } at_r_ref(C, i, j) = sum; } } } #ifdef WITH_MKL #ifdef USE_INT #error Not implemented yet #endif /* * MKL multiplication */ void matmul_mkl(size_t heightA, size_t widthA, size_t widthB, data_t *A, size_t rstrideA, data_t *B, size_t rstrideB, data_t *C, size_t rstrideC) { // C := alpha*op(A)*op(B) + beta*C cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, heightA, widthB, widthA, 1.0, A, rstrideA, B, rstrideB, 0.0, C, rstrideC); } #endif #undef at_r_ref #undef at_r
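For reference, a minimal usage sketch of the API above. This is illustrative and not part of the original file: it assumes data_t is double (as the cblas_dgemm path implies) and that matmul.h declares the entry points defined here and is safe to include from C++.

// Illustrative usage sketch (assumptions: data_t is double, matmul.h is
// includable from C++ and declares the functions defined above).
#include "matmul.h"
#include <vector>

int main() {
    const size_t n = 512;
    std::vector<data_t> A(n * n, 1.0), B(n * n, 2.0), C(n * n, 0.0);
    matmul_init();
    matmul_set_num_threads(4);
    // for densely packed row-major matrices the row stride equals the row length
    matmul_strassen(n, n, n, A.data(), n, B.data(), n, C.data(), n);
    // every element of C should now equal 2.0 * n
    matmul_fini();
    return 0;
}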
target_teams_distribute_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute simd'}}
#pragma omp target teams distribute simd

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute simd'}}
#pragma omp target teams distribute simd foo

void test_no_clause() {
  int i;
#pragma omp target teams distribute simd
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{statement after '#pragma omp target teams distribute simd' must be a for loop}}
#pragma omp target teams distribute simd
  ++i;
}

void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;
  int x[24];

#pragma omp target teams distribute simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

void test_invalid_clause() {
  int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}}
#pragma omp target teams distribute simd foo bar
  for (i = 0; i < 16; ++i)
    ;
}

void test_non_identifiers() {
  int i, x;

// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}}
#pragma omp target teams distribute simd;
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}}
#pragma omp target teams distribute simd private(x);
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}}
#pragma omp target teams distribute simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}

extern int foo();

void test_collapse() {
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute simd collapse
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute simd collapse(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd collapse()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
#pragma omp target teams distribute simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target teams distribute simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target teams distribute simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+4 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp target teams distribute simd collapse(2) firstprivate(i) // expected-note {{defined as firstprivate}}
  for (i = 0; i < 16; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp target teams distribute simd' directive may not be firstprivate, predetermined as lastprivate}}
    for (int j = 0; j < 16; ++j)
#pragma omp parallel for reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}

void test_private() {
  int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute simd private(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd private(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd private(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd private()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd private(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute simd private(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp target teams distribute simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}

void test_lastprivate() {
  int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp target teams distribute simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

void test_firstprivate() {
  int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd firstprivate()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute simd firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
// expected-error@+1 {{lastprivate variable cannot be firstprivate}} expected-note@+1 {{defined as lastprivate}}
#pragma omp target teams distribute simd lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{lastprivate variable cannot be firstprivate}} expected-note@+1 2 {{defined as lastprivate}}
#pragma omp target teams distribute simd lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 3 {{lastprivate variable cannot be firstprivate}} expected-note@+1 3 {{defined as lastprivate}}
#pragma omp target teams distribute simd lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp target teams distribute simd simdlen(64) safelen(8)
  for (i = 0; i < 16; ++i)
    ;
}

void test_loop_messages() {
  float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}
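For contrast with the negative tests above, here is a minimal well-formed use of the directive (illustrative, not part of the test file): the directive must be followed by canonical for loops matching the collapse depth, and clause arguments must be strictly positive integer constants.

// Illustrative well-formed counterpart to the diagnostics exercised above.
void test_valid_use() {
  int x = 0;
#pragma omp target teams distribute simd collapse(2) private(x)
  for (int i = 0; i < 16; ++i)
    for (int j = 0; j < 16; ++j)
      x = i + j; // x is private, so each lane writes its own copy
}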
projectq.h
/** * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef MINDQUANTUM_BACKENDS_PROJECTQ_PROJECTQ_H_ #define MINDQUANTUM_BACKENDS_PROJECTQ_PROJECTQ_H_ #include <cmath> #include <functional> #include <memory> #include <random> #include <string> #include <thread> #include <vector> #include "gate/basic_gate.h" #include "gate/gates.h" #include "hamiltonian/hamiltonian.h" #include "pr/parameter_resolver.h" #include "projectq/backends/_sim/_cppkernels/simulator.hpp" #include "projectq_utils.h" namespace mindquantum { namespace omp { #ifdef _MSC_VER typedef int64_t idx_t; #else typedef uint64_t idx_t; #endif // _MSC_VER } // namespace omp namespace projectq { template <typename T> class Projectq : public Simulator { private: unsigned seed; unsigned n_qubits_; VT<unsigned> ordering_; unsigned len_; RndEngine rnd_eng_; std::function<double()> rng_; public: Projectq() : Simulator(1, 1), n_qubits_(1), rnd_eng_(1), seed(1) { for (unsigned i = 0; i < n_qubits_; i++) { ordering_.push_back(i); } len_ = (1UL << (n_qubits_ + 1)); std::uniform_real_distribution<double> dist(0., 1.); rng_ = std::bind(dist, std::ref(rnd_eng_)); } Projectq(unsigned seed, unsigned N) : Simulator(seed, N), n_qubits_(N), rnd_eng_(seed), seed(seed) { for (unsigned i = 0; i < n_qubits_; i++) { ordering_.push_back(i); } len_ = (1UL << (n_qubits_ + 1)); std::uniform_real_distribution<double> dist(0., 1.); rng_ = std::bind(dist, std::ref(rnd_eng_)); } Projectq(unsigned seed, unsigned N, calc_type *vec) : Simulator(seed, N), n_qubits_(N), rnd_eng_(seed), seed(seed) { for (unsigned i = 0; i < n_qubits_; i++) { ordering_.push_back(i); } len_ = (1UL << (n_qubits_ + 1)); set_wavefunction(vec, ordering_); std::uniform_real_distribution<double> dist(0., 1.); rng_ = std::bind(dist, std::ref(rnd_eng_)); } void InitializeSimulator() { if (vec_ != NULL) { free(vec_); } vec_ = (StateVector) calloc(len_, sizeof(calc_type)); vec_[0] = 1; } void InitializeSimulator(const VT<BasicGate<T>> &circ) { Projectq::InitializeSimulator(); Projectq::ApplyCircuit(circ); } void InitializeSimulator(CTP<T> vec) { } void SetState(VT<CT<T>> vec) { set_wavefunction(reinterpret_cast<calc_type *>(vec.data()), ordering_); } void ApplyGate(const BasicGate<T> &gate) { if (gate.is_pauli_channel_) { // gate is constructed be like: BasicGate(cPL, true, px, py, pz) VT<BasicGate<T>> gate_list_ = {XGate<T>, YGate<T>, ZGate<T>, IGate<T>}; double r = static_cast<double>(rng_()); // std::cout << "r = " << r << std::endl; auto it = std::lower_bound(gate.cumulative_probs_.begin(), gate.cumulative_probs_.end(), r); size_t gate_index; if (it != gate.cumulative_probs_.begin()) { gate_index = std::distance(gate.cumulative_probs_.begin(), it) - 1; } else { gate_index = 0; } BasicGate<T> gate_ = gate_list_[gate_index]; // Select the gate to execute according to r. 
// std::cout << gate_.name_ << std::endl; Projectq::apply_controlled_gate(MCast<T>(gate_.base_matrix_.matrix_), VCast(gate.obj_qubits_), VCast(gate.ctrl_qubits_)); } else if (gate.is_damping_channel_) { VT<T> base_index; for (unsigned i = 0; i < (1 << n_qubits_); i++) { if ((i >> gate.obj_qubits_[0]) & 1) { base_index.push_back(i); } } VT<CT<T>> curr_state_ = Projectq::cheat(); double reduced_factor_ = 0; for (unsigned i = 0; i < base_index.size(); i++) { reduced_factor_ += (curr_state_[base_index[i]] * std::conj(curr_state_[base_index[i]])).real(); } if (reduced_factor_ < pow(10, -8)) { return; } double prob_ = gate.damping_coeff_ * reduced_factor_; double r = static_cast<double>(rng_()); VT<CT<T>> aim_state_ = curr_state_; if (gate.name_ == "ADC") { unsigned j = 0; unsigned k = 0; for (unsigned i = 0; i < aim_state_.size(); i++) { if (base_index[j] == i) { aim_state_[i] = 0; j++; } else { aim_state_[i] = aim_state_[base_index[k]] / sqrt(reduced_factor_); k++; } } } else if (gate.name_ == "PDC") { unsigned j = 0; for (unsigned i = 0; i < aim_state_.size(); i++) { if (base_index[j] == i) { aim_state_[i] = aim_state_[i] / sqrt(reduced_factor_); j++; } else { aim_state_[i] = 0; } } } else { return; } if (r <= prob_) { Projectq::SetState(aim_state_); } else { VT<CT<T>> remain_state_ = curr_state_; unsigned j = 0; for (unsigned i = 0; i < remain_state_.size(); i++) { if (base_index[j] == i) { remain_state_[i] = remain_state_[i] * sqrt(1 - gate.damping_coeff_) / sqrt(1 - prob_); j++; } else { remain_state_[i] = remain_state_[i] / sqrt(1 - prob_); } } Projectq::SetState(remain_state_); } } else { Projectq::apply_controlled_gate(MCast<T>(gate.base_matrix_.matrix_), VCast(gate.obj_qubits_), VCast(gate.ctrl_qubits_)); } } void ApplyGate(const BasicGate<T> &gate, const ParameterResolver<T> &pr, bool diff = false) { T theta = gate.params_.Combination(pr).const_value; if (diff) { Projectq::apply_controlled_gate(MCast<T>(gate.param_diff_matrix_(theta).matrix_), VCast(gate.obj_qubits_), VCast(gate.ctrl_qubits_)); if (gate.ctrl_qubits_.size() != 0) { auto ctrl_mask = GetControlMask(gate.ctrl_qubits_); #pragma omp parallel for schedule(static) for (Index i = 0; i < (len_ >> 1); i++) { if ((i & ctrl_mask) != ctrl_mask) { this->vec_[2 * i] = 0; this->vec_[2 * i + 1] = 0; } } } } else { Projectq::apply_controlled_gate(MCast<T>(gate.param_matrix_(theta).matrix_), VCast(gate.obj_qubits_), VCast(gate.ctrl_qubits_)); } } unsigned ApplyMeasure(const BasicGate<T> &gate) { run(); auto qubit = gate.obj_qubits_[0]; auto mask = (1UL << qubit); calc_type zero_amps = 0; // #pragma omp parallel for schedule(static) reduction(+ : zero_amps) for (unsigned i = 0; i < (len_ >> 1); i++) { if ((i & mask) == 0) { zero_amps += vec_[2 * i] * vec_[2 * i] + vec_[2 * i + 1] * vec_[2 * i + 1]; } } unsigned collapse = (static_cast<unsigned>(rng_() > zero_amps) << qubit); auto norm = (collapse == 0) ? 
sqrt(zero_amps) : sqrt(1 - zero_amps); #pragma omp parallel for schedule(static) for (omp::idx_t i = 0; i < (len_ >> 1); i++) { if ((i & mask) == collapse) { vec_[2 * i] /= norm; vec_[2 * i + 1] /= norm; } else { vec_[2 * i] = 0; vec_[2 * i + 1] = 0; } } return (collapse >> qubit); } void ApplyCircuit(const VT<BasicGate<T>> &circ) { for (auto &gate : circ) { Projectq::ApplyGate(gate); } Projectq::run(); } void ApplyCircuit(const VT<BasicGate<T>> &circ, const ParameterResolver<T> &pr) { for (auto &gate : circ) { if (gate.parameterized_) { Projectq::ApplyGate(gate, pr); } else { Projectq::ApplyGate(gate); } } Projectq::run(); } VT<unsigned> Sampling(const VT<BasicGate<T>> &circ, const ParameterResolver<T> &pr, size_t shots, const MST<size_t> &key_map, unsigned seed) { auto key_size = key_map.size(); VT<unsigned> res(shots * key_size); RndEngine rnd_eng = RndEngine(seed); std::uniform_real_distribution<double> dist(1.0, (1 << 20) * 1.0); std::function<double()> rng = std::bind(dist, std::ref(rnd_eng)); for (size_t i = 0; i < shots; i++) { Projectq<T> sim = Projectq<T>(static_cast<unsigned>(rng()), n_qubits_, vec_); auto res0 = sim.ApplyCircuitWithMeasure(circ, pr, key_map); for (size_t j = 0; j < key_size; j++) { res[i * key_size + j] = res0[j]; } } return res; } VT<unsigned> ApplyCircuitWithMeasure(const VT<BasicGate<T>> &circ, const ParameterResolver<T> &pr, const MST<size_t> &key_map) { auto key_size = key_map.size(); VT<unsigned> res(key_size); for (auto &gate : circ) { if (gate.is_measure_) { auto collapse = ApplyMeasure(gate); res[key_map.at(gate.name_)] = collapse; } else if (gate.parameterized_) { ApplyGate(gate, pr); } else { ApplyGate(gate); } } return res; } void ApplyHamiltonian(const Hamiltonian<T> &ham) { Projectq::run(); if (ham.how_to_ == ORIGIN) { Projectq::apply_qubit_operator(HCast<T>(ham.ham_), Projectq::ordering_); } else if (ham.how_to_ == BACKEND) { Projectq::vec_ = sparse::Csr_Dot_Vec<T, double>(ham.ham_sparse_main_, ham.ham_sparse_second_, Projectq::vec_); } else { Projectq::vec_ = sparse::Csr_Dot_Vec<T, double>(ham.ham_sparse_main_, Projectq::vec_); } } VT<CT<T>> RightSizeGrad(calc_type *left_vec, calc_type *right_vec, const Hamiltonian<T> &ham, const VT<BasicGate<T>> &circ, const VT<BasicGate<T>> &herm_circ, const ParameterResolver<T> &pr, const MST<size_t> &p_map) { VT<CT<T>> f_g(p_map.size() + 1, 0); Projectq<T> sim_left = Projectq<T>(this->seed, n_qubits_, left_vec); sim_left.ApplyHamiltonian(ham); f_g[0] = ComplexInnerProduct<T, calc_type>(sim_left.vec_, right_vec, static_cast<Index>(len_)); Projectq<T> sim_right = Projectq<T>(this->seed, n_qubits_, right_vec); Projectq<T> sim_right_tmp = Projectq<T>(this->seed, n_qubits_); for (size_t j = 0; j < circ.size(); j++) { if ((!herm_circ[j].parameterized_) || (herm_circ[j].params_.data_.size() == herm_circ[j].params_.no_grad_parameters_.size())) { if (herm_circ[j].parameterized_) { sim_left.ApplyGate(herm_circ[j], pr, false); sim_right.ApplyGate(herm_circ[j], pr, false); } else { sim_left.ApplyGate(herm_circ[j]); sim_right.ApplyGate(herm_circ[j]); } } else { sim_right.ApplyGate(herm_circ[j], pr, false); sim_right.run(); sim_right_tmp.set_wavefunction(sim_right.vec_, ordering_); sim_right_tmp.ApplyGate(circ[circ.size() - j - 1], pr, true); sim_right_tmp.run(); sim_left.run(); CT<T> gi = 0; if (herm_circ[j].ctrl_qubits_.size() == 0) { gi = ComplexInnerProduct<T, calc_type>(sim_left.vec_, sim_right_tmp.vec_, static_cast<Index>(len_)); } else { gi = ComplexInnerProductWithControl<T, calc_type>(sim_left.vec_, 
sim_right_tmp.vec_, static_cast<Index>(len_), GetControlMask(herm_circ[j].ctrl_qubits_)); } for (auto &it : herm_circ[j].params_.GetRequiresGradParameters()) { f_g[1 + p_map.at(it)] += circ[circ.size() - j - 1].params_.data_.at(it) * gi; } sim_left.ApplyGate(herm_circ[j], pr, false); } } return f_g; } CT<T> GetExpectation(const Hamiltonian<T> &ham) { Projectq<T> sim = Projectq<T>(this->seed, n_qubits_, vec_); sim.ApplyHamiltonian(ham); auto out = ComplexInnerProduct<T, calc_type>(sim.vec_, vec_, static_cast<Index>(len_)); return out; } VT<VT<CT<T>>> HermitianMeasureWithGrad(const VT<Hamiltonian<T>> &hams, const VT<BasicGate<T>> &circ, const VT<BasicGate<T>> &herm_circ, const ParameterResolver<T> &pr, const MST<size_t> &p_map, size_t mea_threads) { auto n_hams = hams.size(); auto n_params = pr.data_.size(); VT<VT<CT<T>>> output; for (size_t i = 0; i < n_hams; i++) { output.push_back({}); for (size_t j = 0; j < n_params + 1; j++) { output[i].push_back({0, 0}); } } Projectq<T> sim = Projectq<T>(this->seed, n_qubits_, vec_); sim.ApplyCircuit(circ, pr); if (n_hams == 1) { auto f_g = sim.RightSizeGrad(sim.vec_, sim.vec_, hams[0], circ, herm_circ, pr, p_map); for (size_t g = 1; g < n_params + 1; g++) { f_g[g] += std::conj(f_g[g]); } output[0] = f_g; } else { std::vector<std::thread> tasks; tasks.reserve(mea_threads); size_t end = 0; size_t offset = n_hams / mea_threads; size_t left = n_hams % mea_threads; for (size_t i = 0; i < mea_threads; ++i) { size_t start = end; end = start + offset; if (i < left) { end += 1; } auto task = [&, start, end]() { for (size_t n = start; n < end; n++) { auto f_g = sim.RightSizeGrad(sim.vec_, sim.vec_, hams[n], circ, herm_circ, pr, p_map); for (size_t g = 1; g < n_params + 1; g++) { f_g[g] += std::conj(f_g[g]); } output[n] = f_g; } }; tasks.emplace_back(task); } for (auto &t : tasks) { t.join(); } } return output; } VT<VT<VT<CT<T>>>> HermitianMeasureWithGrad(const VT<Hamiltonian<T>> &hams, const VT<BasicGate<T>> &circ, const VT<BasicGate<T>> &herm_circ, const VVT<T> &enc_data, const VT<T> &ans_data, const VS &enc_name, const VS &ans_name, size_t batch_threads, size_t mea_threads) { auto n_hams = hams.size(); auto n_prs = enc_data.size(); auto n_params = enc_name.size() + ans_name.size(); VT<VT<VT<CT<T>>>> output; for (size_t i = 0; i < n_prs; i++) { output.push_back({}); for (size_t j = 0; j < n_hams; j++) { output[i].push_back({}); for (size_t k = 0; k < n_params + 1; k++) { output[i][j].push_back({0, 0}); } } } MST<size_t> p_map; for (size_t i = 0; i < enc_name.size(); i++) { p_map[enc_name[i]] = i; } for (size_t i = 0; i < ans_name.size(); i++) { p_map[ans_name[i]] = i + enc_name.size(); } if (n_prs == 1) { ParameterResolver<T> pr = ParameterResolver<T>(); pr.SetItems(enc_name, enc_data[0]); pr.SetItems(ans_name, ans_data); output[0] = HermitianMeasureWithGrad(hams, circ, herm_circ, pr, p_map, mea_threads); } else { std::vector<std::thread> tasks; tasks.reserve(batch_threads); size_t end = 0; size_t offset = n_prs / batch_threads; size_t left = n_prs % batch_threads; for (size_t i = 0; i < batch_threads; ++i) { size_t start = end; end = start + offset; if (i < left) { end += 1; } auto task = [&, start, end]() { for (size_t n = start; n < end; n++) { ParameterResolver<T> pr = ParameterResolver<T>(); pr.SetItems(enc_name, enc_data[n]); pr.SetItems(ans_name, ans_data); auto f_g = HermitianMeasureWithGrad(hams, circ, herm_circ, pr, p_map, mea_threads); output[n] = f_g; } }; tasks.emplace_back(task); } for (auto &t : tasks) { t.join(); } } return output; } 
VT<VT<CT<T>>> NonHermitianMeasureWithGrad(const VT<Hamiltonian<T>> &hams, const VT<Hamiltonian<T>> &herm_hams, const VT<BasicGate<T>> &left_circ, const VT<BasicGate<T>> &herm_left_circ, const VT<BasicGate<T>> &right_circ, const VT<BasicGate<T>> &herm_right_circ, const ParameterResolver<T> &pr, const MST<size_t> &p_map, size_t mea_threads, const StateVector varphi) { auto n_hams = hams.size(); auto n_params = pr.data_.size(); VT<VT<CT<T>>> output; for (size_t i = 0; i < n_hams; i++) { output.push_back({}); for (size_t j = 0; j < n_params + 1; j++) { output[i].push_back({0, 0}); } } Projectq<T> sim = Projectq<T>(this->seed, n_qubits_, vec_); sim.ApplyCircuit(right_circ, pr); Projectq<T> sim2 = Projectq<T>(this->seed, n_qubits_, varphi); sim2.ApplyCircuit(left_circ, pr); if (n_hams == 1) { auto f_g1 = sim2.RightSizeGrad(sim.vec_, sim2.vec_, hams[0], left_circ, herm_left_circ, pr, p_map); auto f_g2 = sim.RightSizeGrad(sim2.vec_, sim.vec_, herm_hams[0], right_circ, herm_right_circ, pr, p_map); for (size_t g = 1; g < n_params + 1; g++) { f_g2[g] += std::conj(f_g1[g]); } output[0] = f_g2; } else { std::vector<std::thread> tasks; tasks.reserve(mea_threads); size_t end = 0; size_t offset = n_hams / mea_threads; size_t left = n_hams % mea_threads; for (size_t i = 0; i < mea_threads; i++) { size_t start = end; end = start + offset; if (i < left) { end += 1; } auto task = [&, start, end]() { for (size_t n = start; n < end; n++) { auto f_g1 = sim2.RightSizeGrad(sim.vec_, sim2.vec_, hams[n], left_circ, herm_left_circ, pr, p_map); auto f_g2 = sim.RightSizeGrad(sim2.vec_, sim.vec_, herm_hams[n], right_circ, herm_right_circ, pr, p_map); for (size_t g = 1; g < n_params + 1; g++) { f_g2[g] += std::conj(f_g1[g]); } output[n] = f_g2; } }; tasks.emplace_back(task); } for (auto &t : tasks) { t.join(); } } return output; } VT<VT<VT<CT<T>>>> NonHermitianMeasureWithGrad( const VT<Hamiltonian<T>> &hams, const VT<Hamiltonian<T>> &herm_hams, const VT<BasicGate<T>> &left_circ, const VT<BasicGate<T>> &herm_left_circ, const VT<BasicGate<T>> &right_circ, const VT<BasicGate<T>> &herm_right_circ, const VVT<T> &enc_data, const VT<T> &ans_data, const VS &enc_name, const VS &ans_name, size_t batch_threads, size_t mea_threads, const Projectq<T> &simulator_left) { StateVector varphi = simulator_left.vec_; auto n_hams = hams.size(); auto n_prs = enc_data.size(); auto n_params = enc_name.size() + ans_name.size(); VT<VT<VT<CT<T>>>> output; for (size_t i = 0; i < n_prs; i++) { output.push_back({}); for (size_t j = 0; j < n_hams; j++) { output[i].push_back({}); for (size_t k = 0; k < n_params + 1; k++) { output[i][j].push_back({0, 0}); } } } MST<size_t> p_map; for (size_t i = 0; i < enc_name.size(); i++) { p_map[enc_name[i]] = i; } for (size_t i = 0; i < ans_name.size(); i++) { p_map[ans_name[i]] = i + enc_name.size(); } if (n_prs == 1) { ParameterResolver<T> pr = ParameterResolver<T>(); pr.SetItems(enc_name, enc_data[0]); pr.SetItems(ans_name, ans_data); output[0] = NonHermitianMeasureWithGrad(hams, herm_hams, left_circ, herm_left_circ, right_circ, herm_right_circ, pr, p_map, mea_threads, varphi); } else { std::vector<std::thread> tasks; tasks.reserve(batch_threads); size_t end = 0; size_t offset = n_prs / batch_threads; size_t left = n_prs % batch_threads; for (size_t i = 0; i < batch_threads; ++i) { size_t start = end; end = start + offset; if (i < left) { end += 1; } auto task = [&, start, end]() { for (size_t n = start; n < end; n++) { ParameterResolver<T> pr = ParameterResolver<T>(); pr.SetItems(enc_name, enc_data[n]); 
pr.SetItems(ans_name, ans_data);
                        auto f_g = NonHermitianMeasureWithGrad(hams, herm_hams, left_circ, herm_left_circ,
                                                               right_circ, herm_right_circ, pr, p_map,
                                                               mea_threads, varphi);
                        output[n] = f_g;
                    }
                };
                tasks.emplace_back(task);
            }
            for (auto &t : tasks) {
                t.join();
            }
        }
        return output;
    }

    void PrintInfo() const {
        std::cout << n_qubits_ << " qubits simulator with current quantum state:" << std::endl;
        for (unsigned i = 0; i < (len_ >> 1); i++) {
            std::cout << "(" << vec_[2 * i] << ", " << vec_[2 * i + 1] << ")" << std::endl;
        }
    }

    VVT<CT<calc_type>> GetCircuitMatrix(const VT<BasicGate<T>> &circ, const ParameterResolver<T> &pr) {
        VVT<CT<calc_type>> out((1 << n_qubits_));
#pragma omp parallel for schedule(static)
        for (omp::idx_t i = 0; i < (1UL << n_qubits_); i++) {
            Projectq<T> sim = Projectq<T>(this->seed, n_qubits_);
            sim.vec_[0] = 0;
            sim.vec_[2 * i] = 1;
            sim.ApplyCircuit(circ, pr);
            out[i] = sim.cheat();
        }
        return out;
    }

    auto GetLen() const {
        return this->len_;
    }

    std::shared_ptr<Projectq<T>> Copy() {
        return std::make_shared<Projectq<T>>(this->seed, n_qubits_, vec_);
    }
};

template <typename T>
CT<T> InnerProduct(const Projectq<T> &bra, const Projectq<T> &ket) {
    auto res = ComplexInnerProduct<T, Simulator::calc_type>(bra.vec_, ket.vec_, bra.GetLen());
    return res;
}
}  // namespace projectq
}  // namespace mindquantum
#endif  // MINDQUANTUM_BACKENDS_PROJECTQ_PROJECTQ_H_
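A small standalone sketch (not part of the header) of the amplitude layout the methods above rely on: vec_ holds 2^n_qubits_ complex amplitudes as interleaved (real, imag) values, which is why len_ = 2^(n_qubits_+1) and why ApplyMeasure reads vec_[2*i] and vec_[2*i+1]. The helper below is hypothetical and assumes calc_type is double; it computes the probability of measuring |0> on a qubit the same way ApplyMeasure accumulates zero_amps.

// Hypothetical helper, assuming calc_type is double: probability of reading
// |0> on `qubit` given an interleaved (real, imag) state vector of n_qubits.
#include <cstddef>

double zero_probability(const double *vec, std::size_t n_qubits, unsigned qubit) {
    const std::size_t dim = std::size_t(1) << n_qubits;   // number of amplitudes
    const std::size_t mask = std::size_t(1) << qubit;     // selects the qubit bit
    double p0 = 0.0;
    for (std::size_t i = 0; i < dim; ++i) {
        if ((i & mask) == 0) {
            // |amplitude|^2 = re^2 + im^2 with the interleaved layout
            p0 += vec[2 * i] * vec[2 * i] + vec[2 * i + 1] * vec[2 * i + 1];
        }
    }
    return p0;
}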
parallel-inl.h
// Copyright (c) 2022 Feng Yang // // I am making my contributions/submissions to this project solely in my // personal capacity and am not conveying any rights to any intellectual // property of any third parties. #ifndef INCLUDE_VOX_DETAIL_PARALLEL_INL_H_ #define INCLUDE_VOX_DETAIL_PARALLEL_INL_H_ #include "constants.h" #include "macros.h" #include <algorithm> #include <functional> #include <future> #include <vector> #define VOX_TASKING_CPP11THREADS #ifdef VOX_TASKING_TBB #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <tbb/parallel_sort.h> #include <tbb/task.h> #elif defined(VOX_TASKING_CPP11THREADS) #include <thread> #endif namespace vox { namespace internal { // NOTE - This abstraction takes a lambda which should take captured // variables by *value* to ensure no captured references race // with the task itself. template <typename TASK_T> inline void schedule(TASK_T&& fcn) { #ifdef VOX_TASKING_TBB struct LocalTBBTask : public tbb::task { TASK_T func; tbb::task* execute() override { func(); return nullptr; } LocalTBBTask(TASK_T&& f) : func(std::forward<TASK_T>(f)) {} }; auto* tbb_node = new (tbb::task::allocate_root()) LocalTBBTask(std::forward<TASK_T>(fcn)); tbb::task::enqueue(*tbb_node); #elif defined(VOX_TASKING_CPP11THREADS) std::thread thread(fcn); thread.detach(); #else // OpenMP or Serial --> synchronous! fcn(); #endif } template <typename TASK_T> using operator_return_t = typename std::result_of<TASK_T()>::type; // NOTE - see above, same issues associated with schedule() template <typename TASK_T> inline auto async(TASK_T&& fcn) -> std::future<operator_return_t<TASK_T>> { using package_t = std::packaged_task<operator_return_t<TASK_T>()>; auto task = new package_t(std::forward<TASK_T>(fcn)); auto future = task->get_future(); schedule([=]() { (*task)(); delete task; }); return future; } // Adopted from: // Radenski, A. // Shared Memory, Message Passing, and Hybrid Merge Sorts for Standalone and // Clustered SMPs. Proc PDPTA'11, the 2011 International Conference on Parallel // and Distributed Processing Techniques and Applications, CSREA Press // (H. Arabnia, Ed.), 2011, pp. 367 - 373. 
template <typename RandomIterator, typename RandomIterator2, typename CompareFunction>
void merge(RandomIterator a, size_t size, RandomIterator2 temp, CompareFunction compareFunction) {
    size_t i1 = 0;
    size_t i2 = size / 2;
    size_t tempi = 0;

    while (i1 < size / 2 && i2 < size) {
        if (compareFunction(a[i1], a[i2])) {
            temp[tempi] = a[i1];
            i1++;
        } else {
            temp[tempi] = a[i2];
            i2++;
        }
        tempi++;
    }

    while (i1 < size / 2) {
        temp[tempi] = a[i1];
        i1++;
        tempi++;
    }

    while (i2 < size) {
        temp[tempi] = a[i2];
        i2++;
        tempi++;
    }

    // Copy the sorted temp array back into the main array, a
    parallelFor(kZeroSize, size, [&](size_t i) { a[i] = temp[i]; });
}

template <typename RandomIterator, typename RandomIterator2, typename CompareFunction>
void parallelMergeSort(RandomIterator a, size_t size, RandomIterator2 temp, unsigned int numThreads,
                       CompareFunction compareFunction) {
    if (numThreads == 1) {
        std::sort(a, a + size, compareFunction);
    } else if (numThreads > 1) {
        std::vector<std::future<void>> pool;
        pool.reserve(2);

        auto launchRange = [compareFunction](RandomIterator begin, size_t k2, RandomIterator2 temp,
                                             unsigned int numThreads) {
            parallelMergeSort(begin, k2, temp, numThreads, compareFunction);
        };

        pool.emplace_back(internal::async(
            [=]() { launchRange(a, size / 2, temp, numThreads / 2); }));
        pool.emplace_back(internal::async([=]() {
            launchRange(a + size / 2, size - size / 2, temp + size / 2,
                        numThreads - numThreads / 2);
        }));

        // Wait for jobs to finish
        for (auto& f : pool) {
            if (f.valid()) {
                f.wait();
            }
        }

        merge(a, size, temp, compareFunction);
    }
}

}  // namespace internal

template <typename RandomIterator, typename T>
void parallelFill(const RandomIterator& begin, const RandomIterator& end, const T& value,
                  ExecutionPolicy policy) {
    auto diff = end - begin;
    if (diff <= 0) {
        return;
    }

    size_t size = static_cast<size_t>(diff);
    parallelFor(kZeroSize, size, [begin, value](size_t i) { begin[i] = value; }, policy);
}

// Adopted from http://ideone.com/Z7zldb
template <typename IndexType, typename Function>
void parallelFor(IndexType start, IndexType end, const Function& func, ExecutionPolicy policy) {
    if (start > end) {
        return;
    }

#ifdef VOX_TASKING_TBB
    if (policy == ExecutionPolicy::kParallel) {
        tbb::parallel_for(start, end, func);
    } else {
        for (auto i = start; i < end; ++i) {
            func(i);
        }
    }
#elif defined(VOX_TASKING_CPP11THREADS)
    // Estimate number of threads in the pool
    unsigned int numThreadsHint = maxNumberOfThreads();
    const unsigned int numThreads =
        (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ?
8u : numThreadsHint) : 1; // Size of a slice for the range functions IndexType n = end - start + 1; IndexType slice = (IndexType)std::round(n / static_cast<double>(numThreads)); slice = std::max(slice, IndexType(1)); // [Helper] Inner loop auto launchRange = [&func](IndexType k1, IndexType k2) { for (IndexType k = k1; k < k2; k++) { func(k); } }; // Create pool and launch jobs std::vector<std::thread> pool; pool.reserve(numThreads); IndexType i1 = start; IndexType i2 = std::min(start + slice, end); for (unsigned int i = 0; i + 1 < numThreads && i1 < end; ++i) { pool.emplace_back(launchRange, i1, i2); i1 = i2; i2 = std::min(i2 + slice, end); } if (i1 < end) { pool.emplace_back(launchRange, i1, end); } // Wait for jobs to finish for (std::thread& t : pool) { if (t.joinable()) { t.join(); } } #else #ifdef VOX_TASKING_OPENMP if (policy == ExecutionPolicy::kParallel) { #pragma omp parallel for #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) for (ssize_t i = start; i < ssize_t(end); ++i) { #else // !MSVC || Intel for (auto i = start; i < end; ++i) { #endif // MSVC && !Intel func(i); } } else { for (auto i = start; i < end; ++i) { func(i); } } #else // VOX_TASKING_OPENMP for (auto i = start; i < end; ++i) { func(i); } #endif // VOX_TASKING_OPENMP #endif } template <typename IndexType, typename Function> void parallelRangeFor(IndexType start, IndexType end, const Function& func, ExecutionPolicy policy) { if (start > end) { return; } #ifdef VOX_TASKING_TBB if (policy == ExecutionPolicy::kParallel) { tbb::parallel_for(tbb::blocked_range<IndexType>(start, end), [&func](const tbb::blocked_range<IndexType>& range) { func(range.begin(), range.end()); }); } else { func(start, end); } #else // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 
8u : numThreadsHint) : 1; // Size of a slice for the range functions IndexType n = end - start + 1; IndexType slice = (IndexType)std::round(n / static_cast<double>(numThreads)); slice = std::max(slice, IndexType(1)); // Create pool and launch jobs std::vector<std::future<void>> pool; pool.reserve(numThreads); IndexType i1 = start; IndexType i2 = std::min(start + slice, end); for (unsigned int i = 0; i + 1 < numThreads && i1 < end; ++i) { pool.emplace_back(internal::async([=]() { func(i1, i2); })); i1 = i2; i2 = std::min(i2 + slice, end); } if (i1 < end) { pool.emplace_back(internal::async([=]() { func(i1, end); })); } // Wait for jobs to finish for (auto& f : pool) { if (f.valid()) { f.wait(); } } #endif } template <typename IndexType, typename Function> void parallelFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, const Function& function, ExecutionPolicy policy) { parallelFor(beginIndexY, endIndexY, [&](IndexType j) { for (IndexType i = beginIndexX; i < endIndexX; ++i) { function(i, j); } }, policy); } template <typename IndexType, typename Function> void parallelRangeFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, const Function& function, ExecutionPolicy policy) { parallelRangeFor(beginIndexY, endIndexY, [&](IndexType jBegin, IndexType jEnd) { function(beginIndexX, endIndexX, jBegin, jEnd); }, policy); } template <typename IndexType, typename Function> void parallelFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, IndexType beginIndexZ, IndexType endIndexZ, const Function& function, ExecutionPolicy policy) { parallelFor(beginIndexZ, endIndexZ, [&](IndexType k) { for (IndexType j = beginIndexY; j < endIndexY; ++j) { for (IndexType i = beginIndexX; i < endIndexX; ++i) { function(i, j, k); } } }, policy); } template <typename IndexType, typename Function> void parallelRangeFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, IndexType beginIndexZ, IndexType endIndexZ, const Function& function, ExecutionPolicy policy) { parallelRangeFor(beginIndexZ, endIndexZ, [&](IndexType kBegin, IndexType kEnd) { function(beginIndexX, endIndexX, beginIndexY, endIndexY, kBegin, kEnd); }, policy); } template <typename IndexType, typename Value, typename Function, typename Reduce> Value parallelReduce(IndexType start, IndexType end, const Value& identity, const Function& func, const Reduce& reduce, ExecutionPolicy policy) { if (start > end) { return identity; } #ifdef VOX_TASKING_TBB if (policy == ExecutionPolicy::kParallel) { return tbb::parallel_reduce( tbb::blocked_range<IndexType>(start, end), identity, [&func](const tbb::blocked_range<IndexType>& range, const Value& init) { return func(range.begin(), range.end(), init); }, reduce); } else { (void)reduce; return func(start, end, identity); } #else // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 
8u : numThreadsHint) : 1; // Size of a slice for the range functions IndexType n = end - start + 1; IndexType slice = (IndexType)std::round(n / static_cast<double>(numThreads)); slice = std::max(slice, IndexType(1)); // Results std::vector<Value> results(numThreads, identity); // [Helper] Inner loop auto launchRange = [&](IndexType k1, IndexType k2, unsigned int tid) { results[tid] = func(k1, k2, identity); }; // Create pool and launch jobs std::vector<std::future<void>> pool; pool.reserve(numThreads); IndexType i1 = start; IndexType i2 = std::min(start + slice, end); unsigned int tid = 0; for (; tid + 1 < numThreads && i1 < end; ++tid) { pool.emplace_back(internal::async([=]() { launchRange(i1, i2, tid); })); i1 = i2; i2 = std::min(i2 + slice, end); } if (i1 < end) { pool.emplace_back( internal::async([=]() { launchRange(i1, end, tid); })); } // Wait for jobs to finish for (auto& f : pool) { if (f.valid()) { f.wait(); } } // Gather Value finalResult = identity; for (const Value& val : results) { finalResult = reduce(val, finalResult); } return finalResult; #endif } template <typename RandomIterator, typename CompareFunction> void parallelSort(RandomIterator begin, RandomIterator end, CompareFunction compareFunction, ExecutionPolicy policy) { if (end < begin) { return; } #ifdef VOX_TASKING_TBB if (policy == ExecutionPolicy::kParallel) { tbb::parallel_sort(begin, end, compareFunction); } else { std::sort(begin, end, compareFunction); } #else size_t size = static_cast<size_t>(end - begin); typedef typename std::iterator_traits<RandomIterator>::value_type value_type; std::vector<value_type> temp(size); // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 8u : numThreadsHint) : 1; internal::parallelMergeSort(begin, size, temp.begin(), numThreads, compareFunction); #endif } template <typename RandomIterator> void parallelSort(RandomIterator begin, RandomIterator end, ExecutionPolicy policy) { parallelSort( begin, end, std::less<typename std::iterator_traits<RandomIterator>::value_type>(), policy); } } // namespace vox #endif // INCLUDE_VOX_DETAIL_PARALLEL_INL_H_
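A usage sketch for the primitives above (illustrative, not part of the header): parallelFor for element-wise work, parallelReduce with a per-range functor plus a combiner, and parallelSort. It assumes ExecutionPolicy and its kParallel value come from the headers included at the top of this file.

// Illustrative usage of the vox parallel primitives defined above.
#include <vector>

void example() {
    std::vector<double> v(1000);

    // element-wise initialization across the thread pool
    vox::parallelFor(size_t(0), v.size(),
                     [&](size_t i) { v[i] = double(i); },
                     vox::ExecutionPolicy::kParallel);

    // per-range partial sums combined with a reduce functor
    double sum = vox::parallelReduce(
        size_t(0), v.size(), 0.0,
        [&](size_t begin, size_t end, double init) {
            for (size_t i = begin; i < end; ++i) init += v[i];
            return init;
        },
        [](double a, double b) { return a + b; },
        vox::ExecutionPolicy::kParallel);

    // parallel merge sort (or tbb::parallel_sort when TBB is enabled)
    vox::parallelSort(v.begin(), v.end(), vox::ExecutionPolicy::kParallel);
    (void)sum;
}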
CPUsearch.c
#include "submat.h" #include "CPUsearch.h" #include "utils.h" // CPU search using SSE instrucions and Score Profile technique void cpu_search_sse_sp (char * query_sequences, unsigned short int * query_sequences_lengths, unsigned long int query_sequences_count, unsigned int * query_disp, char * vect_sequences_db, unsigned short int * vect_sequences_db_lengths, unsigned short int * vect_sequences_db_blocks, unsigned long int vect_sequences_db_count, unsigned long int * vect_sequences_db_disp, char * submat, int open_gap, int extend_gap, int n_threads, int cpu_block_size, int * scores, double * workTime){ double tick; char *a, * b; unsigned int * a_disp; unsigned long int * b_disp = NULL; unsigned short int * m, *n, *nbbs, sequences_db_max_length, query_sequences_max_length; a = query_sequences; m = query_sequences_lengths; a_disp = query_disp; query_sequences_max_length = query_sequences_lengths[query_sequences_count-1]; sequences_db_max_length = vect_sequences_db_lengths[vect_sequences_db_count-1]; b = vect_sequences_db; n = vect_sequences_db_lengths; nbbs = vect_sequences_db_blocks; b_disp = vect_sequences_db_disp; tick = dwalltime(); #pragma omp parallel default(none) shared(cpu_block_size, a, b, n, nbbs, m, a_disp, b_disp, submat, scores, query_sequences_count, vect_sequences_db_count, open_gap, extend_gap, sequences_db_max_length, query_sequences_max_length) num_threads(n_threads) { __m128i *row1, *row2, *row3, *maxCol, *maxRow, *lastCol, * ptr_scores, *tmp, *ptr_scoreProfile1, *ptr_scoreProfile2; char * ptr_a, * ptr_b, * scoreProfile; __declspec(align(32)) __m128i score, current, auxBlosum[2], auxLastCol, b_values; __declspec(align(32)) __m128i aux0, aux1, aux2, aux3, aux4, aux5, aux6, aux7; __declspec(align(32)) __m128i vextend_gap_epi8 = _mm_set1_epi8(extend_gap), vopen_extend_gap_epi8 = _mm_set1_epi8(open_gap+extend_gap), vzero_epi8 = _mm_set1_epi8(0); __declspec(align(32)) __m128i vextend_gap_epi16 = _mm_set1_epi16(extend_gap), vopen_extend_gap_epi16 = _mm_set1_epi16(open_gap+extend_gap), vzero_epi16 = _mm_set1_epi16(0); __declspec(align(32)) __m128i vextend_gap_epi32 = _mm_set1_epi32(extend_gap), vopen_extend_gap_epi32 = _mm_set1_epi32(open_gap+extend_gap), vzero_epi32 = _mm_set1_epi32(0); __declspec(align(32)) __m128i v127 = _mm_set1_epi8(127), v32767 = _mm_set1_epi16(32767); __declspec(align(32)) __m128i v15 = _mm_set1_epi8(15), v16 = _mm_set1_epi8(16), vneg32 = _mm_set1_epi8(-32); unsigned int i, j, ii, jj, k, disp_1, disp_2, disp_3, disp_4, dim1, dim2, nbb; unsigned long int t, s, q; int overflow_flag, bb1, bb1_start, bb1_end, bb2, bb2_start, bb2_end; // allocate memory for auxiliary buffers row1 = (__m128i *) _mm_malloc((cpu_block_size+1)*sizeof(__m128i), 32); row2 = (__m128i *) _mm_malloc((cpu_block_size+1)*sizeof(__m128i), 32); row3 = (__m128i *) _mm_malloc((cpu_block_size+1)*sizeof(__m128i), 32); maxCol = (__m128i *) _mm_malloc((cpu_block_size+1)*sizeof(__m128i), 32); maxRow = (__m128i *) _mm_malloc((query_sequences_max_length)*sizeof(__m128i), 32); lastCol = (__m128i *) _mm_malloc((query_sequences_max_length)*sizeof(__m128i), 32); scoreProfile = (char *) _mm_malloc((BLOSUM_ROWS_x_CPU_SSE_INT8_VECTOR_LENGTH*cpu_block_size)*sizeof(char), 32); // calculate alignment score #pragma omp for schedule(dynamic) nowait for (t=0; t< query_sequences_count*vect_sequences_db_count; t++) { q = (query_sequences_count-1) - (t % query_sequences_count); s = (vect_sequences_db_count-1) - (t / query_sequences_count); ptr_a = a + a_disp[q]; ptr_b = b + b_disp[s]; ptr_scores = (__m128i *) 
(scores + (q*vect_sequences_db_count+s)*CPU_SSE_INT8_VECTOR_LENGTH); // caluclate number of blocks nbb = nbbs[s]; // init buffers #pragma unroll(CPU_SSE_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm_set1_epi8(0); #pragma unroll(CPU_SSE_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm_set1_epi8(0); // set score to 0 score = _mm_set1_epi8(0); for (k=0; k < nbb; k++){ // calculate dim1 disp_4 = k*cpu_block_size; dim1 = n[s]-disp_4; dim1 = (cpu_block_size < dim1 ? cpu_block_size : dim1); // calculate dim2 dim2 = dim1 / SEQ_LEN_MULT; // calculate a[i] displacement disp_1 = dim1*CPU_SSE_INT8_VECTOR_LENGTH; // init buffers #pragma unroll(CPU_SSE_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm_set1_epi8(0); #pragma unroll(CPU_SSE_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm_set1_epi8(0); auxLastCol = _mm_set1_epi8(0); // build score profile for (i=0; i< dim1 ;i++ ) { // indexes b_values = _mm_loadu_si128((__m128i *) (ptr_b + (disp_4+i)*CPU_SSE_INT8_VECTOR_LENGTH)); // indexes >= 16 aux1 = _mm_sub_epi8(b_values, v16); // indexes < 16 aux2 = _mm_cmpgt_epi8(b_values,v15); aux3 = _mm_and_si128(aux2,vneg32); aux4 = _mm_add_epi8(b_values,aux3); ptr_scoreProfile1 = (__m128i*)(scoreProfile) + i; for (j=0; j< BLOSUM_ROWS-1; j++) { tmp = (__m128i *) (submat + j*BLOSUM_COLS); auxBlosum[0] = _mm_load_si128(tmp); auxBlosum[1] = _mm_load_si128(tmp+1); aux5 = _mm_shuffle_epi8(auxBlosum[0], aux4); aux6 = _mm_shuffle_epi8(auxBlosum[1], aux1); aux7 = _mm_add_epi8(aux5, aux6); _mm_store_si128(ptr_scoreProfile1+j*dim1, aux7); } _mm_store_si128(ptr_scoreProfile1+(BLOSUM_ROWS-1)*dim1, vzero_epi8); } for( i = 0; i < m[q]; i+=2){ // update row[0] with lastCol[i-1] row1[0] = lastCol[i]; row2[0] = lastCol[i+1]; // calculate score profile displacement ptr_scoreProfile1 = (__m128i *)(scoreProfile+((int)(ptr_a[i]))*disp_1); ptr_scoreProfile2 = (__m128i *)(scoreProfile+((int)(ptr_a[i+1]))*disp_1); // store maxRow in auxiliars aux1 = maxRow[i]; aux2 = maxRow[i+1]; for (ii=0; ii<dim2 ; ii++) { #pragma unroll(SEQ_LEN_MULT) for( j=ii*SEQ_LEN_MULT+1, jj=0; jj < SEQ_LEN_MULT; jj++, j++) { //calcuate the diagonal value current = _mm_adds_epi8(row1[j-1], _mm_load_si128(ptr_scoreProfile1+(j-1))); // calculate current max value current = _mm_max_epi8(current, aux1); current = _mm_max_epi8(current, maxCol[j]); current = _mm_max_epi8(current, vzero_epi8); // update maxRow and maxCol aux1 = _mm_subs_epi8(aux1, vextend_gap_epi8); maxCol[j] = _mm_subs_epi8(maxCol[j], vextend_gap_epi8); aux0 = _mm_subs_epi8(current, vopen_extend_gap_epi8); aux1 = _mm_max_epi8(aux1, aux0); maxCol[j] = _mm_max_epi8(maxCol[j], aux0); // update max score score = _mm_max_epi8(score,current); // update row buffer row2[j] = current; //calcuate the diagonal value current = _mm_adds_epi8(row2[j-1], _mm_load_si128(ptr_scoreProfile2+(j-1))); // calculate current max value current = _mm_max_epi8(current, aux2); current = _mm_max_epi8(current, maxCol[j]); current = _mm_max_epi8(current, vzero_epi8); // update maxRow and maxCol aux2 = _mm_subs_epi8(aux2, vextend_gap_epi8); maxCol[j] = _mm_subs_epi8(maxCol[j], vextend_gap_epi8); aux0 = _mm_subs_epi8(current, vopen_extend_gap_epi8); aux2 = _mm_max_epi8(aux2, aux0); maxCol[j] = _mm_max_epi8(maxCol[j], aux0); // update max score score = _mm_max_epi8(score,current); // update row buffer row3[j] = current; } } // update maxRow maxRow[i] = aux1; maxRow[i+1] = aux2; // update lastCol lastCol[i] = auxLastCol; lastCol[i+1] = row2[dim1]; auxLastCol = current; // swap buffers tmp = row1; row1 = row3; 
row3 = tmp; } } // store max value _mm_store_si128 (ptr_scores,_mm_cvtepi8_epi32(score)); _mm_store_si128 (ptr_scores+1,_mm_cvtepi8_epi32(_mm_srli_si128(score,4))); _mm_store_si128 (ptr_scores+2,_mm_cvtepi8_epi32(_mm_srli_si128(score,8))); _mm_store_si128 (ptr_scores+3,_mm_cvtepi8_epi32(_mm_srli_si128(score,12))); // overflow detection aux1 = _mm_cmpeq_epi8(score,v127); overflow_flag = _mm_test_all_zeros(aux1,v127); // if overflow if (overflow_flag == 0){ // detect if overflow occurred in low-half, high-half or both halves aux1 = _mm_cmpeq_epi8(_mm_slli_si128(score,8),v127); bb1_start = _mm_test_all_zeros(aux1,v127); aux1 = _mm_cmpeq_epi8(_mm_srli_si128(score,8),v127); bb1_end = 2 - _mm_test_all_zeros(aux1,v127); // recalculate using 16-bit signed integer precision for (bb1=bb1_start; bb1<bb1_end ; bb1++){ // init buffers #pragma unroll(CPU_SSE_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm_set1_epi16(0); #pragma unroll(CPU_SSE_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm_set1_epi16(0); // set score to 0 score = _mm_set1_epi16(0); disp_2 = bb1*CPU_SSE_INT16_VECTOR_LENGTH; for (k=0; k < nbb; k++){ // calculate dim1 disp_4 = k*cpu_block_size; dim1 = n[s]-disp_4; dim1 = (cpu_block_size < dim1 ? cpu_block_size : dim1); // calculate dim2 dim2 = dim1 / SEQ_LEN_MULT; // calculate a[i] displacement disp_1 = dim1*CPU_SSE_INT8_VECTOR_LENGTH; // init buffers #pragma unroll(CPU_SSE_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm_set1_epi16(0); #pragma unroll(CPU_SSE_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm_set1_epi16(0); auxLastCol = _mm_set1_epi16(0); // build score profile for (i=0; i< dim1 ;i++ ) { // indexes b_values = _mm_loadu_si128((__m128i *) (ptr_b + (disp_4+i)*CPU_SSE_INT8_VECTOR_LENGTH)); // indexes >= 16 aux1 = _mm_sub_epi8(b_values, v16); // indexes < 16 aux2 = _mm_cmpgt_epi8(b_values,v15); aux3 = _mm_and_si128(aux2,vneg32); aux4 = _mm_add_epi8(b_values,aux3); ptr_scoreProfile1 = (__m128i*)(scoreProfile) + i; for (j=0; j< BLOSUM_ROWS-1; j++) { tmp = (__m128i *) (submat + j*BLOSUM_COLS); auxBlosum[0] = _mm_load_si128(tmp); auxBlosum[1] = _mm_load_si128(tmp+1); aux5 = _mm_shuffle_epi8(auxBlosum[0], aux4); aux6 = _mm_shuffle_epi8(auxBlosum[1], aux1); aux7 = _mm_add_epi8(aux5, aux6); _mm_store_si128(ptr_scoreProfile1+j*dim1, aux7); } _mm_store_si128(ptr_scoreProfile1+(BLOSUM_ROWS-1)*dim1, vzero_epi8); } for( i = 0; i < m[q]; i+=2){ // update row[0] with lastCol[i-1] row1[0] = lastCol[i]; row2[0] = lastCol[i+1]; // calculate score profile displacement ptr_scoreProfile1 = (__m128i *)(scoreProfile+((int)(ptr_a[i]))*disp_1+disp_2); ptr_scoreProfile2 = (__m128i *)(scoreProfile+((int)(ptr_a[i+1]))*disp_1+disp_2); // store maxRow in auxiliars aux1 = maxRow[i]; aux2 = maxRow[i+1]; for (ii=0; ii<dim2 ; ii++) { #pragma unroll(SEQ_LEN_MULT) for( j=ii*SEQ_LEN_MULT+1, jj=0; jj < SEQ_LEN_MULT; jj++, j++) { //calcuate the diagonal value current = _mm_adds_epi16(row1[j-1], _mm_cvtepi8_epi16(_mm_loadu_si128(ptr_scoreProfile1+(j-1)))); // calculate current max value current = _mm_max_epi16(current, aux1); current = _mm_max_epi16(current, maxCol[j]); current = _mm_max_epi16(current, vzero_epi16); // update maxRow and maxCol aux1 = _mm_subs_epi16(aux1, vextend_gap_epi16); maxCol[j] = _mm_subs_epi16(maxCol[j], vextend_gap_epi16); aux0 = _mm_subs_epi16(current, vopen_extend_gap_epi16); aux1 = _mm_max_epi16(aux1, aux0); maxCol[j] = _mm_max_epi16(maxCol[j], aux0); // update row buffer row2[j] = current; // update max score score = _mm_max_epi16(score,current); //calcuate the 
diagonal value current = _mm_adds_epi16(row2[j-1], _mm_cvtepi8_epi16(_mm_loadu_si128(ptr_scoreProfile2+(j-1)))); // calculate current max value current = _mm_max_epi16(current, aux2); current = _mm_max_epi16(current, maxCol[j]); current = _mm_max_epi16(current, vzero_epi16); // update maxRow and maxCol aux2 = _mm_subs_epi16(aux2, vextend_gap_epi16); maxCol[j] = _mm_subs_epi16(maxCol[j], vextend_gap_epi16); aux0 = _mm_subs_epi16(current, vopen_extend_gap_epi16); aux2 = _mm_max_epi16(aux2, aux0); maxCol[j] = _mm_max_epi16(maxCol[j], aux0); // update row buffer row3[j] = current; // update max score score = _mm_max_epi16(score,current); } } // update maxRow maxRow[i] = aux1; maxRow[i+1] = aux2; // update lastCol lastCol[i] = auxLastCol; lastCol[i+1] = row2[dim1]; auxLastCol = current; // swap buffers tmp = row1; row1 = row3; row3 = tmp; } } // store max value _mm_store_si128 (ptr_scores+bb1*2,_mm_cvtepi16_epi32(score)); _mm_store_si128 (ptr_scores+bb1*2+1,_mm_cvtepi16_epi32(_mm_srli_si128(score,8))); // overflow detection aux1 = _mm_cmpeq_epi16(score,v32767); overflow_flag = _mm_test_all_zeros(aux1,v32767); // if overflow if (overflow_flag == 0){ // detect if overflow occurred in low-half, high-half or both halves aux1 = _mm_cmpeq_epi16(_mm_slli_si128(score,8),v32767); bb2_start = _mm_test_all_zeros(aux1,v32767); aux1 = _mm_cmpeq_epi16(_mm_srli_si128(score,8),v32767); bb2_end = 2 - _mm_test_all_zeros(aux1,v32767); // recalculate using 32-bit signed integer precision for (bb2=bb2_start; bb2<bb2_end ; bb2++){ // init buffers #pragma unroll(CPU_SSE_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm_set1_epi32(0); #pragma unroll(CPU_SSE_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm_set1_epi32(0); // set score to 0 score = _mm_set1_epi32(0); disp_3 = disp_2 + bb2*CPU_SSE_INT32_VECTOR_LENGTH; for (k=0; k < nbb; k++){ // calculate dim1 disp_4 = k*cpu_block_size; dim1 = n[s]-disp_4; dim1 = (cpu_block_size < dim1 ? 
cpu_block_size : dim1); // calculate dim2 dim2 = dim1 / SEQ_LEN_MULT; // calculate a[i] displacement disp_1 = dim1*CPU_SSE_INT8_VECTOR_LENGTH; // init buffers #pragma unroll(CPU_SSE_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm_set1_epi32(0); #pragma unroll(CPU_SSE_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm_set1_epi32(0); auxLastCol = _mm_set1_epi32(0); // build score profile for (i=0; i< dim1 ;i++ ) { // indexes b_values = _mm_loadu_si128((__m128i *) (ptr_b + (disp_4+i)*CPU_SSE_INT8_VECTOR_LENGTH)); // indexes >= 16 aux1 = _mm_sub_epi8(b_values, v16); // indexes < 16 aux2 = _mm_cmpgt_epi8(b_values,v15); aux3 = _mm_and_si128(aux2,vneg32); aux4 = _mm_add_epi8(b_values,aux3); ptr_scoreProfile1 = (__m128i*)(scoreProfile) + i; for (j=0; j< BLOSUM_ROWS-1; j++) { tmp = (__m128i *) (submat + j*BLOSUM_COLS); auxBlosum[0] = _mm_load_si128(tmp); auxBlosum[1] = _mm_load_si128(tmp+1); aux5 = _mm_shuffle_epi8(auxBlosum[0], aux4); aux6 = _mm_shuffle_epi8(auxBlosum[1], aux1); aux7 = _mm_add_epi8(aux5, aux6); _mm_store_si128(ptr_scoreProfile1+j*dim1, aux7); } _mm_store_si128(ptr_scoreProfile1+(BLOSUM_ROWS-1)*dim1, vzero_epi8); } for( i = 0; i < m[q]; i+=2){ // update row[0] with lastCol[i-1] row1[0] = lastCol[i]; row2[0] = lastCol[i+1]; // calculate score profile displacement ptr_scoreProfile1 = (__m128i *)(scoreProfile+((int)(ptr_a[i]))*disp_1+disp_3); ptr_scoreProfile2 = (__m128i *)(scoreProfile+((int)(ptr_a[i+1]))*disp_1+disp_3); // store maxRow in auxiliars aux1 = maxRow[i]; aux2 = maxRow[i+1]; for (ii=0; ii<dim2 ; ii++) { #pragma unroll(SEQ_LEN_MULT) for( j=ii*SEQ_LEN_MULT+1, jj=0; jj < SEQ_LEN_MULT; jj++, j++) { //calcuate the diagonal value current = _mm_add_epi32(row1[j-1], _mm_cvtepi8_epi32(_mm_loadu_si128(ptr_scoreProfile1+(j-1)))); // calculate current max value current = _mm_max_epi32(current, aux1); current = _mm_max_epi32(current, maxCol[j]); current = _mm_max_epi32(current, vzero_epi32); // update maxRow and maxCol aux1 = _mm_sub_epi32(aux1, vextend_gap_epi32); maxCol[j] = _mm_sub_epi32(maxCol[j], vextend_gap_epi32); aux0 = _mm_sub_epi32(current, vopen_extend_gap_epi32); aux1 = _mm_max_epi32(aux1, aux0); maxCol[j] = _mm_max_epi32(maxCol[j], aux0); // update row buffer row2[j] = current; // update max score score = _mm_max_epi32(score,current); //calcuate the diagonal value current = _mm_add_epi32(row2[j-1], _mm_cvtepi8_epi32(_mm_loadu_si128(ptr_scoreProfile2+(j-1)))); // calculate current max value current = _mm_max_epi32(current, aux2); current = _mm_max_epi32(current, maxCol[j]); current = _mm_max_epi32(current, vzero_epi32); // update maxRow and maxCol aux2 = _mm_sub_epi32(aux2, vextend_gap_epi32); maxCol[j] = _mm_sub_epi32(maxCol[j], vextend_gap_epi32); aux0 = _mm_sub_epi32(current, vopen_extend_gap_epi32); aux2 = _mm_max_epi32(aux2, aux0); maxCol[j] = _mm_max_epi32(maxCol[j], aux0); // update row buffer row3[j] = current; // update max score score = _mm_max_epi32(score,current); } } // update maxRow maxRow[i] = aux1; maxRow[i+1] = aux2; // update lastCol lastCol[i] = auxLastCol; lastCol[i+1] = row2[dim1]; auxLastCol = current; // swap buffers tmp = row1; row1 = row3; row3 = tmp; } } // store max value _mm_store_si128 (ptr_scores+bb1*2+bb2,score); } } } } } _mm_free(row1); _mm_free(row2);_mm_free(row3); _mm_free(maxCol); _mm_free(maxRow); _mm_free(lastCol); _mm_free(scoreProfile); } *workTime = dwalltime()-tick; } // CPU search using AVX2 instructions and Score Profile technique void cpu_search_avx2_sp (char * query_sequences, unsigned short int * 
query_sequences_lengths, unsigned long int query_sequences_count, unsigned int * query_disp, char * vect_sequences_db, unsigned short int * vect_sequences_db_lengths, unsigned short int * vect_sequences_db_blocks, unsigned long int vect_sequences_db_count, unsigned long int * vect_sequences_db_disp, char * submat, int open_gap, int extend_gap, int n_threads, int cpu_block_size, int * scores, double * workTime){ double tick; char *a, * b; unsigned int * a_disp; unsigned long int * b_disp = NULL; unsigned short int * m, *n, *nbbs, sequences_db_max_length, query_sequences_max_length; a = query_sequences; m = query_sequences_lengths; a_disp = query_disp; query_sequences_max_length = query_sequences_lengths[query_sequences_count-1]; sequences_db_max_length = vect_sequences_db_lengths[vect_sequences_db_count-1]; b = vect_sequences_db; n = vect_sequences_db_lengths; nbbs = vect_sequences_db_blocks; b_disp = vect_sequences_db_disp; tick = dwalltime(); #pragma omp parallel default(none) shared(cpu_block_size, a, b, n, nbbs, m, a_disp, b_disp, submat, scores, query_sequences_count, vect_sequences_db_count, open_gap, extend_gap, sequences_db_max_length, query_sequences_max_length) num_threads(n_threads) { __m256i *row1, *row2, *row3, *tmp_row, *maxCol, *maxRow, *lastCol, * ptr_scores, *ptr_scoreProfile1, *ptr_scoreProfile2; __m128i *tmp; char * ptr_a, * ptr_b, * scoreProfile; __declspec(align(32)) __m256i score, current, auxLastCol, b_values, blosum_lo, blosum_hi; __declspec(align(32)) __m256i aux0, aux1, aux2, aux3, aux4, aux5, aux6; __declspec(align(32)) __m256i vextend_gap_epi8 = _mm256_set1_epi8(extend_gap), vopen_extend_gap_epi8 = _mm256_set1_epi8(open_gap+extend_gap); __declspec(align(32)) __m256i vextend_gap_epi16 = _mm256_set1_epi16(extend_gap), vopen_extend_gap_epi16 = _mm256_set1_epi16(open_gap+extend_gap); __declspec(align(32)) __m256i vextend_gap_epi32 = _mm256_set1_epi32(extend_gap), vopen_extend_gap_epi32 = _mm256_set1_epi32(open_gap+extend_gap); __declspec(align(32)) __m256i vzero_epi8 = _mm256_set1_epi8(0), vzero_epi16 = _mm256_set1_epi16(0), vzero_epi32 = _mm256_set1_epi32(0); __declspec(align(32)) __m256i v15 = _mm256_set1_epi8(15), vneg32 = _mm256_set1_epi8(-32), v16 = _mm256_set1_epi8(16); __declspec(align(32)) __m256i v127 = _mm256_set1_epi8(127), v32767 = _mm256_set1_epi16(32767); __declspec(align(32)) __m128i aux, auxBlosum[2]; unsigned int i, j, ii, jj, k, disp_1, disp_2, disp_3, disp_4, dim1, dim2, nbb; unsigned long int t, s, q; int overflow_flag, bb1, bb2, bb1_start, bb1_end, bb2_start, bb2_end; // allocate memory for auxiliary buffers row1 = (__m256i *) _mm_malloc((cpu_block_size+1)*sizeof(__m256i), 32); row2 = (__m256i *) _mm_malloc((cpu_block_size+1)*sizeof(__m256i), 32); row3 = (__m256i *) _mm_malloc((cpu_block_size+1)*sizeof(__m256i), 32); maxCol = (__m256i *) _mm_malloc((cpu_block_size+1)*sizeof(__m256i), 32); maxRow = (__m256i *) _mm_malloc((query_sequences_max_length)*sizeof(__m256i), 32); lastCol = (__m256i *) _mm_malloc((query_sequences_max_length)*sizeof(__m256i), 32); scoreProfile = (char *) _mm_malloc((BLOSUM_ROWS_x_CPU_AVX2_INT8_VECTOR_LENGTH*cpu_block_size)*sizeof(char), 32); // calculate alignment score #pragma omp for schedule(dynamic) nowait for (t=0; t< query_sequences_count*vect_sequences_db_count; t++) { q = (query_sequences_count-1) - (t % query_sequences_count); s = (vect_sequences_db_count-1) - (t / query_sequences_count); ptr_a = a + a_disp[q]; ptr_b = b + b_disp[s]; ptr_scores = (__m256i *) (scores + 
(q*vect_sequences_db_count+s)*CPU_AVX2_INT8_VECTOR_LENGTH); // calculate number of blocks nbb = nbbs[s]; // init buffers #pragma unroll(CPU_AVX2_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm256_set1_epi8(0); #pragma unroll(CPU_AVX2_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm256_set1_epi8(0); // set score to 0 score = _mm256_set1_epi8(0); for (k=0; k < nbb; k++){ // calculate dim1 disp_4 = k*cpu_block_size; dim1 = n[s]-disp_4; dim1 = (cpu_block_size < dim1 ? cpu_block_size : dim1); // calculate dim2 dim2 = dim1 / SEQ_LEN_MULT; // calculate SP sub-block length disp_1 = dim1*CPU_AVX2_INT8_VECTOR_LENGTH; // init buffers #pragma unroll(CPU_AVX2_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm256_set1_epi8(0); #pragma unroll(CPU_AVX2_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm256_set1_epi8(0); auxLastCol = _mm256_set1_epi8(0); // build score profile for (i=0; i< dim1 ;i++ ) { // indexes b_values = _mm256_loadu_si256((__m256i *) (ptr_b + (disp_4+i)*CPU_AVX2_INT8_VECTOR_LENGTH)); // indexes >= 16 aux1 = _mm256_sub_epi8(b_values, v16); // indexes < 16 aux2 = _mm256_cmpgt_epi8(b_values,v15); aux3 = _mm256_and_si256(aux2,vneg32); aux4 = _mm256_add_epi8(b_values,aux3); ptr_scoreProfile1 = (__m256i *)(scoreProfile) + i; for (j=0; j< BLOSUM_ROWS-1; j++) { tmp = (__m128i*) (submat + j*BLOSUM_COLS); auxBlosum[0] = _mm_load_si128(tmp); auxBlosum[1] = _mm_load_si128(tmp+1); blosum_lo = _mm256_loadu2_m128i(&auxBlosum[0], &auxBlosum[0]); blosum_hi = _mm256_loadu2_m128i(&auxBlosum[1], &auxBlosum[1]); aux5 = _mm256_shuffle_epi8(blosum_lo,aux4); aux6 = _mm256_shuffle_epi8(blosum_hi,aux1); _mm256_store_si256(ptr_scoreProfile1+j*dim1,_mm256_or_si256(aux5,aux6)); } _mm256_store_si256(ptr_scoreProfile1+(BLOSUM_ROWS-1)*dim1, vzero_epi8); } for( i = 0; i < m[q]; i+=2){ // update row[0] with lastCol[i-1] row1[0] = lastCol[i]; row2[0] = lastCol[i+1]; // calculate score profile displacement ptr_scoreProfile1 = (__m256i*)(scoreProfile+((unsigned int)(ptr_a[i]))*disp_1); ptr_scoreProfile2 = (__m256i*)(scoreProfile+((unsigned int)(ptr_a[i+1]))*disp_1); // store maxRow in auxiliars aux1 = maxRow[i]; aux2 = maxRow[i+1]; for (ii=0; ii<dim2 ; ii++) { #pragma unroll(SEQ_LEN_MULT) for( j=ii*SEQ_LEN_MULT+1, jj=0; jj < SEQ_LEN_MULT; jj++, j++) { //calcuate the diagonal value current = _mm256_adds_epi8(row1[j-1], _mm256_load_si256(ptr_scoreProfile1+(j-1))); // calculate current max value current = _mm256_max_epi8(current, aux1); current = _mm256_max_epi8(current, maxCol[j]); current = _mm256_max_epi8(current, vzero_epi8); // update maxRow and maxCol aux1 = _mm256_subs_epi8(aux1, vextend_gap_epi8); maxCol[j] = _mm256_subs_epi8(maxCol[j], vextend_gap_epi8); aux0 = _mm256_subs_epi8(current, vopen_extend_gap_epi8); aux1 = _mm256_max_epi8(aux1, aux0); maxCol[j] = _mm256_max_epi8(maxCol[j], aux0); // update row buffer row2[j] = current; // update max score score = _mm256_max_epi8(score,current); //calcuate the diagonal value current = _mm256_adds_epi8(row2[j-1], _mm256_load_si256(ptr_scoreProfile2+(j-1))); // calculate current max value current = _mm256_max_epi8(current, aux2); current = _mm256_max_epi8(current, maxCol[j]); current = _mm256_max_epi8(current, vzero_epi8); // update maxRow and maxCol aux2 = _mm256_subs_epi8(aux2, vextend_gap_epi8); maxCol[j] = _mm256_subs_epi8(maxCol[j], vextend_gap_epi8); aux0 = _mm256_subs_epi8(current, vopen_extend_gap_epi8); aux2 = _mm256_max_epi8(aux2, aux0); maxCol[j] = _mm256_max_epi8(maxCol[j], aux0); // update row buffer row3[j] = current; // update max score 
score = _mm256_max_epi8(score,current); } } // update maxRow maxRow[i] = aux1; maxRow[i+1] = aux2; // update lastCol lastCol[i] = auxLastCol; lastCol[i+1] = row2[dim1]; auxLastCol = current; // swap buffers tmp_row = row1; row1 = row3; row3 = tmp_row; } } // store max value aux = _mm256_extracti128_si256 (score,0); _mm256_store_si256 (ptr_scores,_mm256_cvtepi8_epi32(aux)); _mm256_store_si256 (ptr_scores+1,_mm256_cvtepi8_epi32(_mm_srli_si128(aux,8))); aux = _mm256_extracti128_si256 (score,1); _mm256_store_si256 (ptr_scores+2,_mm256_cvtepi8_epi32(aux)); _mm256_store_si256 (ptr_scores+3,_mm256_cvtepi8_epi32(_mm_srli_si128(aux,8))); // overflow detection aux1 = _mm256_cmpeq_epi8(score,v127); overflow_flag = _mm256_testz_si256(aux1,v127); // if overflow if (overflow_flag == 0){ // check overflow in low 16 bits aux1 = _mm256_cmpeq_epi8(_mm256_inserti128_si256(vzero_epi8,_mm256_extracti128_si256(score,0),0),v127); bb1_start = _mm256_testz_si256(aux1,v127); // check overflow in high 16 bits aux1 = _mm256_cmpeq_epi8(_mm256_inserti128_si256(vzero_epi8,_mm256_extracti128_si256(score,1),0),v127); bb1_end = 2 - _mm256_testz_si256(aux1,v127); // recalculate using 16-bit signed integer precision for (bb1=bb1_start; bb1<bb1_end ; bb1++){ // init buffers #pragma unroll(CPU_AVX2_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm256_set1_epi16(0); #pragma unroll(CPU_AVX2_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm256_set1_epi16(0); // set score to 0 score = _mm256_set1_epi16(0); disp_2 = bb1*CPU_AVX2_INT16_VECTOR_LENGTH; for (k=0; k < nbb; k++){ // calculate dim1 disp_4 = k*cpu_block_size; dim1 = n[s]-disp_4; dim1 = (cpu_block_size < dim1 ? cpu_block_size : dim1); // calculate dim2 dim2 = dim1 / SEQ_LEN_MULT; // calculate SP sub-block length disp_1 = dim1*CPU_AVX2_INT8_VECTOR_LENGTH; // init buffers #pragma unroll(CPU_AVX2_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm256_set1_epi16(0); #pragma unroll(CPU_AVX2_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm256_set1_epi16(0); auxLastCol = _mm256_set1_epi16(0); // build score profile for (i=0; i< dim1 ;i++ ) { // indexes b_values = _mm256_loadu_si256((__m256i *) (ptr_b + (disp_4+i)*CPU_AVX2_INT8_VECTOR_LENGTH)); // indexes >= 16 aux1 = _mm256_sub_epi8(b_values, v16); // indexes < 16 aux2 = _mm256_cmpgt_epi8(b_values,v15); aux3 = _mm256_and_si256(aux2,vneg32); aux4 = _mm256_add_epi8(b_values,aux3); ptr_scoreProfile1 = (__m256i *)(scoreProfile) + i; for (j=0; j< BLOSUM_ROWS-1; j++) { tmp = (__m128i*) (submat + j*BLOSUM_COLS); auxBlosum[0] = _mm_load_si128(tmp); auxBlosum[1] = _mm_load_si128(tmp+1); blosum_lo = _mm256_loadu2_m128i(&auxBlosum[0], &auxBlosum[0]); blosum_hi = _mm256_loadu2_m128i(&auxBlosum[1], &auxBlosum[1]); aux5 = _mm256_shuffle_epi8(blosum_lo,aux4); aux6 = _mm256_shuffle_epi8(blosum_hi,aux1); _mm256_store_si256(ptr_scoreProfile1+j*dim1,_mm256_or_si256(aux5,aux6)); } _mm256_store_si256(ptr_scoreProfile1+(BLOSUM_ROWS-1)*dim1, vzero_epi8); } for( i = 0; i < m[q]; i+=2){ // update row[0] with lastCol[i-1] row1[0] = lastCol[i]; row2[0] = lastCol[i+1]; // calculate score profile displacement ptr_scoreProfile1 = (__m256i *)(scoreProfile+((int)(ptr_a[i]))*disp_1+disp_2); ptr_scoreProfile2 = (__m256i *)(scoreProfile+((int)(ptr_a[i+1]))*disp_1+disp_2); // store maxRow in auxiliars aux1 = maxRow[i]; aux2 = maxRow[i+1]; for (ii=0; ii<dim2 ; ii++) { #pragma unroll(SEQ_LEN_MULT) for( j=ii*SEQ_LEN_MULT+1, jj=0; jj < SEQ_LEN_MULT; jj++, j++) { //calcuate the diagonal value current = _mm256_adds_epi16(row1[j-1], 
_mm256_cvtepi8_epi16(_mm_loadu_si128((__m128i *) (ptr_scoreProfile1+(j-1))))); // calculate current max value current = _mm256_max_epi16(current, aux1); current = _mm256_max_epi16(current, maxCol[j]); current = _mm256_max_epi16(current, vzero_epi16); // update maxRow and maxCol aux1 = _mm256_subs_epi16(aux1, vextend_gap_epi16); maxCol[j] = _mm256_subs_epi16(maxCol[j], vextend_gap_epi16); aux0 = _mm256_subs_epi16(current, vopen_extend_gap_epi16); aux1 = _mm256_max_epi16(aux1, aux0); maxCol[j] = _mm256_max_epi16(maxCol[j], aux0); // update row buffer row2[j] = current; // update max score score = _mm256_max_epi16(score,current); //calcuate the diagonal value current = _mm256_adds_epi16(row2[j-1], _mm256_cvtepi8_epi16(_mm_loadu_si128((__m128i *) (ptr_scoreProfile2+(j-1))))); // calculate current max value current = _mm256_max_epi16(current, aux2); current = _mm256_max_epi16(current, maxCol[j]); current = _mm256_max_epi16(current, vzero_epi16); // update maxRow and maxCol aux2 = _mm256_subs_epi16(aux2, vextend_gap_epi16); maxCol[j] = _mm256_subs_epi16(maxCol[j], vextend_gap_epi16); aux0 = _mm256_subs_epi16(current, vopen_extend_gap_epi16); aux2 = _mm256_max_epi16(aux2, aux0); maxCol[j] = _mm256_max_epi16(maxCol[j], aux0); // update row buffer row3[j] = current; // update max score score = _mm256_max_epi16(score,current); } } // update maxRow maxRow[i] = aux1; maxRow[i+1] = aux2; // update lastCol lastCol[i] = auxLastCol; lastCol[i+1] = row2[dim1]; auxLastCol = current; // swap buffers tmp_row = row1; row1 = row3; row3 = tmp_row; } } // store max value aux = _mm256_extracti128_si256 (score,0); _mm256_store_si256 (ptr_scores+bb1*2,_mm256_cvtepi16_epi32(aux)); aux = _mm256_extracti128_si256 (score,1); _mm256_store_si256 (ptr_scores+bb1*2+1,_mm256_cvtepi16_epi32(aux)); // overflow detection aux1 = _mm256_cmpeq_epi16(score,v32767); overflow_flag = _mm256_testz_si256(aux1,v32767); // if overflow if (overflow_flag == 0){ // check overflow in low 16 bits aux1 = _mm256_cmpeq_epi16(_mm256_inserti128_si256(vzero_epi16,_mm256_extracti128_si256(score,0),0),v32767); bb2_start = _mm256_testz_si256(aux1,v32767); // check overflow in high 16 bits aux1 = _mm256_cmpeq_epi16(_mm256_inserti128_si256(vzero_epi16,_mm256_extracti128_si256(score,1),0),v32767); bb2_end = 2 - _mm256_testz_si256(aux1,v32767); // recalculate using 32-bit signed integer precision for (bb2=bb2_start; bb2<bb2_end ; bb2++){ // init buffers #pragma unroll(CPU_AVX2_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm256_set1_epi32(0); #pragma unroll(CPU_AVX2_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm256_set1_epi32(0); // set score to 0 score = _mm256_set1_epi32(0); disp_3 = disp_2 + bb2*CPU_AVX2_INT32_VECTOR_LENGTH; for (k=0; k < nbb; k++){ // calculate dim1 disp_4 = k*cpu_block_size; dim1 = n[s]-disp_4; dim1 = (cpu_block_size < dim1 ? 
cpu_block_size : dim1); // calculate dim2 dim2 = dim1 / SEQ_LEN_MULT; // calculate SP sub-block length disp_1 = dim1*CPU_AVX2_INT8_VECTOR_LENGTH; // init buffers #pragma unroll(CPU_AVX2_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm256_set1_epi32(0); #pragma unroll(CPU_AVX2_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm256_set1_epi32(0); auxLastCol = _mm256_set1_epi32(0); // build score profile for (i=0; i< dim1 ;i++ ) { // indexes b_values = _mm256_loadu_si256((__m256i *) (ptr_b + (disp_4+i)*CPU_AVX2_INT8_VECTOR_LENGTH)); // indexes >= 16 aux1 = _mm256_sub_epi8(b_values, v16); // indexes < 16 aux2 = _mm256_cmpgt_epi8(b_values,v15); aux3 = _mm256_and_si256(aux2,vneg32); aux4 = _mm256_add_epi8(b_values,aux3); ptr_scoreProfile1 = (__m256i *)(scoreProfile) + i; for (j=0; j< BLOSUM_ROWS-1; j++) { tmp = (__m128i*) (submat + j*BLOSUM_COLS); auxBlosum[0] = _mm_load_si128(tmp); auxBlosum[1] = _mm_load_si128(tmp+1); blosum_lo = _mm256_loadu2_m128i(&auxBlosum[0], &auxBlosum[0]); blosum_hi = _mm256_loadu2_m128i(&auxBlosum[1], &auxBlosum[1]); aux5 = _mm256_shuffle_epi8(blosum_lo,aux4); aux6 = _mm256_shuffle_epi8(blosum_hi,aux1); _mm256_store_si256(ptr_scoreProfile1+j*dim1,_mm256_or_si256(aux5,aux6)); } _mm256_store_si256(ptr_scoreProfile1+(BLOSUM_ROWS-1)*dim1, vzero_epi8); } for( i = 0; i < m[q]; i+=2){ // update row[0] with lastCol[i-1] row1[0] = lastCol[i]; row2[0] = lastCol[i+1]; // calculate score profile displacement ptr_scoreProfile1 = (__m256i *)(scoreProfile+((unsigned int)(ptr_a[i]))*disp_1+disp_3); ptr_scoreProfile2 = (__m256i *)(scoreProfile+((unsigned int)(ptr_a[i+1]))*disp_1+disp_3); // store maxRow in auxiliars aux1 = maxRow[i]; aux2 = maxRow[i+1]; for (ii=0; ii<dim2 ; ii++) { #pragma unroll(SEQ_LEN_MULT) for( j=ii*SEQ_LEN_MULT+1, jj=0; jj < SEQ_LEN_MULT; jj++, j++) { //calcuate the diagonal value current = _mm256_add_epi32(row1[j-1], _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (ptr_scoreProfile1+(j-1))))); // calculate current max value current = _mm256_max_epi32(current, aux1); current = _mm256_max_epi32(current, maxCol[j]); current = _mm256_max_epi32(current, vzero_epi32); // update maxRow and maxCol aux1 = _mm256_sub_epi32(aux1, vextend_gap_epi32); maxCol[j] = _mm256_sub_epi32(maxCol[j], vextend_gap_epi32); aux0 = _mm256_sub_epi32(current, vopen_extend_gap_epi32); aux1 = _mm256_max_epi32(aux1, aux0); maxCol[j] = _mm256_max_epi32(maxCol[j], aux0); // update row buffer row2[j] = current; // update max score score = _mm256_max_epi32(score,current); //calcuate the diagonal value current = _mm256_add_epi32(row2[j-1], _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (ptr_scoreProfile2+(j-1))))); // calculate current max value current = _mm256_max_epi32(current, aux2); current = _mm256_max_epi32(current, maxCol[j]); current = _mm256_max_epi32(current, vzero_epi32); // update maxRow and maxCol aux2 = _mm256_sub_epi32(aux2, vextend_gap_epi32); maxCol[j] = _mm256_sub_epi32(maxCol[j], vextend_gap_epi32); aux0 = _mm256_sub_epi32(current, vopen_extend_gap_epi32); aux2 = _mm256_max_epi32(aux2, aux0); maxCol[j] = _mm256_max_epi32(maxCol[j], aux0); // update row buffer row3[j] = current; // update max score score = _mm256_max_epi32(score,current); } } // update maxRow maxRow[i] = aux1; maxRow[i+1] = aux2; // update lastCol lastCol[i] = auxLastCol; lastCol[i+1] = row2[dim1]; auxLastCol = current; // swap buffers tmp_row = row1; row1 = row3; row3 = tmp_row; } } // store max value _mm256_store_si256 (ptr_scores+bb1*2+bb2,score); } } } } } _mm_free(row1); _mm_free(row2); 
_mm_free(row3); _mm_free(maxCol); _mm_free(maxRow); _mm_free(lastCol); _mm_free(scoreProfile); } *workTime = dwalltime()-tick; }
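The three precision tiers in the kernels above share one idiom: compute with saturating arithmetic, test whether any lane pinned at the type's maximum (127 for int8, 32767 for int16), and if so redo only the affected half at the next wider type. What follows is a minimal standalone sketch of that detect-and-widen step using the same SSE4.1 intrinsics; it is not taken from the code above, and the input values are made up for illustration.

#include <smmintrin.h>   // SSE4.1: _mm_test_all_zeros, _mm_cvtepi8_epi16
#include <cstdio>

int main() {
    const __m128i v127 = _mm_set1_epi8(127);
    __m128i a = _mm_set1_epi8(100), b = _mm_set1_epi8(60);

    __m128i sum = _mm_adds_epi8(a, b);        // saturating add: pins at +127
    __m128i hit = _mm_cmpeq_epi8(sum, v127);  // 0xFF in every saturated lane
    int ok = _mm_test_all_zeros(hit, v127);   // 1 iff no lane reached 127

    if (!ok) {
        // overflow: widen the low eight lanes to 16 bits and redo the add exactly
        __m128i s16 = _mm_add_epi16(_mm_cvtepi8_epi16(a), _mm_cvtepi8_epi16(b));
        short out[8];
        _mm_storeu_si128((__m128i *) out, s16);
        std::printf("lane 0 recomputed at 16 bits: %d\n", out[0]);   // 160
    }
    return 0;
}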
kmeans_impl.h
/*!
 * Modifications Copyright 2017 H2O.ai, Inc.
 */
// original code from https://github.com/NVIDIA/kmeans (Apache V2.0 License)
#pragma once
#include <atomic>
#include <signal.h>
#include <string>
#include <sstream>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#include "kmeans_centroids.h"
#include "kmeans_labels.h"
#include "kmeans_general.h"

namespace kmeans {

//! kmeans clusters data into k groups
/*!
 \param n Number of data points
 \param d Number of dimensions
 \param k Number of clusters
 \param data Data points, in row-major order. This vector must have size n * d, and since it's in row-major order, data point x occupies positions [x * d, (x + 1) * d) in the vector. The vector is passed by reference since it is shared with the caller and not copied.
 \param labels Cluster labels. This vector has size n. The vector is passed by reference since it is shared with the caller and not copied.
 \param centroids Centroid locations, in row-major order. This vector must have size k * d, and since it's in row-major order, centroid x occupies positions [x * d, (x + 1) * d) in the vector. The vector is passed by reference since it is shared with the caller and not copied.
 \param threshold This controls early termination of the kmeans iterations. If the sum of squared distances moved by the centroids between consecutive iterations is less than the threshold, then the iterations are terminated. Defaults to 1e-3.
 \param max_iterations Maximum number of iterations to run
 \return The number of iterations actually performed.
*/
template<typename T>
int kmeans(
    int verbose,
    volatile std::atomic_int *flag,
    int n, int d, int k,
    thrust::device_vector<T> **data,
    thrust::device_vector<int> **labels,
    thrust::device_vector<T> **centroids,
    thrust::device_vector<T> **data_dots,
    std::vector<int> dList,
    int n_gpu,
    int max_iterations,
    double threshold = 1e-3,
    bool do_per_iter_check = true) {

  thrust::device_vector<T> *centroid_dots[n_gpu];
  thrust::device_vector<int> *labels_copy[n_gpu];
  thrust::device_vector<int> *range[n_gpu];
  thrust::device_vector<int> *indices[n_gpu];
  thrust::device_vector<int> *counts[n_gpu];
  thrust::device_vector<T> d_old_centroids;
  thrust::host_vector<int> h_counts(k);
  thrust::host_vector<int> h_counts_tmp(k);
  thrust::host_vector<T> h_centroids(k * d);
  h_centroids = *centroids[0];  // all should be equal
  thrust::host_vector<T> h_centroids_tmp(k * d);
  T *d_distance_sum[n_gpu];
  bool unable_alloc = false;

#pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    log_debug(verbose, "Before kmeans() Allocation: gpu: %d", q);
    safe_cuda(cudaSetDevice(dList[q]));
    safe_cuda(cudaMalloc(&d_distance_sum[q], sizeof(T)));
    try {
      centroid_dots[q] = new thrust::device_vector<T>(k);
      labels_copy[q] = new thrust::device_vector<int>(n / n_gpu);
      range[q] = new thrust::device_vector<int>(n / n_gpu);
      counts[q] = new thrust::device_vector<int>(k);
      indices[q] = new thrust::device_vector<int>(n / n_gpu);
    } catch (thrust::system_error &e) {
      log_error(verbose, "Unable to allocate memory for gpu: %d | n/n_gpu: %d | k: %d | d: %d | error: %s",
                q, n / n_gpu, k, d, e.what());
      unable_alloc = true;
      // throw std::runtime_error(ss.str());
    } catch (std::bad_alloc &e) {
      log_error(verbose, "Unable to allocate memory for gpu: %d | n/n_gpu: %d | k: %d | d: %d | error: %s",
                q, n / n_gpu, k, d, e.what());
      unable_alloc = true;
      // throw std::runtime_error(ss.str());
    }
    if (!unable_alloc) {
      // Create and save "range" for initializing labels
      thrust::copy(thrust::counting_iterator<int>(0),
                   thrust::counting_iterator<int>(n / n_gpu),
                   (*range[q]).begin());
    }
  }

  if (unable_alloc) return (-1);

  log_debug(verbose, "Before kmeans() Iterations");

  int i = 0;
  bool done = false;
  for (; i < max_iterations; i++) {
    log_verbose(verbose, "KMeans - Iteration %d/%d", i, max_iterations);
    if (*flag) continue;
    safe_cuda(cudaSetDevice(dList[0]));
    d_old_centroids = *centroids[dList[0]];

#pragma omp parallel for
    for (int q = 0; q < n_gpu; q++) {
      safe_cuda(cudaSetDevice(dList[q]));
      detail::batch_calculate_distances(verbose, q, n / n_gpu, d, k,
          *data[q], *centroids[q], *data_dots[q], *centroid_dots[q],
          [&](int n, size_t offset, thrust::device_vector<T> &pairwise_distances) {
            detail::relabel(n, k, pairwise_distances, *labels[q], offset);
          });
      log_verbose(verbose, "KMeans - Relabeled.");
      detail::memcpy(*labels_copy[q], *labels[q]);
      detail::find_centroids(q, n / n_gpu, d, k, *data[q], *labels_copy[q],
          *centroids[q], *range[q], *indices[q], *counts[q], n_gpu <= 1);
    }

    // Scale the centroids on host
    if (n_gpu > 1) {
      // Average the centroids from each device
      for (int p = 0; p < k; p++) h_counts[p] = 0;
      for (int q = 0; q < n_gpu; q++) {
        safe_cuda(cudaSetDevice(dList[q]));
        detail::memcpy(h_counts_tmp, *counts[q]);
        detail::streamsync(dList[q]);
        for (int p = 0; p < k; p++) h_counts[p] += h_counts_tmp[p];
      }
      // Zero the centroids only if any of the GPUs actually updated them
      for (int p = 0; p < k; p++) {
        for (int r = 0; r < d; r++) {
          if (h_counts[p] != 0) {
            h_centroids[p * d + r] = 0.0;
          }
        }
      }
      for (int q = 0; q < n_gpu; q++) {
        safe_cuda(cudaSetDevice(dList[q]));
        detail::memcpy(h_centroids_tmp, *centroids[q]);
        detail::streamsync(dList[q]);
        for (int p = 0; p < k; p++) {
          for (int r = 0; r < d; r++) {
            if (h_counts[p] != 0) {
              h_centroids[p * d + r] += h_centroids_tmp[p * d + r];
            }
          }
        }
      }
      for (int p = 0; p < k; p++) {
        for (int r = 0; r < d; r++) {
          // If 0 counts, that means we leave the original centroids
          if (h_counts[p] == 0) {
            h_counts[p] = 1;
          }
          h_centroids[p * d + r] /= h_counts[p];
        }
      }
      // Copy the averaged centroids to each device
#pragma omp parallel for
      for (int q = 0; q < n_gpu; q++) {
        safe_cuda(cudaSetDevice(dList[q]));
        detail::memcpy(*centroids[q], h_centroids);
      }
    }

    // whether to perform per iteration check
    if (do_per_iter_check) {
      safe_cuda(cudaSetDevice(dList[0]));
      T squared_norm = thrust::inner_product(
          d_old_centroids.begin(), d_old_centroids.end(),
          (*centroids[0]).begin(), (T) 0.0, thrust::plus<T>(),
          [=]__device__(T left, T right) {
            T diff = left - right;
            return diff * diff;
          });
      if (squared_norm < threshold) {
        if (verbose) {
          std::cout << "Threshold triggered. Terminating early." << std::endl;
        }
        done = true;
      }
    }

    if (*flag) {
      fprintf(stderr, "Signal caught. Terminated early.\n");
      fflush(stderr);
      *flag = 0;  // reset flag
      done = true;
    }

    if (done || i == max_iterations - 1) {
      // Final relabeling - uses final centroids
#pragma omp parallel for
      for (int q = 0; q < n_gpu; q++) {
        safe_cuda(cudaSetDevice(dList[q]));
        detail::batch_calculate_distances(verbose, q, n / n_gpu, d, k,
            *data[q], *centroids[q], *data_dots[q], *centroid_dots[q],
            [&](int n, size_t offset, thrust::device_vector<T> &pairwise_distances) {
              detail::relabel(n, k, pairwise_distances, *labels[q], offset);
            });
      }
      break;
    }
  }

  //#pragma omp parallel for
  for (int q = 0; q < n_gpu; q++) {
    safe_cuda(cudaSetDevice(dList[q]));
    delete (centroid_dots[q]);
    delete (labels_copy[q]);
    delete (range[q]);
    delete (counts[q]);
    delete (indices[q]);
  }

  log_debug(verbose, "Iterations: %d", i);
  return i;
}
}
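The per-iteration check above reduces the elementwise squared differences between the previous and current centroid buffers and stops once the sum drops below `threshold`. A host-only sketch of the same test with std::inner_product follows; `centroids_converged` is a hypothetical helper, not part of this header.

#include <vector>
#include <numeric>
#include <functional>
#include <cstdio>

// Hypothetical host-side analogue of the device-side convergence test:
// sum of squared per-coordinate centroid movement compared with threshold.
template <typename T>
bool centroids_converged(const std::vector<T> &prev, const std::vector<T> &curr,
                         double threshold) {
    T sq = std::inner_product(prev.begin(), prev.end(), curr.begin(), (T) 0,
                              std::plus<T>(),
                              [](T l, T r) { T d = l - r; return d * d; });
    return sq < (T) threshold;
}

int main() {
    std::vector<double> prev = {0.0, 1.0, 2.0};
    std::vector<double> curr = {0.01, 1.0, 2.0};   // moved by 0.01 on one axis
    std::printf("converged: %d\n", centroids_converged(prev, curr, 1e-3) ? 1 : 0);
    return 0;
}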
GB_unop__identity_fc64_uint64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__identity_fc64_uint64
// op(A') function: GB_unop_tran__identity_fc64_uint64

// C type:   GxB_FC64_t
// A type:   uint64_t
// cast:     GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                          \
{                                                  \
    /* aij = Ax [pA] */                            \
    uint64_t aij = Ax [pA] ;                       \
    /* Cx [pC] = op (cast (aij)) */                \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
    Cx [pC] = z ;                                  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_fc64_uint64
(
    GxB_FC64_t *Cx,         // Cx and Ax may be aliased
    const uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;
        GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_fc64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
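Stripped of the code-generation macros, the kernel's cast rule is simply "real part = (double) aij, imaginary part = 0". A scalar sketch of that rule, using std::complex in place of the C-side GxB_FC64_t:

#include <complex>
#include <cstdint>
#include <cstdio>

int main() {
    uint64_t aij = 42;
    // identity unop combined with a uint64 -> double-complex typecast
    std::complex<double> cij((double) aij, 0.0);
    std::printf("cij = (%g, %g)\n", cij.real(), cij.imag());   // (42, 0)
    return 0;
}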
axmyVector.c
/*
The MIT License (MIT)

Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

extern "C" void FUNC(axmyVector)(const dlong & N,
                                 const dlong & offset,
                                 const dlong & mode,
                                 const dfloat & alpha,
                                 const dfloat * __restrict__ cpu_w,
                                 dfloat * __restrict__ cpu_a){

#ifdef __NEKRS__OMP__
  #pragma omp parallel for collapse(2)
#endif
  for(int fld=0;fld<p_NVec;fld++) {
    for(int i=0;i<N;++i){
      const dlong id = i + fld*offset;
      const dfloat ai = cpu_a[id];
      const dfloat wi = cpu_w[i + mode * fld * offset];
      cpu_a[id] = alpha*ai*wi;
    }
  }
}
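Note that `mode` only changes the weight index: with mode == 0 every field reuses the same N weights, while mode == 1 gives each field its own slice starting at fld*offset. A plain scalar restatement follows; p_NVec is a compile-time constant supplied by the real build, so the value 3 here is an assumption for illustration.

#include <cstdio>

#define p_NVec 3   // assumed value; the actual build defines this constant

// Scalar restatement of axmyVector:
// a[id] = alpha * a[id] * w[i + mode*fld*offset]
void axmy_scalar(int N, int offset, int mode, double alpha,
                 const double *w, double *a) {
    for (int fld = 0; fld < p_NVec; fld++)
        for (int i = 0; i < N; ++i) {
            int id = i + fld * offset;
            a[id] = alpha * a[id] * w[i + mode * fld * offset];
        }
}

int main() {
    double a[6] = {1, 1, 1, 1, 1, 1}, w[2] = {2, 3};
    axmy_scalar(2, 2, /*mode=*/0, 1.0, w, a);     // shared weights across fields
    std::printf("%g %g %g\n", a[0], a[2], a[4]);  // 2 2 2
    return 0;
}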
SE_fg_extend_fcn_mex.c
#include "mex.h" #include "../SE_fgg.h" void SE_FGG_MEX_params(SE_FGG_params*, const mxArray*, int); #define HIN prhs[0] #define OPT prhs[1] #define HOUT plhs[0] // Output #ifndef VERBOSE #define VERBOSE 0 #endif void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] ) { const int N = 1; const double* H_per = mxGetPr(HIN); SE_FGG_params params; SE_FGG_MEX_params(&params, OPT, N); size_t dims[3] = {params.npdims[0], params.npdims[1], params.npdims[2]}; HOUT = mxCreateNumericArray(3, dims, mxDOUBLE_CLASS, mxREAL); SE_FGG_work work; work.H = mxGetPr(HOUT); #ifdef _OPENMP #pragma omp parallel default(shared) #endif { #ifdef THREE_PERIODIC SE_FGG_extend_fcn(&work, H_per, &params); #endif #ifdef TWO_PERIODIC SE2P_FGG_extend_fcn(&work, H_per, &params); #endif #ifdef ONE_PERIODIC SE1P_FGG_extend_fcn(&work, H_per, &params); #endif } }
GB_binop__le_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__le_uint16) // A.*B function (eWiseMult): GB (_AemultB_01__le_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__le_uint16) // A.*B function (eWiseMult): GB (_AemultB_03__le_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__le_uint16) // A*D function (colscale): GB (_AxD__le_uint16) // D*A function (rowscale): GB (_DxB__le_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__le_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__le_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_uint16) // C=scalar+B GB (_bind1st__le_uint16) // C=scalar+B' GB (_bind1st_tran__le_uint16) // C=A+scalar GB (_bind2nd__le_uint16) // C=A'+scalar GB (_bind2nd_tran__le_uint16) // C type: bool // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LE || GxB_NO_UINT16 || GxB_NO_LE_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__le_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__le_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__le_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__le_uint16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__le_uint16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__le_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__le_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__le_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__le_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__le_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__le_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE 
return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__le_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__le_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__le_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
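Underneath the generated scaffolding, bind1st and bind2nd simply curry one operand of `x <= y` with a scalar and skip entries the bitmap marks as absent. A scalar model of the bind1st loop, with GBB/GBX reduced to a plain array test (the arrays here are invented for illustration):

#include <cstdint>
#include <cstdio>

int main() {
    uint16_t x = 5;                        // bound first operand
    uint16_t Bx[4] = {3, 5, 9, 2};
    int8_t   Bb[4] = {1, 1, 0, 1};         // entry 2 is not present
    bool     Cx[4] = {false, false, false, false};

    for (int p = 0; p < 4; p++) {
        if (!Bb[p]) continue;              // the role of GBB (Bb, p)
        Cx[p] = (x <= Bx[p]);              // cij = (x <= bij)
    }
    std::printf("%d %d %d\n", Cx[0], Cx[1], Cx[3]);   // 0 1 0
    return 0;
}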
loop_p.c
#include<stdio.h>
#include<stdlib.h>
#include<omp.h>

#define TAM 64*127*5
#define ITERACOES_TESTE 100000

int main(){
    int i;
    long soma;
    int *vetor = calloc(TAM,sizeof(int));
    if(vetor == NULL){
        printf("Failed to allocate memory");
        return -1;
    }

    /* Serial reference version, kept commented out:
    for(int contador=0; contador < ITERACOES_TESTE; contador++){
        for(int i=0; i<TAM; i++){
            vetor[i] ++;
        }
    }*/

    for(int contador=0; contador < ITERACOES_TESTE; contador++){
        #pragma omp parallel num_threads(2)
        { // thread 0 handles the even indices
            if(omp_get_thread_num()==0){
                for(int i=0; i<TAM; i+=2){
                    vetor[i] ++;
                }
            }else if(omp_get_thread_num()==1){ // thread 1 handles the odd indices
                for(int i=1; i<TAM; i+=2){
                    vetor[i] ++;
                }
            }
        }
    }

    soma = 0;
    for(int i=0; i<TAM; i++){
        soma += vetor[i];
    }
    printf("%ld\n", soma);

    free(vetor);
    return 0;
}
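Because thread 0 writes the even indices and thread 1 the odd ones, both threads keep dirtying the same cache lines (false sharing), which is presumably what this micro-benchmark is set up to measure. For contrast, a sketch of the contiguous split, where each thread owns whole cache lines apart from at most one boundary line:

#include <omp.h>
#include <cstdio>
#include <cstdlib>

#define TAM (64 * 127 * 5)

int main() {
    int *vetor = (int *) calloc(TAM, sizeof(int));
    if (vetor == NULL) return -1;

    // static scheduling hands each thread one contiguous half of the array,
    // so the two threads rarely touch the same cache line
    #pragma omp parallel for schedule(static) num_threads(2)
    for (int i = 0; i < TAM; i++)
        vetor[i]++;

    long soma = 0;
    for (int i = 0; i < TAM; i++) soma += vetor[i];
    std::printf("%ld\n", soma);
    free(vetor);
    return 0;
}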
ThresholdFilter.h
/*
 * ThresholdFilter.h
 *
 *  Created on: 13.06.2016
 *      Author: Darius Malysiak
 */

#ifndef IMAGEPROCESSING_THRESHOLDFILTER_H_
#define IMAGEPROCESSING_THRESHOLDFILTER_H_

#include "../BaseObject.h"
#include "../DataStructures/Matrix.h"
#include "../DataStructures/Image.h"

namespace Lazarus {

template<typename T>
class ThresholdFilter: public Lazarus::BaseObject {
public:

	ThresholdFilter()
	{
	}

	virtual ~ThresholdFilter(){}

	/**
	 * Cuts off all values outside (>,<) the range given by 'limits'; every value
	 * outside that range is set to 'val'.
	 * Returns the filtered image in case of success, otherwise NULL.
	 **/
	Lazarus::Image<T>* filterImage( Lazarus::Image<T>* image, const Lazarus::ChannelLimits<T>& limits, T val )
	{
		unsigned int image_width = image->getm_width();
		unsigned int image_height = image->getm_height();
		unsigned int channel_count = image->getm_channel_count();

		Lazarus::Image<T>* output = new Lazarus::Image<T>( image_width, image_height, image->getm_data_alignment() );

		#pragma omp parallel for
		for(unsigned int i=0; i<image_width; i++)
		{
			Lazarus::FastKTuple<T> new_color(channel_count);
			Lazarus::FastKTuple<T> color(channel_count);

			for(unsigned int j=0; j<image_height; j++)
			{
				image->getPixelFast(color,i,j);

				// over every color channel
				for(unsigned int c=0; c<channel_count; c++)
				{
					if( color[c] >= limits.m_min_values[c] && color[c] <= limits.m_max_values[c] )
					{
						new_color.setElement(c,color[c]);
					}
					else
					{
						new_color.setElement(c,val);
					}
				}

				output->setPixelFast(&new_color,i,j);
			}
		}

		return output;
	}

};

}

#endif /* IMAGEPROCESSING_THRESHOLDFILTER_H_ */
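The per-channel rule above reduces to a keep-or-replace decision on each component. A scalar sketch, with ChannelLimits replaced by plain min/max arguments for illustration:

#include <cstdio>

// Scalar model of the filter's per-channel decision: keep values inside
// [lo, hi], replace everything else with `val`.
template <typename T>
T threshold_channel(T c, T lo, T hi, T val) {
    return (c >= lo && c <= hi) ? c : val;
}

int main() {
    std::printf("%d %d\n",
                threshold_channel(120, 50, 200, 0),   // inside  -> 120
                threshold_channel(230, 50, 200, 0));  // outside -> 0
    return 0;
}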
interpolate_op.h
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <algorithm> #include <string> #include <vector> #include "paddle/fluid/framework/op_registry.h" #include "paddle/phi/core/hostdevice.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace paddle { namespace operators { template <typename T, size_t D, int MajorType = Eigen::RowMajor, typename IndexType = Eigen::DenseIndex> using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>; using Tensor = framework::Tensor; using DataLayout = framework::DataLayout; inline std::vector<int> get_new_shape( const std::vector<const Tensor*>& list_new_shape_tensor) { // get tensor from std::vector<int> vec_new_shape; for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) { auto tensor = list_new_shape_tensor[i]; PADDLE_ENFORCE_EQ(tensor->dims(), phi::make_ddim({1}), platform::errors::InvalidArgument( "The shape of dimension tensor should be [1]," "but received d%.", tensor->dims())); if (platform::is_gpu_place(tensor->place()) || platform::is_mlu_place(tensor->place())) { framework::Tensor temp; paddle::framework::TensorCopySync(*tensor, platform::CPUPlace(), &temp); vec_new_shape.push_back(static_cast<int32_t>(*temp.data<int32_t>())); } else { vec_new_shape.push_back(static_cast<int32_t>(*tensor->data<int32_t>())); } } return vec_new_shape; } template <typename T> inline std::vector<T> get_new_data_from_tensor(const Tensor* new_data_tensor) { std::vector<T> vec_new_data; auto* new_data = new_data_tensor->data<T>(); framework::Tensor cpu_starts_tensor; if (platform::is_gpu_place(new_data_tensor->place()) || platform::is_mlu_place(new_data_tensor->place())) { paddle::framework::TensorCopySync(*new_data_tensor, platform::CPUPlace(), &cpu_starts_tensor); new_data = cpu_starts_tensor.data<T>(); } vec_new_data = std::vector<T>(new_data, new_data + new_data_tensor->numel()); return vec_new_data; } inline void ExtractNCDWH(const framework::DDim& dims, const DataLayout& data_layout, int* N, int* C, int* D, int* H, int* W) { *N = dims[0]; if (dims.size() == 3) { *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[2]; *D = 1; *H = 1; *W = data_layout == DataLayout::kNCHW ? dims[2] : dims[1]; } else if (dims.size() == 4) { *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[3]; *D = 1; *H = data_layout == DataLayout::kNCHW ? dims[2] : dims[1]; *W = data_layout == DataLayout::kNCHW ? dims[3] : dims[2]; } else { *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[4]; *D = data_layout == DataLayout::kNCHW ? dims[2] : dims[1]; *H = data_layout == DataLayout::kNCHW ? dims[3] : dims[2]; *W = data_layout == DataLayout::kNCHW ? 
dims[4] : dims[3]; } } template <typename T> static void NearestNeighborInterpolate(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout& data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); for (int k = 0; k < out_h; k++) { // loop for images int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { output_t(i, j, k, l) = input_t(i, j, in_k, in_l); } else { output_t(i, k, l, j) = input_t(i, in_k, in_l, j); } } } } } } template <typename T> static void LinearInterpolation(const Tensor& input, Tensor* output, const float ratio_w, const int in_w, const int n, const int c, const int out_w, const bool align_corners, const bool align_mode, const DataLayout data_layout) { auto input_t = EigenTensor<T, 3>::From(input); auto output_t = EigenTensor<T, 3>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; // w int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; // w1lambda float d_e = 1.f - d_w; // w2lambda { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(3) #endif for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels for (int l = 0; l < out_w; l++) { // linear interpolation T out_t; if (data_layout == DataLayout::kNCHW) { out_t = input_t(i, j, vx_w[l]) * vd_e[l] + input_t(i, j, vx_e[l]) * vd_w[l]; output_t(i, j, l) = out_t; } else { out_t = input_t(i, vx_w[l], j) * vd_e[l] + input_t(i, vx_e[l], j) * vd_w[l]; output_t(i, l, j) = out_t; } } } } } template <typename T> static void LinearInterpolationGrad(const Tensor& output_grad, Tensor* input_grad, const float ratio_w, const int in_w, const int n, const int c, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 3>::From(*input_grad); auto output_grad_t = EigenTensor<T, 3>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; // w int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; // w1lambda float d_e = 1.f - d_w; // w2lambda for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // linear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(i, j, l); input_grad_t(i, j, x_w) += static_cast<T>(grad * d_e); input_grad_t(i, j, x_e) += static_cast<T>(grad * d_w); } else { const T grad = output_grad_t(i, l, j); input_grad_t(i, x_w, j) += static_cast<T>(grad * d_e); input_grad_t(i, x_e, j) += static_cast<T>(grad * d_w); } } } } } template <typename T> static void BilinearInterpolation(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const bool align_mode, const DataLayout data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vy_n, vy_s; std::vector<float> vd_n, vd_s; vy_n.reserve(out_h); vy_s.reserve(out_h); vd_n.reserve(out_h); vd_s.reserve(out_h); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int k = 0; k < out_h; k++) { int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; { vy_n[k] = y_n; vy_s[k] = y_s; vd_n[k] = d_n; vd_s[k] = d_s; } } std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = (align_mode == 0 && !align_corners) ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(4) #endif for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels for (int k = 0; k < out_h; k++) { // loop for images for (int l = 0; l < out_w; l++) { // bilinear interpolation T out_t; if (data_layout == DataLayout::kNCHW) { out_t = input_t(i, j, vy_n[k], vx_w[l]) * vd_s[k] * vd_e[l] + input_t(i, j, vy_s[k], vx_w[l]) * vd_n[k] * vd_e[l] + input_t(i, j, vy_n[k], vx_e[l]) * vd_s[k] * vd_w[l] + input_t(i, j, vy_s[k], vx_e[l]) * vd_n[k] * vd_w[l]; output_t(i, j, k, l) = out_t; } else { out_t = input_t(i, vy_n[k], vx_w[l], j) * vd_s[k] * vd_e[l] + input_t(i, vy_s[k], vx_w[l], j) * vd_n[k] * vd_e[l] + input_t(i, vy_n[k], vx_e[l], j) * vd_s[k] * vd_w[l] + input_t(i, vy_s[k], vx_e[l], j) * vd_n[k] * vd_w[l]; output_t(i, k, l, j) = out_t; } } } } } } template <typename T> static void TrilinearInterpolation( const Tensor& input, Tensor* output, const float ratio_d, const float ratio_h, const float ratio_w, const int in_d, const int in_h, const int in_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const bool align_mode, const DataLayout& data_layout) { auto input_t = EigenTensor<T, 5>::From(input); auto output_t = EigenTensor<T, 5>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vt_f, vt_b; std::vector<float> vd_f, vd_b; vt_f.reserve(out_d); vt_b.reserve(out_d); vd_f.reserve(out_d); vd_b.reserve(out_d); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int j = 0; j < out_d; j++) { int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5) : static_cast<int>(ratio_d * j); t_f = (t_f > 0) ? t_f : 0; int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1); float idx_src_t = ratio_d * (j + 0.5) - 0.5; idx_src_t = (idx_src_t > 0) ? idx_src_t : 0; float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f; float d_b = 1.f - d_f; { vt_f[j] = t_f; vt_b[j] = t_b; vd_f[j] = d_f; vd_b[j] = d_b; } } std::vector<int> vy_n, vy_s; std::vector<float> vd_n, vd_s; vy_n.reserve(out_h); vy_s.reserve(out_h); vd_n.reserve(out_h); vd_s.reserve(out_h); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int k = 0; k < out_h; k++) { int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; { vy_n[k] = y_n; vy_s[k] = y_s; vd_n[k] = d_n; vd_s[k] = d_s; } } std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = (align_mode == 0 && !align_corners) ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(5) #endif for (int b = 0; b < n; b++) { // loop for batches for (int i = 0; i < c; i++) { // loop for channels for (int j = 0; j < out_d; j++) { // loop for D, H, W for (int k = 0; k < out_h; k++) { for (int l = 0; l < out_w; l++) { // trilinear interpolation if (data_layout == DataLayout::kNCHW) { T out_t = input_t(b, i, vt_f[j], vy_n[k], vx_w[l]) * vd_b[j] * vd_s[k] * vd_e[l] + input_t(b, i, vt_f[j], vy_n[k], vx_e[l]) * vd_b[j] * vd_s[k] * vd_w[l] + input_t(b, i, vt_f[j], vy_s[k], vx_w[l]) * vd_b[j] * vd_n[k] * vd_e[l] + input_t(b, i, vt_f[j], vy_s[k], vx_e[l]) * vd_b[j] * vd_n[k] * vd_w[l] + input_t(b, i, vt_b[j], vy_n[k], vx_w[l]) * vd_f[j] * vd_s[k] * vd_e[l] + input_t(b, i, vt_b[j], vy_n[k], vx_e[l]) * vd_f[j] * vd_s[k] * vd_w[l] + input_t(b, i, vt_b[j], vy_s[k], vx_w[l]) * vd_f[j] * vd_n[k] * vd_e[l] + input_t(b, i, vt_b[j], vy_s[k], vx_e[l]) * vd_f[j] * vd_n[k] * vd_w[l]; output_t(b, i, j, k, l) = out_t; } else { T out_t = input_t(b, vt_f[j], vy_n[k], vx_w[l], i) * vd_b[j] * vd_s[k] * vd_e[l] + input_t(b, vt_f[j], vy_n[k], vx_e[l], i) * vd_b[j] * vd_s[k] * vd_w[l] + input_t(b, vt_f[j], vy_s[k], vx_w[l], i) * vd_b[j] * vd_n[k] * vd_e[l] + input_t(b, vt_f[j], vy_s[k], vx_e[l], i) * vd_b[j] * vd_n[k] * vd_w[l] + input_t(b, vt_b[j], vy_n[k], vx_w[l], i) * vd_f[j] * vd_s[k] * vd_e[l] + input_t(b, vt_b[j], vy_n[k], vx_e[l], i) * vd_f[j] * vd_s[k] * vd_w[l] + input_t(b, vt_b[j], vy_s[k], vx_w[l], i) * vd_f[j] * vd_n[k] * vd_e[l] + input_t(b, vt_b[j], vy_s[k], vx_e[l], i) * vd_f[j] * vd_n[k] * vd_w[l]; output_t(b, j, k, l, i) = out_t; } } } } } } } template <typename T> HOSTDEVICE inline T cubic_convolution1(T x, T A) { return ((A + 2) * x - (A + 3)) * x * x + 1; } template <typename T> HOSTDEVICE inline T cubic_convolution2(T x, T A) { return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A; } template <typename T> HOSTDEVICE inline void get_cubic_upsample_coefficients(T coeffs[4], T t) { T A = -0.75; T x1 = t; coeffs[0] = cubic_convolution2<T>(x1 + 1.0, A); coeffs[1] = cubic_convolution1<T>(x1, A); // opposite coefficients T x2 = 1.0 - t; coeffs[2] = cubic_convolution1<T>(x2, A); coeffs[3] = cubic_convolution2<T>(x2 + 1.0, A); } template <typename T> static inline T cubic_interp(T x0, T x1, T x2, T x3, T t) { T coeffs[4]; get_cubic_upsample_coefficients<T>(coeffs, t); return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3]; } template <typename T> static void BicubicInterpolation(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); for (int k = 0; k < out_h; k++) { // loop for images T y_n = align_corners ? static_cast<T>(ratio_h * k) : static_cast<T>(ratio_h * (k + 0.5) - 0.5); int input_y = floorf(y_n); const T y_t = y_n - input_y; for (int l = 0; l < out_w; l++) { T x_n = align_corners ? 
static_cast<T>(ratio_w * l) : static_cast<T>(ratio_w * (l + 0.5) - 0.5); int input_x = floorf(x_n); const T x_t = x_n - input_x; for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels T coefficients[4]; // interp 4 times in x direction for (int ii = 0; ii < 4; ii++) { int access_y = std::max(std::min(input_y - 1 + ii, in_h - 1), static_cast<int>(0)); int access_x_0 = std::max(std::min(input_x - 1, in_w - 1), static_cast<int>(0)); int access_x_1 = std::max(std::min(input_x + 0, in_w - 1), static_cast<int>(0)); int access_x_2 = std::max(std::min(input_x + 1, in_w - 1), static_cast<int>(0)); int access_x_3 = std::max(std::min(input_x + 2, in_w - 1), static_cast<int>(0)); if (data_layout == DataLayout::kNCHW) { coefficients[ii] = cubic_interp<T>(input_t(i, j, access_y, access_x_0), input_t(i, j, access_y, access_x_1), input_t(i, j, access_y, access_x_2), input_t(i, j, access_y, access_x_3), x_t); } else { coefficients[ii] = cubic_interp<T>(input_t(i, access_y, access_x_0, j), input_t(i, access_y, access_x_1, j), input_t(i, access_y, access_x_2, j), input_t(i, access_y, access_x_3, j), x_t); } } // interp y direction if (data_layout == DataLayout::kNCHW) { output_t(i, j, k, l) = cubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } else { output_t(i, k, l, j) = cubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } } } } } } template <typename T> static void NearestNeighborInterpolateGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); for (int k = 0; k < out_h; k++) { // loop for images int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { input_grad_t(i, j, in_k, in_l) += output_grad_t(i, j, k, l); } else { input_grad_t(i, in_k, in_l, j) += output_grad_t(i, k, l, j); } } } } } } template <typename T> static void BilinearInterpolationGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int k = 0; k < out_h; k++) { // loop for images int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? 
(x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // bilinear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(i, j, k, l); input_grad_t(i, j, y_n, x_w) += static_cast<T>(grad * d_s * d_e); input_grad_t(i, j, y_s, x_w) += static_cast<T>(grad * d_n * d_e); input_grad_t(i, j, y_n, x_e) += static_cast<T>(grad * d_s * d_w); input_grad_t(i, j, y_s, x_e) += static_cast<T>(grad * d_n * d_w); } else { const T grad = output_grad_t(i, k, l, j); input_grad_t(i, y_n, x_w, j) += static_cast<T>(grad * d_s * d_e); input_grad_t(i, y_s, x_w, j) += static_cast<T>(grad * d_n * d_e); input_grad_t(i, y_n, x_e, j) += static_cast<T>(grad * d_s * d_w); input_grad_t(i, y_s, x_e, j) += static_cast<T>(grad * d_n * d_w); } } } } } } template <typename T> static void TrilinearInterpolationGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_d, const float ratio_h, const float ratio_w, const int in_d, const int in_h, const int in_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 5>::From(*input_grad); auto output_grad_t = EigenTensor<T, 5>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int j = 0; j < out_d; j++) { // loop for D int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5) : static_cast<int>(ratio_d * j); t_f = (t_f > 0) ? t_f : 0; int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1); float idx_src_t = ratio_d * (j + 0.5) - 0.5; idx_src_t = (idx_src_t > 0) ? idx_src_t : 0; float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f; float d_b = 1.f - d_f; for (int k = 0; k < out_h; k++) { // loop for H int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; for (int l = 0; l < out_w; l++) { // loop for W int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; for (int b = 0; b < n; b++) { // loop for batches for (int i = 0; i < c; i++) { // loop for channels // trilinear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(b, i, j, k, l); input_grad_t(b, i, t_f, y_n, x_w) += static_cast<T>(grad * d_b * d_s * d_e); input_grad_t(b, i, t_f, y_n, x_e) += static_cast<T>(grad * d_b * d_s * d_w); input_grad_t(b, i, t_f, y_s, x_w) += static_cast<T>(grad * d_b * d_n * d_e); input_grad_t(b, i, t_f, y_s, x_e) += static_cast<T>(grad * d_b * d_n * d_w); input_grad_t(b, i, t_b, y_n, x_w) += static_cast<T>(grad * d_f * d_s * d_e); input_grad_t(b, i, t_b, y_n, x_e) += static_cast<T>(grad * d_f * d_s * d_w); input_grad_t(b, i, t_b, y_s, x_w) += static_cast<T>(grad * d_f * d_n * d_e); input_grad_t(b, i, t_b, y_s, x_e) += static_cast<T>(grad * d_f * d_n * d_w); } else { const T grad = output_grad_t(b, j, k, l, i); input_grad_t(b, t_f, y_n, x_w, i) += static_cast<T>(grad * d_b * d_s * d_e); input_grad_t(b, t_f, y_n, x_e, i) += static_cast<T>(grad * d_b * d_s * d_w); input_grad_t(b, t_f, y_s, x_w, i) += static_cast<T>(grad * d_b * d_n * d_e); input_grad_t(b, t_f, y_s, x_e, i) += static_cast<T>(grad * d_b * d_n * d_w); input_grad_t(b, t_b, y_n, x_w, i) += static_cast<T>(grad * d_f * d_s * d_e); input_grad_t(b, t_b, y_n, x_e, i) += static_cast<T>(grad * d_f * d_s * d_w); input_grad_t(b, t_b, y_s, x_w, i) += static_cast<T>(grad * d_f * d_n * d_e); input_grad_t(b, t_b, y_s, x_e, i) += static_cast<T>(grad * d_f * d_n * d_w); } } } } } } } template <typename T> static void BicubicInterpolationGrad(const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); for (int k = 0; k < out_h; k++) { // loop for images T y_n = align_corners ? static_cast<T>(ratio_h * k) : static_cast<T>(ratio_h * (k + 0.5) - 0.5); int input_y = floorf(y_n); T y_t = y_n - input_y; for (int l = 0; l < out_w; l++) { T x_n = align_corners ? 
static_cast<T>(ratio_w * l) : static_cast<T>(ratio_w * (l + 0.5) - 0.5); int input_x = floorf(x_n); T x_t = x_n - input_x; T x_coeffs[4]; T y_coeffs[4]; get_cubic_upsample_coefficients<T>(x_coeffs, x_t); get_cubic_upsample_coefficients<T>(y_coeffs, y_t); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // bicubic interpolation grad for (int ii = 0; ii < 4; ii++) { for (int jj = 0; jj < 4; jj++) { int access_x = std::max(std::min(input_x - 1 + ii, in_w - 1), static_cast<int>(0)); int access_y = std::max(std::min(input_y - 1 + jj, in_h - 1), static_cast<int>(0)); if (data_layout == DataLayout::kNCHW) { T grad = output_grad_t(i, j, k, l); input_grad_t(i, j, access_y, access_x) += grad * y_coeffs[jj] * x_coeffs[ii]; } else { T grad = output_grad_t(i, k, l, j); input_grad_t(i, access_y, access_x, j) += grad * y_coeffs[jj] * x_coeffs[ii]; } } } } } } } } template <typename T> static void Interpolate1DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } else { float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_w = out_size_data[0]; } } PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_w}; } else { dim_out = {n, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_w = 0.f; if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } if ("linear" == interp_method) { LinearInterpolation<T>(input, output, ratio_w, in_w, n, c, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } else { float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } } PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_h, out_w}; } else { dim_out = {n, out_h, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } if ("bilinear" == interp_method) { BilinearInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighborInterpolate<T>(input, output, ratio_h, ratio_w, n, c, out_h, out_w, align_corners, data_layout); } else if ("bicubic" == interp_method) { BicubicInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } else { float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_d = static_cast<int>(in_d * scale); out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } } PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument( "out_d in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_d, out_h, out_w}; } else { dim_out = {n, out_d, out_h, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(in_d) / out_d; } if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } if ("trilinear" == interp_method) { TrilinearInterpolation<T>(input, output, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate1DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_w = out_size_data[0]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_w}; } else { dim_grad = {n, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); phi::funcs::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_w = 0.f; if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } if ("linear" == interp_method) { LinearInterpolationGrad<T>(output_grad, input_grad, ratio_w, in_w, n, c, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_h, in_w}; } else { dim_grad = {n, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); phi::funcs::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } if ("bilinear" == interp_method) { BilinearInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighborInterpolateGrad<T>(output_grad, input_grad, ratio_h, ratio_w, n, c, out_h, out_w, align_corners, data_layout); } else if ("bicubic" == interp_method) { BicubicInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_d = static_cast<int>(in_d * scale); out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_d, in_h, in_w}; } else { dim_grad = {n, in_d, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); phi::funcs::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(in_d) / out_d; } if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } if ("trilinear" == interp_method) { TrilinearInterpolationGrad<T>( output_grad, input_grad, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout); } } template <typename T> class InterpolateKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); auto input_dims = input->dims(); if (input_dims.size() == 3) { // 1D interpolation Interpolate1DCPUFwd<T>(ctx, *input, output); } else if (input_dims.size() == 4) { // 2D interpolation Interpolate2DCPUFwd<T>(ctx, *input, output); } else if (input_dims.size() == 5) { // 3D interpolation Interpolate3DCPUFwd<T>(ctx, *input, output); } } }; template <typename T> class InterpolateGradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto output_grad_dims = output_grad->dims(); if (output_grad_dims.size() == 3) { // 1D interpolation grad Interpolate1DCPUBwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 4) { // 2D interpolation grad Interpolate2DCPUBwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 5) { // 3D interpolation grad Interpolate3DCPUBwd<T>(ctx, input_grad, *output_grad); } } }; } // namespace operators } // namespace paddle
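// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original sources above or below): every
// interpolation kernel in the header above maps an output index to a source
// coordinate with one of two conventions, selected by align_corners and
// align_mode -- the half-pixel mapping ratio*(l + 0.5) - 0.5 when
// align_mode == 0 && !align_corners, and the legacy scaling ratio*l otherwise.
// The minimal standalone program below reproduces that mapping for the 1-D
// linear case so the "d_w"/"d_e" (w1lambda/w2lambda) weights can be inspected
// in isolation; src_index_and_weights is a local helper invented for this
// sketch, not a Paddle API.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cstdio>

// Mirror of the index/weight computation used by LinearInterpolation above.
static void src_index_and_weights(int l, float ratio_w, int in_w,
                                  bool align_corners, int align_mode,
                                  int* x_w, int* x_e, float* d_w, float* d_e) {
  const bool align_flag = (align_mode == 0 && !align_corners);
  // west source index: half-pixel mapping or legacy scaling
  *x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5f) - 0.5f)
                    : static_cast<int>(ratio_w * l);
  *x_w = std::max(*x_w, 0);
  *x_e = (*x_w < in_w - 1) ? *x_w + 1 : *x_w;  // east neighbor, clamped
  float idx_src_x = ratio_w * (l + 0.5f) - 0.5f;
  idx_src_x = std::max(idx_src_x, 0.0f);
  *d_w = align_flag ? idx_src_x - *x_w : ratio_w * l - *x_w;  // w1lambda
  *d_e = 1.0f - *d_w;                                         // w2lambda
}

int main() {
  const int in_w = 4, out_w = 8;
  // The ratio follows the forward kernels: (in-1)/(out-1) if align_corners,
  // else in/out (the callers above additionally guard with out_w > 1).
  for (int align_corners = 0; align_corners <= 1; ++align_corners) {
    const float ratio_w = align_corners
                              ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(in_w) / out_w;
    for (int l = 0; l < out_w; ++l) {
      int x_w, x_e;
      float d_w, d_e;
      src_index_and_weights(l, ratio_w, in_w, align_corners != 0,
                            /*align_mode=*/0, &x_w, &x_e, &d_w, &d_e);
      // With align_corners the endpoints land exactly on input pixels 0 and
      // in_w-1 (d_w == 0 there); without it the half-pixel mapping spreads
      // fractional weights across the interior.
      std::printf("align_corners=%d l=%d -> x_w=%d x_e=%d d_w=%.3f d_e=%.3f\n",
                  align_corners, l, x_w, x_e, d_w, d_e);
    }
  }
  return 0;
}
// ---------------------------------------------------------------------------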
assignment.h
/* Portions Copyright 2019-2021 Xuesong Zhou and Peiheng Li, Cafer Avci
 * If you help write or modify the code, please also list your names here.
 * The reason for having the copyright info here is to ensure that all modified versions, as a whole, remain under the GPL
 * and to further prevent a violation of the GPL.
 *
 * More about "How to use GNU licenses for your own software"
 * http://www.gnu.org/licenses/gpl-howto.html
 */

// Peiheng, 02/03/21, remove them later after adopting better casting
#pragma warning(disable : 4305 4267 4018)
// stop warning: "conversion from 'int' to 'float', possible loss of data"
#pragma warning(disable: 4244)

#ifdef _WIN32
#include "pch.h"
#endif

#include "config.h"
#include "utils.h"
#include "DTA.h"

#include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip>
#include <string>
#include <cstring>
#include <cstdio>
#include <ctime>
#include <cmath>
#include <algorithm>
#include <functional>
#include <stack>
#include <list>
#include <vector>
#include <map>

#include <omp.h>

using std::max;
using std::min;
using std::cout;
using std::endl;
using std::string;
using std::vector;
using std::map;
using std::ifstream;
using std::ofstream;
using std::istringstream;

// Reset per-period link volumes, then re-accumulate them from the path volumes
// stored in the column pool; optionally self-reduce each path volume by the
// MSA factor k/(k+1) so the next shortest path can receive 1/(k+1) of the flow.
void g_reset_and_update_link_volume_based_on_columns(int number_of_links, int iteration_index, bool b_self_reducing_path_volume, bool b_sensitivity_analysis_flag)
{
    // record the link volume of this iteration (used later in sensitivity analysis)
    if (b_sensitivity_analysis_flag)
    {
        for (int i = 0; i < number_of_links; ++i)
        {
            for (int tau = 0; tau < assignment.g_number_of_demand_periods; ++tau)
            {
                // used in travel time calculation
                g_link_vector[i].VDF_period[tau].link_volume_per_iteration_map[iteration_index] =
                    g_link_vector[i].PCE_volume_per_period[tau] + g_link_vector[i].VDF_period[tau].preload;
            }
        }
    }

    for (int i = 0; i < number_of_links; ++i)
    {
        for (int tau = 0; tau < assignment.g_number_of_demand_periods; ++tau)
        {
            // used in travel time calculation
            g_link_vector[i].PCE_volume_per_period[tau] = 0;
            g_link_vector[i].person_volume_per_period[tau] = 0;

            // reserved for BPR-X
            g_link_vector[i].queue_link_distance_VDF_perslot[tau] = 0;

            for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at)
                g_link_vector[i].person_volume_per_period_per_at[tau][at] = 0;
        }
    }

    if (iteration_index >= 0)
    {
        for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at)  //m
        {
            std::map<int, CColumnPath>::iterator it;
            int zone_size = g_zone_vector.size();
            int tau_size = assignment.g_DemandPeriodVector.size();

            float link_volume_contributed_by_path_volume;

            int link_seq_no;
            double PCE_ratio = 1;
            double OCC_ratio = 1;

            int nl;

            std::map<int, CColumnPath>::iterator it_begin;
            std::map<int, CColumnPath>::iterator it_end;

            int column_vector_size;
            CColumnVector* p_column_pool;

            for (int orig = 0; orig < zone_size; ++orig)  // o
            {
                for (int dest = 0; dest < zone_size; ++dest)  // d
                {
                    for (int tau = 0; tau < tau_size; ++tau)  // tau
                    {
                        p_column_pool = &(assignment.g_column_pool[orig][dest][at][tau]);
                        if (p_column_pool->od_volume > 0)
                        {
                            column_vector_size = p_column_pool->path_node_sequence_map.size();

                            it_begin = p_column_pool->path_node_sequence_map.begin();
                            it_end = p_column_pool->path_node_sequence_map.end();
                            for (it = it_begin; it != it_end; ++it)
                            {
                                link_volume_contributed_by_path_volume = it->second.path_volume;  // assign all OD flow to this first path

                                // add path volume to link volume
                                for (nl = 0; nl < it->second.m_link_size; ++nl)  // arc a
                                {
                                    link_seq_no = it->second.path_link_vector[nl];

                                    // MSA updating for the existing column pools;
                                    // if iteration_index = 0, then no flow discount is used (for the column pool case)
                                    PCE_ratio = g_link_vector[link_seq_no].VDF_period[tau].pce[at];  // updated on 08/16/2021: link-dependent and agent-type-dependent PCE factor, mainly for trucks
                                    OCC_ratio = g_link_vector[link_seq_no].VDF_period[tau].occ[at];  // updated on 08/16/2021: link-dependent and agent-type-dependent occupancy factor
#pragma omp critical
                                    {
                                        g_link_vector[link_seq_no].PCE_volume_per_period[tau] += link_volume_contributed_by_path_volume * PCE_ratio;
                                        g_link_vector[link_seq_no].person_volume_per_period[tau] += link_volume_contributed_by_path_volume * OCC_ratio;
                                        g_link_vector[link_seq_no].person_volume_per_period_per_at[tau][at] += link_volume_contributed_by_path_volume;  // pure volume, not considering PCE
                                    }
                                }

                                // this self-deducting action does not apply to agents with fixed routing policies.
                                if (!p_column_pool->bfixed_route && b_self_reducing_path_volume)
                                {
                                    // after the link volume "tally", self-deduct the path volume by 1/(k+1)
                                    // (i.e. keep a k/(k+1) ratio of the previous flow) so that the following
                                    // shortest path will receive 1/(k+1) of the flow
                                    it->second.path_volume = it->second.path_volume * (float(iteration_index) / float(iteration_index + 1));
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}

double update_link_travel_time_and_cost(int inner_iteration_number)
{
    if (assignment.assignment_mode == 2)
    {
        //compute the time-dependent delay from simulation
        //for (int l = 0; l < g_link_vector.size(); l++)
        //{
        //    float volume = assignment.m_LinkCumulativeDepartureVector[l][assignment.g_number_of_simulation_intervals - 1];  // link flow rates
        //    float waiting_time_count = 0;

        //    for (int tt = 0; tt < assignment.g_number_of_simulation_intervals; tt++)
        //    {
        //        waiting_time_count += assignment.m_link_TD_waiting_time[l][tt / number_of_simu_intervals_in_min];  // tally the total waiting count
        //    }

        //    for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); tau++)
        //    {
        //        float travel_time = g_link_vector[l].free_flow_travel_time_in_min + waiting_time_count * number_of_seconds_per_interval / max(1, volume) / 60;
        //        g_link_vector[l].travel_time_per_period[tau] = travel_time;
        //    }
    }

#pragma omp parallel for
    for (int i = 0; i < g_link_vector.size(); ++i)
    {
        // step 1: travel time based on VDF
        g_link_vector[i].calculate_dynamic_VDFunction(inner_iteration_number, false, g_link_vector[i].vdf_type);

        for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau)
        {
            for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at)
            {
                float PCE_agent_type = assignment.g_AgentTypeVector[at].PCE;

                // step 2: marginal cost for SO
                g_link_vector[i].calculate_marginal_cost_for_agent_type(tau, at, PCE_agent_type);

                //if (g_debug_level >= 3 && assignment.assignment_mode >= 2 && assignment.g_pFileDebugLog != NULL)
                //    fprintf(assignment.g_pFileDebugLog, "Update link cost: link %d->%d: tau = %d, at = %d, travel_marginal = %.3f\n",
                //        g_node_vector[g_link_vector[l].from_node_seq_no].node_id,
                //        g_node_vector[g_link_vector[l].to_node_seq_no].node_id,
                //        tau, at,
                //        g_link_vector[l].travel_marginal_cost_per_period[tau][at]);
            }
        }
    }

    double total_network_travel_time = 0;
    for (int i = 0; i < g_link_vector.size(); ++i)
    {
        for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau)
        {
            total_network_travel_time += g_link_vector[i].VDF_period[tau].avg_travel_time * g_link_vector[i].VDF_period[tau].link_volume;
        }
    }

    return total_network_travel_time;
}

// changes here are also for ODME; no need to implement the changes in this function for now
double g_reset_and_update_link_volume_based_on_ODME_columns(int number_of_links, int iteration_no, double& system_gap)
{
    float
total_gap = 0; float sub_total_gap_link_count = 0; float sub_total_system_gap_count = 0; system_gap = 0; float sub_total_gap_P_count = 0; float sub_total_gap_A_count = 0; double total_system_travel_cost = 0; double total_system_travel_time = 0; double total_system_demand = 0; double total_system_UE_gap = 0; // reset the link volume for (int i = 0; i < number_of_links; ++i) { for (int tau = 0; tau < assignment.g_number_of_demand_periods; ++tau) { // used in travel time calculation g_link_vector[i].PCE_volume_per_period[tau] = 0; g_link_vector[i].person_volume_per_period[tau] = 0; for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) g_link_vector[i].person_volume_per_period_per_at[tau][at] = 0; } } // reset the estimated production and attraction for (int orig = 0; orig < g_zone_vector.size(); ++orig) // o { g_zone_vector[orig].est_attraction = 0; g_zone_vector[orig].est_production = 0; } for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) //m { int zone_size = g_zone_vector.size(); int tau_size = assignment.g_DemandPeriodVector.size(); float PCE_ratio = assignment.g_AgentTypeVector[at].PCE; float OCC_ratio = assignment.g_AgentTypeVector[at].OCC; #pragma omp parallel for for (int orig = 0; orig < zone_size; ++orig) // o { std::map<int, CColumnPath>::iterator it; float link_volume_contributed_by_path_volume; int nl; std::map<int, CColumnPath>::iterator it_begin; std::map<int, CColumnPath>::iterator it_end; int column_vector_size; CColumnVector* p_column_pool; for (int dest = 0; dest < zone_size; ++dest) //d { for (int tau = 0; tau < tau_size; ++tau) //tau { p_column_pool = &(assignment.g_column_pool[orig][dest][at][tau]); if (p_column_pool->od_volume > 0) { // continuous: type 0 column_vector_size = p_column_pool->path_node_sequence_map.size(); it_begin = p_column_pool->path_node_sequence_map.begin(); it_end = p_column_pool->path_node_sequence_map.end(); double least_cost = 999999; int least_cost_path_seq_no = -1; int least_cost_path_node_sum_index = -1; int path_seq_count = 0; double path_toll = 0; double path_gradient_cost = 0; double path_distance = 0; double path_travel_time = 0; int link_seq_no; double link_travel_time; double total_switched_out_path_volume = 0; double step_size = 0; double previous_path_volume = 0; least_cost = 999999; path_seq_count = 0; it_begin = p_column_pool->path_node_sequence_map.begin(); it_end = p_column_pool->path_node_sequence_map.end(); for (it = it_begin; it != it_end; ++it) { total_system_demand += it->second.path_volume; path_toll = 0; path_gradient_cost = 0; path_distance = 0; path_travel_time = 0; for (int nl = 0; nl < it->second.m_link_size; ++nl) // arc a { link_seq_no = it->second.path_link_vector[nl]; link_travel_time = g_link_vector[link_seq_no].travel_time_per_period[tau]; path_travel_time += link_travel_time; } it->second.path_toll = path_toll; it->second.path_travel_time = path_travel_time; total_system_travel_time += (it->second.path_travel_time * it->second.path_volume); if (column_vector_size == 1) // only one path { break; } if (path_travel_time < least_cost) { least_cost = path_travel_time; least_cost_path_seq_no = it->second.path_seq_no; least_cost_path_node_sum_index = it->first; } #pragma omp critical { total_system_travel_cost += (it->second.path_travel_time * it->second.path_volume); } } // end for each path if (column_vector_size >= 2) { // step 2: calculate gradient cost difference for each column path total_switched_out_path_volume = 0; for (it = it_begin; it != it_end; ++it) { if (it->second.path_seq_no != 
least_cost_path_seq_no) //for non-least cost path { it->second.UE_gap = it->second.path_travel_time - least_cost; it->second.UE_relative_gap = (it->second.path_travel_time - least_cost) / max(0.0001, least_cost); #pragma omp critical { total_system_UE_gap += (it->second.UE_gap * it->second.path_volume); } } } } // end for each path for (it = it_begin; it != it_end; ++it) // path k { link_volume_contributed_by_path_volume = it->second.path_volume; // assign all OD flow to this first path #pragma omp critical { g_zone_vector[orig].est_production += it->second.path_volume; g_zone_vector[dest].est_attraction += it->second.path_volume; } // add path volume to link volume for (nl = 0; nl < it->second.m_link_size; ++nl) // arc a { link_seq_no = it->second.path_link_vector[nl]; // MSA updating for the existing column pools // if iteration_index = 0; then update no flow discount is used (for the column pool case) #pragma omp critical { g_link_vector[link_seq_no].PCE_volume_per_period[tau] += link_volume_contributed_by_path_volume * PCE_ratio; g_link_vector[link_seq_no].person_volume_per_period[tau] += link_volume_contributed_by_path_volume * OCC_ratio; g_link_vector[link_seq_no].person_volume_per_period_per_at[tau][at] += link_volume_contributed_by_path_volume; // pure volume, not consider PCE } } } } } } } } int total_link_count = 0; // calcualte deviation for each measurement type for (int i = 0; i < number_of_links; ++i) { g_link_vector[i].calculate_dynamic_VDFunction(iteration_no, false, g_link_vector[i].vdf_type); for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau) //tau { if (assignment.g_DemandPeriodVector[tau].number_of_demand_files == 0) continue; if (g_link_vector[i].VDF_period[tau].obs_count >= 1) // with data { g_link_vector[i].VDF_period[tau].est_count_dev = g_link_vector[i].PCE_volume_per_period[tau] + g_link_vector[i].VDF_period[tau].preload - g_link_vector[i].VDF_period[tau].obs_count; if (dtalog.debug_level() == 2) { dtalog.output() << "link " << g_node_vector[g_link_vector[i].from_node_seq_no].node_id << "->" << g_node_vector[g_link_vector[i].to_node_seq_no].node_id << "obs:, " << g_link_vector[i].VDF_period[tau].obs_count << "est:, " << g_link_vector[i].PCE_volume_per_period[tau] << "dev:," << g_link_vector[i].VDF_period[tau].est_count_dev << endl; } if (g_link_vector[i].VDF_period[tau].upper_bound_flag == 0) { total_gap += abs(g_link_vector[i].VDF_period[tau].est_count_dev); sub_total_gap_link_count += fabs(g_link_vector[i].VDF_period[tau].est_count_dev / g_link_vector[i].VDF_period[tau].obs_count); sub_total_system_gap_count += g_link_vector[i].VDF_period[tau].est_count_dev / g_link_vector[i].VDF_period[tau].obs_count; } else { // upper bound constraints if (g_link_vector[i].VDF_period[tau].est_count_dev > 0) { total_gap += abs(g_link_vector[i].VDF_period[tau].est_count_dev); sub_total_gap_link_count += fabs(g_link_vector[i].VDF_period[tau].est_count_dev / g_link_vector[i].VDF_period[tau].obs_count); sub_total_system_gap_count += g_link_vector[i].VDF_period[tau].est_count_dev / g_link_vector[i].VDF_period[tau].obs_count; } } total_link_count += 1; } } } //for (int orig = 0; orig < g_zone_vector.size(); ++orig) // o //{ // if (g_zone_vector[orig].obs_attraction >= 1) // with observation // { // g_zone_vector[orig].est_attraction_dev = g_zone_vector[orig].est_attraction - g_zone_vector[orig].obs_attraction; // if (dtalog.debug_level() == 2) // { // dtalog.output() << "zone " << g_zone_vector[orig].zone_id << "A: obs:" << g_zone_vector[orig].obs_attraction // << 
",est:," << g_zone_vector[orig].est_attraction << ",dev:," << g_zone_vector[orig].est_attraction_dev << endl; // } // total_gap += abs(g_zone_vector[orig].est_attraction_dev); // sub_total_gap_A_count += g_zone_vector[orig].est_attraction_dev / g_zone_vector[orig].obs_attraction; // } // if (g_zone_vector[orig].obs_production >= 1) // with observation // { // g_zone_vector[orig].est_production_dev = g_zone_vector[orig].est_production - g_zone_vector[orig].obs_production; // if (dtalog.debug_level() == 2) // { // dtalog.output() << "zone " << g_zone_vector[orig].zone_id << "P: obs:" << g_zone_vector[orig].obs_production // << ",est:," << g_zone_vector[orig].est_production << ",dev:," << g_zone_vector[orig].est_production_dev << endl; // } // total_gap += abs(g_zone_vector[orig].est_production_dev); // sub_total_gap_P_count += g_zone_vector[orig].est_production_dev / g_zone_vector[orig].obs_production; // } //} dtalog.output() << "ODME #" << iteration_no << ", link MAE= " << total_gap / max(1, total_link_count) << ",link_MAPE: " << (sub_total_gap_link_count) / max(1, total_link_count) * 100 << "%,system_MPE: " << (sub_total_system_gap_count) / max(1, total_link_count) * 100 << "%,avg_tt = " << total_system_travel_time / max(0.1, total_system_demand) << "(min) " << ",UE gap =" << total_system_UE_gap / max(0.00001, total_system_demand) << "(min)" << " = (" << total_system_UE_gap / max(0.00001, total_system_travel_time) * 100 << " %)" << endl; double gap = sub_total_gap_link_count / max(1, total_link_count); system_gap = sub_total_system_gap_count / max(1, total_link_count); return gap; } void g_update_gradient_cost_and_assigned_flow_in_column_pool(Assignment& assignment, int inner_iteration_number, bool b_sensitivity_analysis_flag) { double total_system_cost_gap = 0; float total_relative_gap = 0; double total_system_travel_cost = 0; double total_system_travel_time = 0; double total_system_demand = 0; // we can have a recursive formulat to reupdate the current link volume by a factor of k/(k+1), // and use the newly generated path flow to add the additional 1/(k+1) g_reset_and_update_link_volume_based_on_columns(g_link_vector.size(), inner_iteration_number, false, b_sensitivity_analysis_flag); if (b_sensitivity_analysis_flag == true) // check estimation counts { for (int i = 0; i < g_link_vector.size(); ++i) { for (int tau = 0; tau < assignment.g_number_of_demand_periods; ++tau) { if (g_link_vector[i].VDF_period[tau].obs_count >= 1) // with data { g_link_vector[i].VDF_period[tau].est_count_dev = g_link_vector[i].PCE_volume_per_period[tau] + g_link_vector[i].VDF_period[tau].preload - g_link_vector[i].VDF_period[tau].obs_count; } } } } // step 4: based on newly calculated path volumn, update volume based travel time, and update volume based resource balance, update gradie update_link_travel_time_and_cost(inner_iteration_number); // step 0 // assignment.summary_file << ",iteration,key,o,d,at,tau,volume,"<< endl; //step 1: calculate shortest path at inner iteration of column flow updating //#pragma omp parallel for for (int orig = 0; orig < g_zone_vector.size(); ++orig) // o { CColumnVector* p_column_pool; std::map<int, CColumnPath>::iterator it, it_begin, it_end; int column_vector_size; double least_gradient_cost = 999999; int least_gradient_cost_path_seq_no = -1; int least_gradient_cost_path_node_sum_index = -1; int path_seq_count = 0; double path_toll = 0; double path_gradient_cost = 0; double path_distance = 0; double path_travel_time = 0; int link_seq_no; double link_travel_time; double 
total_switched_out_path_volume = 0; double step_size = 0; double previous_path_volume = 0; for (int dest = 0; dest < g_zone_vector.size(); ++dest) //d { for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) //m { for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau) //tau { p_column_pool = &(assignment.g_column_pool[orig][dest][at][tau]); if (p_column_pool->od_volume > 0) { double diff = p_column_pool->od_volume - p_column_pool->prev_od_volume; if (b_sensitivity_analysis_flag && inner_iteration_number >= 1) { if (diff < -0.0001 || diff > 0.0001) { int idebug = 1; } if (inner_iteration_number >= 1) diff = p_column_pool->od_volume - p_column_pool->od_volume_per_iteration_map[inner_iteration_number - 1]; if (diff < -0.0001 || diff > 0.0001) { int idebug = 1; } } if (b_sensitivity_analysis_flag) { if (g_zone_vector[orig].zone_id == 6 && g_zone_vector[dest].zone_id == 2) { int idebug = 1; } } p_column_pool->prev_od_volume = p_column_pool->od_volume; column_vector_size = p_column_pool->path_node_sequence_map.size(); if (b_sensitivity_analysis_flag) { p_column_pool->od_volume_per_iteration_map[inner_iteration_number] = p_column_pool->od_volume; } // scan through the map with different node sum for different paths /// step 1: update gradient cost for each column path least_gradient_cost = 999999; least_gradient_cost_path_seq_no = -1; least_gradient_cost_path_node_sum_index = -1; path_seq_count = 0; it_begin = p_column_pool->path_node_sequence_map.begin(); it_end = p_column_pool->path_node_sequence_map.end(); bool least_path_passing_improvement_flag = false; for (it = it_begin; it != it_end; ++it) { path_toll = 0; path_gradient_cost = 0; path_distance = 0; path_travel_time = 0; for (int nl = 0; nl < it->second.m_link_size; ++nl) // arc a { link_seq_no = it->second.path_link_vector[nl]; path_toll += g_link_vector[link_seq_no].VDF_period[tau].toll[at]; path_distance += g_link_vector[link_seq_no].link_distance_VDF; link_travel_time = g_link_vector[link_seq_no].travel_time_per_period[tau]; path_travel_time += link_travel_time; path_gradient_cost += g_link_vector[link_seq_no].get_generalized_first_order_gradient_cost_of_second_order_loss_for_agent_type(tau, at); } it->second.path_toll = path_toll; it->second.path_travel_time = path_travel_time; it->second.path_gradient_cost = path_gradient_cost; if (b_sensitivity_analysis_flag == false) it->second.path_time_per_iteration_map[inner_iteration_number] = path_travel_time; else // SA mode it->second.path_time_per_iteration_SA_map[inner_iteration_number] = path_travel_time; #pragma omp critical { total_system_travel_time += (it->second.path_travel_time * it->second.path_volume); total_system_demand += it->second.path_volume; if (column_vector_size == 1) // only one path { total_system_travel_cost += (it->second.path_gradient_cost * it->second.path_volume); } } if (path_gradient_cost < least_gradient_cost) { least_gradient_cost = path_gradient_cost; least_gradient_cost_path_seq_no = it->second.path_seq_no; least_gradient_cost_path_node_sum_index = it->first; if (it->second.network_design_flag) { least_path_passing_improvement_flag = 1; } } } if (column_vector_size >= 2) { // step 2: calculate gradient cost difference for each column path total_switched_out_path_volume = 0; for (it = it_begin; it != it_end; ++it) { if (it->second.path_seq_no != least_gradient_cost_path_seq_no) //for non-least cost path { it->second.path_gradient_cost_difference = it->second.path_gradient_cost - least_gradient_cost; 
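// Added commentary (a summary of the flow-shifting logic that follows, not
// original author text): the relative gap computed below, i.e.
// path_gradient_cost_difference / max(0.0001, least_gradient_cost), drives an
// MSA-style flow shift for every non-least-gradient-cost column. In the
// column-updating (non-SA) branch, step_size = 1/(inner_iteration_number + 2)
// * od_volume; the resulting shift is capped at half of the column's current
// volume, and the total volume switched out of the non-shortest columns is
// credited to the least-gradient-cost column in step 3.2 below. E.g., at inner
// iteration 0 a non-least-cost column loses at most
// min(0.5 * od_volume * relative_gap, 0.5 * its own current volume).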
//if(it->second.path_gradient_cost_difference >0.0001f) { it->second.path_gradient_cost_relative_difference = it->second.path_gradient_cost_difference / max(0.0001, least_gradient_cost); } #pragma omp critical { total_system_cost_gap += (it->second.path_gradient_cost_difference * it->second.path_volume); total_system_travel_cost += (it->second.path_gradient_cost * it->second.path_volume); } if (b_sensitivity_analysis_flag == true) // SA stages { //float est_count_dev = 0; //bool network_design_flag = false; //for (int nl = 0; nl < it->second.m_link_size; ++nl) // arc a //{ // // step 3.3 link flow gradient // link_seq_no = it->second.path_link_vector[nl]; // //if (g_link_vector[link_seq_no].tmc_corridor_name .size() > 0) // // network_design_flag = true; // if (g_link_vector[link_seq_no].VDF_period[tau].obs_count >= 1) // { // path_gradient_cost += g_link_vector[link_seq_no].VDF_period[tau].est_count_dev; // est_count_dev += g_link_vector[link_seq_no].VDF_period[tau].est_count_dev; // //if (g_link_vector[link_seq_no].VDF_period[tau].network_design_flag==0 && g_link_vector[link_seq_no].VDF_period[tau].est_count_dev < 0) // if under-report traffic // //{ // // double weight_on_count = 0.0; // // it->second.path_gradient_cost_relative_difference -= weight_on_count* g_link_vector[link_seq_no].VDF_period[tau].est_count_dev; // //} // } //} //step_size = 0.00; //if (least_path_passing_improvement_flag) //{ // if(network_design_flag == false) step_size = 0.05; // small changes //} // step_size = 1.0 / (inner_iteration_number + 2) * p_column_pool->od_volume; //if (network_design_flag) //{ // // step_size = 1.0 / (inner_iteration_number + 2) * p_column_pool->od_volume; // assignment.summary_file << "," << inner_iteration_number // << "," << orig // << "-" << dest // << "-" << at // << "-" << tau // << "," << orig // << "," << dest // << "," << at // << "," << tau // << "," << p_column_pool->od_volume // << "," << step_size * it->second.path_gradient_cost_relative_difference // << endl; //} } else { // column updating step size step_size = 1.0 / (inner_iteration_number + 2) * p_column_pool->od_volume; } previous_path_volume = it->second.path_volume; //b double flow_shift = step_size * max(0.0000, it->second.path_gradient_cost_relative_difference); //c, must be positive if (flow_shift > it->second.path_volume * 0.5) { flow_shift = it->second.path_volume * 0.5; } if (flow_shift >= 0.000001) { int idebug = 1; } //recall that it->second.path_gradient_cost_difference >=0 // step 3.1: shift flow from nonshortest path to shortest path it->second.path_volume = max(0.0, it->second.path_volume - flow_shift); //d // //we use min(step_size to ensure a path is not switching more than 1/n proportion of flow it->second.path_switch_volume = (previous_path_volume - it->second.path_volume); // d-b // should be nonnegative total_switched_out_path_volume += (previous_path_volume - it->second.path_volume); if (fabs(total_switched_out_path_volume) > 0.00001) { int idebug = 1; } } } //step 3.2 consider least cost path, receive all volume shifted from non-shortest path if (least_gradient_cost_path_seq_no != -1 && p_column_pool->path_node_sequence_map.find(least_gradient_cost_path_node_sum_index) != p_column_pool->path_node_sequence_map.end()) { if (least_gradient_cost_path_node_sum_index < 100) { int i_debug = 1; } p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_volume += total_switched_out_path_volume; if (b_sensitivity_analysis_flag == false) 
                                    p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_volume_per_iteration_map[inner_iteration_number] =
                                        p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_volume;
                                else
                                    p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_volume_per_iteration_SA_map[inner_iteration_number] =
                                        p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_volume;

#pragma omp critical
                                {
                                    total_system_travel_cost += (p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_gradient_cost *
                                        p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_volume);
                                }
                            }
                        }

                        // record the path flow for all paths (including the shortest and non-shortest paths)
                        for (it = it_begin; it != it_end; ++it)
                        {
                            if (b_sensitivity_analysis_flag == false)
                                it->second.path_volume_per_iteration_map[inner_iteration_number] = it->second.path_volume;
                            else  // SA mode
                                it->second.path_volume_per_iteration_SA_map[inner_iteration_number] = it->second.path_volume;
                        }
                    }
                }
            }
        }
    }

    double avg_travel_time = total_system_travel_time / max(0.001, total_system_demand);

    dtalog.output() << "column updating: iteration = " << inner_iteration_number
        << ", avg travel time = " << avg_travel_time << " (min), optimization obj = " << total_system_cost_gap
        << ", relative gap = " << total_system_cost_gap * 100.0 / max(0.00001, total_system_travel_cost) << " %" << endl;

    string stage_str;
    stage_str = "column updating";

    if (b_sensitivity_analysis_flag)
        stage_str = "sensitivity analysis";

    assignment.summary_file2 << stage_str.c_str()
        << ",iteration," << inner_iteration_number
        << ",total_system_demand," << total_system_demand
        << ",avg travel time," << avg_travel_time
        << ",optimization obj," << total_system_cost_gap
        << ",relative_gap," << total_system_cost_gap * 100.0 / max(0.00001, total_system_travel_cost) << "," << endl;
}

void g_classification_in_column_pool(Assignment& assignment)
{
    int impact_OD_counts = 0;
    int impact_OD_counts_detour = 0;

    //#pragma omp parallel for
    for (int orig = 0; orig < g_zone_vector.size(); ++orig)  //o
    {
        CColumnVector* p_column_pool;
        std::map<int, CColumnPath>::iterator it, it_begin, it_end;
        int column_vector_size;
        int link_seq_no;

        for (int dest = 0; dest < g_zone_vector.size(); ++dest)  //d
        {
            for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at)  //m
            {
                for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau)  //tau
                {
                    p_column_pool = &(assignment.g_column_pool[orig][dest][at][tau]);
                    if (p_column_pool->od_volume > 0)
                    {
                        if (g_zone_vector[orig].zone_id == 6 && g_zone_vector[dest].zone_id == 2)
                        {
                            int idebug = 1;  // debug breakpoint anchor for a specific OD pair
                        }

                        column_vector_size = p_column_pool->path_node_sequence_map.size();

                        // scan through the map with different node sums for different paths
                        // step 1: update the gradient cost for each column (path)
                        it_begin = p_column_pool->path_node_sequence_map.begin();
                        it_end = p_column_pool->path_node_sequence_map.end();

                        bool least_path_passing_improvement_flag = false;

                        // scan all paths in this OD pair
                        int path_count = 0;
                        int network_design_path_count = 0;

                        for (it = it_begin; it != it_end; ++it)
                        {
                            for (int nl = 0; nl < it->second.m_link_size; ++nl)  // arc a
                            {
                                link_seq_no = it->second.path_link_vector[nl];

                                if (g_link_vector[link_seq_no].VDF_period[tau].network_design_flag != 0)  // screening condition 1: passing through the network design location
                                {
                                    it->second.network_design_flag = 1;  // to be revised: passing through a work zone, or with signal timing enhancements
                                }
                            }

                            if (it->second.network_design_flag)
                                network_design_path_count++;

                            path_count++;
                        }

                        if (network_design_path_count >= 1)
                        {
                            if (network_design_path_count == path_count)
                            {
                                p_column_pool->OD_network_design_flag = 1;  // every path of this OD pair passes through the network design location
                                impact_OD_counts++;
                            }
                            else
                            {
                                p_column_pool->OD_network_design_flag = 2;  // at least one alternative path bypasses the network design location
                                impact_OD_counts_detour++;
                            }
                        }

                        if (p_column_pool->OD_network_design_flag == 2)
                        {
                            // scan all paths in this OD pair again and mark the alternative paths
                            for (it = it_begin; it != it_end; ++it)
                            {
                                if (it->second.network_design_flag == 0)
                                {
                                    it->second.network_design_detour_mode = 2;  // detour path
                                }
                                else
                                {
                                    it->second.network_design_detour_mode = 1;  // main passing path
                                }
                            }
                        }
                    }
                }  // for each tau
            }  // for each agent type
        }  // for each dest
    }  // for each orig

    string stage_str;
    stage_str = "classification";
    //assignment.summary_file2 << stage_str.c_str() << ",impact_OD_counts," << impact_OD_counts
    //    << ",impact_OD_counts_with_detour," << impact_OD_counts_detour << endl;
}

void g_column_pool_optimization(Assignment& assignment, int column_updating_iterations, bool sensitivity_analysis_flag = false)
{
    // column_updating_iterations is the internal number of column updating iterations
    for (int n = 0; n < column_updating_iterations; ++n)
    {
        g_update_gradient_cost_and_assigned_flow_in_column_pool(assignment, n, sensitivity_analysis_flag);

        if (dtalog.debug_level() >= 3)
        {
            for (int i = 0; i < g_link_vector.size(); ++i)
            {
                dtalog.output() << "link: " << g_node_vector[g_link_vector[i].from_node_seq_no].node_id << "-->"
                    << g_node_vector[g_link_vector[i].to_node_seq_no].node_id << ", "
                    << "flow count:" << g_link_vector[i].PCE_volume_per_period[0] << endl;
            }
        }
    }
}

void g_column_pool_route_scheduling(Assignment& assignment, int inner_iteration_number)
{
    // step 1: calculate the shortest path at each inner iteration of column flow updating
#pragma omp parallel for
    for (int orig = 0; orig < g_zone_vector.size(); ++orig)  //o
    {
        CColumnVector* p_column_pool;
        std::map<int, CColumnPath>::iterator it, it_begin, it_end;
        int column_vector_size;
        int path_seq_count = 0;
        double path_toll = 0;
        double path_gradient_cost = 0;
        double path_distance = 0;
        double path_travel_time = 0;
        int link_seq_no;

        for (int dest = 0; dest < g_zone_vector.size(); ++dest)  //d
        {
            for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at)  //m
            {
                for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau)  //tau
                {
                    p_column_pool = &(assignment.g_column_pool[orig][dest][at][tau]);
                    if (p_column_pool->od_volume > 0)
                    {
                        if (assignment.g_AgentTypeVector[at].real_time_information == 1)  // case of VMS
                        {
                            column_vector_size = p_column_pool->path_node_sequence_map.size();

                            // scan through the map with different node sums for different paths
                            path_seq_count = 0;

                            it_begin = p_column_pool->path_node_sequence_map.begin();
                            it_end = p_column_pool->path_node_sequence_map.end();

                            // test condition 1: passing through an information zone
                            bool b_passing_information_zone = false;
                            int new_orig_zone_id = 0;
                            std::vector<int> link_seq_vector;

                            // test condition 2: passing through a capacity impact area
                            bool b_passing_capacity_impact_area = false;

                            for (it = it_begin; it != it_end; ++it)  // scan each first-stage original path
                            {
                                if (it->second.path_volume < 0.00001)
                                    continue;

                                for (int nl = 0; nl < it->second.m_link_size; ++nl)  // arc a
                                {
                                    link_seq_no = it->second.path_link_vector[nl];
                                    CLink* p_current_link = &(g_link_vector[link_seq_no]);

                                    if (b_passing_information_zone == false &&
                                        assignment.node_seq_no_2_info_zone_id_mapping.find(p_current_link->to_node_seq_no) !=
                                        assignment.node_seq_no_2_info_zone_id_mapping.end())  // this node has been defined as a zone
                                    {
                                        int zone_id = assignment.node_seq_no_2_info_zone_id_mapping[p_current_link->to_node_seq_no];
                                        int zone_no = assignment.g_zoneid_to_zone_seq_no_mapping[zone_id];

                                        if (assignment.zone_seq_no_2_info_mapping.find(zone_no) != assignment.zone_seq_no_2_info_mapping.end())  // defined as an information zone
                                        {
                                            b_passing_information_zone = true;
                                            new_orig_zone_id = zone_id;  // zone id of the information zone (mapped to a zone no. below)

                                            for (int nl2 = 0; nl2 <= nl; ++nl2)  // arc a
                                            {
                                                // copy the existing link sequence up to the downstream node corresponding to the info zone
                                                link_seq_no = it->second.path_link_vector[nl2];
                                                link_seq_vector.push_back(link_seq_no);
                                            }
                                        }
                                    }

                                    if (p_current_link->capacity_reduction_map.find(tau) != p_current_link->capacity_reduction_map.end())
                                    {
                                        b_passing_capacity_impact_area = true;
                                    }
                                }

                                if (b_passing_capacity_impact_area == true && b_passing_information_zone == true)
                                {
                                    CColumnVector* p_2_stage_column_pool;
                                    int info_orig = assignment.g_zoneid_to_zone_seq_no_mapping[new_orig_zone_id];

                                    // step 2: fetch the related column pool from the information node/zone
                                    p_2_stage_column_pool = &(assignment.g_column_pool[info_orig][dest][at][tau]);  // start from info_orig to the same destination, with the same agent type and assignment period tau

                                    // scan through the map with different node sums for different continuing paths;
                                    // there can still be k paths from the info zone to the final destination, so one
                                    // of them is selected (note: this assumes the second-stage column pool is
                                    // non-empty, so that it2 points at the first path after the loop below)
                                    std::map<int, CColumnPath>::iterator it2, it_begin2, it_end2;
                                    it_begin2 = p_2_stage_column_pool->path_node_sequence_map.begin();
                                    it_end2 = p_2_stage_column_pool->path_node_sequence_map.end();

                                    for (it2 = it_begin2; it2 != it_end2; ++it2)
                                    {
                                        for (int nl = 1; nl < it2->second.m_link_size; ++nl)  // arc a; skip the first (virtual connector) link of the second-stage path
                                        {
                                            link_seq_vector.push_back(it2->second.path_link_vector[nl]);
                                        }
                                        break;  // only connect with the first available second-stage path
                                    }

                                    if (it->second.path_link_vector != NULL)
                                    {
                                        // copy the updated path (stage 1 + stage 2) back to the path link vector
                                        delete[] it->second.path_link_vector;  // delete[] matches the new[] allocation
                                        it->second.path_link_vector = new int[link_seq_vector.size()];

                                        for (int l = 0; l < link_seq_vector.size(); l++)
                                        {
                                            it->second.path_link_vector[l] = link_seq_vector[l];
                                        }

                                        it->second.m_link_size = link_seq_vector.size();

                                        // copy the updated path (stage 1 + stage 2) back to the path node vector
                                        delete[] it->second.path_node_vector;
                                        it->second.path_node_vector = new int[link_seq_vector.size() + 1];

                                        // first node
                                        it->second.path_node_vector[0] = g_link_vector[link_seq_vector[0]].from_node_seq_no;

                                        // remaining nodes to the end of the path
                                        for (int l = 0; l < link_seq_vector.size(); l++)
                                        {
                                            it->second.path_node_vector[l + 1] = g_link_vector[link_seq_vector[l]].to_node_seq_no;
                                        }

                                        it->second.m_node_size = link_seq_vector.size() + 1;
                                    }

                                    p_2_stage_column_pool->od_volume += it->second.path_volume;  // carry over the switching path flow to the second-stage OD volume count
                                    p_2_stage_column_pool->information_type = 1;
                                    it2->second.path_volume += it->second.path_volume;  // carry over the switching path flow to the second-stage path volume count
                                }  // both conditions satisfied
                            }  // end of scanning the first-stage paths in the column pool
                        }  // agent type is a real-time information agent type
                    }  // with positive OD volume
                }  // tau
            }  // agent type
        }  // dest
    }  // orig

    dtalog.output() << " updating";
}

void g_rt_info_column_generation(Assignment* p_assignment, float current_time_in_min, int recording_flag = 0)
{
    //dtalog.output() << "Begin the computing of " << g_NetworkForRTSP_vector.size()
<< " RTSP networks in CPU." << endl; clock_t start_t0, end_t0, total_t0; start_t0 = clock(); #pragma omp parallel for // step 3: C++ open mp automatically create n threads., each thread has its own computing thread on a cpu core for (int blk = 0; blk < g_NetworkForRTSP_vector.size(); ++blk) { NetworkForSP* pNetwork = g_NetworkForRTSP_vector[blk]; if (assignment.g_DemandPeriodVector[pNetwork->m_tau].starting_time_slot_no * MIN_PER_TIMESLOT > current_time_in_min) // RT network is for a later time interval continue; pNetwork->optimal_backward_label_correcting_from_destination(blk, p_assignment, current_time_in_min, pNetwork->m_RT_dest_zone, pNetwork->m_RT_dest_node, -1, recording_flag); } end_t0 = clock(); total_t0 = (end_t0 - start_t0); int second = total_t0 / 1000.0; int min = second / 60; int sec = second - min * 60; //dtalog.output() << "CPU Running Time for RT shortest path: " << min << " min " << sec << " sec" << endl; } void g_column_pool_activity_scheduling(Assignment& assignment, int inner_iteration_number) { //step 1: calculate shortest path at inner iteration of column flow updating for (int orig = 0; orig < g_zone_vector.size(); ++orig) // o { CColumnVector* p_column_pool; int path_seq_count = 0; double path_toll = 0; double path_gradient_cost = 0; double path_distance = 0; double path_travel_time = 0; for (int dest = 0; dest < g_zone_vector.size(); ++dest) //d { for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) //m { for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau) //tau { p_column_pool = &(assignment.g_column_pool[orig][dest][at][tau]); if (p_column_pool->od_volume > 0) { if (p_column_pool->activity_zone_no_vector.size()) // case of activity zones { p_column_pool->path_node_sequence_map.clear(); // remove existing single OD pair based routes std::vector <int> link_seq_vector; // for each origin and detination pair in activity zone no to perform routing continuously for (int az = 0; az < p_column_pool->activity_zone_no_vector.size() - 1; az++) // key step: go through each activty OD pair { // 0 will the origin // last one will destination int aat = p_column_pool->activity_agent_type_no_vector[az]; CColumnVector* p_2_stage_column_pool; int activity_orig = p_column_pool->activity_zone_no_vector[az]; int activity_dest = p_column_pool->activity_zone_no_vector[az + 1]; //step 2: fetch the related column pool from the information node/zone p_2_stage_column_pool = &(assignment.g_column_pool[activity_orig][activity_dest][aat][tau]); // we come from info_orig but going to the same destination with same at, and assignment period tau // scan through the map with different node sum for different continuous paths std::map<int, CColumnPath>::iterator it2, it_begin2, it_end2; it_begin2 = p_2_stage_column_pool->path_node_sequence_map.begin(); it_end2 = p_2_stage_column_pool->path_node_sequence_map.end(); for (it2 = it_begin2; it2 != it_end2; ++it2) // we can still have k-path from the info zone to to final destination so we need to random select one { for (int nl = 1; nl < it2->second.m_link_size - 1; ++nl) // arc a // exclude virtual link in the beginning and at the end; { link_seq_vector.push_back(it2->second.path_link_vector[nl]); } break; // only connect with the first available second stage path } } if (link_seq_vector.size() == 0) { int i_debug = 1; continue; } int node_sum = 0; for (int l = 0; l < link_seq_vector.size(); l++) { node_sum += link_seq_vector[l]; } // add this unique path // later we can add k activity paths int path_count = 
                            p_column_pool->path_node_sequence_map[node_sum].path_seq_no = path_count;
                            p_column_pool->path_node_sequence_map[node_sum].path_volume = p_column_pool->od_volume;
                            p_column_pool->path_node_sequence_map[node_sum].path_toll = 0;
                            p_column_pool->path_node_sequence_map[node_sum].path_link_vector = new int[link_seq_vector.size()];
                            p_column_pool->path_node_sequence_map[node_sum].path_node_vector = new int[link_seq_vector.size() + 1];

                            for (int l = 0; l < link_seq_vector.size(); l++)
                            {
                                p_column_pool->path_node_sequence_map[node_sum].path_link_vector[l] = link_seq_vector[l];
                                p_column_pool->path_node_sequence_map[node_sum].path_link_STL_vector.push_back(link_seq_vector[l]);
                            }

                            p_column_pool->path_node_sequence_map[node_sum].m_link_size = link_seq_vector.size();

                            // copy the chained path (all activity legs) into the path node vector
                            // first node
                            p_column_pool->path_node_sequence_map[node_sum].path_node_vector[0] = g_link_vector[link_seq_vector[0]].from_node_seq_no;

                            // remaining nodes to the end of the path
                            for (int l = 0; l < link_seq_vector.size(); l++)
                            {
                                p_column_pool->path_node_sequence_map[node_sum].path_node_vector[l + 1] = g_link_vector[link_seq_vector[l]].to_node_seq_no;
                            }

                            p_column_pool->path_node_sequence_map[node_sum].m_node_size = link_seq_vector.size() + 1;
                        }  // end of the activity chain condition
                    }  // with positive OD volume
                }  // tau
            }  // agent type
        }  // dest
    }  // orig

    dtalog.output() << " updating";
}
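// Usage sketch (a hypothetical driver, not part of this file's call graph; it
// assumes the Assignment object, zone/link vectors, and initial columns have
// been built elsewhere by the surrounding application, and the iteration
// counts are illustrative):
//
//     g_classification_in_column_pool(assignment);       // tag ODs/paths affected by network design
//     g_column_pool_optimization(assignment, 20);        // 20 gradient-projection column-updating iterations
//     g_column_pool_optimization(assignment, 10, true);  // optional sensitivity-analysis stage
//
// g_column_pool_optimization defaults sensitivity_analysis_flag to false, so
// the first call runs the regular column-updating stage and the second call
// reuses the same update loop in SA mode.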
hamiltonian.c
#include "cloud.h" double compute_hamiltonian (int Np, double k, const double *X[3], const double *U[3]) { double h = 0.; #pragma omp parallel for for (int i = 0; i < Np; i++) { /* for every particle */ h += 0.5 * (U[0][i]*U[0][i] + U[1][i]*U[1][i] + U[2][i]*U[2][i]); /* kinetic energy */ if (k) { for (int j = i + 1; j < Np; j++) { /* for every other particle */ double hj = potential (k, X[0][i], X[1][i], X[2][i], X[0][j], X[1][j], X[2][j]); h += hj; } } } return h; }