source
stringlengths
3
92
c
stringlengths
26
2.25M
data_augmentation.h
/* Copyright (c) 2019, Sanaxen All rights reserved. Use of this source code is governed by a MIT license that can be found in the LICENSE file. */ #ifndef _DATA_AUGMANTATION_H #define _DATA_AUGMANTATION_H namespace cpp_torch { namespace test { void Image3CannelDataAugment(std::vector<tiny_dnn::vec_t>& train_images, std::vector<tiny_dnn::label_t>& train_labels, const float_t mean, const float_t stddiv, const int image_height, const int image_width, int extend_factor=2, float channel_range = CHANNEL_RANGE) { std::random_device rnd; std::mt19937 mt(rnd()); std::uniform_int_distribution<> rand(0, 5); std::uniform_int_distribution<> rand_index(0, train_images.size() - 1); const size_t sz = train_images.size(); for (int i = 0; i < sz * extend_factor; i++) { const int index = rand_index(mt); tiny_dnn::vec_t& u = train_images[index]; //{ // cpp_torch::Image& bmp = cpp_torch::vec_t2image(u, 3, image_height, image_width); // cpp_torch::ImageWrite("aaa.bmp", &bmp); // //exit(0); //} std::string func = ""; switch (rand(mt)) { case 0:func = "GAMMA"; break; case 1:func = "RL"; break; case 2:func = "COLOR_NOIZE"; break; case 3:func = "NOIZE"; break; case 4:func = "ROTATION"; break; case 5:func = "SIFT"; break; } cpp_torch::ImageAugmentation(u, image_height, image_width, func); tiny_dnn::vec_t v(u.size()); transform(u.begin(), u.end(), v.begin(), [=](float_t c) {return (c / channel_range); } ); train_images.push_back(v); train_labels.push_back(train_labels[index]); //{ // tiny_dnn::vec_t v2(v.size()); // transform(v.begin(), v.end(), v2.begin(), // [=](float_t c) {return (c * channel_range); } // ); // cpp_torch::Image& bmp = cpp_torch::vec_t2image(v2, 3, image_height, image_width); // cpp_torch::ImageWrite("bbb.bmp", &bmp); // exit(0); //} } const size_t sz2 = train_images.size(); #pragma omp parallel for for (int i = 0; i < sz2; i++) { for (int j = 0; j < train_images[i].size(); j++) { train_images[i][j] = (train_images[i][j] - mean) / stddiv; } } printf("Augmentation:%d -> 
%d\n", sz, sz2); } } } #endif
malloc.c
/**
 * @brief implement for malloc utils
 * @author Zhuoqiang Guo <gzq9425@qq.com>
 */

#include "alphasparse/util/malloc.h"

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h> /* exit(), free(), srand() — previously only available transitively */
#include <time.h>

#include "alphasparse/util/random.h"
#include "alphasparse/util/thread.h"

#ifdef NUMA
#include <numa.h>
#endif

/* Allocate `bytes` bytes (on NUMA node 0 when built with NUMA support).
 * Exits the process on failure, so the returned pointer is never NULL. */
void *alpha_malloc(size_t bytes) {
#ifdef NUMA
  void *ret = numa_alloc_onnode(bytes, 0);
#else
  void *ret = malloc(bytes);
#endif
  if (ret == NULL) {
    printf("no enough memory space to alloc!!!\n");
    exit(-1);
  }
  return ret;
}

/* Allocate `bytes` bytes aligned to `alignment`. In the NUMA build the
 * alignment argument is unused (numa_alloc_onnode is page-aligned anyway).
 * Exits the process on failure. */
void *alpha_memalign(size_t bytes, size_t alignment) {
#ifdef NUMA
  void *ret = numa_alloc_onnode(bytes, 0);
#else
  void *ret = memalign(alignment, bytes);
#endif
  if (ret == NULL) {
    printf("no enough memory space to alloc!!!");
    exit(-1);
  }
  return ret;
}

/* Release memory obtained from alpha_malloc / alpha_memalign.
 *
 * Bug fix: the previous body was `if (!point) free(point);`, which calls
 * free() only on a NULL pointer (a no-op) and therefore leaked every real
 * allocation. The condition is now the intended non-NULL check.
 *
 * NOTE(review): in the NUMA build the memory comes from numa_alloc_onnode and
 * should strictly be released with numa_free(ptr, size); that requires the
 * size to be tracked by callers, so it is only flagged here — TODO confirm. */
void alpha_free(void *point) {
  if (point) free(point);
}

/* Evict the shared L3 cache(s) by streaming over a buffer sized to cover the
 * L3 domains in use. The computed values are irrelevant: the buffer is read
 * uninitialized and the OpenMP threads race on c[j] deliberately — only the
 * memory traffic matters. */
void alpha_clear_cache() {
  ALPHA_INT thread_num = alpha_get_thread_num();
  /* one L3 domain per 24 cores, rounded up */
  const size_t L3_used = (thread_num + 23) / 24;
  /* size in bytes == (L3_CACHE_SIZE * L3_used) long longs, exactly what the
   * inner loop touches */
  const size_t size = L3_CACHE_SIZE * 8 * L3_used;
  long long *c = (long long *)alpha_memalign(size, DEFAULT_ALIGNMENT);
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
  for (size_t i = 0; i < 15; i++)
    for (size_t j = 0; j < L3_CACHE_SIZE * L3_used; j++) c[j] += i * j;
  alpha_free(c);
}

/* Fill `arr[0..size)` with the constant `num` (float). */
void alpha_fill_s(float *arr, const float num, const size_t size) {
  for (size_t i = 0; i < size; ++i) arr[i] = num;
}

/* Fill `arr[0..size)` with the constant `num` (double). */
void alpha_fill_d(double *arr, const double num, const size_t size) {
  for (size_t i = 0; i < size; ++i) arr[i] = num;
}

/* Fill `arr[0..size)` with the constant `num` (single complex). */
void alpha_fill_c(ALPHA_Complex8 *arr, const ALPHA_Complex8 num, const size_t size) {
  for (size_t i = 0; i < size; ++i) arr[i] = num;
}

/* Fill `arr[0..size)` with the constant `num` (double complex). */
void alpha_fill_z(ALPHA_Complex16 *arr, const ALPHA_Complex16 num, const size_t size) {
  for (size_t i = 0; i < size; ++i) arr[i] = num;
}

/* Fill with random ints in [0, upper); seed == 0 means "seed from the clock". */
void alpha_fill_random_int(int *arr, unsigned int seed, const size_t size, int upper) {
  if (seed == 0) seed = time_seed();
  srand(seed);
  for (size_t i = 0; i < size; ++i) arr[i] = random_int(upper);
}

/* Fill with random long longs in [0, upper); seed == 0 seeds from the clock. */
void alpha_fill_random_long(long long *arr, unsigned int seed, const size_t size, long long upper) {
  if (seed == 0) seed = time_seed();
  srand(seed);
  for (size_t i = 0; i < size; ++i) arr[i] = random_long(upper);
}

/* Fill with random floats; seed == 0 seeds from the clock. */
void alpha_fill_random_s(float *arr, unsigned int seed, const size_t size) {
  if (seed == 0) seed = time_seed();
  srand(seed);
  for (size_t i = 0; i < size; ++i) arr[i] = random_float();
}

/* Fill with random doubles; seed == 0 seeds from the clock. */
void alpha_fill_random_d(double *arr, unsigned int seed, const size_t size) {
  if (seed == 0) seed = time_seed();
  srand(seed);
  for (size_t i = 0; i < size; ++i) arr[i] = random_double();
}

/* Fill single-complex array with random values (two floats per element). */
void alpha_fill_random_c(ALPHA_Complex8 *arr, unsigned int seed, const size_t size) {
  alpha_fill_random_s((float *)arr, seed, size * 2);
}

/* Fill double-complex array with random values (two doubles per element). */
void alpha_fill_random_z(ALPHA_Complex16 *arr, unsigned int seed, const size_t size) {
  alpha_fill_random_d((double *)arr, seed, size * 2);
}

/* Parallel random float fill.
 * NOTE(review): random_float() is presumably rand()-based; if so, rand() is
 * not required to be thread-safe — TODO confirm against util/random.h. */
void alpha_parallel_fill_random_s(float *arr, unsigned int seed, const size_t size) {
  if (seed == 0) seed = time_seed();
  srand(seed);
  ALPHA_INT thread_num = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
  for (size_t i = 0; i < size; ++i) arr[i] = random_float();
}

/* Parallel random double fill (see thread-safety note on the _s variant). */
void alpha_parallel_fill_random_d(double *arr, unsigned int seed, const size_t size) {
  if (seed == 0) seed = time_seed();
  srand(seed);
  ALPHA_INT thread_num = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
  for (size_t i = 0; i < size; ++i) arr[i] = random_double();
}

/* Parallel random single-complex fill.
 * Bug fix: previously delegated to the *serial* alpha_fill_random_s; now uses
 * the parallel filler, matching the function's name and its siblings. */
void alpha_parallel_fill_random_c(ALPHA_Complex8 *arr, unsigned int seed, const size_t size) {
  alpha_parallel_fill_random_s((float *)arr, seed, size * 2);
}

/* Parallel random double-complex fill.
 * Bug fix: previously delegated to the *serial* alpha_fill_random_d. */
void alpha_parallel_fill_random_z(ALPHA_Complex16 *arr, unsigned int seed, const size_t size) {
  alpha_parallel_fill_random_d((double *)arr, seed, size * 2);
}

/* Parallel constant fill (float). */
void alpha_parallel_fill_s(float *arr, const float num, const size_t size) {
  ALPHA_INT thread_num = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
  for (size_t i = 0; i < size; ++i) arr[i] = num;
}

/* Parallel constant fill (double). */
void alpha_parallel_fill_d(double *arr, const double num, const size_t size) {
  ALPHA_INT thread_num = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
  for (size_t i = 0; i < size; ++i) arr[i] = num;
}

/* Parallel constant fill (single complex). */
void alpha_parallel_fill_c(ALPHA_Complex8 *arr, const ALPHA_Complex8 num, const size_t size) {
  ALPHA_INT thread_num = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
  for (size_t i = 0; i < size; ++i) arr[i] = num;
}

/* Parallel constant fill (double complex). */
void alpha_parallel_fill_z(ALPHA_Complex16 *arr, const ALPHA_Complex16 num, const size_t size) {
  ALPHA_INT thread_num = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
  for (size_t i = 0; i < size; ++i) arr[i] = num;
}
DiracMatrix.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by:
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////

#ifndef QMCPLUSPLUS_DIRAC_MATRIX_H
#define QMCPLUSPLUS_DIRAC_MATRIX_H

#include "Numerics/Blasf.h"
#include <OhmmsPETE/OhmmsMatrix.h>
#include <type_traits/scalar_traits.h>

namespace qmcplusplus
{

// Thin overload set forwarding to the LAPACK LU routines (?getrf) and
// LU-based inversion routines (?getri), one overload per scalar type.
// NOTE(review): `status` (LAPACK INFO) is discarded in every wrapper, so a
// singular matrix is not reported to the caller.

/** LU factorization of a float matrix (sgetrf) */
inline void Xgetrf(int n, int m, float* restrict a, int lda, int* restrict piv)
{
  int status;
  sgetrf(n,m,a,lda,piv,status);
}

/** inversion of a float matrix after LU factorization (sgetri) */
inline void Xgetri(int n, float* restrict a, int lda, int* restrict piv, float* restrict work, int& lwork)
{
  int status;
  sgetri(n,a,lda,piv,work,lwork,status);
}

/** LU factorization of a std::complex<float> matrix (cgetrf) */
inline void Xgetrf(int n, int m, std::complex<float>* restrict a, int lda, int* restrict piv)
{
  int status;
  cgetrf(n,m,a,lda,piv,status);
}

/** inversion of a std::complex<float> matrix after LU factorization (cgetri) */
inline void Xgetri(int n, std::complex<float>* restrict a, int lda, int* restrict piv, std::complex<float>* restrict work, int& lwork)
{
  int status;
  cgetri(n,a,lda,piv,work,lwork,status);
}

/** LU factorization of a double matrix (dgetrf) */
inline void Xgetrf(int n, int m, double* restrict a, int lda, int* restrict piv)
{
  int status;
  dgetrf(n,m,a,lda,piv,status);
}

/** inversion of a double matrix after LU factorization (dgetri) */
inline void Xgetri(int n, double* restrict a, int lda, int* restrict piv, double* restrict work, int& lwork)
{
  int status;
  dgetri(n,a,lda,piv,work,lwork,status);
}

/** LU factorization of a std::complex<double> matrix (zgetrf) */
inline void Xgetrf(int n, int m, std::complex<double>* restrict a, int lda, int* restrict piv)
{
  int status;
  zgetrf(n,m,a,lda,piv,status);
}

/** inversion of a std::complex<double> matrix after lu factorization*/
inline void Xgetri(int n, std::complex<double>* restrict a, int lda, int* restrict piv, std::complex<double>* restrict work, int& lwork)
{
  int status;
  zgetri(n,a,lda,piv,work,lwork,status);
}

/** out = transpose of the n x n square matrix in (both with leading dimension lda).
 * (name keeps the historical "Tanspose" spelling — callers depend on it) */
template<typename TIN, typename TOUT>
inline void TansposeSquare(const TIN* restrict in, TOUT* restrict out, size_t n, size_t lda)
{
#pragma omp simd
  for(size_t i=0; i<n; ++i)
    for(size_t j=0; j<n; ++j)
      out[i*lda+j]=in[i+j*lda];
}

/** log|det| of a real LU-factorized matrix X, from the product of its diagonal
 * entries. The sign is accumulated from the diagonal signs and the pivot
 * permutation parity (pivot[i] != i+1 means a row swap); a negative determinant
 * is reported as phase = pi, a positive one as phase = 0. */
template<typename T>
inline T computeLogDet(const T* restrict X, int n, int lda, const int* restrict pivot, T& phase)
{
  T logdet(0);
  int sign_det=1;
  for(size_t i=0; i<n; i++)
  {
    const size_t ii=i*lda+i;
    sign_det *= (pivot[i] == i+1)?1:-1;
    sign_det *= (X[ii]>0)?1:-1;
    logdet += std::log(std::abs(X[ii]));
  }
  phase=(sign_det>0)? T(0):M_PI;
  return logdet;
}

/** log|det| of a complex LU-factorized matrix X. Accumulates the argument of
 * each diagonal entry (plus pi per row swap) into phase, folded back into
 * [0, 2*pi), and returns 0.5 * sum(log |X_ii|^2). */
template<typename T>
inline T computeLogDet(const std::complex<T>* restrict X, int n, int lda, const int* restrict pivot, T& phase)
{
  T logdet(0);
  phase=T(0);
  for(size_t i=0; i<n; i++)
  {
    const size_t ii=i*lda+i;
    phase += std::arg(X[ii]);
    if(pivot[i]!=i+1)
      phase += M_PI;
    logdet+=std::log(X[ii].real()*X[ii].real()+X[ii].imag()*X[ii].imag());
    //slightly smaller error with the following
    //  logdet+=2.0*std::log(std::abs(x[ii]);
  }
  constexpr T one_over_2pi=T(1)/TWOPI;
  phase -= std::floor(phase*one_over_2pi)*TWOPI;
  return 0.5*logdet;
}

/** In-place matrix inversion via LU factorization, with optional
 * log-determinant computation, caching the LAPACK pivot and work arrays. */
template<typename T>
struct DiracMatrix
{
  typedef typename scalar_traits<T>::real_type real_type;
  aligned_vector<T> m_work;    // LAPACK ?getri work array, sized by the lwork query
  aligned_vector<int> m_pivot; // LAPACK pivot indices from ?getrf
  int Lwork;                   // current work-array size; 0 until first reset()
  real_type LogDet;            // log|det| from the last invert(.., true)
  real_type Phase;             // phase of det from the last invert(.., true)
  DiracMatrix():Lwork(0) {}

  /** Invert amat in place; when computeDet is true, LogDet/Phase are computed
   * from the LU diagonal *before* the inversion overwrites it. */
  inline void invert(Matrix<T>& amat, bool computeDet)
  {
    const int n=amat.rows();
    const int lda=amat.cols();
    if(Lwork<lda) reset(amat,lda); // (re)query workspace for larger matrices
    Xgetrf(n,n,amat.data(),lda,m_pivot.data());
    if(computeDet)
    {
      LogDet=computeLogDet(amat.data(),n,lda,m_pivot.data(),Phase);
    }
    Xgetri(n, amat.data(),lda,m_pivot.data(),m_work.data(),Lwork);
  }

  /** Resize the pivot array and perform the standard LAPACK workspace query
   * (lwork = -1 makes ?getri return the optimal size in the first work slot). */
  inline void reset(Matrix<T>& amat, const int lda)
  {
    m_pivot.resize(lda);
    Lwork=-1;
    T tmp;
    real_type lw;
    Xgetri(lda, amat.data(),lda,m_pivot.data(),&tmp,Lwork);
    convert(tmp,lw);
    Lwork=static_cast<int>(lw);
    m_work.resize(Lwork);
  }
};
}

#endif // QMCPLUSPLUS_DIRAC_MATRIX_H
CellwiseOperator.h
//
//  Cubism3D
//  Copyright (c) 2019 CSE-Lab, ETH Zurich, Switzerland.
//  Distributed under the terms of the MIT license.
//
//  Created by Ivica Kicic (kicici@ethz.ch).
//

/*
 * This file contains helper functions for applying single-cell or
 * stencil-based kernels on the grid.
 */

#ifndef CUBISMUP3D_CELLWISE_OPERATOR_H
#define CUBISMUP3D_CELLWISE_OPERATOR_H

#include "Operator.h"

CubismUP_3D_NAMESPACE_BEGIN

/*
 * Struct passed to kernels in `apply_kernel` and `apply_stencil_kernel` functions.
 *
 * Ideally, we would put all interesting quantities as struct members and let
 * compiler optimize away unused ones. Although some basic tests show that
 * compiler indeed do so, it is not sure if that holds for arbitrarily large
 * structs.
 *
 * Thus, we put only the most necessary items in the struct and provide other
 * values as member functions.
 */
struct CellInfo {
  const cubism::BlockInfo &block_info; // owning block's metadata (non-owning reference)
  int ix, iy, iz;                      // cell index *within* the block

  // Physical position of the cell, delegated to the block metadata.
  std::array<Real, 3> get_pos() const {
    return block_info.pos<Real>(ix, iy, iz);
  }

  // Absolute (grid-wide) cell indices: block index * block size + local index.
  int get_abs_ix() const { return ix + block_info.index[0] * FluidBlock::sizeX; }
  int get_abs_iy() const { return iy + block_info.index[1] * FluidBlock::sizeY; }
  int get_abs_iz() const { return iz + block_info.index[2] * FluidBlock::sizeZ; }
};

/*
 * Apply a given single-cell kernel to each cell of the grid.
 *
 * Usage example:
 *    applyKernel(sim, [](CellInfo info, FluidElement &e) {
 *        e.u = e.tmpU;
 *        e.v = e.tmpV;
 *        e.w = e.tmpW;
 *    });
 */
template <typename Func>
void applyKernel(SimulationData &sim, Func func)
{
  const std::vector<cubism::BlockInfo> &vInfo = sim.vInfo();
  // Blocks are independent here (no stencil), so a plain parallel-for suffices.
  int size = (int)vInfo.size();
  #pragma omp parallel for schedule(static)
  for (int i = 0; i < size; ++i) {
    FluidBlock &b = *(FluidBlock *)vInfo[i].ptrBlock;
    for (int iz = 0; iz < FluidBlock::sizeZ; ++iz)
    for (int iy = 0; iy < FluidBlock::sizeY; ++iy)
    for (int ix = 0; ix < FluidBlock::sizeX; ++ix) {
      CellInfo info{vInfo[i], ix, iy, iz};
      func(info, b(ix, iy, iz));
    }
  }
}

/*
 * Lab wrapper that shifts from cell-based indices (given by the user) to the
 * block-based indices (required by the original BlockLab).
 */
struct StencilKernelLab {
  LabMPI &lab;    // ghost-cell-aware view of the grid (non-owning reference)
  int ix, iy, iz; // cell this lab is centered on

  // Access a neighbor at the given offset from the current cell.
  FluidElement& operator()(int dx, int dy = 0, int dz = 0) const {
    return lab(ix + dx, iy + dy, iz + dz);
  }
};

/*
 * Apply a given stencil kernel to each cell of the grid.
 *
 * Usage example:
 *    Real factor = 0.5 / h;
 *    applyStencilKernel(
 *        sim,
 *        StencilInfo{-1, 0, 0, 2, 1, 1, false, 1, FE_U},
 *        [factor](StencilKernelLab lab, CellInfo info, FluidElement &out) {
 *            out.df = factor * (lab(1, 0, 0).f - lab(-1, 0, 0).f);
 *        });
 */
template <typename Func>
void applyStencilKernel(SimulationData &sim, cubism::StencilInfo stencil, Func func)
{
  // Block-based kernel: adapts the per-cell user kernel to the per-block
  // interface expected by Operator::compute.
  struct Kernel {
    const cubism::StencilInfo stencil; // read by compute() to size the ghost layer
    Func func;

    void operator()(LabMPI &lab, const cubism::BlockInfo &block_info, FluidBlock &out) const {
      for (int iz = 0; iz < FluidBlock::sizeZ; ++iz)
      for (int iy = 0; iy < FluidBlock::sizeY; ++iy)
      for (int ix = 0; ix < FluidBlock::sizeX; ++ix) {
        StencilKernelLab lab_wrapper{lab, ix, iy, iz};
        CellInfo info{block_info, ix, iy, iz};
        func(lab_wrapper, info, out(ix, iy, iz));
      }
    }
  };

  // Local Operator subclass so we can reuse the ghost-cell exchange machinery
  // in Operator::compute without declaring a named operator per kernel.
  struct CellwiseOperator : Operator {
    Kernel kernel;

    CellwiseOperator(SimulationData &s, const cubism::StencilInfo &stencil, Func func)
        : Operator(s), kernel{stencil, func} {}

    void operator()(const Real /* dt */) {
      // For now we ignore the `dt` argument. We could e.g. pass it via the
      // `CellInfo` struct. In that case, we would need to rename it to
      // something like `Extra`.
      compute(kernel);
    }

    std::string getName() {
      return "apply_stencil_kernel::CellwiseOperator";
    }
  };

  CellwiseOperator op{sim, stencil, func};
  op(0.0);  // dt is unused for now.
}

CubismUP_3D_NAMESPACE_END

#endif
GB_binop__min_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__min_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_08__min_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__min_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_04__min_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__min_fp32)
// A*D function (colscale):         GB (_AxD__min_fp32)
// D*A function (rowscale):         GB (_DxB__min_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__min_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__min_fp32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__min_fp32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__min_fp32)
// C=scalar+B                       GB (_bind1st__min_fp32)
// C=scalar+B'                      GB (_bind1st_tran__min_fp32)
// C=A+scalar                       GB (_bind2nd__min_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__min_fp32)

// C type:   float
// A type:   float
// A pattern? 0
// B type:   float
// B pattern? 0

// BinaryOp: cij = fminf (aij, bij)

// NOTE: the macros below parameterize the generic *_template.c files that are
// #include'd into each function body; the templates read them by name.

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = fminf (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out: each function then returns GrB_NO_VALUE)
#define GB_DISABLE \
    (GxB_NO_MIN || GxB_NO_FP32 || GxB_NO_MIN_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__min_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__min_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read by the template when is_eWiseUnion is true
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar  = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__min_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (MIN is commutative, so this branch is the one compiled here.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__min_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__min_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = fminf (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__min_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float   y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = fminf (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    float aij = GBX (Ax, pA, false) ;   \
    Cx [pC] = fminf (x, aij) ;          \
}

GrB_Info GB (_bind1st_tran__min_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE to its original definition
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    float aij = GBX (Ax, pA, false) ;   \
    Cx [pC] = fminf (aij, y) ;          \
}

GrB_Info GB (_bind2nd_tran__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Cover.h
/*
 * Cover.h
 *
 *  Created on: 03.10.2013
 *      Author: cls
 */

#ifndef COVER_H_
#define COVER_H_

#include <cinttypes>
#include <set>
#include <vector>
#include <map>
#include <cassert>
#include <limits>

#include "Partition.h"
#include "../Globals.h"

namespace NetworKit {

/**
 * @ingroup structures
 * Implements a cover of a set, i.e. an assignment of
 * its elements to possibly overlapping subsets.
 */
class Cover {

public:
	/** Default constructor */
	Cover();

	/**
	 * Create a new cover data structure for elements up to a maximum element index.
	 *
	 * @param[in] z maximum index
	 */
	Cover(index z);

	/**
	 * Creates a new cover data structure which contains the given partition.
	 *
	 * @param[in] p The partition to construct the cover from
	 */
	Cover(const Partition &p);

	/** Default destructor */
	virtual ~Cover() = default;

	/**
	 * Index operator.
	 *
	 * @param[in] e an element
	 * @return Mutable reference to the set of subset ids containing @a e.
	 */
	inline std::set<index>& operator [](const index& e) {
		return this->data[e];
	}

	/**
	 * Index operator for const instances of this class.
	 *
	 * @param[in] e an element
	 */
	inline const std::set<index>& operator [](const index& e) const {
		return this->data[e];
	}

	/**
	 * Return the ids of subsets in which the element @a e is contained.
	 * Note: returns a copy, unlike operator[] which returns a reference.
	 *
	 * @param[in] e an element
	 * @return A set of subset ids in which @a e is contained.
	 */
	inline std::set<index> subsetsOf(index e) const {
		// TODO: assert (e < this->numberOfElements());
		return this->data[e];
	}

	/**
	 * Check if cover assigns a valid subset to the element @a e.
	 *
	 * @param e an element.
	 * @return @c true, if @a e is assigned to a valid subset, @c false otherwise.
	 */
	bool contains(index e) const;

	/**
	 * Check if two elements @a e1 and @a e2 belong to the same subset.
	 *
	 * @param e1 an element.
	 * @param e2 an element.
	 * @return @c true, if @a e1 and @a e2 belong to the same subset, @c false otherwise.
	 */
	bool inSameSubset(index e1, index e2) const;

	/**
	 * Get the members of a specific subset @a s.
	 *
	 * @return The set of members of subset @a s.
	 */
	std::set<index> getMembers(const index s) const;

	/**
	 * Add the (previously unassigned) element @a e to the set @a s.
	 * @param[in] s a subset
	 * @param[in] e an element
	 */
	void addToSubset(index s, index e);

	/**
	 * Remove the element @a e from the set @a s.
	 * @param[in] s a subset
	 * @param[in] e an element
	 */
	void removeFromSubset(index s, index e);

	/**
	 * Move the element @a e to subset @a s, i.e. remove it from all
	 * other subsets and place it in the subset.
	 * @param[in] s a subset
	 * @param[in] e an element
	 */
	void moveToSubset(index s, index e);

	/**
	 * Creates a singleton set containing the element @a e and returns the index of the new set.
	 * @param[in] e an element
	 * @return The index of the new set.
	 */
	index toSingleton(index e);

	/**
	 * Assigns every element to a singleton set.
	 * Set id is equal to element id.
	 */
	void allToSingletons();

	/**
	 * Assigns the elements from both sets to a new set.
	 * @param[in] s a subset
	 * @param[in] t a subset
	 */
	void mergeSubsets(index s, index t);

	/**
	 * Get an upper bound for the subset ids that have been assigned.
	 * (This is the maximum id + 1.)
	 *
	 * @return An upper bound.
	 */
	index upperBound() const;

	/**
	 * Get a lower bound for the subset ids that have been assigned.
	 * @return A lower bound.
	 */
	index lowerBound() const;

	/**
	 * Get a list of subset sizes. Indices do not necessarily correspond to subset ids.
	 *
	 * @return A list of subset sizes.
	 */
	std::vector<count> subsetSizes() const;

	/**
	 * Get a map from subset id to size of the subset.
	 *
	 * @return A map from subset id to size of the subset.
	 */
	std::map<index, count> subsetSizeMap() const;

	/**
	 * Get the current number of sets in this cover.
	 *
	 * @return The number of sets in this cover.
	 */
	count numberOfSubsets() const;

	/**
	 * Get the current number of elements in this cover.
	 *
	 * @return The current number of elements.
	 */
	count numberOfElements() const;

	/**
	 * Get the ids of nonempty subsets.
	 *
	 * @return A set of ids of nonempty subsets.
	 */
	std::set<index> getSubsetIds() const;

	/**
	 * Sets an upper bound for the subset ids that CAN be assigned.
	 *
	 * @param[in] upper highest assigned subset ID + 1
	 */
	void setUpperBound(index upper);

	/**
	 * Iterate over all entries (node, subset ID of node) and execute callback function @a func (lambda closure).
	 *
	 * @param func Takes parameters <code>(node, index)</code>
	 */
	template<typename Callback> void forEntries(Callback func) const;

	/**
	 * Iterate over all entries (node, subset ID of node) in parallel and execute callback function @a func (lambda closure).
	 *
	 * @param func Takes parameters <code>(node, index)</code>
	 */
	template<typename Callback> void parallelForEntries(Callback handle) const;


private:

	index z;	//!< maximum element index that can be mapped
	index omega;	//!< maximum subset index ever assigned
	std::vector<std::set<index>> data;	//!< data container, indexed by element id, containing set of subset ids

	/**
	 * Allocates and returns a new subset id.
	 * NOTE(review): increments omega without synchronization — not safe to
	 * call from parallel sections.
	 */
	inline index newSubsetId() {
		omega++;
		index s = omega;
		return s;
	}
};

// Iterates elements 0..z inclusive; the callback receives the element id and
// its (possibly empty) set of subset ids.
template<typename Callback>
inline void Cover::forEntries(Callback handle) const {
	for (index e = 0; e <= this->z; e += 1) {
		handle(e, data[e]);
	}
}

// Parallel variant of forEntries; the callback must be safe to invoke
// concurrently for distinct elements.
template<typename Callback>
inline void Cover::parallelForEntries(Callback handle) const {
	#pragma omp parallel for
	for (omp_index e = 0; e <= static_cast<omp_index>(this->z); e += 1) {
		handle(e, data[e]);
	}
}

} /* namespace NetworKit */
#endif /* COVER_H_ */
ordered_doacross_codegen.c
// RUN: %clang_cc1 -verify -fopenmp -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s // RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s // RUN: %clang_cc1 -verify -fopenmp-simd -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s // RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s // SIMD-ONLY0-NOT: {{__kmpc|__tgt}} // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK: [[KMP_DIM:%.+]] = type { i64, i64, i64 } extern int n; int a[10], b[10], c[10], d[10]; void foo(); // CHECK-LABEL: @main() int main() { int i; // CHECK: [[DIMS:%.+]] = alloca [[KMP_DIM]], // CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT:%.+]]) // CHECK: icmp // CHECK-NEXT: br i1 % // CHECK: [[CAST:%.+]] = bitcast [[KMP_DIM]]* [[DIMS]] to i8* // CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 [[CAST]], i8 0, i64 24, i1 false) // CHECK: getelementptr inbounds [[KMP_DIM]], [[KMP_DIM]]* [[DIMS]], i32 0, i32 1 // CHECK: store i64 %{{.+}}, i64* % // CHECK: getelementptr inbounds [[KMP_DIM]], [[KMP_DIM]]* [[DIMS]], i32 0, i32 2 // CHECK: store i64 1, i64* % // CHECK: [[CAST:%.+]] = bitcast [[KMP_DIM]]* [[DIMS]] to i8* // CHECK: call void @__kmpc_doacross_init([[IDENT]], i32 [[GTID]], i32 1, i8* [[CAST]]) // CHECK: call void @__kmpc_for_static_init_4( #pragma omp for ordered(1) for (i = 0; i < n; ++i) { a[i] = b[i] + 1; foo(); // CHECK: call void [[FOO:.+]]( // CHECK: load i32, i32* [[CNT:%.+]], // CHECK-NEXT: sext i32 %{{.+}} to i64 // CHECK-NEXT: store i64 %{{.+}}, i64* [[TMP:%.+]], // CHECK-NEXT: call void @__kmpc_doacross_post([[IDENT]], i32 [[GTID]], i64* [[TMP]]) #pragma omp ordered depend(source) 
c[i] = c[i] + 1; foo(); // CHECK: call void [[FOO]] // CHECK: load i32, i32* [[CNT]], // CHECK-NEXT: sub nsw i32 %{{.+}}, 2 // CHECK-NEXT: sext i32 %{{.+}} to i64 // CHECK-NEXT: store i64 %{{.+}}, i64* [[TMP:%.+]], // CHECK-NEXT: call void @__kmpc_doacross_wait([[IDENT]], i32 [[GTID]], i64* [[TMP]]) #pragma omp ordered depend(sink : i - 2) d[i] = a[i - 2]; } // CHECK: call void @__kmpc_for_static_fini( // CHECK: call void @__kmpc_doacross_fini([[IDENT]], i32 [[GTID]]) // CHECK: ret i32 0 return 0; } #endif // HEADER
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
vars0-omp.c
#include <stdio.h>
#include <omp.h>

int gvar = 0;

/*
 * Each OpenMP thread prints the address of the shared global (gvar), the
 * shared stack local of main (lvar), and its own private copy of tid --
 * demonstrating that private(tid) gives every thread a distinct address
 * while the shared variables are the same for all threads.
 */
int main( int argc, char **argv )
{
    int lvar = 0;
    int tid;

#pragma omp parallel private(tid)
    {
        tid = omp_get_thread_num();
        /* Bug fix: the format string has three %p conversions but the
         * original call supplied only two pointer arguments (&gvar, &lvar),
         * so "tid:%p" consumed a nonexistent vararg -- undefined behavior.
         * Pass &tid as the third pointer, and cast all pointer arguments
         * to void* as %p requires. */
        printf( "<%d> gvar:%p lvar:%p tid:%p\n", tid,
                (void *) &gvar, (void *) &lvar, (void *) &tid );
    }

    return 0;
}
GB_nvec_nonempty.c
//------------------------------------------------------------------------------
// GB_nvec_nonempty: count the number of non-empty vectors
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// All pending tuples are ignored. If a vector has all zombies it is still
// counted as non-empty.

#include "GB.h"

GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
int64_t GB_nvec_nonempty // return # of non-empty vectors
(
    const GrB_Matrix A, // input matrix to examine
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (A != NULL) ;
    // zombies (entries awaiting deletion) are allowed; per the note above
    // they still make their vector count as non-empty
    ASSERT (GB_ZOMBIES_OK (A)) ;

    //--------------------------------------------------------------------------
    // trivial case
    //--------------------------------------------------------------------------

    // a matrix with no entries at all has no non-empty vectors
    if (GB_NNZ (A) == 0)
    {
        return (0) ;
    }

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    int64_t anvec = A->nvec ;
    // NOTE(review): GB_GET_NTHREADS_MAX appears to declare nthreads_max and
    // chunk from the Context -- confirm against GB.h before refactoring
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (anvec, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // count the non-empty columns
    //--------------------------------------------------------------------------

    int64_t nvec_nonempty = 0 ;
    const int64_t *GB_RESTRICT Ap = A->p ;

    int64_t k ;
    // each thread accumulates a private count; the reduction sums them
    #pragma omp parallel for num_threads(nthreads) schedule(static) \
        reduction(+:nvec_nonempty)
    for (k = 0 ; k < anvec ; k++)
    {
        // vector k spans Ap [k] .. Ap [k+1]-1, so it is non-empty
        // exactly when Ap [k] < Ap [k+1]
        if (Ap [k] < Ap [k+1]) nvec_nonempty++ ;
    }

    // the count can never exceed the vector dimension of A
    ASSERT (nvec_nonempty >= 0 && nvec_nonempty <= A->vdim) ;

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    return (nvec_nonempty) ;
}
mkldnn_os.h
/******************************************************************************* * Copyright 2017 NEC Labs America * MODIFICATIONS Copyright 2019 NEC Labs America * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ /** \file * handle various compiler/os retrictions */ #ifndef _MKLDNN_OS_H_ #define _MKLDNN_OS_H_ //#include "os_common.hpp" // not available -- we use mkldnn public API only. #if 1 #if defined(__ve) #define strnlen strnlen_s #endif // How is the restrict keyword handled? (disallow it as you encounter errors, please) #if defined(_SX) #elif defined(__ve) // restrict is allowed #ifndef __restrict #define __restrict restrict /* ve/musl/include/stdlib.h uses __restrict !!! */ #endif #elif defined(__INTEL_COMPILER) || defined(__GNUC__) #define restrict /*no-restrict*/ #elif defined(WIN32) // ??? #else // ??? #endif // restrict keyword handling // Any restrictions on the alignas attribute? #ifdef __ve #define alignas(x) alignas((x) > 16 ? 16 : (x)) #endif #endif // ENABLE_OPT_PRAGMAS // set to 0 to debug pragma-related incorrect assumptions #if !defined(ENABLE_OPT_PRAGMAS) //#warning "Unknown system: optimization pragmas NOT USED" //#define ENABLE_OPT_PRAGMAS 0/*XXX*/ #define ENABLE_OPT_PRAGMAS 1 #endif // ENABLE_OMP defaults to 1 #if !defined(ENABLE_OMP) #if defined(_SX) #elif defined(__ve) // OMP is not yet supported by ncc/nc++ //#define ENABLE_OMP 0 // at Dec. 
25th 2017 release, ncc may support OMP #elif defined(__INTEL_COMPILER) #elif defined(__GNUC__) #else #endif #if !defined(ENABLE_OMP) #define ENABLE_OMP 1 #endif #endif // -------- compiler-specific pragmas -------- // __ve compile does something with pragma omp, but it is not officially supported, // so we use C++11 _Pragma to emit pragmas from macros and customize pragmas to // particular compilers. // // Allocation directives: // VREG : hint that array fits into one simd register // There may be many conditions on array access! // ALLOC_ON_VREG : hint that array fits into multiple simd registers // ALLOC_ON_ADB : hint that array should be "cached" in special memory bank. // // Loop directives apply to an IMMEDIATELY FOLLOWING loop: // ShortLoop : hint that for-loop limit is less than max simd register length // RETAIN : hint that array should be kept accesible (cached) // IVDEP : pretend all ptrs are independent (restrict) // // TODO: SX pre-loop macros must be SINGLE ones, because sxcc REQUIRES // multiple #pragma cdir to be combined, comma-separated. // So you can only use ONE pre-loop macro. If 2 macros, // compiler docs say **both** will be ignored! // // FIXME SX alloc_on_vreg 2nd arg must be a compile-time constant // // Oh! ALLOC_ON_VREG cannot "decay" into RETAIN, because syntax is different // ----------------------------------- //#define BENCHDNN_YPRAGMA(str) do{int ypr=str;}while(0); #define BENCHDNN_MPRAGMA(str) _Pragma(str) #define BENCHDNN_STRINGIZE(...) #__VA_ARGS__ #define PragmaQuote(...) BENCHDNN_MPRAGMA(BENCHDNN_STRINGIZE(__VA_ARGS__)) #if ENABLE_OPT_PRAGMAS && defined(_SX) // SX preprocessor generates _Pragma(XXX) and sxc++ might be ignoring // *some*, based on failure to produce some warning messages. //#warning "SX optimization pragmas IN EFFECT" # define VREG(...) PragmaQuote(cdir vreg(__VA_ARGS__)) # define ALLOC_ON_VREG(...) PragmaQuote(cdir alloc_on_vreg(__VA_ARGS__)) # define ALLOC_ON_ADB(...) 
PragmaQuote(cdir alloc_on_adb(__VA_ARGS__)) // Is there a pre-for-loop RETAIN for SX? For now, kludge as on_adb. # define RETAIN(...) PragmaQuote(cdir on_adb(__VA_ARGS__)) # define RETAIN1st(var,...) PragmaQuote(cdir on_adb(var)) # define ShortLoop() _Pragma("cdir shortloop") # define ShortLoopTest() /*?*/ # define IVDEP() _Pragma("cdir nodep") # define UNROLL(x) # define PRAGMA_UNROLL #elif ENABLE_OPT_PRAGMAS && defined(__ve) //# warning "__ve optimization pragmas IN EFFECT" # define VREG(...) PragmaQuote(_NEC vreg(__VA_ARGS__)) # define ALLOC_ON_VREG(...) # define ALLOC_ON_ADB(...) # define RETAIN(...) PragmaQuote(_NEC retain(__VA_ARGS__)) # define RETAIN1st(var,...) PragmaQuote(_NEC retain(var)) # define ShortLoop() _Pragma("_NEC shortloop") # define ShortLoopTest() _Pragma("_NEC shortloop_reduction") # define IVDEP() _Pragma("_NEC ivdep") # define UNROLL(x) PragmaQuote(_NEC unroll(x)) # define PRAGMA_UNROLL PragmaQuote(_NEC unroll(4)) #elif ENABLE_OPT_PRAGMAS && defined(__INTEL_COMPILER) // restrict keyword requires the "-restrict" CFLAG; __restrict__ works anyway # define restrict __restrict__ # define IVDEP() _Pragma("ivdep") # define UNROLL(x) PragmaQuote(unroll(x)) # define PRAGMA_UNROLL PragmaQuote(unroll) // TODO: # define VREG(...) # define ALLOC_ON_VREG(...) # define ALLOC_ON_ADB(...) # define RETAIN(...) # define ShortLoop() # define ShortLoopTest() #elif ENABLE_OPT_PRAGMAS && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER) //-------------------------------------------- // taken from MSVC code in mkldnn_thread.hpp //# warning "MSVC still supports omp 2.0 only" # define collapse(x) //# define PRAGMA_OMP_SIMD(...) ... below //-------------------------------------------- # define UNROLL(x) # define PRAGMA_UNROLL # define VREG(...) # define ALLOC_ON_VREG(...) # define ALLOC_ON_ADB(...) # define RETAIN(...) 
# define ShortLoop() # define ShortLoopTest() #elif ENABLE_OPT_PRAGMAS && defined(__GNUC__) //#warning "__GNUC optimization pragmas IN EFFECT" # define VREG(...) # define ALLOC_ON_VREG(...) # define ALLOC_ON_ADB(...) # define RETAIN(...) # define ShortLoop() # define ShortLoopTest() # define IVDEP() _Pragma("GCC ivdep") #if __GNUC__ >= 8 # define UNROLL(x) PragmaQuote(GCC unroll x) # define PRAGMA_UNROLL PragmaQuote(GCC unroll 4) #else # define UNROLL(x) # define PRAGMA_UNROLL #endif #else /* A new system might begin by ignoring the optimization pragmas */ # warning "Please check if _Pragma macros can be defined for this platorm" # define VREG(...) # define ALLOC_ON_VREG(...) # define ALLOC_ON_ADB(...) # define RETAIN(...) # define ShortLoop() # define ShortLoopTest() # define IVDEP() # define UNROLL(x) # define PRAGMA_UNROLL #endif #if ENABLE_OMP # define OMP(...) PragmaQuote(omp __VA_ARGS__) //# if defined(__ve) //# warning "__ve enabling #pragma omp" //# endif # if defined(_SX) // no support for "simd" pragmas # elif defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER) # elif defined(__ve) # define PRAGMASIMD(...) PragmaQuote(simd __VA_ARGS__) //# warning "__ve (ncc) ignores simd directive in PRAGMA_OMP_SIMD(...) # define OMPSIMD(...) PragmaQuote(omp __VA_ARGS__) # define PRAGMA_OMP_SIMD(...) PragmaQuote(omp __VA_ARGS__) # else // defined(__GNUC) or intel or ... # define PRAGMASIMD(...) PragmaQuote(simd __VA_ARGS__) # define OMPSIMD(...) PragmaQuote(omp simd __VA_ARGS__) # define PRAGMA_OMP_SIMD(...) PragmaQuote(omp simd __VA_ARGS__) # endif #endif #ifndef PRAGMASIMD # define PRAGMASIMD(...) #endif #ifndef OMPSIMD # define OMPSIMD(...) #endif #ifndef PRAGMA_OMP_SIMD # define PRAGMA_OMP_SIMD(...) #endif #ifndef OMP # define OMP(...) #if defined(REF_LRN_HPP) // mostly ignore: show for cpu_engine compile at least # warning "not enabling #pragma omp (mkldnn_os.h)" #endif #endif #endif // _MKLDNN_OS_H_
parallel_steps.c
#include <stdio.h> #include <errno.h> // for errno #include <math.h> #include <limits.h> // for INT_MAX #include <stdlib.h> // for strtol #include <time.h> #include <omp.h> long number_of_threads = 2; long max_number_of_char = 10; long number_of_types_char = 10; long number_of_results = 5; long number_of_queues = 2; long detail_level = 0; long* ids = NULL; long* results = NULL; long* n_of_chars = NULL; typedef struct Client { long* initial_characteristics; long** derived_characteristics; long number_of_init_chars; long identifier; long result; } Client; long rand_between(long l, long r, unsigned int seed) { int value; #pragma omp critical (rand) { srand(seed); value = rand(); } return (long) (l + (value % (r - l + 1))); } Client* createClient(long id, unsigned int seed) { Client* client = malloc(sizeof(Client)); client->number_of_init_chars = rand_between(0, max_number_of_char, seed); seed += client->number_of_init_chars; if(client->number_of_init_chars > 0) { client->initial_characteristics = malloc(client->number_of_init_chars*sizeof(long)); //printf("id: %ld - init size: %ld\n", id, client->number_of_init_chars); for (long i = 0; i < client->number_of_init_chars; ++i) { client->initial_characteristics[i] = rand_between(1, number_of_types_char, seed); seed += client->initial_characteristics[i]; } if(number_of_queues - 1 > 0){ client->derived_characteristics = malloc((number_of_queues-1)*sizeof(long*)); for (long i = 0; i < number_of_queues-1; ++i) { client->derived_characteristics[i] = malloc((client->number_of_init_chars+i+1)*sizeof(long)); //printf("id: %ld - level: %ld - size: %ld\n", id, i, client->number_of_init_chars+i+1); } } } else { client->initial_characteristics = NULL; client->derived_characteristics = NULL; } client->identifier = id; client->result = -1; return client; } void destroyClient(Client* client){ if(client->number_of_init_chars > 0) { free(client->initial_characteristics); if(number_of_queues-1 > 0){ for (long i = 0; i < number_of_queues-1; 
++i) { free(client->derived_characteristics[i]); } free(client->derived_characteristics); } } free(client); } void printClient(Client* client, short int detail_level) { #pragma omp critical (print) { if(detail_level > 1) { printf("\nId: %ld\n", client->identifier); printf("Result: %ld\n", client->result); if(detail_level > 1) { printf("Nº of characteristics: %ld\n", client->number_of_init_chars); if(client->number_of_init_chars > 0) { printf("Characteristics: %ld", client->initial_characteristics[0]); for (long i = 1; i < client->number_of_init_chars; ++i) { printf(", %ld", client->initial_characteristics[i]); } } for (long i = 0; i < number_of_queues - 1; ++i) { if(detail_level > i+2) { if(client->number_of_init_chars > 0) { printf("\nCharacteristics on queue %ld: %ld", i+1, client->derived_characteristics[i][0]); for (long j = 1; j < client->number_of_init_chars+i+1; ++j) { printf(", %ld", client->derived_characteristics[i][j]); } } } } } printf("\n"); } } } void printClientCSV(Client* client) { printf("\n%ld, %ld, %ld\n", client->identifier, client->result, client->number_of_init_chars); } void printCSV(long* ids, long* results, long* n_of_chars, long number_of_clients) { printf("ID,Result,Nº Initial Characteristics\n"); for (long i = 0; i < number_of_clients; ++i) { printf("%ld, %ld, %ld\n", ids[i], results[i], n_of_chars[i]); } } long initialCharProcess(Client* client) { long value = 0; for (long i = 0; i < client->number_of_init_chars; ++i) { value += client->initial_characteristics[i]; } return (value / (client->number_of_init_chars + 1)) + (value % (client->number_of_init_chars + 1)); } long levelCharProcess(Client* client, long* values, long level) { // printf("client: %ld - chars: %ld\n", client->identifier, client->number_of_init_chars); long* origin = NULL; if (level == 0) { origin = client->initial_characteristics; } else { origin = client->derived_characteristics[level-1]; } long value = 0; // printf("client: %ld - level: %ld - level size: %ld\n", 
client->identifier, level, client->number_of_init_chars+level); for (long i = 0; i < client->number_of_init_chars+level-1; ++i) { client->derived_characteristics[level][i] = 0; for (long j = 0; j < client->number_of_init_chars+level; ++j) { client->derived_characteristics[level][i] += abs(origin[i] - origin[j]); } // printf("client: %ld - %ld: %ld\n", client->identifier,i, client->derived_characteristics[level][i]); value += client->derived_characteristics[level][i]; } client->derived_characteristics[level][client->number_of_init_chars+level-1] = abs(origin[client->number_of_init_chars+level-1] - origin[0]); // printf("client: %ld - %ld: %ld\n", client->identifier,client->number_of_init_chars+level-1, client->derived_characteristics[level][client->number_of_init_chars+level-1]); client->derived_characteristics[level][client->number_of_init_chars+level] = values[level]; // printf("client: %ld - %ld: %ld\n", client->identifier,client->number_of_init_chars+level, client->derived_characteristics[level][client->number_of_init_chars+level]); value += client->derived_characteristics[level][client->number_of_init_chars+level-1]; value += client->derived_characteristics[level][client->number_of_init_chars+level]; return (value * client->number_of_init_chars) % number_of_types_char; } long categoryFromValue(long* values) { long result = 0; if(values[0] == 0){ return 0; } for (long i = 0; i < number_of_queues; ++i) { result += values[i] * (i+1); } result = result / number_of_queues; return 1 + ( result % number_of_results); } void categorizeClient(Client* client) { if(client->number_of_init_chars == 0) { client->result = 0; return; } long* values = calloc(number_of_queues, sizeof(long)); values[0] = initialCharProcess(client); //printf("initial value: %ld\n", values[0]); for (long i = 0; i < number_of_queues-1; ++i) { values[i+1] += levelCharProcess(client, values, i); //printf("value %ld: %ld\n", i, values[i]); } client->result = categoryFromValue(values); free(values); } 
void taskProcessClient(Client* client, long* clientValues, long step){ // printf("process step %ld of %ld - %d\n", step, client->identifier, omp_get_thread_num()); if(client->number_of_init_chars == 0) { client->result = 0; ids[client->identifier] = client->identifier; results[client->identifier] = client->result; n_of_chars[client->identifier] = client->number_of_init_chars; printClient(client, detail_level); return; } if(step < number_of_queues-1) { clientValues[step+1] += levelCharProcess(client, clientValues, step); // printf("step: %ld - number_of_queues: %ld - result %ld\n", step, number_of_queues, clientValues[step+1]); #pragma omp task { taskProcessClient(client, clientValues, step+1); } } else { // #pragma omp task // { // printf("step: %ld - number_of_queues: %ld\n", step, number_of_queues); client->result = categoryFromValue(clientValues); ids[client->identifier] = client->identifier; results[client->identifier] = client->result; n_of_chars[client->identifier] = client->number_of_init_chars; printClient(client, detail_level); // } } } long convert_str_long(char *str){ char *p; errno = 0; long conv = strtol(str, &p, 10); if (errno != 0 || *p != '\0') { printf("%s não é um número!\n", str); exit(-1); } return (long)conv; } int main(int argc, char **argv){ if (argc != 9) { printf("É necessário informar os seguintes argumentos:\n"); printf("Quantidade de threads a serem usadas\n"); printf("Seed usada para gerar os dados\n"); printf("Número de clientes a serem criados\n"); printf("Quantidade máxima de caracteristicas por cliente\n"); printf("Quantidade de tipos de caracteristicas\n"); printf("Quantidade de resultados diferentes do 0\n"); printf("Número de etapas a serem utilizadas para processar o resultado\n"); printf("Nível de detalhe da exibição dos resultados:\n"); printf(" - Caso seja 0: Imprime apenas o tempo gasto\n"); printf(" - Caso seja 1: Imprime o Id, o Resultado e o Nª de categorias de cada cliente em um .csv\n"); printf(" - Caso seja n: Imprime 
os dados de cada cliente detalhando as caracteristicas de até n-1 etapas e ao final os dados de detalhe 1\n"); return -1; } number_of_threads = convert_str_long(argv[1]); unsigned int seed = convert_str_long(argv[2]); long number_of_clients = convert_str_long(argv[3]); max_number_of_char = convert_str_long(argv[4]); number_of_types_char = convert_str_long(argv[5]); number_of_results = convert_str_long(argv[6]); number_of_queues = convert_str_long(argv[7]); detail_level = convert_str_long(argv[8]); ids = malloc(number_of_clients*sizeof(long)); results = malloc(number_of_clients*sizeof(long)); n_of_chars = malloc(number_of_clients*sizeof(long)); Client** clients = malloc(number_of_clients*sizeof(Client*)); Client*** levels = malloc((number_of_queues)*sizeof(Client**)); long** values = malloc(number_of_clients*sizeof(long*)); for (size_t i = 0; i < number_of_queues; ++i) { levels[i] = malloc(number_of_clients*sizeof(Client*)); } for (long i = 0; i < number_of_clients; ++i) { clients[i] = createClient(i, seed+i); values[i] = calloc(number_of_queues, sizeof(long)); } double t = omp_get_wtime(); #pragma omp parallel num_threads(number_of_threads) default(none) \ shared(number_of_clients, values, clients, levels) { #pragma omp for schedule(guided) for (long i = 0; i < number_of_clients; ++i) { values[i][0] = initialCharProcess(clients[i]); //printf("inicio %ld - %d\n", i, omp_get_thread_num()); #pragma omp task { taskProcessClient(clients[i], values[i], 0); } } } #pragma omp taskwait t = omp_get_wtime() - t; if(detail_level > 0) { printCSV(ids, results, n_of_chars, number_of_clients); } else { printf("%.10lf\n", t); } for (long i = 0; i < number_of_clients; ++i) { destroyClient(clients[i]); free(values[i]); } for (size_t i = 0; i < number_of_queues; ++i) { free(levels[i]); } free(levels); free(values); free(clients); free(ids); free(results); free(n_of_chars); return 0; } /* main */
GB_unop__isnan_bool_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__isnan_bool_fp32) // op(A') function: GB (_unop_tran__isnan_bool_fp32) // C type: bool // A type: float // cast: float cij = (aij) // unaryop: cij = isnan (aij) #define GB_ATYPE \ float #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = isnan (x) ; // casting #define GB_CAST(z, aij) \ float z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = (aij) ; \ Cx [pC] = isnan (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNAN || GxB_NO_BOOL || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__isnan_bool_fp32) ( bool *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // 
TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = (aij) ; Cx [p] = isnan (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = (aij) ; Cx [p] = isnan (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__isnan_bool_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
cryptocontext.h
/** * @file cryptocontext.h -- Control for encryption operations. * @author TPOC: contact@palisade-crypto.org * * @section LICENSE * * @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT)) * All rights reserved. * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef SRC_PKE_CRYPTOCONTEXT_H_ #define SRC_PKE_CRYPTOCONTEXT_H_ #include "palisade.h" #include "scheme/allscheme.h" #include "cryptocontexthelper.h" #include "cryptotiming.h" #include "utils/serial.h" #include "utils/serialize-binary.h" #include "utils/serialize-json.h" namespace lbcrypto { template<typename Element> class CryptoContextFactory; template<typename Element> class CryptoContextImpl; template<typename Element> using CryptoContext = shared_ptr<CryptoContextImpl<Element>>; /** * @brief CryptoContextImpl * * A CryptoContextImpl is the object used to access the PALISADE library * * All PALISADE functionality is accessed by way of an instance of a CryptoContextImpl; we say that various objects are * "created in" a context, and can only be used in the context in which they were created * * All PALISADE methods are accessed through CryptoContextImpl methods. Guards are implemented to make certain that * only valid objects that have been created in the context are used * * Contexts are created using the CryptoContextFactory, and can be serialized and recovered from a serialization */ template<typename Element> class CryptoContextImpl : public Serializable { friend class CryptoContextFactory<Element>; protected: shared_ptr<LPCryptoParameters<Element>> params; /*!< crypto parameters used for this context */ shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme; /*!< algorithm used; accesses all crypto methods */ static std::map<string,std::vector<LPEvalKey<Element>>> evalMultKeyMap; /*!< cached evalmult keys, by secret key UID */ static std::map<string,shared_ptr<std::map<usint,LPEvalKey<Element>>>> evalSumKeyMap; /*!< cached evalsum keys, by secret key UID */ static std::map<string,shared_ptr<std::map<usint,LPEvalKey<Element>>>> evalAutomorphismKeyMap; /*!< cached evalautomorphism keys, by secret key UID */ bool doTiming; vector<TimingInfo>* timeSamples; string m_schemeId; size_t m_keyGenLevel; /** * TypeCheck makes sure that an operation between two 
ciphertexts is permitted.
 *
 * Throws type_error unless both ciphertexts are non-null, were created in
 * this context, share the same key tag, and use the same plaintext encoding.
 * @param a first operand
 * @param b second operand
 */
void TypeCheck(ConstCiphertext<Element> a, ConstCiphertext<Element> b) const {
  if( a == NULL || b == NULL )
    PALISADE_THROW( type_error, "Null Ciphertext");
  if( a->GetCryptoContext().get() != this )
    PALISADE_THROW( type_error, "Ciphertext was not created in this CryptoContext");
  if( a->GetCryptoContext() != b->GetCryptoContext() )
    PALISADE_THROW( type_error, "Ciphertexts were not created in the same CryptoContext");
  if( a->GetKeyTag() != b->GetKeyTag() )
    PALISADE_THROW( type_error, "Ciphertexts were not encrypted with same keys" );
  if( a->GetEncodingType() != b->GetEncodingType() ) {
    // Build a message naming the two mismatched encodings.
    stringstream ss;
    ss << "Ciphertext encoding types " << a->GetEncodingType();
    ss << " and " << b->GetEncodingType();
    ss << " do not match";
    PALISADE_THROW( type_error, ss.str() );
  }
}

/**
 * TypeCheck makes sure that an operation between two ciphertexts is permitted.
 * This overload is intended for mutable methods, hence inputs are Ciphertext
 * instead of ConstCiphertext. Checks are identical to the const overload.
 *
 * @param a first operand
 * @param b second operand
 */
void TypeCheck(Ciphertext<Element> a, Ciphertext<Element> b) const {
  if( a == NULL || b == NULL )
    PALISADE_THROW( type_error, "Null Ciphertext");
  if( a->GetCryptoContext().get() != this )
    PALISADE_THROW( type_error, "Ciphertext was not created in this CryptoContext");
  if( a->GetCryptoContext() != b->GetCryptoContext() )
    PALISADE_THROW( type_error, "Ciphertexts were not created in the same CryptoContext");
  if( a->GetKeyTag() != b->GetKeyTag() )
    PALISADE_THROW( type_error, "Ciphertexts were not encrypted with same keys" );
  if( a->GetEncodingType() != b->GetEncodingType() ) {
    stringstream ss;
    ss << "Ciphertext encoding types " << a->GetEncodingType();
    ss << " and " << b->GetEncodingType();
    ss << " do not match";
    PALISADE_THROW( type_error, ss.str() );
  }
}

/**
 * TypeCheck makes sure that an operation between a ciphertext and a plaintext
 * is permitted: both non-null, ciphertext from this context, encodings match.
 * @param a ciphertext operand
 * @param b plaintext operand
 */
void TypeCheck(ConstCiphertext<Element> a, ConstPlaintext b) const {
  if( a == NULL )
    PALISADE_THROW( type_error, "Null Ciphertext");
  if( b == NULL )
    PALISADE_THROW( type_error, "Null Plaintext");
  if( a->GetCryptoContext().get() != this )
    PALISADE_THROW( type_error, "Ciphertext was not created in this CryptoContext");
  if( a->GetEncodingType() != b->GetEncodingType() ) {
    stringstream ss;
    ss << "Ciphertext encoding type " << a->GetEncodingType();
    ss << " and Plaintext encoding type " << b->GetEncodingType();
    ss << " do not match";
    PALISADE_THROW( type_error, ss.str() );
  }
}

/**
 * TypeCheck makes sure that an operation between two rational ciphertexts is
 * permitted; encodings are compared on the numerators.
 * @param a first operand
 * @param b second operand
 */
void TypeCheck(const RationalCiphertext<Element>& a, const RationalCiphertext<Element>& b) const {
  if( a.GetCryptoContext().get() != this )
    PALISADE_THROW( type_error, "Ciphertext was not created in this CryptoContextImpl");
  if( a.GetCryptoContext() != b.GetCryptoContext() )
    PALISADE_THROW( type_error, "Ciphertexts were not created in the same CryptoContextImpl");
  if( a.GetKeyTag() != b.GetKeyTag() )
    PALISADE_THROW( type_error, "Ciphertexts were not encrypted with same keys" );
  if( a.GetNumerator()->GetEncodingType() != b.GetNumerator()->GetEncodingType() ) {
    stringstream ss;
    ss << "RationalCiphertext encoding types " << a.GetNumerator()->GetEncodingType();
    ss << " and " << b.GetNumerator()->GetEncodingType();
    ss << " do not match";
    PALISADE_THROW( type_error, ss.str() );
  }
}

/**
 * TypeCheck makes sure that an operation between a rational ciphertext and a
 * plaintext is permitted.
 * @param a rational ciphertext operand
 * @param b plaintext operand
 */
void TypeCheck(const RationalCiphertext<Element>& a, ConstPlaintext b) const {
  if( b == NULL )
    PALISADE_THROW( type_error, "Null Plaintext");
  if( a.GetCryptoContext().get() != this )
    PALISADE_THROW( type_error, "Ciphertext was not created in this CryptoContextImpl");
  if( a.GetNumerator()->GetEncodingType() != b->GetEncodingType() ){
    stringstream ss;
    ss << "RationalCiphertext encoding type " << a.GetNumerator()->GetEncodingType();
    ss << " and Plaintext encoding type " << b->GetEncodingType();
    ss << " do not match";
    PALISADE_THROW( type_error, ss.str() );
  }
}

// Returns true when the given context is NOT this context (guard helper).
bool Mismatched(const CryptoContext<Element> a) const {
  if( a.get() != this ) {
    return true;
  }
  return false;
}

public:
LPPrivateKey<Element> privateKey; // debug-only storage; see SetPrivateKey/GetPrivateKey (DEBUG_KEY)

/**
 * This stores the private key in the crypto context.
 * This is only intended for debugging and should not be
 * used in production systems. Please define DEBUG_KEY in
 * palisade.h to enable this.
 *
 * If used, one can create a key pair and store the secret
 * key in the crypto context like this:
 *
 * auto keys = cc->KeyGen();
 * cc->SetPrivateKey(keys.secretKey);
 *
 * After that, anywhere in the code, one can access the
 * secret key by getting the crypto context and doing the
 * following:
 *
 * auto sk = cc->GetPrivateKey();
 *
 * This key can be used for decrypting any intermediate
 * ciphertexts for debugging purposes.
* * @param sk the secret key * */ void SetPrivateKey(const LPPrivateKey<Element> sk) { #ifdef DEBUG_KEY cerr << "Warning - SetPrivateKey is only intended to be used for debugging purposes - not for production systems." << endl; this->privateKey = sk; #else PALISADE_THROW(not_available_error, "SetPrivateKey is only allowed if DEBUG_KEY is set in palisade.h"); #endif } /** * This gets the private key from the crypto context. * This is only intended for debugging and should not be * used in production systems. Please define DEBUG_KEY in * palisade.h to enable this. * * If used, one can create a key pair and store the secret * key in th crypto context like this: * * auto keys = cc->KeyGen(); * cc->SetPrivateKey(keys.secretKey); * * After that, anyone in the code, one can access the * secret key by getting the crypto context and doing the * following: * * auto sk = cc->GetPrivateKey(); * * This key can be used for decrypting any intermediate * ciphertexts for debugging purposes. * * @return the secret key * */ const LPPrivateKey<Element> GetPrivateKey() { #ifdef DEBUG_KEY return this->privateKey; #else PALISADE_THROW(not_available_error, "GetPrivateKey is only allowed if DEBUG_KEY is set in palisade.h"); #endif } void setSchemeId(string schemeTag) { this->m_schemeId = schemeTag; } string getSchemeId() { return this->m_schemeId; } /** * CryptoContextImpl constructor from pointers to parameters and scheme * @param params - pointer to CryptoParameters * @param scheme - pointer to Crypto Scheme */ CryptoContextImpl(LPCryptoParameters<Element> *params = 0, LPPublicKeyEncryptionScheme<Element> *scheme = 0, const string & schemeId = "Not") { this->params.reset(params); this->scheme.reset(scheme); this->doTiming = false; this->timeSamples = 0; this->m_keyGenLevel = 0; this->m_schemeId = schemeId; } /** * CryptoContextImpl constructor from shared pointers to parameters and scheme * @param params - shared pointer to CryptoParameters * @param scheme - sharedpointer to Crypto 
Scheme */ CryptoContextImpl(shared_ptr<LPCryptoParameters<Element>> params, shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme, const string & schemeId = "Not") { this->params = params; this->scheme = scheme; this->doTiming = false; this->timeSamples = 0; this->m_keyGenLevel = 0; this->m_schemeId = schemeId; } /** * Copy constructor * @param c - source */ CryptoContextImpl(const CryptoContextImpl<Element>& c) { params = c.params; scheme = c.scheme; doTiming = c.doTiming; timeSamples = c.timeSamples; this->m_keyGenLevel = 0; this->m_schemeId = c.m_schemeId; } /** * Assignment * @param rhs - assigning from * @return this */ CryptoContextImpl<Element>& operator=(const CryptoContextImpl<Element>& rhs) { params = rhs.params; scheme = rhs.scheme; doTiming = rhs.doTiming; timeSamples = rhs.timeSamples; m_keyGenLevel = rhs.m_keyGenLevel; m_schemeId = rhs.m_schemeId; return *this; } /** * A CryptoContextImpl is only valid if the shared pointers are both valid */ operator bool() const { return bool(params) && bool(scheme); } /** * Private methods to compare two contexts; this is only used internally and is not generally available * @param a - operand 1 * @param b - operand 2 * @return true if the implementations have identical parms and scheme */ friend bool operator==(const CryptoContextImpl<Element>& a, const CryptoContextImpl<Element>& b) { // Identical if the parameters and the schemes are identical... 
the exact same object,
  // OR the same type and the same values
  if( a.params.get() == b.params.get() ) {
    // NOTE(review): identical params pointers return true immediately,
    // without ever comparing the schemes — confirm this short-circuit is
    // intended.
    return true;
  }
  else {
    // Different objects: compare dynamic types first, then values.
    if( typeid(*a.params.get()) != typeid(*b.params.get()) ) {
      return false;
    }
    if( *a.params.get() != *b.params.get() )
      return false;
  }

  if( a.scheme.get() == b.scheme.get() ) {
    return true;
  }
  else {
    if( typeid(*a.scheme.get()) != typeid(*b.scheme.get()) ) {
      return false;
    }
    if( *a.scheme.get() != *b.scheme.get() )
      return false;
  }

  return true;
}

friend bool operator!=(const CryptoContextImpl<Element>& a, const CryptoContextImpl<Element>& b) {
  return !( a == b );
}

// TIMING METHODS

/**
 * StartTiming method activates timing of CryptoMethods
 *
 * @param timeSamples points to a vector in which timing samples will be stored
 */
void StartTiming(vector<TimingInfo>* timeSamples) {
  this->timeSamples = timeSamples;
  doTiming = true;
}

/*
 * StopTiming - turns off timing (the samples vector is retained)
 */
void StopTiming() {
  doTiming = false;
}

/**
 * ResumeTiming - re-enables timing with existing TimingInfo vector
 */
void ResumeTiming() {
  doTiming = true;
}

/**
 * ResetTiming - erases measurements
 * NOTE(review): dereferences timeSamples unconditionally — only call after
 * StartTiming has supplied a vector.
 */
void ResetTiming() {
  this->timeSamples->clear();
}

// Legacy rapidjson (Serialized) entry points, superseded by the templated
// stream-based methods below.
static bool SerializeEvalMultKey(Serialized* serObj) __attribute__ ((deprecated("serialization changed, see wiki for details")));
static bool SerializeEvalMultKey(Serialized* serObj, const string& id) __attribute__ ((deprecated("serialization changed, see wiki for details")));
static bool SerializeEvalMultKey(Serialized* serObj, const CryptoContext<Element> cc) __attribute__ ((deprecated("serialization changed, see wiki for details")));
static bool DeserializeEvalMultKey(Serialized* serObj) __attribute__ ((deprecated("serialization changed, see wiki for details")));

/**
 * SerializeEvalMultKey for a single EvalMult key or all EvalMult keys
 *
 * @param ser - stream to serialize to
 * @param sertype - type of serialization
 * @param id for key to serialize - if empty string, serialize them all
 * @return true on success
 */
template<typename ST> static
bool SerializeEvalMultKey(std::ostream& ser, const ST& sertype, string id = "") {
  decltype(evalMultKeyMap) *smap;
  decltype(evalMultKeyMap) omap;

  if( id.length() == 0 )
    smap = &evalMultKeyMap;
  else {
    auto k = evalMultKeyMap.find(id);
    if( k == evalMultKeyMap.end() )
      return false; // no such id

    // Serialize just the one entry, via a single-element local map.
    smap = &omap;
    omap[ k->first ] = k->second;
  }

  Serial::Serialize(*smap, ser, sertype);
  return true;
}

/**
 * SerializeEvalMultKey for all EvalMultKeys made in a given context
 *
 * @param cc whose keys should be serialized
 * @param ser - stream to serialize to
 * @param sertype - type of serialization
 * @return true on success (false on failure or no keys found)
 */
template<typename ST> static
bool SerializeEvalMultKey(std::ostream& ser, const ST& sertype, const CryptoContext<Element> cc) {
  // Collect only the cached key vectors created in context cc.
  decltype(evalMultKeyMap) omap;
  for( const auto& k : evalMultKeyMap ) {
    if( k.second[0]->GetCryptoContext() == cc ) {
      omap[k.first] = k.second;
    }
  }

  if( omap.size() == 0 )
    return false;

  Serial::Serialize(omap, ser, sertype);
  return true;
}

/**
 * DeserializeEvalMultKey deserialize all keys in the serialization
 * deserialized keys silently replace any existing matching keys
 * deserialization will create CryptoContextImpl if necessary
 *
 * @param serObj - stream with a serialization
 * @return true on success
 */
template<typename ST> static
bool DeserializeEvalMultKey(std::istream& ser, const ST& sertype) {
  decltype(evalMultKeyMap) evalMultKeys;

  Serial::Deserialize(evalMultKeys, ser, sertype);

  // The deserialize call created any contexts that needed to be created....
so all we need to do // is put the keys into the maps for their context for( auto k : evalMultKeys ) { evalMultKeyMap[ k.first ] = k.second; } return true; } /** * ClearEvalMultKeys - flush EvalMultKey cache */ static void ClearEvalMultKeys(); /** * ClearEvalMultKeys - flush EvalMultKey cache for a given id * @param id */ static void ClearEvalMultKeys(const string& id); /** * ClearEvalMultKeys - flush EvalMultKey cache for a given context * @param cc */ static void ClearEvalMultKeys(const CryptoContext<Element> cc); /** * InsertEvalMultKey - add the given vector of keys to the map, replacing the existing vector if there * @param vectorToInsert */ static void InsertEvalMultKey(const std::vector<LPEvalKey<Element>>& vectorToInsert); static bool SerializeEvalSumKey(Serialized* serObj) __attribute__ ((deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalSumKey(Serialized* serObj, const string& id) __attribute__ ((deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalSumKey(Serialized* serObj, const CryptoContext<Element> cc) __attribute__ ((deprecated("serialization changed, see wiki for details"))); static bool DeserializeEvalSumKey(const Serialized& serObj) __attribute__ ((deprecated("serialization changed, see wiki for details"))); /** * SerializeEvalSumKey for a single EvalSum key or all of the EvalSum keys * * @param ser - stream to serialize to * @param sertype - type of serialization * @param id - key to serialize; empty string means all keys * @return true on success */ template<typename ST> static bool SerializeEvalSumKey(std::ostream& ser, const ST& sertype, string id = "") { decltype(evalSumKeyMap)* smap; decltype(evalSumKeyMap) omap; if( id.length() == 0 ) smap = &evalSumKeyMap; else { auto k = evalSumKeyMap.find(id); if( k == evalSumKeyMap.end() ) return false; // no such id smap = &omap; omap[ k->first ] = k->second; } Serial::Serialize(*smap, ser, sertype); return true; } /** * 
SerializeEvalSumKey for all of the EvalSum keys for a context * * @param ser - stream to serialize to * @param sertype - type of serialization * @param cc - context * @return true on success */ template<typename ST> static bool SerializeEvalSumKey(std::ostream& ser, const ST& sertype, const CryptoContext<Element> cc) { decltype(evalSumKeyMap) omap; for( const auto& k : evalSumKeyMap ) { if( k.second->begin()->second->GetCryptoContext() == cc ) { omap[k.first] = k.second; } } if( omap.size() == 0 ) return false; Serial::Serialize(omap, ser, sertype); return true; } /** * DeserializeEvalSumKey deserialize all keys in the serialization * deserialized keys silently replace any existing matching keys * deserialization will create CryptoContextImpl if necessary * * @param ser - stream to serialize from * @param sertype - type of serialization * @return true on success */ template<typename ST> static bool DeserializeEvalSumKey(std::istream& ser, const ST& sertype) { decltype(evalSumKeyMap) evalSumKeys; Serial::Deserialize(evalSumKeys, ser, sertype); // The deserialize call created any contexts that needed to be created.... 
so all we need to do
  // is put the keys into the maps for their context
  for( auto k : evalSumKeys ) {
    evalSumKeyMap[ k.first ] = k.second;
  }

  return true;
}

/**
 * ClearEvalSumKeys - flush EvalSumKey cache
 */
static void ClearEvalSumKeys();

/**
 * ClearEvalSumKeys - flush EvalSumKey cache for a given id
 * @param id
 */
static void ClearEvalSumKeys(const string& id);

/**
 * ClearEvalSumKeys - flush EvalSumKey cache for a given context
 * @param cc
 */
static void ClearEvalSumKeys(const CryptoContext<Element> cc);

/**
 * InsertEvalSumKey - add the given map of keys to the map, replacing the
 * existing map if there
 * @param mapToInsert
 */
static void InsertEvalSumKey(const shared_ptr<std::map<usint,LPEvalKey<Element>>> mapToInsert);

// Legacy rapidjson (Serialized) entry points, superseded by the templated
// stream-based methods below.
static bool SerializeEvalAutomorphismKey(Serialized* serObj) __attribute__ ((deprecated("serialization changed, see wiki for details")));
static bool SerializeEvalAutomorphismKey(Serialized* serObj, const string& id) __attribute__ ((deprecated("serialization changed, see wiki for details")));
static bool SerializeEvalAutomorphismKey(Serialized* serObj, const CryptoContext<Element> cc) __attribute__ ((deprecated("serialization changed, see wiki for details")));
static bool DeserializeEvalAutomorphismKey(const Serialized& serObj) __attribute__ ((deprecated("serialization changed, see wiki for details")));

/**
 * SerializeEvalAutomorphismKey for a single EvalAuto key or all of the EvalAuto keys
 *
 * @param ser - stream to serialize to
 * @param sertype - type of serialization
 * @param id - key to serialize; empty string means all keys
 * @return true on success
 */
template<typename ST> static
bool SerializeEvalAutomorphismKey(std::ostream& ser, const ST& sertype, string id = "") {
  decltype(evalAutomorphismKeyMap)* smap;
  decltype(evalAutomorphismKeyMap) omap;

  if( id.length() == 0 )
    smap = &evalAutomorphismKeyMap;
  else {
    auto k = evalAutomorphismKeyMap.find(id);
    if( k == evalAutomorphismKeyMap.end() )
      return false; // no such id

    // Serialize just the one entry, via a single-element local map.
    smap = &omap;
    omap[ k->first ] = k->second;
  }

  Serial::Serialize(*smap, ser, sertype);
  return true;
}

/**
 * SerializeEvalAutomorphismKey for all of the EvalAuto keys for a context
 *
 * @param ser - stream to serialize to
 * @param sertype - type of serialization
 * @param cc - context
 * @return true on success
 */
template<typename ST> static
bool SerializeEvalAutomorphismKey(std::ostream& ser, const ST& sertype, const CryptoContext<Element> cc) {
  decltype(evalAutomorphismKeyMap) omap;
  for( const auto& k : evalAutomorphismKeyMap ) {
    if( k.second->begin()->second->GetCryptoContext() == cc ) {
      omap[k.first] = k.second;
    }
  }

  if( omap.size() == 0 )
    return false;

  Serial::Serialize(omap, ser, sertype);
  return true;
}

/**
 * DeserializeEvalAutomorphismKey deserialize all keys in the serialization
 * deserialized keys silently replace any existing matching keys
 * deserialization will create CryptoContextImpl if necessary
 *
 * @param ser - stream to serialize from
 * @param sertype - type of serialization
 * @return true on success
 */
template<typename ST> static
bool DeserializeEvalAutomorphismKey(std::istream& ser, const ST& sertype) {
  // NOTE(review): local name "evalSumKeys" is a copy-paste leftover; these
  // are automorphism keys.
  decltype(evalAutomorphismKeyMap) evalSumKeys;

  Serial::Deserialize(evalSumKeys, ser, sertype);

  // The deserialize call created any contexts that needed to be created....
so all we need to do // is put the keys into the maps for their context for( auto k : evalSumKeys ) { evalAutomorphismKeyMap[ k.first ] = k.second; } return true; } /** * ClearEvalAutomorphismKeys - flush EvalAutomorphismKey cache */ static void ClearEvalAutomorphismKeys(); /** * ClearEvalAutomorphismKeys - flush EvalAutomorphismKey cache for a given id * @param id */ static void ClearEvalAutomorphismKeys(const string& id); /** * ClearEvalAutomorphismKeys - flush EvalAutomorphismKey cache for a given context * @param cc */ static void ClearEvalAutomorphismKeys(const CryptoContext<Element> cc); /** * InsertEvalAutomorphismKey - add the given map of keys to the map, replacing the existing map if there * @param mapToInsert */ static void InsertEvalAutomorphismKey(const shared_ptr<std::map<usint,LPEvalKey<Element>>> mapToInsert); // TURN FEATURES ON /** * Enable a particular feature for use with this CryptoContextImpl * @param feature - the feature that should be enabled */ void Enable(PKESchemeFeature feature) { scheme->Enable(feature); } /** * Enable several features at once * @param featureMask - bitwise or of several PKESchemeFeatures */ void Enable(usint featureMask) { scheme->Enable(featureMask); } // GETTERS /** * Getter for Scheme * @return scheme */ const shared_ptr<LPPublicKeyEncryptionScheme<Element>> GetEncryptionAlgorithm() const { return scheme; } /** * Getter for CryptoParams * @return params */ const shared_ptr<LPCryptoParameters<Element>> GetCryptoParameters() const { return params; } const size_t GetKeyGenLevel() const { return m_keyGenLevel; } void SetKeyGenLevel(size_t level) { m_keyGenLevel = level; } /** * Getter for element params * @return */ const shared_ptr<typename Element::Params> GetElementParams() const { return params->GetElementParams(); } /** * Getter for encoding params * @return */ const EncodingParams GetEncodingParams() const { return params->GetEncodingParams(); } /** * Get the cyclotomic order used for this context * * @return */ 
const usint GetCyclotomicOrder() const { return params->GetElementParams()->GetCyclotomicOrder(); } /** * Get the ring dimension used for this context * * @return */ const usint GetRingDimension() const { return params->GetElementParams()->GetRingDimension(); } /** * Get the ciphertext modulus used for this context * * @return */ const typename Element::Integer& GetModulus() const { return params->GetElementParams()->GetModulus(); } /** * Get the ciphertext modulus used for this context * * @return */ const typename Element::Integer& GetRootOfUnity() const { return params->GetElementParams()->GetRootOfUnity(); } /** * KeyGen generates a key pair using this algorithm's KeyGen method * @return a public/secret key pair */ LPKeyPair<Element> KeyGen() { TimeVar t; if( doTiming ) TIC(t); auto r = GetEncryptionAlgorithm()->KeyGen(CryptoContextFactory<Element>::GetContextForPointer(this), false); if( doTiming ) { timeSamples->push_back( TimingInfo(OpKeyGen, TOC_US(t)) ); } return r; } /** * KeyGen generates a Multiparty key pair using this algorithm's KeyGen method from two keys * @param pk first public key used to coordinate the creation of later public keys. * @return a public/secret key pair */ LPKeyPair<Element> MultipartyKeyGen( const LPPublicKey<Element> pk, bool makeSparse=false, bool pre=false) { TimeVar t; if( doTiming ) TIC(t); auto r = GetEncryptionAlgorithm()->MultipartyKeyGen(CryptoContextFactory<Element>::GetContextForPointer(this), pk, makeSparse, pre); if( doTiming ) { timeSamples->push_back( TimingInfo(OpMultiPartyKeyGenKey, TOC_US(t)) ); } return r; } /** * KeyGen generates a Multiparty key pair using a vector of secret keys * @param secretKeys a vector of the secret keys to be used for multiparty computation. 
* @return a public/secret key pair */ LPKeyPair<Element> MultipartyKeyGen( const vector<LPPrivateKey<Element>>& secretKeys) { TimeVar t; if( doTiming ) TIC(t); auto r = GetEncryptionAlgorithm()->MultipartyKeyGen(CryptoContextFactory<Element>::GetContextForPointer(this), secretKeys, false); if( doTiming ) { timeSamples->push_back( TimingInfo(OpMultiPartyKeyGenKeyvec, TOC_US(t)) ); } return r; } /** * Lead Multiparty Decryption method for PALISADE multiparty operations. * This should be performed by exactly one of the clients. * All other clients should perform the MultipartyDecryptMain operation. * @param privateKey the secret key of the lead decryption client * @param ciphertext vector of encrypted ciphertext * @return vector of partially decrypted ciphertexts */ vector<Ciphertext<Element>> MultipartyDecryptLead( const LPPrivateKey<Element> privateKey, const vector<Ciphertext<Element>>& ciphertext) const { if( privateKey == NULL || Mismatched(privateKey->GetCryptoContext()) ) PALISADE_THROW(config_error, "Information passed to MultipartyDecryptLead was not generated with this crypto context"); vector<Ciphertext<Element>> newCiphertext; TimeVar t; if( doTiming ) TIC(t); for( size_t i = 0; i < ciphertext.size(); i++ ) { if( ciphertext[i] == NULL || Mismatched(ciphertext[i]->GetCryptoContext()) ) PALISADE_THROW(config_error, "A ciphertext passed to MultipartyDecryptLead was not generated with this crypto context"); newCiphertext.push_back( GetEncryptionAlgorithm()->MultipartyDecryptLead(privateKey, ciphertext[i]) ); } if( doTiming ) { timeSamples->push_back( TimingInfo(OpMultiPartyDecryptLead, TOC_US(t)) ); } return newCiphertext; } /** * Multiparty decryption method for PALISADE multiparty operations. * The lead multiparty decryption operation should be performed by exactly one of the clients. * All other clients should perform this MultipartyDecryptMain operation. 
* @param privateKey - for decryption
 * @param ciphertext - vector of encrypted ciphertext
 * @return vector of partially decrypted ciphertexts
 */
vector<Ciphertext<Element>> MultipartyDecryptMain(
    const LPPrivateKey<Element> privateKey,
    const vector<Ciphertext<Element>>& ciphertext) const {
  // Guard: key and every ciphertext must come from this context.
  if( privateKey == NULL || Mismatched(privateKey->GetCryptoContext()) )
    PALISADE_THROW(config_error, "Information passed to MultipartyDecryptMain was not generated with this crypto context");

  vector<Ciphertext<Element>> newCiphertext;

  TimeVar t;
  if( doTiming ) TIC(t);

  for( size_t i = 0; i < ciphertext.size(); i++ ) {
    if( ciphertext[i] == NULL || Mismatched(ciphertext[i]->GetCryptoContext()) )
      PALISADE_THROW(config_error, "A ciphertext passed to MultipartyDecryptMain was not generated with this crypto context");

    newCiphertext.push_back( GetEncryptionAlgorithm()->MultipartyDecryptMain(privateKey, ciphertext[i]) );
  }

  if( doTiming ) {
    timeSamples->push_back( TimingInfo(OpMultiPartyDecryptMain, TOC_US(t)) );
  }

  return newCiphertext;
}

/**
 * Final multiparty decryption method to fuse the partially decrypted ciphertexts into a decrypted plaintext.
 * The lead multiparty decryption operation should be performed by exactly one of the clients.
 * All other clients should perform the MultipartyDecryptMain operation.
 * @param partialCiphertextVec - vector of partially decrypted ciphertexts.
 * @param plaintext - pointer to destination for the result of decryption
 * @return result of the decryption (isValid flag etc.)
 */
DecryptResult MultipartyDecryptFusion(
    const vector<Ciphertext<Element>>& partialCiphertextVec,
    Plaintext *plaintext) const {

  DecryptResult result;

  // Make sure we're processing ciphertexts.
  size_t last_ciphertext = partialCiphertextVec.size();
  if ( last_ciphertext < 1 )
    return result; // empty input: default (invalid) result

  TimeVar t;
  if( doTiming ) TIC(t);

  // All partial decryptions must come from this context and share one encoding.
  for( size_t i = 0; i < last_ciphertext; i++ ) {
    if (partialCiphertextVec[i] == NULL || Mismatched(partialCiphertextVec[i]->GetCryptoContext()))
      PALISADE_THROW(config_error, "A ciphertext passed to MultipartyDecryptFusion was not generated with this crypto context");
    if (partialCiphertextVec[i]->GetEncodingType() != partialCiphertextVec[0]->GetEncodingType())
      PALISADE_THROW(type_error, "Ciphertexts passed to MultipartyDecryptFusion have mismatched encoding types");
  }

  // determine which type of plaintext that you need to decrypt into
  Plaintext decrypted = GetPlaintextForDecrypt(partialCiphertextVec[0]->GetEncodingType(),
      partialCiphertextVec[0]->GetElements()[0].GetParams(), this->GetEncodingParams());

  // CKKS over a multi-limb element decrypts into a Poly; everything else
  // fuses into a NativePoly.
  if ((partialCiphertextVec[0]->GetEncodingType() == CKKSPacked) && (typeid(Element) != typeid(NativePoly)))
    result = GetEncryptionAlgorithm()->MultipartyDecryptFusion(partialCiphertextVec, &decrypted->GetElement<Poly>());
  else
    result = GetEncryptionAlgorithm()->MultipartyDecryptFusion(partialCiphertextVec, &decrypted->GetElement<NativePoly>());

  if (result.isValid == false)
    return result;

  if (partialCiphertextVec[0]->GetEncodingType() == CKKSPacked){
    // CKKS decoding needs the depth/scaling factor and the rescaling
    // technique from the CKKS crypto parameters.
    shared_ptr<CKKSPackedEncoding> decryptedCKKS = std::dynamic_pointer_cast<CKKSPackedEncoding>(decrypted);
    const shared_ptr<LPCryptoParametersCKKS<DCRTPoly>> cryptoParamsCKKS =
        std::dynamic_pointer_cast<LPCryptoParametersCKKS<DCRTPoly>>(this->GetCryptoParameters());
    decryptedCKKS->Decode(partialCiphertextVec[0]->GetDepth(),
        partialCiphertextVec[0]->GetScalingFactor(),
        cryptoParamsCKKS->GetRescalingTechnique());
  }
  else
    decrypted->Decode();

  *plaintext = decrypted;

  if( doTiming ) {
    timeSamples->push_back( TimingInfo(OpMultiPartyDecryptFusion, TOC_US(t)) );
  }
  return result;
}

/**
 * SparseKeyGen generates a key pair with special structure, and without full entropy,
 * for use in special cases like Ring
Reduction * @return a public/secret key pair */ LPKeyPair<Element> SparseKeyGen() { TimeVar t; if( doTiming ) TIC(t); auto r = GetEncryptionAlgorithm()->KeyGen(CryptoContextFactory<Element>::GetContextForPointer(this), true); if( doTiming ) { timeSamples->push_back( TimingInfo(OpSparseKeyGen, TOC_US(t)) ); } return r; } /** * ReKeyGen produces an Eval Key that PALISADE can use for Proxy Re Encryption * @param newKey (public) * @param oldKey (private) * @return new evaluation key */ LPEvalKey<Element> ReKeyGen( const LPPublicKey<Element> newKey, const LPPrivateKey<Element> oldKey) const { if( newKey == NULL || oldKey == NULL || Mismatched(newKey->GetCryptoContext()) || Mismatched(oldKey->GetCryptoContext()) ) PALISADE_THROW(config_error, "Keys passed to ReKeyGen were not generated with this crypto context"); TimeVar t; if( doTiming ) TIC(t); auto r = GetEncryptionAlgorithm()->ReKeyGen(newKey, oldKey); if( doTiming ) { timeSamples->push_back( TimingInfo(OpReKeyGenPubPri, TOC_US(t)) ); } return r; } /** * ReKeyGen produces an Eval Key that PALISADE can use for Proxy Re Encryption * NOTE this functionality has been completely removed from PALISADE * @param newKey (private) * @param oldKey (private) * @return new evaluation key */ LPEvalKey<Element> ReKeyGen( const LPPrivateKey<Element> newKey, const LPPrivateKey<Element> oldKey) const __attribute__ ((deprecated("functionality removed from PALISADE"))); /** * EvalMultKeyGen creates a key that can be used with the PALISADE EvalMult operator * @param key * @return new evaluation key */ void EvalMultKeyGen(const LPPrivateKey<Element> key); /** * EvalMultsKeyGen creates a vector evalmult keys that can be used with the PALISADE EvalMult operator * 1st key (for s^2) is used for multiplication of ciphertexts of depth 1 * 2nd key (for s^3) is used for multiplication of ciphertexts of depth 2, etc. 
*
 * @param key
 * @return a vector of evaluation keys
 */
void EvalMultKeysGen(const LPPrivateKey<Element> key);

/**
 * GetEvalMultKeyVector fetches the eval mult keys for a given KeyID
 * @param keyID
 * @return key vector from ID
 */
static const vector<LPEvalKey<Element>>& GetEvalMultKeyVector(const string& keyID);

/**
 * GetEvalMultKeys
 * @return map of all the keys
 */
static const std::map<string,std::vector<LPEvalKey<Element>>>& GetAllEvalMultKeys();

/**
 * KeySwitchGen creates a key that can be used with the PALISADE KeySwitch operation
 * @param key1
 * @param key2
 * @return new evaluation key
 */
LPEvalKey<Element> KeySwitchGen(
    const LPPrivateKey<Element> key1, const LPPrivateKey<Element> key2) const {
  if( key1 == NULL || key2 == NULL ||
      Mismatched(key1->GetCryptoContext()) ||
      Mismatched(key2->GetCryptoContext()) )
    PALISADE_THROW(config_error, "Keys passed to KeySwitchGen were not generated with this crypto context");

  TimeVar t;
  if( doTiming ) TIC(t);
  auto r = GetEncryptionAlgorithm()->KeySwitchGen(key1, key2);
  if( doTiming ) {
    timeSamples->push_back( TimingInfo(OpKeySwitchGen, TOC_US(t)) );
  }
  return r;
}

/**
 * Encrypt a plaintext using a given public key
 * NOTE(review): unlike the private-key overload below, this overload is not
 * declared const — confirm whether that is intentional.
 * @param publicKey
 * @param plaintext
 * @return ciphertext (or null on failure)
 */
Ciphertext<Element> Encrypt(
    const LPPublicKey<Element> publicKey,
    Plaintext plaintext) {
  if( publicKey == NULL )
    PALISADE_THROW(type_error, "null key passed to Encrypt");
  if( plaintext == NULL )
    PALISADE_THROW(type_error, "null plaintext passed to Encrypt");
  if( Mismatched(publicKey->GetCryptoContext()) )
    PALISADE_THROW(config_error, "key passed to Encrypt was not generated with this crypto context");

  TimeVar t;
  if( doTiming ) TIC(t);
  Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt(publicKey, plaintext->GetElement<Element>());

  // Propagate the plaintext's encoding metadata onto the new ciphertext.
  if (ciphertext) {
    ciphertext->SetEncodingType( plaintext->GetEncodingType() );
    ciphertext->SetScalingFactor( plaintext->GetScalingFactor() );
    ciphertext->SetDepth( plaintext->GetDepth() );
    ciphertext->SetLevel( plaintext->GetLevel() );
  }

  if( doTiming ) {
    timeSamples->push_back( TimingInfo(OpEncryptPub, TOC_US(t)) );
  }
  return ciphertext;
}

/**
 * Encrypt a plaintext using a given private key
 * @param privateKey
 * @param plaintext
 * @return ciphertext (or null on failure)
 */
Ciphertext<Element> Encrypt(
    const LPPrivateKey<Element> privateKey,
    Plaintext plaintext) const {
  if( privateKey == NULL || Mismatched(privateKey->GetCryptoContext()) )
    PALISADE_THROW(config_error, "key passed to Encrypt was not generated with this crypto context");
  if( plaintext == NULL )
    PALISADE_THROW(type_error, "null plaintext passed to Encrypt");

  TimeVar t;
  if( doTiming ) TIC(t);
  Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt(privateKey, plaintext->GetElement<Element>());

  // Propagate the plaintext's encoding metadata onto the new ciphertext.
  if (ciphertext) {
    ciphertext->SetEncodingType( plaintext->GetEncodingType() );
    ciphertext->SetScalingFactor( plaintext->GetScalingFactor() );
    ciphertext->SetDepth( plaintext->GetDepth() );
    ciphertext->SetLevel( plaintext->GetLevel() );
  }

  if( doTiming ) {
    timeSamples->push_back( TimingInfo(OpEncryptPriv, TOC_US(t)) );
  }
  return ciphertext;
}

/**
 * Encrypt a matrix of Plaintext
 * @param publicKey - for encryption
 * @param plaintext - to encrypt
 * @return a matrix of RationalCiphertexts created by encrypting the plaintext,
 * or a null shared_ptr if any entry fails to encode
 */
shared_ptr<Matrix<RationalCiphertext<Element>>> EncryptMatrix(
    const LPPublicKey<Element> publicKey,
    Matrix<Plaintext> &plaintext) {
  if (publicKey == NULL || Mismatched(publicKey->GetCryptoContext()))
    PALISADE_THROW(config_error, "key passed to EncryptMatrix was not generated with this crypto context");

  // Allocator producing zero rational ciphertexts bound to this context.
  auto zeroAlloc = [=]() { return RationalCiphertext<Element>(publicKey->GetCryptoContext(), true); };

  shared_ptr<Matrix<RationalCiphertext<Element>>> cipherResults(new Matrix<RationalCiphertext<Element>> (zeroAlloc, plaintext.GetRows(), plaintext.GetCols()));

  TimeVar t;
  if( doTiming ) TIC(t);

  for (size_t row = 0; row < plaintext.GetRows(); row++) {
    for (size_t col = 0; col < plaintext.GetCols(); col++) {
      if( plaintext(row,col)->Encode() == false )
        return 0; // NOTE: null shared_ptr signals an encoding failure

      Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt(publicKey, plaintext(row,col)->GetElement<Element>());

      if (ciphertext) {
        ciphertext->SetEncodingType( plaintext(row,col)->GetEncodingType() );
      }

      (*cipherResults)(row, col).SetNumerator(ciphertext);
    }
  }

  if( doTiming ) {
    timeSamples->push_back( TimingInfo(OpEncryptMatrixPlain, TOC_US(t)) );
  }
  return cipherResults;
}

/**
 * Encrypt a matrix of Plaintext
 * @param publicKey - for encryption
 * @param plaintext - to encrypt
 * @return a matrix of Ciphertexts created by encrypting the plaintext
 * @throws math_error if any plaintext entry fails to encode
 */
Matrix<Ciphertext<Element>> EncryptMatrixCiphertext(
    const LPPublicKey<Element> publicKey,
    Matrix<Plaintext> &plaintext) {
  if (publicKey == NULL || Mismatched(publicKey->GetCryptoContext()))
    PALISADE_THROW(config_error, "key passed to EncryptMatrix was not generated with this crypto context");

  // Allocator producing empty ciphertexts bound to this context.
  auto zeroAlloc = [=]() { return Ciphertext<Element>(new CiphertextImpl<Element>(publicKey->GetCryptoContext())); };

  Matrix<Ciphertext<Element>> cipherResults(zeroAlloc, plaintext.GetRows(), plaintext.GetCols());

  TimeVar t;
  if( doTiming ) TIC(t);

  for (size_t row = 0; row < plaintext.GetRows(); row++) {
    for (size_t col = 0; col < plaintext.GetCols(); col++) {
      if( plaintext(row,col)->Encode() == false )
        PALISADE_THROW(math_error, "Plaintext is not encoded");

      Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt(publicKey, plaintext(row,col)->GetElement<Element>());

      if (ciphertext) {
        ciphertext->SetEncodingType( plaintext(row,col)->GetEncodingType() );
      }

      cipherResults(row, col) = (ciphertext);
    }
  }

  if( doTiming ) {
    // NOTE(review): logs the same OpEncryptMatrixPlain op as EncryptMatrix.
    timeSamples->push_back( TimingInfo(OpEncryptMatrixPlain, TOC_US(t)) );
  }
  return cipherResults;
}

/**
 * Perform an
encryption by reading plaintext from a stream, serializing each piece of ciphertext,
* and writing the serializations to an output stream
* @param publicKey - the encryption key in use
* @param instream - where to read the input from
* @param outstream - where to write the serialization to
*/
void EncryptStream(
		const LPPublicKey<Element> publicKey,
		std::istream& instream,
		std::ostream& outstream) const
				__attribute__ ((deprecated("serialization changed, see wiki for details")));

// PLAINTEXT FACTORY METHODS
// FIXME to be deprecated in 2.0

/**
* MakeScalarPlaintext constructs a ScalarEncoding in this context
* @param value scalar to encode
* @return plaintext
*/
Plaintext MakeScalarPlaintext(int64_t value) const {
	auto p = PlaintextFactory::MakePlaintext( Scalar, this->GetElementParams(), this->GetEncodingParams(), value );
	return p;
}

/**
* MakeStringPlaintext constructs a StringEncoding in this context
* @param str string to encode
* @return plaintext
*/
Plaintext MakeStringPlaintext(const string& str) const {
	auto p = PlaintextFactory::MakePlaintext( String, this->GetElementParams(), this->GetEncodingParams(), str );
	return p;
}

/**
* MakeIntegerPlaintext constructs an IntegerEncoding in this context
* @param value integer to encode
* @return plaintext
*/
Plaintext MakeIntegerPlaintext(int64_t value) const {
	auto p = PlaintextFactory::MakePlaintext( Integer, this->GetElementParams(), this->GetEncodingParams(), value );
	return p;
}

/**
* MakeFractionalPlaintext constructs a FractionalEncoding in this context
* @param value integer part to encode
* @param truncatedBits limit on fractional bits (0 = no truncation)
* @return plaintext
*/
Plaintext MakeFractionalPlaintext(int64_t value, size_t truncatedBits = 0) const {
	auto p = PlaintextFactory::MakePlaintext( Fractional, this->GetElementParams(), this->GetEncodingParams(), value, truncatedBits );
	return p;
}

/**
* MakeCoefPackedPlaintext constructs a CoefPackedEncoding in this context
* @param value coefficient vector to encode
* @return plaintext
*/
Plaintext MakeCoefPackedPlaintext(const vector<int64_t>& value) const {
	auto p = PlaintextFactory::MakePlaintext( CoefPacked, this->GetElementParams(), this->GetEncodingParams(), value );
	return p;
}

/**
* MakePackedPlaintext constructs a PackedEncoding in this context
* @param value slot values to encode
* @return plaintext
*/
Plaintext MakePackedPlaintext(const vector<int64_t>& value) const {
	auto p = PlaintextFactory::MakePlaintext( Packed, this->GetElementParams(), this->GetEncodingParams(), value );
	return p;
}

/**
* MakePlaintext static that takes a cc and calls the Plaintext Factory
* @param encoding which encoding to construct
* @param cc context supplying element and encoding parameters
* @param value payload to encode
* @return plaintext
*/
template<typename Value1>
static Plaintext MakePlaintext(PlaintextEncodings encoding, CryptoContext<Element> cc, const Value1& value) {
	return PlaintextFactory::MakePlaintext( encoding, cc->GetElementParams(), cc->GetEncodingParams(), value );
}

// Two-argument payload variant of the static factory above.
template<typename Value1, typename Value2>
static Plaintext MakePlaintext(PlaintextEncodings encoding, CryptoContext<Element> cc, const Value1& value, const Value2& value2) {
	return PlaintextFactory::MakePlaintext( encoding, cc->GetElementParams(), cc->GetEncodingParams(), value, value2 );
}

/**
* MakeCKKSPackedPlaintext constructs a CKKSPackedEncoding in this context
* @param value complex slot values to encode
* @param depth multiplicative depth tag for the encoding (default 1)
* @param level number of RNS towers already dropped (default 0)
* @param params optional element parameters overriding the context's own
* @return plaintext (already Encode()d)
*/
Plaintext MakeCKKSPackedPlaintext(const std::vector<std::complex<double>> &value, size_t depth=1, uint32_t level=0, const shared_ptr<typename Element::Params> params=nullptr) const {
	Plaintext p;
	// NOTE(review): the cast below is not null-checked; calling this on a
	// non-CKKS context would dereference a null pointer — TODO confirm callers
	// only reach this with CKKS parameters.
	const shared_ptr<LPCryptoParametersCKKS<DCRTPoly>> cryptoParamsCKKS =
			std::dynamic_pointer_cast<LPCryptoParametersCKKS<DCRTPoly>>(this->GetCryptoParameters());

	double ptxtMod = cryptoParamsCKKS->GetEncodingParams()->GetPlaintextModulus();
	double scFact = 1.0;
	// EXACTRESCALE uses a per-level scaling factor; otherwise the factor is 2^ptxtMod.
	if (cryptoParamsCKKS->GetRescalingTechnique() == EXACTRESCALE) {
		scFact = cryptoParamsCKKS->GetScalingFactorOfLevel(level);
	} else {
		scFact = pow(2, ptxtMod);
	}

	if (params == nullptr) {
shared_ptr<ILDCRTParams<DCRTPoly::Integer>> elemParamsPtr;
		// Drop `level` towers from the context's element parameters so the
		// plaintext matches a ciphertext that has already been level-reduced.
		if (level != 0) {
			ILDCRTParams<DCRTPoly::Integer> elemParams = *(cryptoParamsCKKS->GetElementParams());
			for (uint32_t i=0; i<level; i++) {
				elemParams.PopLastParam();
			}
			elemParamsPtr = std::make_shared<ILDCRTParams<DCRTPoly::Integer>>(elemParams);
		} else {
			elemParamsPtr = cryptoParamsCKKS->GetElementParams();
		}

		p = Plaintext( new CKKSPackedEncoding( elemParamsPtr, this->GetEncodingParams(), value, depth, level, scFact) );
	} else
		// Caller supplied explicit element parameters; use them as-is.
		p = Plaintext( new CKKSPackedEncoding( params, this->GetEncodingParams(), value, depth, level, scFact) );

	p->Encode();
	return p;
}

/**
* GetPlaintextForDecrypt returns a new Plaintext to be used in decryption.
*
* @param pte Type of plaintext we want to return
* @param evp Element parameters
* @param ep Encoding parameters
* @return plaintext
*/
static Plaintext GetPlaintextForDecrypt(PlaintextEncodings pte, shared_ptr<typename Element::Params> evp, EncodingParams ep) {

	// Native (single-limb) parameters with the same cyclotomic order; used
	// whenever the decrypted result fits in a NativePoly.
	shared_ptr<typename NativePoly::Params> vp( new typename NativePoly::Params(evp->GetCyclotomicOrder(), ep->GetPlaintextModulus(), 1) );

	Plaintext tempPlaintext;

	if (pte == CKKSPacked) {
		// CKKS: fall back to multiprecision params when the modulus is too
		// wide for a native integer.
		if (evp->GetModulus().GetMSB() < MAX_MODULUS_SIZE + 1)
			tempPlaintext = PlaintextFactory::MakePlaintext(pte, vp, ep);
		else
			tempPlaintext = PlaintextFactory::MakePlaintext(pte, evp, ep);
	}
	else
		tempPlaintext = PlaintextFactory::MakePlaintext(pte, vp, ep);

	return tempPlaintext;
}

public:

/**
* Decrypt a single ciphertext into the appropriate plaintext
*
* @param privateKey - decryption key
* @param ciphertext - ciphertext to decrypt
* @param plaintext - resulting plaintext object pointer is here
* @return decryption result (isValid == false on failure)
*/
DecryptResult Decrypt(
		const LPPrivateKey<Element> privateKey,
		ConstCiphertext<Element> ciphertext,
		Plaintext* plaintext)
{
	if( privateKey == NULL || Mismatched(privateKey->GetCryptoContext()) )
		PALISADE_THROW(config_error, "Information passed to Decrypt was not generated with this crypto context");

	TimeVar t;
	if( doTiming ) TIC(t);

	// determine which type of plaintext that you need to decrypt into
	//Plaintext decrypted = GetPlaintextForDecrypt(ciphertext->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams());
	// Use the ciphertext's own element parameters (it may have fewer towers
	// than the context after level reductions).
	Plaintext decrypted = GetPlaintextForDecrypt(ciphertext->GetEncodingType(), ciphertext->GetElements()[0].GetParams(), this->GetEncodingParams());

	DecryptResult result;

	// CKKS over a multiprecision/DCRT element may need a Poly target when the
	// modulus exceeds the native word; everything else decrypts into NativePoly.
	if ((ciphertext->GetEncodingType() == CKKSPacked) && (typeid(Element) != typeid(NativePoly))) {
		if (typeid(Element) == typeid(DCRTPoly)) {
			if (ciphertext->GetElements()[0].GetModulus().GetMSB() < MAX_MODULUS_SIZE + 1)
				// only one tower in DCRTPoly
				result = GetEncryptionAlgorithm()->Decrypt(privateKey, ciphertext, &decrypted->GetElement<NativePoly>());
			else
				result = GetEncryptionAlgorithm()->Decrypt(privateKey, ciphertext, &decrypted->GetElement<Poly>());
		} else
			result = GetEncryptionAlgorithm()->Decrypt(privateKey, ciphertext, &decrypted->GetElement<Poly>());
	} else
		result = GetEncryptionAlgorithm()->Decrypt(privateKey, ciphertext, &decrypted->GetElement<NativePoly>());

	if (result.isValid == false)
		return result;

	if (ciphertext->GetEncodingType() == CKKSPacked){
		// CKKS decoding needs depth/level/scaling-factor metadata from the ciphertext.
		shared_ptr<CKKSPackedEncoding> decryptedCKKS = std::dynamic_pointer_cast<CKKSPackedEncoding>(decrypted);
		decryptedCKKS->SetDepth(ciphertext->GetDepth());
		decryptedCKKS->SetLevel(ciphertext->GetLevel());
		decryptedCKKS->SetScalingFactor(ciphertext->GetScalingFactor());

		const shared_ptr<LPCryptoParametersCKKS<DCRTPoly>> cryptoParamsCKKS =
				std::dynamic_pointer_cast<LPCryptoParametersCKKS<DCRTPoly>>(this->GetCryptoParameters());

		decryptedCKKS->Decode(ciphertext->GetDepth(), ciphertext->GetScalingFactor(), cryptoParamsCKKS->GetRescalingTechnique());

	} else
		decrypted->Decode();

	if( doTiming ) {
		timeSamples->push_back( TimingInfo(OpDecrypt, TOC_US(t)) );
	}

	*plaintext = decrypted;
	return result;
}

/**
* Decrypt method for a matrix of ciphertexts
* @param privateKey - for decryption
* @param ciphertext - matrix of encrypted ciphertexts
* @param plaintext -
pointer to the destination matrix of plaintexts
* @return size of plaintext
*/
DecryptResult DecryptMatrix(
		const LPPrivateKey<Element> privateKey,
		const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext,
		shared_ptr<Matrix<Plaintext>> *numerator,
		shared_ptr<Matrix<Plaintext>> *denominator) const
{
	// edge case
	if ((ciphertext->GetCols()== 0) && (ciphertext->GetRows() == 0))
		return DecryptResult();

	if (privateKey == NULL || Mismatched(privateKey->GetCryptoContext()))
		PALISADE_THROW(config_error, "Information passed to DecryptMatrix was not generated with this crypto context");

	const Ciphertext<Element> ctN = (*ciphertext)(0, 0).GetNumerator();

	// need to build matrices for the result
	// NOTE(review): the allocator copies the shared_ptr `ptx`, so every matrix
	// entry initially aliases the SAME plaintext object until overwritten below.
	Plaintext ptx = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams());
	auto zeroPackingAlloc = [=]() { return Plaintext(ptx); };
	*numerator = shared_ptr<Matrix<Plaintext>>( new Matrix<Plaintext>(zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols()) );
	*denominator = shared_ptr<Matrix<Plaintext>>( new Matrix<Plaintext>(zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols()) );

	TimeVar t;
	if( doTiming ) TIC(t);

	for (size_t row = 0; row < ciphertext->GetRows(); row++)
	{
		for (size_t col = 0; col < ciphertext->GetCols(); col++)
		{
			if (Mismatched((*ciphertext)(row, col).GetCryptoContext()))
				PALISADE_THROW(config_error, "A ciphertext passed to DecryptMatrix was not generated with this crypto context");

			const Ciphertext<Element> ctN = (*ciphertext)(row, col).GetNumerator();

			// determine which type of plaintext that you need to decrypt into
			Plaintext decryptedNumerator = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams());
			DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>());

			if (resultN.isValid == false) return resultN;

			(**numerator)(row,col) = decryptedNumerator;
			(**numerator)(row,col)->Decode();

			Plaintext decryptedDenominator = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams());
			if( (*ciphertext)(row,col).GetIntegerFlag() == true ) {
				// Integer entries have an implicit denominator of 1.
				// NOTE(review): in this branch `decryptedDenominator` is built but
				// never stored into (**denominator), so the aliased allocator
				// plaintext is what gets Decode()d below — TODO confirm intended.
				decryptedDenominator->GetElement<Poly>().SetValuesToZero();
				decryptedDenominator->GetElement<Poly>().at(0) = 1;
			}
			else {
				const Ciphertext<Element> ctD = (*ciphertext)(row, col).GetDenominator();
				DecryptResult resultD = GetEncryptionAlgorithm()->Decrypt(privateKey, ctD, &decryptedDenominator->GetElement<NativePoly>());

				if (resultD.isValid == false) return resultD;

				(**denominator)(row,col) = decryptedDenominator;
			}
			(**denominator)(row, col)->Decode();
		}
	}

	if( doTiming ) {
		timeSamples->push_back( TimingInfo(OpDecryptMatrixPlain, TOC_US(t)) );
	}

	return DecryptResult((**numerator)((*numerator)->GetRows()-1,(*numerator)->GetCols()-1)->GetLength());
}

/**
* Decrypt method for a matrix of ciphertexts
* @param privateKey - for decryption
* @param ciphertext - matrix of encrypted ciphertexts
* @param numerator - pointer to the destination matrix of plaintexts
* @return size of plaintext
*/
DecryptResult DecryptMatrixCiphertext(
		const LPPrivateKey<Element> privateKey,
		const Matrix<Ciphertext<Element>> ciphertext,
		Matrix<Plaintext> *numerator) const
{
	// edge case
	if ((ciphertext.GetCols()== 0) && (ciphertext.GetRows() == 0))
		return DecryptResult();

	if (privateKey == NULL || Mismatched(privateKey->GetCryptoContext()))
		PALISADE_THROW(config_error, "Information passed to DecryptMatrix was not generated with this crypto context");

	const Ciphertext<Element> ctN = (ciphertext)(0, 0);

	// need to build matrices for the result
	// (left as history: the caller is expected to have allocated *numerator)
	// Plaintext ptx = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams());
	// auto zeroPackingAlloc = [=]() { return Plaintext(ptx); };
	// numerator = new Matrix<Plaintext>(zeroPackingAlloc, ciphertext.GetRows(), ciphertext.GetCols());

	TimeVar t;
	if( doTiming ) TIC(t);

	for (size_t row = 0; row < ciphertext.GetRows(); row++)
	{
		for (size_t col = 0; col < ciphertext.GetCols(); col++)
		{
			if (Mismatched( (ciphertext(row, col))->GetCryptoContext() ))
				PALISADE_THROW(config_error, "A ciphertext passed to DecryptMatrix was not generated with this crypto context");

			const Ciphertext<Element> ctN = (ciphertext)(row, col);

			// determine which type of plaintext that you need to decrypt into
			Plaintext decryptedNumerator = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams());
			DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>());

			if (resultN.isValid == false) return resultN;

			(*numerator)(row,col) = decryptedNumerator;
			(*numerator)(row,col)->Decode();
		}
	}

	if( doTiming ) {
		timeSamples->push_back( TimingInfo(OpDecryptMatrixPlain, TOC_US(t)) );
	}

	return DecryptResult((*numerator)( numerator->GetRows()-1, numerator->GetCols()-1)->GetLength());
}

/**
* Decrypt method for numerators in a matrix of ciphertexts (packed encoding)
* @param privateKey - for decryption
* @param ciphertext - matrix of encrypted ciphertexts
* @param numerator - pointer to the destination matrix of plaintexts
* @return size of plaintext
*/
DecryptResult DecryptMatrixNumerator(
		const LPPrivateKey<Element> privateKey,
		const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext,
		shared_ptr<Matrix<Plaintext>> *numerator) const
{
	// edge case
	if ((ciphertext->GetCols() == 0) && (ciphertext->GetRows() == 0))
		return DecryptResult();

	if (privateKey == NULL || Mismatched(privateKey->GetCryptoContext()))
		PALISADE_THROW(config_error, "Information passed to DecryptMatrix was not generated with this crypto context");

	TimeVar t;
	if (doTiming) TIC(t);

	//force all precomputations to take place in advance
	if( Mismatched((*ciphertext)(0, 0).GetCryptoContext()) )
		PALISADE_THROW(config_error, "A ciphertext passed to DecryptMatrix was not generated with this crypto context");

	const Ciphertext<Element> ctN = (*ciphertext)(0, 0).GetNumerator();

	// need to build a numerator matrix for the result
	Plaintext ptx = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams());
	auto zeroPackingAlloc = [=]() { return Plaintext(ptx); };
	*numerator = shared_ptr<Matrix<Plaintext>>( new Matrix<Plaintext>(zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols()) );

	// Entry (0,0) is decrypted serially (this also triggers the precomputations).
	Plaintext decryptedNumerator = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams());
	DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>());

	if (resultN.isValid == false) return resultN;

	(**numerator)(0, 0) = decryptedNumerator;
	(**numerator)(0, 0)->Decode();

	for (size_t row = 0; row < ciphertext->GetRows(); row++)
	{
#pragma omp parallel for
		for (size_t col = 0; col < ciphertext->GetCols(); col++)
		{
			if (row + col > 0)
			{
				// NOTE(review): PALISADE_THROW inside an OpenMP parallel region
				// would propagate an exception out of a worker thread (typically
				// std::terminate), and the DecryptResult of the Decrypt call below
				// is discarded — TODO confirm this is acceptable here.
				if( Mismatched((*ciphertext)(row, col).GetCryptoContext()) )
					PALISADE_THROW(config_error, "A ciphertext passed to DecryptMatrix was not generated with this crypto context");

				const Ciphertext<Element> ctN = (*ciphertext)(row, col).GetNumerator();

				Plaintext decryptedNumerator = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams());
				GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>());

				(**numerator)(row, col) = decryptedNumerator;
				(**numerator)(row, col)->Decode();
			}
		}
	}

	if (doTiming) {
		timeSamples->push_back(TimingInfo(OpDecryptMatrixPacked, TOC_US(t)));
	}

	return DecryptResult((**numerator)((*numerator)->GetRows() - 1, (*numerator)->GetCols() - 1)->GetLength());
}

/**
* read instream for a sequence of serialized ciphertext; deserialize it, decrypt it, and write it to outstream
* @param privateKey - reference to the decryption key
* @param instream - input stream with sequence of serialized ciphertexts
* @param outstream - output stream for plaintext
* @return total bytes processed
*/
size_t DecryptStream(
const LPPrivateKey<Element> privateKey,
		std::istream& instream,
		std::ostream& outstream)
				__attribute__ ((deprecated("serialization changed, see wiki for details")));

/**
* ReEncrypt - Proxy Re Encryption mechanism for PALISADE
* @param evalKey - evaluation key from the PRE keygen method
* @param ciphertext - vector of shared pointers to encrypted Ciphertext
* @param publicKey the public key of the recipient of the re-encrypted ciphertext.
* @return vector of shared pointers to re-encrypted ciphertexts
*/
Ciphertext<Element> ReEncrypt(
		LPEvalKey<Element> evalKey,
		ConstCiphertext<Element> ciphertext,
		const LPPublicKey<Element> publicKey = nullptr) const
{
	// Both the evaluation key and the ciphertext must belong to this context.
	if( evalKey == NULL || Mismatched(evalKey->GetCryptoContext()) )
		PALISADE_THROW(config_error, "Information passed to ReEncrypt was not generated with this crypto context");

	if( ciphertext == NULL || Mismatched(ciphertext->GetCryptoContext()) )
		PALISADE_THROW(config_error, "The ciphertext passed to ReEncrypt was not generated with this crypto context");

	TimeVar t;
	if( doTiming ) TIC(t);

	Ciphertext<Element> newCiphertext = GetEncryptionAlgorithm()->ReEncrypt(evalKey, ciphertext, publicKey);

	if( doTiming ) {
		timeSamples->push_back( TimingInfo(OpReEncrypt, TOC_US(t)) );
	}

	return newCiphertext;
}

/**
* read instream for a serialized ciphertext. deserialize, re-encrypt, serialize, and write to outstream
* @param evalKey - reference to the re-encryption key
* @param instream - input stream with sequence of serialized ciphertext
* @param outstream - output stream with sequence of serialized re-encrypted ciphertext
*/
void ReEncryptStream(
		const LPEvalKey<Element> evalKey,
		std::istream& instream,
		std::ostream& outstream,
		const LPPublicKey<Element> publicKey = nullptr)
				__attribute__ ((deprecated("serialization changed, see wiki for details")));

/**
* EvalAdd - PALISADE EvalAdd method for a pair of ciphertexts
* @param ct1
* @param ct2
* @return new ciphertext for ct1 + ct2
*/
Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const
{
	// TypeCheck verifies both operands belong to this context and share an encoding.
	TypeCheck(ct1, ct2);

	TimeVar t;
	if( doTiming ) TIC(t);

	auto rv = GetEncryptionAlgorithm()->EvalAdd(ct1, ct2);

	if( doTiming ) {
		timeSamples->push_back( TimingInfo(OpEvalAdd, TOC_US(t)) );
	}
	return rv;
}

/**
* EvalAdd - PALISADE EvalAddMutable method for a pair of ciphertexts.
* This is a mutable version - input ciphertexts may get automatically
* rescaled, or level-reduced.
*
* @param ct1
* @param ct2
* @return new ciphertext for ct1 + ct2
*/
Ciphertext<Element> EvalAddMutable(Ciphertext<Element> &ct1, Ciphertext<Element> &ct2) const
{
	TypeCheck(ct1, ct2);

	TimeVar t;
	if( doTiming ) TIC(t);

	auto rv = GetEncryptionAlgorithm()->EvalAddMutable(ct1, ct2);

	if( doTiming ) {
		timeSamples->push_back( TimingInfo(OpEvalAdd, TOC_US(t)) );
	}
	return rv;
}

/**
* EvalAddMatrix - PALISADE EvalAdd method for a pair of matrices of ciphertexts
* @param ct1
* @param ct2
* @return new matrix for ct1 + ct2
*/
shared_ptr<Matrix<RationalCiphertext<Element>>> EvalAddMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const
{
	TypeCheck((*ct1)(0,0), (*ct2)(0,0)); // TODO only checking one; when Matrix is refactored, this should be revisited

	TimeVar t;
	if( doTiming ) TIC(t);

	// Element-wise addition delegated to Matrix's operator+.
	Matrix<RationalCiphertext<Element>> rv = *ct1 + *ct2;

	if( doTiming ) {
		timeSamples->push_back( TimingInfo(OpEvalAddMatrix, TOC_US(t)) );
	}
	shared_ptr<Matrix<RationalCiphertext<Element>>> a(new Matrix<RationalCiphertext<Element>>(rv));
	return a;
}

/**
* EvalAddMatrix - PALISADE EvalAdd method for a pair of matrices of ciphertexts
* @param ct1
* @param ct2
* @return new matrix for ct1 + ct2
*/
Matrix<Ciphertext<Element>> EvalAddMatrix(const Matrix<Ciphertext<Element>> &ct1, const Matrix<Ciphertext<Element>> &ct2) const
{
	TypeCheck(ct1(0,0), ct2(0,0)); // TODO only checking one; when Matrix is refactored, this should be revisited

	TimeVar t;
	if( doTiming ) TIC(t);

	Matrix<Ciphertext<Element>> rv = ct1 + ct2;

	if( doTiming ) {
		timeSamples->push_back( TimingInfo(OpEvalAddMatrix, TOC_US(t)) );
	}
	// Matrix<Ciphertext<Element>> a(rv);
	return rv;
}

/**
* EvalSub - PALISADE EvalSub method for a pair of ciphertexts
* @param ct1
* @param ct2
* @return new ciphertext for ct1 - ct2
*/
Ciphertext<Element> EvalSub(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const
{
	TypeCheck(ct1, ct2);

	TimeVar t;
	if( doTiming ) TIC(t);

	auto rv = GetEncryptionAlgorithm()->EvalSub(ct1, ct2);

	if( doTiming ) {
		timeSamples->push_back( TimingInfo(OpEvalSub, TOC_US(t)) );
	}
	return rv;
}

/**
* EvalSub - PALISADE EvalSubMutable method for a pair of ciphertexts
* This is a mutable version - input ciphertexts may get automatically
* rescaled, or level-reduced.
*
* @param ct1
* @param ct2
* @return new ciphertext for ct1 - ct2
*/
Ciphertext<Element> EvalSubMutable(Ciphertext<Element> &ct1, Ciphertext<Element> &ct2) const
{
	TypeCheck(ct1, ct2);

	TimeVar t;
	if( doTiming ) TIC(t);

	auto rv = GetEncryptionAlgorithm()->EvalSubMutable(ct1, ct2);

	if( doTiming ) {
		timeSamples->push_back( TimingInfo(OpEvalSub, TOC_US(t)) );
	}
	return rv;
}

/**
* EvalSubMatrix - PALISADE EvalSub method for a pair of matrices of ciphertexts
* @param ct1
* @param ct2
* @return new matrix for ct1 - ct2
*/
shared_ptr<Matrix<RationalCiphertext<Element>>> EvalSubMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const
{
	TypeCheck((*ct1)(0,0), (*ct2)(0,0)); // TODO only checking one; when Matrix is refactored, this should be revisited

	TimeVar t;
	if( doTiming ) TIC(t);

	Matrix<RationalCiphertext<Element>> rv = *ct1 - *ct2;

	if( doTiming ) {
		timeSamples->push_back( TimingInfo(OpEvalSubMatrix, TOC_US(t)) );
	}
	shared_ptr<Matrix<RationalCiphertext<Element>>> a(new Matrix<RationalCiphertext<Element>>(rv));
	return a;
}

/**
* EvalSubMatrix - PALISADE EvalSub method for a pair of matrices of ciphertexts
* @param ct1
* @param ct2
* @return new matrix for ct1 - ct2
*/
Matrix<Ciphertext<Element>> EvalSubMatrix(const Matrix<Ciphertext<Element>> &ct1, const Matrix<Ciphertext<Element>> &ct2) const
{
	TypeCheck(ct1(0,0), ct2(0,0)); // TODO only checking one; when Matrix is refactored, this should be revisited

	TimeVar t;
	if( doTiming ) TIC(t);

	Matrix<Ciphertext<Element>> rv = ct1 - ct2;

	if( doTiming ) {
		timeSamples->push_back( TimingInfo(OpEvalSubMatrix, TOC_US(t)) );
	}
// NOTE(review): this overload copies rv before returning, unlike the EvalAddMatrix
	// counterpart above which returns rv directly — presumably equivalent; verify.
	Matrix<Ciphertext<Element>> a(rv);
	return a;
}

/**
* EvalAdd - PALISADE EvalAdd method for a ciphertext and plaintext
* @param ciphertext
* @param plaintext
* @return new ciphertext for ciphertext + plaintext
*/
Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ciphertext, ConstPlaintext plaintext) const
{
	TypeCheck(ciphertext, plaintext);

	TimeVar t;
	if( doTiming ) TIC(t);

	// Plaintext must be in EVALUATION format before the homomorphic op.
	plaintext->SetFormat(EVALUATION);
	auto rv = GetEncryptionAlgorithm()->EvalAdd(ciphertext, plaintext);

	if( doTiming ) {
		timeSamples->push_back( TimingInfo(OpEvalAddPlain, TOC_US(t)) );
	}
	return rv;
}

/**
* EvalAdd - PALISADE EvalAddMutable method for a ciphertext and plaintext
* This is a mutable version - input ciphertexts may get automatically
* rescaled, or level-reduced.
*
* @param ciphertext
* @param plaintext
* @return new ciphertext for ciphertext + plaintext
*/
Ciphertext<Element> EvalAddMutable(Ciphertext<Element> &ciphertext, Plaintext plaintext) const
{
	TypeCheck((ConstCiphertext<Element>)ciphertext, (ConstPlaintext) plaintext);

	TimeVar t;
	if( doTiming ) TIC(t);

	plaintext->SetFormat(EVALUATION);
	auto rv = GetEncryptionAlgorithm()->EvalAddMutable(ciphertext, plaintext);

	if( doTiming ) {
		timeSamples->push_back( TimingInfo(OpEvalAddPlain, TOC_US(t)) );
	}
	return rv;
}

/**
* EvalAdd - PALISADE EvalAdd method for a ciphertext and constant
* @param ciphertext
* @param constant
* @return new ciphertext for ciphertext + constant
*/
Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ciphertext, double constant) const
{
	TimeVar t;
	Ciphertext<Element> rv;

	// Negative constants are handled as a subtraction of the absolute value.
	if ( constant >= 0 ) {
		if( doTiming ) TIC(t);
		rv = GetEncryptionAlgorithm()->EvalAdd(ciphertext, constant);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalAddConst, TOC_US(t)) );
		}
	}
	else {
		TimeVar t; // NOTE(review): shadows the outer t; harmless but redundant
		if( doTiming ) TIC(t);
		rv = GetEncryptionAlgorithm()->EvalSub(ciphertext, -constant);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalAddConst, TOC_US(t)) );
		}
	}
	return rv;
}

/**
* EvalLinearWSum - PALISADE EvalLinearWSum method to compute a linear weighted sum
*
* @param ciphertexts a list of ciphertexts
* @param constants a list of weights
* @return new ciphertext containing the weighted sum
*/
Ciphertext<Element> EvalLinearWSum( vector<Ciphertext<Element>> ciphertexts, vector<double> constants) const
{
	TimeVar t;
	if( doTiming ) TIC(t);

	auto rv = GetEncryptionAlgorithm()->EvalLinearWSum(ciphertexts, constants);

	if( doTiming ) {
		timeSamples->push_back( TimingInfo(OpEvalLinearWSum, TOC_US(t)) );
	}
	return rv;
}

/**
* EvalLinearWSum - method to compute a linear weighted sum.
* This is a mutable version, meaning the level/depth of input
* ciphertexts may change in the process.
*
* @param ciphertexts a list of ciphertexts
* @param constants a list of weights
* @return new ciphertext containing the weighted sum
*/
Ciphertext<Element> EvalLinearWSumMutable( vector<Ciphertext<Element>> ciphertexts, vector<double> constants) const
{
	TimeVar t;
	if( doTiming ) TIC(t);

	auto rv = GetEncryptionAlgorithm()->EvalLinearWSumMutable(ciphertexts, constants);

	if( doTiming ) {
		timeSamples->push_back( TimingInfo(OpEvalLinearWSum, TOC_US(t)) );
	}
	return rv;
}

// Convenience overloads that accept operands in the reverse order and forward
// to the canonical implementations above.
inline Ciphertext<Element> EvalLinearWSum(vector<double> constants, vector<Ciphertext<Element>> ciphertexts) const {
	return EvalLinearWSum(ciphertexts, constants);
}

inline Ciphertext<Element> EvalLinearWSumMutable(vector<double> constants, vector<Ciphertext<Element>> ciphertexts) const {
	return EvalLinearWSumMutable(ciphertexts, constants);
}

inline Ciphertext<Element> EvalAdd(ConstPlaintext plaintext, ConstCiphertext<Element> ciphertext) const {
	return EvalAdd(ciphertext, plaintext);
}

inline Ciphertext<Element> EvalAddMutable(Plaintext plaintext, Ciphertext<Element> &ciphertext) const {
	return EvalAddMutable(ciphertext, plaintext);
}

inline Ciphertext<Element> EvalAdd(double constant, ConstCiphertext<Element> ciphertext) const {
	return EvalAdd(ciphertext, constant);
}

/**
* EvalSubPlain - PALISADE EvalSub method for a ciphertext and plaintext
*
* @param ciphertext
* @param plaintext
* @return new ciphertext for ciphertext - plaintext
*/
	Ciphertext<Element> EvalSub(ConstCiphertext<Element> ciphertext, ConstPlaintext plaintext) const {
		TypeCheck(ciphertext, plaintext);

		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalSub(ciphertext, plaintext);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalSubPlain, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* EvalSubPlain - PALISADE EvalSubMutable method for a ciphertext and plaintext
	* This is a mutable version - input ciphertexts may get automatically
	* rescaled, or level-reduced.
	*
	* @param ciphertext
	* @param plaintext
	* @return new ciphertext for ciphertext - plaintext
	*/
	Ciphertext<Element> EvalSubMutable(Ciphertext<Element> &ciphertext, Plaintext plaintext) const {
		TypeCheck((ConstCiphertext<Element>)ciphertext, (ConstPlaintext) plaintext);

		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalSubMutable(ciphertext, plaintext);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalSubPlain, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* EvalSub - PALISADE EvalSub method for a ciphertext and constant
	* @param ciphertext
	* @param constant
	* @return new ciphertext for ciphertext - constant
	*/
	Ciphertext<Element> EvalSub(ConstCiphertext<Element> ciphertext, double constant) const {
		TimeVar t;
		Ciphertext<Element> rv;

		if ( constant >= 0 ) {
			if( doTiming ) TIC(t);
			rv = GetEncryptionAlgorithm()->EvalSub(ciphertext, constant);
			if( doTiming ) {
				timeSamples->push_back( TimingInfo(OpEvalSubConst, TOC_US(t)) );
			}
		} else {
			// subtracting a negative constant is implemented as adding its magnitude
			if( doTiming ) TIC(t);
			rv = GetEncryptionAlgorithm()->EvalAdd(ciphertext, -constant);
			if( doTiming ) {
				timeSamples->push_back( TimingInfo(OpEvalSubConst, TOC_US(t)) );
			}
		}
		return rv;
	}

	// plaintext - ciphertext, computed as (-ciphertext) + plaintext
	inline Ciphertext<Element> EvalSub(ConstPlaintext plaintext, ConstCiphertext<Element> ciphertext) const {
		return EvalAdd(EvalNegate(ciphertext), plaintext);
	}

	// plaintext - ciphertext, mutable variant. The input ciphertext is restored
	// by negating the (possibly rescaled/level-adjusted) intermediate again, so
	// it ends up at the level the mutable addition left it at.
	inline Ciphertext<Element> EvalSubMutable(Plaintext plaintext, Ciphertext<Element> &ciphertext) const {
		Ciphertext<Element> negated = EvalNegate(ciphertext);
		Ciphertext<Element> result = EvalAddMutable(negated, plaintext);
		ciphertext = EvalNegate(negated);
		return result;
	}

	// constant - ciphertext, computed as (-ciphertext) + constant
	inline Ciphertext<Element> EvalSub(double constant, ConstCiphertext<Element> ciphertext) const {
		return EvalAdd(EvalNegate(ciphertext), constant);
	}

	/**
	* EvalMult - PALISADE EvalMult method for a pair of ciphertexts - with key switching
	* @param ct1
	* @param ct2
	* @return new ciphertext for ct1 * ct2
	*/
	Ciphertext<Element> EvalMult(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const {
		TypeCheck(ct1, ct2);

		// the relinearization (eval-mult) key is looked up by the ciphertext's key tag
		auto ek = GetEvalMultKeyVector(ct1->GetKeyTag());

		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, ct2, ek[0]);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalMult, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* EvalMult - PALISADE EvalMultMutable method for a pair of ciphertexts - with key switching
	* This is a mutable version - input ciphertexts may get automatically
	* rescaled, or level-reduced.
*
* @param ct1
* @param ct2
* @return new ciphertext for ct1 * ct2
*/
	Ciphertext<Element> EvalMultMutable(Ciphertext<Element> &ct1, Ciphertext<Element> &ct2) const {
		TypeCheck(ct1, ct2);

		// the relinearization (eval-mult) key is looked up by the ciphertext's key tag
		auto ek = GetEvalMultKeyVector(ct1->GetKeyTag());

		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalMultMutable(ct1, ct2, ek[0]);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalMult, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* EvalMult - PALISADE EvalMult method for a pair of ciphertexts - no key switching (relinearization)
	* @param ct1
	* @param ct2
	* @return new ciphertext for ct1 * ct2
	*/
	Ciphertext<Element> EvalMultNoRelin(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const {
		TypeCheck(ct1, ct2);

		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, ct2);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalMult, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* EvalMultMany - PALISADE function for evaluating multiplication on ciphertext followed by relinearization operation (at the end).
	* It computes the multiplication in a binary tree manner. Also, it reduces the number of
	* elements in the ciphertext to two after each multiplication.
	* Currently it assumes that the consecutive two input arguments have
	* total depth smaller than the supported depth. Otherwise, it throws an error.
	*
	* @param ct is the ciphertext list.
	*
	* @return new ciphertext.
	*/
	Ciphertext<Element> EvalMultMany(const vector<Ciphertext<Element>>& ct) const{
		// the eval-mult key vector is fetched using the first ciphertext's key tag
		const auto ek = GetEvalMultKeyVector(ct[0]->GetKeyTag());

		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalMultMany(ct, ek);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalMultMany, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* EvalAddMany - Evaluate addition on a vector of ciphertexts.
	* It computes the addition in a binary tree manner.
	*
	* @param ctList is the list of ciphertexts.
	*
	* @return new ciphertext.
	*/
	Ciphertext<Element> EvalAddMany(const vector<Ciphertext<Element>>& ctList) const{
		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalAddMany(ctList);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalAddMany, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* EvalAddManyInPlace - Evaluate addition on a vector of ciphertexts.
	* Addition is computed in a binary tree manner. Difference with EvalAddMany
	* is that EvalAddManyInPlace uses the input ciphertext vector to store
	* intermediate results, to avoid the overhead of using extra temporary
	* space.
	*
	* @param ctList is the list of ciphertexts.
	*
	* @return new ciphertext.
	*/
	Ciphertext<Element> EvalAddManyInPlace(vector<Ciphertext<Element>>& ctList) const{
		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalAddManyInPlace(ctList);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalAddManyInPlace, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* Function for evaluating multiplication on ciphertext followed by relinearization operation.
	* Currently it assumes that the input arguments have total depth smaller than the supported depth. Otherwise, it throws an error.
	*
	* @param ct1 first input ciphertext.
	* @param ct2 second input ciphertext.
	*
	* @return new ciphertext
	*/
	Ciphertext<Element> EvalMultAndRelinearize(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const {
		const auto ek = GetEvalMultKeyVector(ct1->GetKeyTag());

		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalMultAndRelinearize(ct1, ct2, ek);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalMult, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* Function for relinearization of a ciphertext.
	*
	* @param ct input ciphertext.
*
* @return relinearized ciphertext
*/
	Ciphertext<Element> Relinearize(ConstCiphertext<Element> ct) const {
		const auto ek = GetEvalMultKeyVector(ct->GetKeyTag());

		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->Relinearize(ct, ek);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalRelin, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* EvalMult - PALISADE EvalMult method for plaintext * ciphertext
	* @param pt2
	* @param ct1
	* @return new ciphertext for ct1 * pt2
	*/
	inline Ciphertext<Element> EvalMult(ConstPlaintext pt2, ConstCiphertext<Element> ct1) const {
		return EvalMult(ct1, pt2);
	}

	/**
	* EvalMult - PALISADE EvalMultMutable method for plaintext * ciphertext
	* @param pt2
	* @param ct1
	* @return new ciphertext for ct1 * pt2
	*/
	inline Ciphertext<Element> EvalMultMutable(Plaintext pt2, Ciphertext<Element> &ct1) const {
		return EvalMultMutable(ct1, pt2);
	}

	/**
	* EvalMult - PALISADE EvalMult method for constant * ciphertext
	* @param constant
	* @param ct1
	* @return new ciphertext for ct1 * constant
	*/
	inline Ciphertext<Element> EvalMult(double constant, ConstCiphertext<Element> ct1) const {
		return EvalMult(ct1, constant);
	}

	inline Ciphertext<Element> EvalMultMutable(double constant, Ciphertext<Element> &ct1) const {
		return EvalMultMutable(ct1, constant);
	}

	/**
	* EvalRightShift - works only for Fractional Encoding
	* @param ct1
	* @param divisor
	* @return new ciphertext for ct1 right-shifted by divisor
	*/
	Ciphertext<Element> EvalRightShift(ConstCiphertext<Element> ct1, size_t divisor) const {
		if( ct1 && ct1->GetEncodingType() != Fractional ) {
			stringstream ss;
			ss << "A " << Fractional << " encoded ciphertext is required for the EvalRightShift operation";
			PALISADE_THROW( type_error, ss.str() );
		}

		// the shift is implemented as a multiplication by the fractional
		// plaintext encoding of 0 with denominator 'divisor'
		Plaintext plaintextShift = MakeFractionalPlaintext(0,divisor);
		TypeCheck(ct1, plaintextShift);

		// NOTE(review): timing here uses currentDateTime() rather than the
		// TimeVar/TOC_US pattern used elsewhere — the units recorded for
		// OpEvalRightShift may differ from other samples; confirm intent.
		double start = 0;
		if( doTiming ) start = currentDateTime();
		auto rv = EvalMult(ct1, plaintextShift);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalRightShift, currentDateTime() - start) );
		}
		return rv;
	}

	/**
	* EvalMult - PALISADE EvalMult method for ciphertext * plaintext
	* @param ct1
	* @param pt2
	* @return new ciphertext for ct1 * pt2
	*/
	Ciphertext<Element> EvalMult(ConstCiphertext<Element> ct1, ConstPlaintext pt2) const {
		TypeCheck(ct1, pt2);

		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, pt2);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalMult, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* EvalMult - PALISADE EvalMultMutable method for ciphertext * plaintext
	* This is a mutable version - input ciphertexts may get automatically
	* rescaled, or level-reduced.
	*
	* @param ct1
	* @param pt2
	* @return new ciphertext for ct1 * pt2
	*/
	Ciphertext<Element> EvalMultMutable(Ciphertext<Element> &ct1, Plaintext pt2) const {
		TypeCheck((ConstCiphertext<Element>) ct1, (ConstPlaintext) pt2);

		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalMultMutable(ct1, pt2);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalMult, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* EvalMult - PALISADE EvalMult method for a ciphertext and constant
	* @param ciphertext
	* @param constant
	* @return new ciphertext for ciphertext * constant
	*/
	Ciphertext<Element> EvalMult(ConstCiphertext<Element> ciphertext, double constant) const {
		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalMult(ciphertext, constant);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalMultConst, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* EvalMult - PALISADE EvalMultMutable method for a ciphertext and constant
	* This is a mutable version - input ciphertexts may get automatically
	* rescaled, or level-reduced.
*
* @param ciphertext
* @param constant
* @return new ciphertext for ciphertext * constant
*/
	Ciphertext<Element> EvalMultMutable(Ciphertext<Element> &ciphertext, double constant) const {
		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalMultMutable(ciphertext, constant);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalMultConst, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* EvalMultMatrix - PALISADE EvalMult method for two matrices of ciphertext
	* @param ct1
	* @param ct2
	* @return new matrix for ct1 * ct2
	*/
	shared_ptr<Matrix<RationalCiphertext<Element>>> EvalMultMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const {
		TypeCheck((*ct1)(0,0), (*ct2)(0,0)); // TODO only checking one; when Matrix is refactored, this should be revisited

		TimeVar t;
		if( doTiming ) TIC(t);
		// the matrix product is implemented by Matrix's operator*
		Matrix<RationalCiphertext<Element>> rv = *ct1 * *ct2;
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalMultMatrix, TOC_US(t)) );
		}
		shared_ptr<Matrix<RationalCiphertext<Element>>> a(new Matrix<RationalCiphertext<Element>>(rv));
		return a;
	}

	/**
	* EvalNegate - PALISADE Negate method for a ciphertext
	* @param ct
	* @return new ciphertext -ct
	*/
	Ciphertext<Element> EvalNegate(ConstCiphertext<Element> ct) const {
		if (ct == NULL || Mismatched(ct->GetCryptoContext()) )
			PALISADE_THROW(config_error, "Information passed to EvalNegate was not generated with this crypto context");

		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalNegate(ct);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalNeg, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* EvalNegateMatrix - PALISADE Negate method for a matrix of ciphertext
	* @param ct
	* @return new matrix -ct
	*/
	shared_ptr<Matrix<RationalCiphertext<Element>>> EvalNegateMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct) const {
		if (ct == NULL || Mismatched((*ct)(0,0).GetCryptoContext()) )
			PALISADE_THROW(config_error, "Information passed to EvalNegateMatrix was not generated with this crypto context");

		TimeVar t;
		if( doTiming ) TIC(t);
		shared_ptr<Matrix<RationalCiphertext<Element>>> m( new Matrix<RationalCiphertext<Element>>(ct->GetAllocator(), ct->GetRows(), ct->GetCols()));
		// negate each entry of the matrix in place in the result
		for( size_t r = 0; r < m->GetRows(); r++ )
			for( size_t c = 0; c < m->GetCols(); c++ )
				(*m)(r,c) = -((*ct)(r,c));
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalNegMatrix, TOC_US(t)) );
		}
		return m;
	}

	/**
	* Generate automorphism keys for a given private key
	*
	* @param publicKey original public key.
	* @param origPrivateKey original private key.
	* @param indexList list of automorphism indices to be computed
	* @return returns the evaluation keys; index 0 of the vector corresponds to plaintext index 2, index 1 to plaintext index 3, etc.
	*/
	shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPublicKey<Element> publicKey, const LPPrivateKey<Element> origPrivateKey, const std::vector<usint> &indexList) const {
		if( publicKey == NULL || origPrivateKey == NULL )
			PALISADE_THROW( type_error, "Null Keys");
		if( publicKey->GetCryptoContext().get() != this )
			PALISADE_THROW( type_error, "Key was not created in this CryptoContextImpl");
		if( publicKey->GetCryptoContext() != origPrivateKey->GetCryptoContext() )
			PALISADE_THROW( type_error, "Keys were not created in the same CryptoContextImpl");

		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalAutomorphismKeyGen(publicKey, origPrivateKey, indexList);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalAutomorphismKeyGen, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* Function for evaluating automorphism of ciphertext at index i
	*
	* @param ciphertext the input ciphertext.
	* @param i automorphism index
	* @param &evalKeys - reference to the vector of evaluation keys generated by EvalAutomorphismKeyGen.
* @return resulting ciphertext
*/
	Ciphertext<Element> EvalAutomorphism(ConstCiphertext<Element> ciphertext, usint i, const std::map<usint, LPEvalKey<Element>> &evalKeys) const {
		// the first key in the map is used as a representative for validation
		auto mf = evalKeys.begin();
		if( mf == evalKeys.end() )
			PALISADE_THROW( type_error, "Empty key map");

		auto tk = mf->second;
		if( ciphertext == NULL || tk == NULL )
			PALISADE_THROW( type_error, "Null inputs");

		if( ciphertext->GetCryptoContext().get() != this )
			PALISADE_THROW( type_error, "Ciphertext was not created in this CryptoContextImpl");

		if( ciphertext->GetCryptoContext() != tk->GetCryptoContext() )
			PALISADE_THROW( type_error, "Items were not created in the same CryptoContextImpl");

		if( ciphertext->GetKeyTag() != tk->GetKeyTag() )
			PALISADE_THROW( type_error, "Items were not encrypted with same keys" );

		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalAutomorphism(ciphertext, i, evalKeys);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalAutomorphismI, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* Generate automorphism keys for a given private key; Uses the private key for encryption
	*
	* @param privateKey private key.
	* @param indexList list of automorphism indices to be computed
	* @return returns the evaluation keys
	*/
	shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPrivateKey<Element> privateKey, const std::vector<usint> &indexList) const {
		if( privateKey == NULL )
			PALISADE_THROW( type_error, "Null input");

		if( privateKey->GetCryptoContext().get() != this )
			PALISADE_THROW( type_error, "Key was not created in this CryptoContextImpl");

		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalAutomorphismKeyGen(privateKey, indexList);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpEvalAutomorphismK, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* EvalSumKeyGen Generates the key map to be used by evalsum
	*
	* @param privateKey private key.
	* @param publicKey public key (used in NTRU schemes).
	*/
	void EvalSumKeyGen(
		const LPPrivateKey<Element> privateKey,
		const LPPublicKey<Element> publicKey = nullptr);

	shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumRowsKeyGen(
		const LPPrivateKey<Element> privateKey,
		const LPPublicKey<Element> publicKey = nullptr, usint rowSize = 0);

	shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumColsKeyGen(
		const LPPrivateKey<Element> privateKey,
		const LPPublicKey<Element> publicKey = nullptr);

	/**
	* GetEvalSumKeyMap returns the map
	*
	* @return the EvalSum key map
	*/
	static const std::map<usint, LPEvalKey<Element>>& GetEvalSumKeyMap(const string& id);

	static const std::map<string,shared_ptr<std::map<usint, LPEvalKey<Element>>>>& GetAllEvalSumKeys();

	/**
	* Function for evaluating a sum of all components
	*
	* @param ciphertext the input ciphertext.
	* @param batchSize size of the batch
	* @return resulting ciphertext
	*/
	Ciphertext<Element> EvalSum(ConstCiphertext<Element> ciphertext, usint batchSize) const;

	Ciphertext<Element> EvalSumRows(ConstCiphertext<Element> ciphertext, usint rowSize, const std::map<usint, LPEvalKey<Element>> &evalKeys) const;

	Ciphertext<Element> EvalSumCols(ConstCiphertext<Element> ciphertext, usint rowSize, const std::map<usint, LPEvalKey<Element>> &evalKeys) const;

	/**
	* EvalAtIndexKeyGen generates the key map to be used by EvalAtIndex
	*
	* @param privateKey private key.
	* @param indexList list of indices.
	* @param publicKey public key (used in NTRU schemes).
	*/
	void EvalAtIndexKeyGen(const LPPrivateKey<Element> privateKey, const std::vector<int32_t> &indexList, const LPPublicKey<Element> publicKey = nullptr);

	/**
	* EvalFastRotationPrecompute implements the precomputation step of
	* hoisted automorphisms.
	*
	* Please refer to Section 5 of Halevi and Shoup, "Faster Homomorphic
	* linear transformations in HELib." for more details, link:
	* https://eprint.iacr.org/2018/244.
*
* Generally, automorphisms are performed with three steps: (1) the automorphism is
* applied on the ciphertext, (2) the automorphed values are decomposed into digits,
* and (3) key switching is applied to make it possible to further compute on the
* ciphertext.
*
* Hoisted automorphisms is a technique that performs the digit decomposition for the
* original ciphertext first, and then performs the automorphism and the key switching
* on the decomposed digits. The benefit of this is that the digit decomposition is
* independent of the automorphism rotation index, so it can be reused for multiple
* different indices. This can greatly improve performance when we have to compute many
* automorphisms on the same ciphertext. This routinely happens when we do permutations
* (EvalPermute).
*
* EvalFastRotationPrecompute implements the digit decomposition step of hoisted
* automorphisms.
*
* @param ct the input ciphertext on which to do the precomputation (digit decomposition)
*/
	shared_ptr<vector<Element>> EvalFastRotationPrecompute( ConstCiphertext<Element> ct ) const {
		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalFastRotationPrecompute(ct);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpFastRotPrecomp, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* EvalFastRotation implements the automorphism and key switching step of
	* hoisted automorphisms.
	*
	* Please refer to Section 5 of Halevi and Shoup, "Faster Homomorphic
	* linear transformations in HELib." for more details, link:
	* https://eprint.iacr.org/2018/244.
	*
	* Generally, automorphisms are performed with three steps: (1) the automorphism is
	* applied on the ciphertext, (2) the automorphed values are decomposed into digits,
	* and (3) key switching is applied to make it possible to further compute on the
	* ciphertext.
	*
	* Hoisted automorphisms is a technique that performs the digit decomposition for the
	* original ciphertext first, and then performs the automorphism and the key switching
	* on the decomposed digits. The benefit of this is that the digit decomposition is
	* independent of the automorphism rotation index, so it can be reused for multiple
	* different indices. This can greatly improve performance when we have to compute many
	* automorphisms on the same ciphertext. This routinely happens when we do permutations
	* (EvalPermute).
	*
	* EvalFastRotation implements the automorphism and key switching step of hoisted
	* automorphisms.
	*
	* This method assumes that all required rotation keys exist. This may not be true
	* if we are using baby-step/giant-step key switching. Please refer to Section 5.1 of
	* the above reference and EvalPermuteBGStepHoisted to see how to deal with this issue.
	*
	* @param ct the input ciphertext to perform the automorphism on
	* @param index the index of the rotation. Positive indices correspond to left rotations
	* and negative indices correspond to right rotations.
	* @param m is the cyclotomic order
	* @param digits the digit decomposition created by EvalFastRotationPrecompute at
	* the precomputation step.
	*/
	Ciphertext<Element> EvalFastRotation( ConstCiphertext<Element> ct, const usint index, const usint m, const shared_ptr<vector<Element>> digits ) const {
		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalFastRotation(ct, index, m, digits);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpFastRot, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* Merges multiple ciphertexts with encrypted results in slot 0 into a single ciphertext
	* The slot assignment is done based on the order of ciphertexts in the vector
	*
	* @param ciphertextVector vector of ciphertexts to be merged.
	* @param &evalKeys - reference to the map of evaluation keys generated by EvalAutomorphismKeyGen.
* @return resulting ciphertext
*/
	Ciphertext<Element> EvalMerge(const vector<Ciphertext<Element>> &ciphertextVector) const;

	/**
	* GetEvalAutomorphismKeyMap returns the map
	*
	* @return the EvalAutomorphism key map
	*/
	static const std::map<usint, LPEvalKey<Element>>& GetEvalAutomorphismKeyMap(const string& id);

	static const std::map<string,shared_ptr<std::map<usint, LPEvalKey<Element>>>>& GetAllEvalAutomorphismKeys();

	/**
	* Moves i-th slot to slot 0
	*
	* @param ciphertext the input ciphertext.
	* @param index the index.
	* @return resulting ciphertext
	*/
	Ciphertext<Element> EvalAtIndex(ConstCiphertext<Element> ciphertext, int32_t index) const;

	/**
	* Evaluates inner product in batched encoding
	*
	* @param ciphertext1 first vector.
	* @param ciphertext2 second vector.
	* @param batchSize size of the batch to be summed up
	* @return resulting ciphertext
	*/
	Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1, ConstCiphertext<Element> ciphertext2, usint batchSize) const;

	/**
	* Evaluates inner product in batched encoding
	*
	* @param ciphertext1 first vector.
	* @param ciphertext2 second vector (as a plaintext).
	* @param batchSize size of the batch to be summed up
	* @return resulting ciphertext
	*/
	Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1, ConstPlaintext ciphertext2, usint batchSize) const;

	/**
	* EvalCrossCorrelation - Computes the sliding sum of inner products (known as
	* as cross-correlation, sliding inner product, or sliding dot product in
	* image processing
	* @param x - first vector of row vectors
	* @param y - second vector of row vectors
	* @param batchSize - batch size for packed encoding
	* @param indexStart - starting index in the vectors of row vectors
	* @param length - length of the slice in the vectors of row vectors; default is 0 meaning to use the full length of the vector
	* @return sum(x_i*y_i), i.e., a sum of inner products
	*/
	Ciphertext<Element> EvalCrossCorrelation(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize, usint indexStart = 0, usint length = 0) const;

	/**
	* EvalLinRegressBatched- Computes the parameter vector for linear regression using the least squares method
	* Supported only in batched mode; currently works only for two regressors
	* @param x - matrix of regressors
	* @param y - vector of dependent variables
	* @param batchSize - batch size for packed encoding
	* @return the parameter vector using (x^T x)^{-1} x^T y (using least squares method)
	*/
	shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegressBatched(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize) const;

	/**
	* EvalLinRegression - Computes the parameter vector for linear regression using the least squares method
	* @param x - matrix of regressors
	* @param y - vector of dependent variables
	* @return the parameter vector using (x^T x)^{-1} x^T y (using least squares method)
	*/
	shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegression(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const
shared_ptr<Matrix<RationalCiphertext<Element>>> y) const {
		TypeCheck((*x)(0,0), (*y)(0,0)); // TODO only checking one; when Matrix is refactored, this should be revisited

		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->EvalLinRegression(x, y);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpLinRegression, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* KeySwitch - PALISADE KeySwitch method
	* @param keySwitchHint - reference to KeySwitchHint
	* @param ciphertext - vector of ciphertext
	* @return new CiphertextImpl after applying key switch
	*/
	Ciphertext<Element> KeySwitch( const LPEvalKey<Element> keySwitchHint, ConstCiphertext<Element> ciphertext) const {
		if( keySwitchHint == NULL || Mismatched(keySwitchHint->GetCryptoContext()) )
			PALISADE_THROW(config_error, "Key passed to KeySwitch was not generated with this crypto context");

		if( ciphertext == NULL || Mismatched(ciphertext->GetCryptoContext()) )
			PALISADE_THROW(config_error, "Ciphertext passed to KeySwitch was not generated with this crypto context");

		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->KeySwitch(keySwitchHint, ciphertext);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpKeySwitch, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* Rescale - An alias for PALISADE ModReduce method.
	* This is because ModReduce is called Rescale in CKKS.
	*
	* @param ciphertext - vector of ciphertext
	* @return vector of mod reduced ciphertext
	*/
	Ciphertext<Element> Rescale(ConstCiphertext<Element> ciphertext) const {
		if( ciphertext == NULL || Mismatched(ciphertext->GetCryptoContext()) )
			PALISADE_THROW(config_error, "Information passed to Rescale was not generated with this crypto context");

		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->ModReduce(ciphertext);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpModReduce, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* ModReduce - PALISADE ModReduce method
	* @param ciphertext - vector of ciphertext
	* @return vector of mod reduced ciphertext
	*/
	Ciphertext<Element> ModReduce(ConstCiphertext<Element> ciphertext) const {
		if( ciphertext == NULL || Mismatched(ciphertext->GetCryptoContext()) )
			PALISADE_THROW(config_error, "Information passed to ModReduce was not generated with this crypto context");

		TimeVar t;
		if( doTiming ) TIC(t);
		auto rv = GetEncryptionAlgorithm()->ModReduce(ciphertext);
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpModReduce, TOC_US(t)) );
		}
		return rv;
	}

	/**
	* ModReduceRational - PALISADE ModReduce method applied to both the
	* numerator and the denominator of a rational ciphertext
	* @param ciphertext - rational ciphertext
	* @return mod reduced rational ciphertext
	*/
	RationalCiphertext<Element> ModReduceRational(RationalCiphertext<Element> ciphertext) const {
		TimeVar t;
		if( doTiming ) TIC(t);
		Ciphertext<Element> n = GetEncryptionAlgorithm()->ModReduce(ciphertext.GetNumerator());
		Ciphertext<Element> d = GetEncryptionAlgorithm()->ModReduce(ciphertext.GetDenominator());
		if( doTiming ) {
			timeSamples->push_back( TimingInfo(OpModReduce, TOC_US(t)) );
		}
		return RationalCiphertext<Element>(n,d);
	}

	/**
	* ModReduceMatrix - PALISADE ModReduce method applied to every entry
	* of a matrix of rational ciphertexts
	* @param ciphertext - matrix of rational ciphertext
	* @return matrix of mod reduced ciphertext
	*/
	shared_ptr<Matrix<RationalCiphertext<Element>>> ModReduceMatrix(shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext) const {
		// needs context check

		TimeVar t;
		if( doTiming ) TIC(t);
shared_ptr<Matrix<RationalCiphertext<Element>>> m( new Matrix<RationalCiphertext<Element>>(ciphertext->GetAllocator(), ciphertext->GetRows(), ciphertext->GetCols())); for( size_t r = 0; r < m->GetRows(); r++ ) for( size_t c = 0; c < m->GetCols(); c++ ) (*m)(r,c) = ModReduceRational((*ciphertext)(r,c)); if( doTiming ) { timeSamples->push_back( TimingInfo(OpModReduceMatrix, TOC_US(t)) ); } return m; } /** * LevelReduce - PALISADE LevelReduce method * @param cipherText1 * @param linearKeySwitchHint * @return vector of level reduced ciphertext */ Ciphertext<Element> LevelReduce(ConstCiphertext<Element> cipherText1, const LPEvalKeyNTRU<Element> linearKeySwitchHint, size_t levels = 1) const { const shared_ptr<LPCryptoParametersCKKS<DCRTPoly>> cryptoParams = std::dynamic_pointer_cast<LPCryptoParametersCKKS<DCRTPoly>>(cipherText1->GetCryptoParameters()); if( cipherText1 == NULL || Mismatched(cipherText1->GetCryptoContext()) ) { PALISADE_THROW(config_error, "Information passed to LevelReduce was not generated with this crypto context"); } TimeVar t; if( doTiming ) TIC(t); auto rv = GetEncryptionAlgorithm()->LevelReduce(cipherText1, linearKeySwitchHint, levels); if( doTiming ) { timeSamples->push_back( TimingInfo(OpLevelReduce, TOC_US(t)) ); } return rv; } /** * ComposedEvalMult - PALISADE composed evalmult * @param ciphertext1 - vector for first cipher text * @param ciphertext2 - vector for second cipher text * @param quadKeySwitchHint - is the quadratic key switch hint from original private key to the quadratic key * return vector of resulting ciphertext */ Ciphertext<Element> ComposedEvalMult( ConstCiphertext<Element> ciphertext1, ConstCiphertext<Element> ciphertext2) const { if( ciphertext1 == NULL || ciphertext2 == NULL || ciphertext1->GetKeyTag() != ciphertext2->GetKeyTag() || Mismatched(ciphertext1->GetCryptoContext()) ) PALISADE_THROW(config_error, "Ciphertexts passed to ComposedEvalMult were not generated with this crypto context"); auto ek = 
GetEvalMultKeyVector(ciphertext1->GetKeyTag()); TimeVar t; if( doTiming ) TIC(t); auto rv = GetEncryptionAlgorithm()->ComposedEvalMult(ciphertext1, ciphertext2, ek[0]); if( doTiming ) { timeSamples->push_back( TimingInfo(OpComposedEvalMult, TOC_US(t)) ); } return rv; } static LPPublicKey<Element> deserializePublicKey(const Serialized& serObj) __attribute__ ((deprecated("serialization changed, see wiki for details"))); static LPPrivateKey<Element> deserializeSecretKey(const Serialized& serObj) __attribute__ ((deprecated("serialization changed, see wiki for details"))); static LPEvalKey<Element> deserializeEvalKey(const Serialized& serObj) __attribute__ ((deprecated("serialization changed, see wiki for details"))); static LPEvalKey<Element> deserializeEvalKeyInContext(const Serialized& serObj, CryptoContext<Element> cc) __attribute__ ((deprecated("serialization changed, see wiki for details"))); template <class Archive> void save( Archive & ar, std::uint32_t const version ) const { ar( cereal::make_nvp("cc", params) ); ar( cereal::make_nvp("kt", scheme) ); ar( cereal::make_nvp("si", m_schemeId) ); } template <class Archive> void load( Archive & ar, std::uint32_t const version ) { if( version > SerializedVersion() ) { PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library"); } ar( cereal::make_nvp("cc", params) ); ar( cereal::make_nvp("kt", scheme) ); ar( cereal::make_nvp("si", m_schemeId) ); // NOTE: a pointer to this object will be wrapped in a shared_ptr, and is a "CryptoContext". // PALISADE relies on the notion that identical CryptoContextImpls are not duplicated in memory // Once we deserialize this object, we must check to see if there is a matching object // for this object that's already existing in memory // if it DOES exist, use it. 
If it does NOT exist, add this to the cache of all contexts } virtual std::string SerializedObjectName() const { return "CryptoContext"; } static uint32_t SerializedVersion() { return 1; } }; /** * @brief CryptoObject * * A class to aid in referring to the crypto context that an object belongs to */ template<typename Element> class CryptoObject { protected: CryptoContext<Element> context; /*!< crypto context this object belongs to */ string keyTag; /*!< tag used to find the evaluation key needed for SHE/FHE operations */ public: CryptoObject(CryptoContext<Element> cc = 0, const string& tag = "") : context(cc), keyTag(tag) {} CryptoObject(const CryptoObject& rhs) { context = rhs.context; keyTag = rhs.keyTag; } CryptoObject(const CryptoObject&& rhs) { context = std::move(rhs.context); keyTag = std::move(rhs.keyTag); } virtual ~CryptoObject() {} const CryptoObject& operator=(const CryptoObject& rhs) { this->context = rhs.context; this->keyTag = rhs.keyTag; return *this; } const CryptoObject& operator=(const CryptoObject&& rhs) { this->context = std::move(rhs.context); this->keyTag = std::move(rhs.keyTag); return *this; } bool operator==(const CryptoObject& rhs) const { return context.get() == rhs.context.get() && keyTag == rhs.keyTag; } CryptoContext<Element> GetCryptoContext() const { return context; } const shared_ptr<LPCryptoParameters<Element>> GetCryptoParameters() const { return context->GetCryptoParameters(); } const EncodingParams GetEncodingParameters() const { return context->GetCryptoParameters()->GetEncodingParams(); } const string GetKeyTag() const { return keyTag; } void SetKeyTag(const string& tag) { keyTag = tag; } template <class Archive> void save( Archive & ar, std::uint32_t const version ) const { ar( ::cereal::make_nvp("cc", context) ); ar( ::cereal::make_nvp("kt", keyTag) ); } template <class Archive> void load( Archive & ar, std::uint32_t const version ) { if( version > SerializedVersion() ) { PALISADE_THROW(deserialize_error, "serialized object 
version " + std::to_string(version) + " is from a later version of the library"); } ar( ::cereal::make_nvp("cc", context) ); ar( ::cereal::make_nvp("kt", keyTag) ); context = CryptoContextFactory<Element>::GetContext(context->GetCryptoParameters(),context->GetEncryptionAlgorithm()); } std::string SerializedObjectName() const { return "CryptoObject"; } static uint32_t SerializedVersion() { return 1; } }; /** * @brief CryptoContextFactory * * A class that contains static methods to generate new crypto contexts from user parameters * */ template<typename Element> class CryptoContextFactory { protected: static vector<CryptoContext<Element>> AllContexts; public: static void ReleaseAllContexts(); static int GetContextCount(); static CryptoContext<Element> GetSingleContext(); static CryptoContext<Element> GetContext( shared_ptr<LPCryptoParameters<Element>> params, shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme, const string & schemeId = "Not"); static CryptoContext<Element> GetContextForPointer(CryptoContextImpl<Element>* cc); static const vector<CryptoContext<Element>>& GetAllContexts(); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme * @param params ring parameters * @param plaintextModulus plaintext modulus * @param relinWindow bits in the base of digits in key switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param delta - the plaintext scaling parameter floor(q/t) in BFV * @param mode - mode for generating secret keys (RLWE vs OPTIMIZED) * @param bigmodulus - large modulus used in tensoring of homomorphic multiplication * @param bigrootofunity - root of unity for bigmodulus * @param depth of supported computation circuit (not used; for future use) * @param assuranceMeasure alpha - effective bound for gaussians: - sqrt{alpha}*sigma..sqrt{alpha}*sigma * @param security level - root Hermite factor * @param bigmodulusarb - additional large modulus for bigmoduls for the case of general 
(non-power-of-two) cyclotomics * @param bigrootofunityarb - root of unity for bigmodulusarb * @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization * @return new context */ static CryptoContext<Element> genCryptoContextBFV(shared_ptr<typename Element::Params> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, const std::string& delta, MODE mode = RLWE, const std::string& bigmodulus = "0", const std::string& bigrootofunity = "0", int depth = 0, int assuranceMeasure = 0, float securityLevel = 0, const std::string& bigmodulusarb = "0", const std::string& bigrootofunityarb = "0", int maxDepth = 2); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme * @param params ring parameters * @param encodingParams plaintext encoding parameters * @param relinWindow bits in the base of digits in key switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param delta - the plaintext scaling parameter floor(q/t) in BFV * @param mode - mode for generating secret keys (RLWE vs OPTIMIZED) * @param bigmodulus - large modulus used in tensoring of homomorphic multiplication * @param bigrootofunity - root of unity for bigmodulus * @param depth of supported computation circuit (not used; for future use) * @param assuranceMeasure alpha - effective bound for gaussians: - sqrt{alpha}*sigma..sqrt{alpha}*sigma * @param security level - root Hermite factor * @param bigmodulusarb - additional large modulus for bigmoduls for the case of general (non-power-of-two) cyclotomics * @param bigrootofunityarb - root of unity for bigmodulusarb * @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization * 
@return new context */ static CryptoContext<Element> genCryptoContextBFV(shared_ptr<typename Element::Params> params, EncodingParams encodingParams, usint relinWindow, float stDev, const std::string& delta, MODE mode = RLWE, const std::string& bigmodulus = "0", const std::string& bigrootofunity = "0", int depth = 0, int assuranceMeasure = 0, float securityLevel = 0, const std::string& bigmodulusarb = "0", const std::string& bigrootofunityarb = "0", int maxDepth = 2); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme using the scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel root Hermite factor (lattice security parameter) * @param relinWindow bits in the base of digits in key switching/relinearization * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization * @param n ring dimension in case the user wants to use a custom ring dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFV( const PlaintextModulus plaintextModulus, float securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme using the scheme's ParamsGen 
methods * @param plaintextModulus plaintext modulus * @param securityLevel standard security level * @param relinWindow bits in the base of digits in key switching/relinearization * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization * @param n ring dimension in case the user wants to use a custom ring dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFV( const PlaintextModulus plaintextModulus, SecurityLevel securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme using the scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel root Hermite factor (lattice security parameter) * @param distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero) * @param mode secret key 
distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization * @param n ring dimension in case the user wants to use a custom ring dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFV( EncodingParams encodingParams, float securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme using the scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel standard security level * @param distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization * @param n ring dimension in case the user wants to use a custom ring dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFV( EncodingParams encodingParams, SecurityLevel securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t 
n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization * @param relinWindow the key switching window (bits in the base for digits) used for digit decomposition (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrns( const PlaintextModulus plaintextModulus, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel standard secuirity level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero) * @param numMults multiplicative 
depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization * @param relinWindow the key switching window (bits in the base for digits) used for digit decomposition (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrns( const PlaintextModulus plaintextModulus, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the relinearization key 
is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization * @param relinWindow the key switching window used for digit decomposition (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrns( EncodingParams encodingParams, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel standard security level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization * @param relinWindow the key switching window used for digit decomposition (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrns( EncodingParams 
encodingParams, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization * @param relinWindow the key switching window used for digit decomposition (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( const PlaintextModulus plaintextModulus, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel standard security level * @param dist 
distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization * @param relinWindow the key switching window used for digit decomposition (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( const PlaintextModulus plaintextModulus, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero) * @param mode secret 
key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization * @param relinWindow the key switching window used for digit decomposition (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( EncodingParams encodingParams, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel standard security level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the relinearization key is generated (by default, it is 2); setting it to a value larger than 2 adds support for homomorphic multiplication w/o relinearization * @param relinWindow the key switching window used for digit decomposition (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * 
@param n ring dimension in case the user wants to use a custom ring dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( EncodingParams encodingParams, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BGV Scheme * @param params ring parameters * @param plaintextModulus plaintext modulus * @param relinWindow bits in the base of digits in key switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution]) * @param depth of supported computation circuit (not used; for future use) * @return new context */ static CryptoContext<Element> genCryptoContextBGV(shared_ptr<typename Element::Params> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContextImpl for the BGV Scheme * @param params ring parameters * @param encodingParams plaintext encoding parameters * @param relinWindow bits in the base of digits in key switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param mode secret key distribution mode (RLWE [Gaussian noise] or OPTIMIZED [ternary uniform distribution]) * @param depth of supported computation circuit (not used; for future use) * @return new context */ static CryptoContext<Element> genCryptoContextBGV(shared_ptr<typename Element::Params> params, EncodingParams encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContextImpl for the CKKS Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev 
* @param mode * @param depth * @param maxDepth the maximum power of secret key for which the relinearization key is generated * @param ksTech key switching technique to use (e.g., GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or EXACTRESCALE) * @return new context */ static CryptoContext<Element> genCryptoContextCKKS(shared_ptr<typename Element::Params> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1, int maxDepth = 2, KeySwitchTechnique ksTech = BV, RescalingTechnique rsTech = APPROXRESCALE); /** * construct a PALISADE CryptoContextImpl for the CKKS Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @param maxDepth the maximum power of secret key for which the relinearization key is generated * @param ksTech key switching technique to use (e.g., GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or EXACTRESCALE) * @return new context */ static CryptoContext<Element> genCryptoContextCKKS(shared_ptr<typename Element::Params> params, EncodingParams encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1, int maxDepth = 2, enum KeySwitchTechnique ksTech = BV, RescalingTechnique rsTech = APPROXRESCALE); /** * Automatically generate the moduli chain and construct a PALISADE * CryptoContextImpl for the CKKS Scheme with it. 
* * @param cyclOrder the cyclotomic order M * @param numPrimes the number of towers/primes to use when building the moduli chain * @param scaleExp the plaintext scaling factor, which is equal to dcrtBits in our implementation of CKKS * @param batchSize the batch size of the ciphertext * @param mode RLWE or OPTIMIZED * @param depth * @param maxDepth the maximum power of secret key for which the relinearization key is generated * @param firstModSize the bit-length of the first modulus * @param ksTech key switching technique to use (e.g., GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or EXACTRESCALE) * @param numLargeDigits the number of big digits to use in HYBRID key switching * @return new context */ static CryptoContext<Element> genCryptoContextCKKSWithParamsGen( usint cyclOrder, usint numPrimes, usint scaleExp, usint relinWindow, usint batchSize, MODE mode, int depth = 1, int maxDepth = 2, usint firstModSize = 60, enum KeySwitchTechnique ksTech = BV, enum RescalingTechnique rsTech = APPROXRESCALE, uint32_t numLargeDigits = 4); /** * Construct a PALISADE CryptoContextImpl for the CKKS Scheme. 
* * @param multiplicativeDepth the depth of multiplications supported by the scheme (equal to number of towers - 1) * @param scalingFactorBits the size of the scaling factor in bits * @param batchSize the number of slots being used in the ciphertext * @param stdLevel the standard security level we want the scheme to satisfy * @param ringDim the ring dimension (if not specified selected automatically based on stdLevel) * @param ksTech key switching technique to use (e.g., HYBRID, GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or EXACTRESCALE) * @param numLargeDigits the number of big digits to use in HYBRID key switching * @param maxDepth the maximum power of secret key for which the relinearization key is generated * @param firstModSize the bit-length of the first modulus * @param relinWindow the relinearization windows (used in BV key switching, use 0 for RNS decomposition) * @param mode RLWE (gaussian distribution) or OPTIMIZED (ternary distribution) * @return new context */ static CryptoContext<Element> genCryptoContextCKKS( usint multiplicativeDepth, usint scalingFactorBits, usint batchSize, SecurityLevel stdLevel = HEStd_128_classic, usint ringDim = 0, enum RescalingTechnique rsTech = EXACTRESCALE, enum KeySwitchTechnique ksTech = HYBRID, uint32_t numLargeDigits = 0, int maxDepth = 2, usint firstModSize = 60, usint relinWindow = 0, MODE mode = OPTIMIZED); /** * construct a PALISADE CryptoContextImpl for the StehleSteinfeld Scheme * @param params ring parameters * @param plaintextModulus plaintext modulus * @param relinWindow bits in the base of digits in key switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param stdDev distribution parameter for secret key distribution * @param depth of supported computation circuit (not used; for future use) * @param assuranceMeasure alpha - effective bound for gaussians: - sqrt{alpha}*sigma..sqrt{alpha}*sigma * @param security level - root Hermite 
factor * @return new context */ static CryptoContext<Element> genCryptoContextStehleSteinfeld(shared_ptr<typename Element::Params> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, float stDevStSt, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContextImpl for the StehleSteinfeld Scheme * @param params ring parameters * @param encodingParams plaintext encoding parameters * @param relinWindow bits in the base of digits in key switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param stdDev distribution parameter for secret key distribution * @param depth of supported computation circuit (not used; for future use) * @param assuranceMeasure alpha - effective bound for gaussians: - sqrt{alpha}*sigma..sqrt{alpha}*sigma * @param security level - root Hermite factor * @return new context */ static CryptoContext<Element> genCryptoContextStehleSteinfeld(shared_ptr<typename Element::Params> params, EncodingParams encodingParams, usint relinWindow, float stDev, float stDevStSt, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContextImpl for the Null Scheme * @param m cyclotomic order (ring dimension n = m/2 for power-of-two cyclotomics) * @param plaintextModulus plaintext modulus * @return */ static CryptoContext<Element> genCryptoContextNull(unsigned int m, const PlaintextModulus ptModulus); /** * construct a PALISADE CryptoContextImpl for the Null Scheme * @param m cyclotomic order (ring dimension n = m/2 for power-of-two cyclotomics) * @param encodingParams plaintext encoding parameters * @return */ static CryptoContext<Element> genCryptoContextNull(unsigned int m, EncodingParams encodingParams); static CryptoContext<Element> DeserializeAndCreateContext(const Serialized& serObj) __attribute__ ((deprecated("serialization changed, see wiki for details"))); }; } #endif /* 
SRC_PKE_CRYPTOCONTEXT_H_ */
tree-vect-data-refs.c
/* Data References Analysis and Manipulation Utilities for Vectorization. Copyright (C) 2003-2015 Free Software Foundation, Inc. Contributed by Dorit Naishlos <dorit@il.ibm.com> and Ira Rosen <irar@il.ibm.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "dumpfile.h" #include "tm.h" #include "hash-set.h" #include "machmode.h" #include "vec.h" #include "double-int.h" #include "input.h" #include "alias.h" #include "symtab.h" #include "wide-int.h" #include "inchash.h" #include "tree.h" #include "fold-const.h" #include "stor-layout.h" #include "tm_p.h" #include "target.h" #include "predict.h" #include "hard-reg-set.h" #include "function.h" #include "dominance.h" #include "cfg.h" #include "basic-block.h" #include "gimple-pretty-print.h" #include "tree-ssa-alias.h" #include "internal-fn.h" #include "tree-eh.h" #include "gimple-expr.h" #include "is-a.h" #include "gimple.h" #include "gimplify.h" #include "gimple-iterator.h" #include "gimplify-me.h" #include "gimple-ssa.h" #include "tree-phinodes.h" #include "ssa-iterators.h" #include "stringpool.h" #include "tree-ssanames.h" #include "tree-ssa-loop-ivopts.h" #include "tree-ssa-loop-manip.h" #include "tree-ssa-loop.h" #include "cfgloop.h" #include "tree-chrec.h" #include "tree-scalar-evolution.h" #include "tree-vectorizer.h" #include "diagnostic-core.h" #include "hash-map.h" #include 
"plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
/* Need to include rtl.h, expr.h, etc. for optabs.  */
#include "hashtab.h"
#include "rtl.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "builtins.h"

/* Return true if load- or store-lanes optab OPTAB is implemented for
   COUNT vectors of type VECTYPE.  NAME is the name of OPTAB and is used
   only for dump output.  */

static bool
vect_lanes_optab_supported_p (const char *name, convert_optab optab,
			      tree vectype, unsigned HOST_WIDE_INT count)
{
  machine_mode mode, array_mode;
  bool limit_p;

  mode = TYPE_MODE (vectype);
  /* If the target has no dedicated array mode for MODE repeated COUNT
     times, ask mode_for_size for a (possibly size-limited) integer mode
     of the combined width instead.  */
  limit_p = !targetm.array_mode_supported_p (mode, count);
  array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
			      MODE_INT, limit_p);

  /* BLKmode means no suitable mode exists; the lanes optab cannot be
     used for this vectype/count combination.  */
  if (array_mode == BLKmode)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]\n",
                         GET_MODE_NAME (mode), count);
      return false;
    }

  /* The optab must have a handler converting between the array mode and
     the element mode.  */
  if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "cannot use %s<%s><%s>\n", name,
                         GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
      return false;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
                     GET_MODE_NAME (mode));

  return true;
}


/* Return the smallest scalar part of STMT.
   This is used to determine the vectype of the stmt.  We generally set the
   vectype according to the type of the result (lhs).  For stmts whose
   result-type is different than the type of the arguments (e.g., demotion,
   promotion), vectype will be reset appropriately (later).  Note that we have
   to visit the smallest datatype in this function, because that determines the
   VF.
If the smallest datatype in the loop is present only as the rhs of a
   promotion operation - we'd miss it.
   Such a case, where a variable of this datatype does not appear in the lhs
   anywhere in the loop, can only occur if it's an invariant: e.g.:
   'int_x = (int) short_inv', which we'd expect to have been optimized away by
   invariant motion.  However, we cannot rely on invariant motion to always
   take invariants out of the loop, and so in the case of promotion we also
   have to check the rhs.
   LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
   types.  */

tree
vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
                               HOST_WIDE_INT *rhs_size_unit)
{
  tree scalar_type = gimple_expr_type (stmt);
  HOST_WIDE_INT lhs, rhs;

  /* Default: both sizes are that of the stmt's expression type.  */
  lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));

  /* For casts and widening operations the rhs may be narrower than the
     lhs; if so, the narrower rhs type is the one that determines the VF
     (see the function comment above).  */
  if (is_gimple_assign (stmt)
      && (gimple_assign_cast_p (stmt)
          || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
          || gimple_assign_rhs_code (stmt) == WIDEN_LSHIFT_EXPR
          || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
    {
      tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
      if (rhs < lhs)
        scalar_type = rhs_type;
    }

  *lhs_size_unit = lhs;
  *rhs_size_unit = rhs;
  return scalar_type;
}


/* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
   tested at run-time.  Return TRUE if DDR was successfully inserted.
   Return false if versioning is not supported.
*/

static bool
vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Run-time alias checks can be disabled entirely via --param
     vect-max-version-for-alias-checks=0.  */
  if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
    return false;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "mark for run-time aliasing test between ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
      dump_printf (MSG_NOTE, " and ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
      dump_printf (MSG_NOTE, "\n");
    }

  /* Versioning duplicates the loop body, so avoid it when optimizing
     for size.  */
  if (optimize_loop_nest_for_size_p (loop))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "versioning not supported when optimizing"
			 " for size.\n");
      return false;
    }

  /* FORNOW: We don't support versioning with outer-loop vectorization.  */
  if (loop->inner)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "versioning not yet supported for outer-loops.\n");
      return false;
    }

  /* FORNOW: We don't support creating runtime alias tests for non-constant
     step.  */
  if (TREE_CODE (DR_STEP (DDR_A (ddr))) != INTEGER_CST
      || TREE_CODE (DR_STEP (DDR_B (ddr))) != INTEGER_CST)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "versioning not yet supported for non-constant "
			 "step\n");
      return false;
    }

  /* Record the pair; the actual run-time check is emitted later from
     this list.  */
  LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
  return true;
}


/* Function vect_analyze_data_ref_dependence.

   Return TRUE if there (might) exist a dependence between a memory-reference
   DRA and a memory-reference DRB.  When versioning for alias may check a
   dependence at run-time, return FALSE.  Adjust *MAX_VF according to
   the data dependence.
*/ static bool vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr, loop_vec_info loop_vinfo, int *max_vf) { unsigned int i; struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); struct data_reference *dra = DDR_A (ddr); struct data_reference *drb = DDR_B (ddr); stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra)); stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb)); lambda_vector dist_v; unsigned int loop_depth; /* In loop analysis all data references should be vectorizable. */ if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a) || !STMT_VINFO_VECTORIZABLE (stmtinfo_b)) gcc_unreachable (); /* Independent data accesses. */ if (DDR_ARE_DEPENDENT (ddr) == chrec_known) return false; if (dra == drb || (DR_IS_READ (dra) && DR_IS_READ (drb))) return false; /* Even if we have an anti-dependence then, as the vectorized loop covers at least two scalar iterations, there is always also a true dependence. As the vectorizer does not re-order loads and stores we can ignore the anti-dependence if TBAA can disambiguate both DRs similar to the case with known negative distance anti-dependences (positive distance anti-dependences would violate TBAA constraints). */ if (((DR_IS_READ (dra) && DR_IS_WRITE (drb)) || (DR_IS_WRITE (dra) && DR_IS_READ (drb))) && !alias_sets_conflict_p (get_alias_set (DR_REF (dra)), get_alias_set (DR_REF (drb)))) return false; /* Unknown data dependence. */ if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know) { /* If user asserted safelen consecutive iterations can be executed concurrently, assume independence. 
*/ if (loop->safelen >= 2) { if (loop->safelen < *max_vf) *max_vf = loop->safelen; LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false; return false; } if (STMT_VINFO_GATHER_P (stmtinfo_a) || STMT_VINFO_GATHER_P (stmtinfo_b)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "versioning for alias not supported for: " "can't determine dependence between "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra)); dump_printf (MSG_MISSED_OPTIMIZATION, " and "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb)); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return true; } if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "versioning for alias required: " "can't determine dependence between "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra)); dump_printf (MSG_MISSED_OPTIMIZATION, " and "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb)); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } /* Add to list of ddrs that need to be tested at run-time. */ return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo); } /* Known data dependence. */ if (DDR_NUM_DIST_VECTS (ddr) == 0) { /* If user asserted safelen consecutive iterations can be executed concurrently, assume independence. 
*/ if (loop->safelen >= 2) { if (loop->safelen < *max_vf) *max_vf = loop->safelen; LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false; return false; } if (STMT_VINFO_GATHER_P (stmtinfo_a) || STMT_VINFO_GATHER_P (stmtinfo_b)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "versioning for alias not supported for: " "bad dist vector for "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra)); dump_printf (MSG_MISSED_OPTIMIZATION, " and "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb)); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return true; } if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "versioning for alias required: " "bad dist vector for "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra)); dump_printf (MSG_MISSED_OPTIMIZATION, " and "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb)); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } /* Add to list of ddrs that need to be tested at run-time. */ return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo); } loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr)); FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v) { int dist = dist_v[loop_depth]; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "dependence distance = %d.\n", dist); if (dist == 0) { if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "dependence distance == 0 between "); dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); dump_printf (MSG_NOTE, " and "); dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } /* When we perform grouped accesses and perform implicit CSE by detecting equal accesses and doing disambiguation with runtime alias tests like for .. = a[i]; .. = a[i+1]; a[i] = ..; a[i+1] = ..; *p = ..; .. = a[i]; .. 
= a[i+1]; where we will end up loading { a[i], a[i+1] } once, make sure that inserting group loads before the first load and stores after the last store will do the right thing. Similar for groups like a[i] = ...; ... = a[i]; a[i+1] = ...; where loads from the group interleave with the store. */ if (STMT_VINFO_GROUPED_ACCESS (stmtinfo_a) || STMT_VINFO_GROUPED_ACCESS (stmtinfo_b)) { gimple earlier_stmt; earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb)); if (DR_IS_WRITE (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt)))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "READ_WRITE dependence in interleaving." "\n"); return true; } } continue; } if (dist > 0 && DDR_REVERSED_P (ddr)) { /* If DDR_REVERSED_P the order of the data-refs in DDR was reversed (to make distance vector positive), and the actual distance is negative. */ if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "dependence distance negative.\n"); /* Record a negative dependence distance to later limit the amount of stmt copying / unrolling we can perform. Only need to handle read-after-write dependence. */ if (DR_IS_READ (drb) && (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0 || STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist)) STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist; continue; } if (abs (dist) >= 2 && abs (dist) < *max_vf) { /* The dependence distance requires reduction of the maximal vectorization factor. */ *max_vf = abs (dist); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "adjusting maximal vectorization factor to %i\n", *max_vf); } if (abs (dist) >= *max_vf) { /* Dependence distance does not create dependence, as far as vectorization is concerned, in this case. 
*/ if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "dependence distance >= VF.\n"); continue; } if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized, possible dependence " "between data-refs "); dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); dump_printf (MSG_NOTE, " and "); dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); dump_printf (MSG_NOTE, "\n"); } return true; } return false; } /* Function vect_analyze_data_ref_dependences. Examine all the data references in the loop, and make sure there do not exist any data dependences between them. Set *MAX_VF according to the maximum vectorization factor the data dependences allow. */ bool vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo, int *max_vf) { unsigned int i; struct data_dependence_relation *ddr; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_data_ref_dependences ===\n"); LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true; if (!compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo), &LOOP_VINFO_DDRS (loop_vinfo), LOOP_VINFO_LOOP_NEST (loop_vinfo), true)) return false; FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr) if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf)) return false; return true; } /* Function vect_slp_analyze_data_ref_dependence. Return TRUE if there (might) exist a dependence between a memory-reference DRA and a memory-reference DRB. When versioning for alias may check a dependence at run-time, return FALSE. Adjust *MAX_VF according to the data dependence. */ static bool vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr) { struct data_reference *dra = DDR_A (ddr); struct data_reference *drb = DDR_B (ddr); /* We need to check dependences of statements marked as unvectorizable as well, they still can prohibit vectorization. */ /* Independent data accesses. 
*/ if (DDR_ARE_DEPENDENT (ddr) == chrec_known) return false; if (dra == drb) return false; /* Read-read is OK. */ if (DR_IS_READ (dra) && DR_IS_READ (drb)) return false; /* If dra and drb are part of the same interleaving chain consider them independent. */ if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra))) && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra))) == GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb))))) return false; /* Unknown data dependence. */ if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "can't determine dependence between "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra)); dump_printf (MSG_MISSED_OPTIMIZATION, " and "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb)); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } } else if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "determined dependence between "); dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); dump_printf (MSG_NOTE, " and "); dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); dump_printf (MSG_NOTE, "\n"); } /* We do not vectorize basic blocks with write-write dependencies. */ if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb)) return true; /* If we have a read-write dependence check that the load is before the store. When we vectorize basic blocks, vector load can be only before corresponding scalar load, and vector store can be only after its corresponding scalar store. So the order of the acceses is preserved in case the load is before the store. */ gimple earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb)); if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt)))) { /* That only holds for load-store pairs taking part in vectorization. 
*/ if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dra))) && STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (drb)))) return false; } return true; } /* Function vect_analyze_data_ref_dependences. Examine all the data references in the basic-block, and make sure there do not exist any data dependences between them. Set *MAX_VF according to the maximum vectorization factor the data dependences allow. */ bool vect_slp_analyze_data_ref_dependences (bb_vec_info bb_vinfo) { struct data_dependence_relation *ddr; unsigned int i; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_slp_analyze_data_ref_dependences ===\n"); if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo), &BB_VINFO_DDRS (bb_vinfo), vNULL, true)) return false; FOR_EACH_VEC_ELT (BB_VINFO_DDRS (bb_vinfo), i, ddr) if (vect_slp_analyze_data_ref_dependence (ddr)) return false; return true; } /* Function vect_compute_data_ref_alignment Compute the misalignment of the data reference DR. Output: 1. If during the misalignment computation it is found that the data reference cannot be vectorized then false is returned. 2. DR_MISALIGNMENT (DR) is defined. FOR NOW: No analysis is actually performed. Misalignment is calculated only for trivial cases. TODO. */ static bool vect_compute_data_ref_alignment (struct data_reference *dr) { gimple stmt = DR_STMT (dr); stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = NULL; tree ref = DR_REF (dr); tree vectype; tree base, base_addr; tree misalign; tree aligned_to; unsigned HOST_WIDE_INT alignment; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "vect_compute_data_ref_alignment:\n"); if (loop_vinfo) loop = LOOP_VINFO_LOOP (loop_vinfo); /* Initialize misalignment to unknown. */ SET_DR_MISALIGNMENT (dr, -1); /* Strided loads perform only component accesses, misalignment information is irrelevant for them. 
*/ if (STMT_VINFO_STRIDE_LOAD_P (stmt_info)) return true; misalign = DR_INIT (dr); aligned_to = DR_ALIGNED_TO (dr); base_addr = DR_BASE_ADDRESS (dr); vectype = STMT_VINFO_VECTYPE (stmt_info); /* In case the dataref is in an inner-loop of the loop that is being vectorized (LOOP), we use the base and misalignment information relative to the outer-loop (LOOP). This is ok only if the misalignment stays the same throughout the execution of the inner-loop, which is why we have to check that the stride of the dataref in the inner-loop evenly divides by the vector size. */ if (loop && nested_in_vect_loop_p (loop, stmt)) { tree step = DR_STEP (dr); HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "inner step divides the vector-size.\n"); misalign = STMT_VINFO_DR_INIT (stmt_info); aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info); base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info); } else { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "inner step doesn't divide the vector-size.\n"); misalign = NULL_TREE; } } /* Similarly, if we're doing basic-block vectorization, we can only use base and misalignment information relative to an innermost loop if the misalignment stays the same throughout the execution of the loop. As above, this is the case if the stride of the dataref evenly divides by the vector size. */ if (!loop) { tree step = DR_STEP (dr); HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "SLP: step doesn't divide the vector-size.\n"); misalign = NULL_TREE; } } /* To look at alignment of the base we have to preserve an inner MEM_REF as that carries alignment information of the actual access. 
*/ base = ref; while (handled_component_p (base)) base = TREE_OPERAND (base, 0); if (TREE_CODE (base) == MEM_REF) base = build2 (MEM_REF, TREE_TYPE (base), base_addr, build_int_cst (TREE_TYPE (TREE_OPERAND (base, 1)), 0)); unsigned int base_alignment = get_object_alignment (base); if (base_alignment >= TYPE_ALIGN (TREE_TYPE (vectype))) DR_VECT_AUX (dr)->base_element_aligned = true; alignment = TYPE_ALIGN_UNIT (vectype); if ((compare_tree_int (aligned_to, alignment) < 0) || !misalign) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "Unknown alignment for access: "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return true; } if (base_alignment < TYPE_ALIGN (vectype)) { /* Strip an inner MEM_REF to a bare decl if possible. */ if (TREE_CODE (base) == MEM_REF && integer_zerop (TREE_OPERAND (base, 1)) && TREE_CODE (TREE_OPERAND (base, 0)) == ADDR_EXPR) base = TREE_OPERAND (TREE_OPERAND (base, 0), 0); if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype))) { if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "can't force alignment of ref: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, ref); dump_printf (MSG_NOTE, "\n"); } return true; } /* Force the alignment of the decl. NOTE: This is the only change to the code we make during the analysis phase, before deciding to vectorize the loop. */ if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "force alignment of "); dump_generic_expr (MSG_NOTE, TDF_SLIM, ref); dump_printf (MSG_NOTE, "\n"); } DR_VECT_AUX (dr)->base_decl = base; DR_VECT_AUX (dr)->base_misaligned = true; DR_VECT_AUX (dr)->base_element_aligned = true; } /* If this is a backward running DR then first access in the larger vectype actually is N-1 elements before the address in the DR. Adjust misalign accordingly. 
*/ if (tree_int_cst_sgn (DR_STEP (dr)) < 0) { tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1); /* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type, otherwise we wouldn't be here. */ offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr)); /* PLUS because DR_STEP was negative. */ misalign = size_binop (PLUS_EXPR, misalign, offset); } SET_DR_MISALIGNMENT (dr, wi::mod_floor (misalign, alignment, SIGNED).to_uhwi ()); if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr)); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return true; } /* Function vect_compute_data_refs_alignment Compute the misalignment of data references in the loop. Return FALSE if a data reference is found that cannot be vectorized. */ static bool vect_compute_data_refs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo) { vec<data_reference_p> datarefs; struct data_reference *dr; unsigned int i; if (loop_vinfo) datarefs = LOOP_VINFO_DATAREFS (loop_vinfo); else datarefs = BB_VINFO_DATAREFS (bb_vinfo); FOR_EACH_VEC_ELT (datarefs, i, dr) if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) && !vect_compute_data_ref_alignment (dr)) { if (bb_vinfo) { /* Mark unsupported statement as unvectorizable. */ STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false; continue; } else return false; } return true; } /* Function vect_update_misalignment_for_peel DR - the data reference whose misalignment is to be adjusted. DR_PEEL - the data reference whose misalignment is being made zero in the vector loop by the peel. NPEEL - the number of iterations in the peel loop if the misalignment of DR_PEEL is known at compile time. 
*/

static void
vect_update_misalignment_for_peel (struct data_reference *dr,
                                   struct data_reference *dr_peel, int npeel)
{
  unsigned int i;
  vec<dr_p> same_align_drs;
  struct data_reference *current_dr;
  int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
  int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
  stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
  stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));

  /* For interleaved data accesses the step in the loop must be multiplied by
     the size of the interleaving group.  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
  if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
    dr_peel_size *= GROUP_SIZE (peel_stmt_info);

  /* It can be assumed that the data refs with the same alignment as dr_peel
     are aligned in the vector loop.  */
  same_align_drs
    = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
  FOR_EACH_VEC_ELT (same_align_drs, i, current_dr)
    {
      if (current_dr != dr)
        continue;
      /* DR shares DR_PEEL's alignment, so after peeling it becomes
	 aligned as well.  */
      gcc_assert (DR_MISALIGNMENT (dr) / dr_size ==
                  DR_MISALIGNMENT (dr_peel) / dr_peel_size);
      SET_DR_MISALIGNMENT (dr, 0);
      return;
    }

  /* If both misalignments are known at compile time, shift DR's
     misalignment by the NPEEL peeled iterations (sign depends on the
     direction of DR's step) and reduce modulo the vector alignment.  */
  if (known_alignment_for_access_p (dr)
      && known_alignment_for_access_p (dr_peel))
    {
      bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
      int misal = DR_MISALIGNMENT (dr);
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      misal += negative ? -npeel * dr_size : npeel * dr_size;
      misal &= (TYPE_ALIGN (vectype) / BITS_PER_UNIT) - 1;
      SET_DR_MISALIGNMENT (dr, misal);
      return;
    }

  /* Otherwise the peel makes DR's misalignment unknown (-1).  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1.\n");
  SET_DR_MISALIGNMENT (dr, -1);
}


/* Function vect_verify_datarefs_alignment

   Return TRUE if all data references in the loop can be
   handled with respect to alignment.
*/

bool
vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  vec<data_reference_p> datarefs;
  struct data_reference *dr;
  enum dr_alignment_support supportable_dr_alignment;
  unsigned int i;

  /* Works for both loop and basic-block vectorization; exactly one of
     LOOP_VINFO / BB_VINFO is expected to be non-null.  */
  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    {
      gimple stmt = DR_STMT (dr);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      if (!STMT_VINFO_RELEVANT_P (stmt_info))
	continue;

      /* For interleaving, only the alignment of the first access matters.
         Skip statements marked as not vectorizable.  */
      if ((STMT_VINFO_GROUPED_ACCESS (stmt_info)
	   && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
	  || !STMT_VINFO_VECTORIZABLE (stmt_info))
	continue;

      /* Strided loads perform only component accesses, alignment is
	 irrelevant for them.  */
      if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
	continue;

      /* Fail as soon as one access has no supportable (mis)alignment
	 scheme on the target.  */
      supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
      if (!supportable_dr_alignment)
	{
	  if (dump_enabled_p ())
	    {
	      if (DR_IS_READ (dr))
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "not vectorized: unsupported unaligned load.");
	      else
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "not vectorized: unsupported unaligned "
				 "store.");

	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				 DR_REF (dr));
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }
	  return false;
	}
      if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Vectorizing an unaligned access.\n");
    }
  return true;
}

/* Given a memory reference EXP return whether its alignment is less
   than its size.  */

static bool
not_size_aligned (tree exp)
{
  /* A size that does not fit an unsigned HOST_WIDE_INT is conservatively
     treated as not size-aligned.  */
  if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))))
    return true;

  return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp)))
	  > get_object_alignment (exp));
}

/* Function vector_alignment_reachable_p

   Return true if vector alignment for DR is reachable by peeling
   a few loop iterations.  Return false otherwise.
*/

static bool
vector_alignment_reachable_p (struct data_reference *dr)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* For interleaved access we peel only if number of iterations in
	 the prolog loop ({VF - misalignment}), is a multiple of the
	 number of the interleaved accesses.  */
      int elem_size, mis_in_elements;
      int nelements = TYPE_VECTOR_SUBPARTS (vectype);

      /* FORNOW: handle only known alignment.  */
      if (!known_alignment_for_access_p (dr))
	return false;

      /* Express the misalignment in whole elements, then test the
	 divisibility requirement stated above.  */
      elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
      mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;

      if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
	return false;
    }

  /* If misalignment is known at the compile time then allow peeling
     only if natural alignment is reachable through peeling.  */
  if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
    {
      HOST_WIDE_INT elmsize =
		int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
	                   "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
	  dump_printf (MSG_NOTE,
	               ". misalignment = %d.\n", DR_MISALIGNMENT (dr));
	}
      /* Peeling moves the access by whole elements, so a misalignment
	 that is not a multiple of the element size can never be fixed.  */
      if (DR_MISALIGNMENT (dr) % elmsize)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
	                     "data size does not divide the misalignment.\n");
	  return false;
	}
    }

  /* Unknown misalignment: defer to the target hook (and honor
     user-specified alignment on non-packed types).  */
  if (!known_alignment_for_access_p (dr))
    {
      tree type = TREE_TYPE (DR_REF (dr));
      bool is_packed = not_size_aligned (DR_REF (dr));
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
	                 "Unknown misalignment, is_packed = %d\n",is_packed);
      if ((TYPE_USER_ALIGN (type) && !is_packed)
	  || targetm.vectorize.vector_alignment_reachable (type, is_packed))
	return true;
      else
	return false;
    }

  return true;
}


/* Calculate the cost of the memory access represented by DR.
*/

static void
vect_get_data_access_cost (struct data_reference *dr,
                           unsigned int *inside_cost,
                           unsigned int *outside_cost,
			   stmt_vector_for_cost *body_cost_vec)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  /* Number of vector copies needed per scalar iteration group.  */
  int ncopies = vf / nunits;

  /* INSIDE_COST/OUTSIDE_COST are accumulated into, not reset, by the
     load/store cost helpers.  */
  if (DR_IS_READ (dr))
    vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost,
			NULL, body_cost_vec, false);
  else
    vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_get_data_access_cost: inside_cost = %d, "
                     "outside_cost = %d.\n", *inside_cost, *outside_cost);
}


/* Insert DR into peeling hash table with NPEEL as key.  */

static void
vect_peeling_hash_insert (loop_vec_info loop_vinfo, struct data_reference *dr,
                          int npeel)
{
  struct _vect_peel_info elem, *slot;
  _vect_peel_info **new_slot;
  bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);

  /* Bump the count for an existing NPEEL entry, or create a fresh one.  */
  elem.npeel = npeel;
  slot = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find (&elem);
  if (slot)
    slot->count++;
  else
    {
      slot = XNEW (struct _vect_peel_info);
      slot->npeel = npeel;
      slot->dr = dr;
      slot->count = 1;
      new_slot
       	= LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find_slot (slot, INSERT);
      *new_slot = slot;
    }

  /* If this peeling option still leaves the access unsupportable,
     penalize it heavily under the unlimited cost model so it is not
     chosen as the most frequent option.  */
  if (!supportable_dr_alignment
      && unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
    slot->count += VECT_MAX_COST;
}


/* Traverse peeling hash table to find peeling option that aligns maximum
   number of data accesses.
*/

int
vect_peeling_hash_get_most_frequent (_vect_peel_info **slot,
				     _vect_peel_extended_info *max)
{
  vect_peel_info elem = *slot;

  /* Prefer the option covering the most accesses; break ties in favor
     of the smaller peel count.  */
  if (elem->count > max->peel_info.count
      || (elem->count == max->peel_info.count
          && max->peel_info.npeel > elem->npeel))
    {
      max->peel_info.npeel = elem->npeel;
      max->peel_info.count = elem->count;
      max->peel_info.dr = elem->dr;
    }

  /* Returning nonzero continues the hash-table traversal.  */
  return 1;
}


/* Traverse peeling hash table and calculate cost for each peeling option.
   Find the one with the lowest cost.  */

int
vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot,
				   _vect_peel_extended_info *min)
{
  vect_peel_info elem = *slot;
  int save_misalignment, dummy;
  unsigned int inside_cost = 0, outside_cost = 0, i;
  gimple stmt = DR_STMT (elem->dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  struct data_reference *dr;
  stmt_vector_for_cost prologue_cost_vec, body_cost_vec, epilogue_cost_vec;

  prologue_cost_vec.create (2);
  body_cost_vec.create (2);
  epilogue_cost_vec.create (2);

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    {
      stmt = DR_STMT (dr);
      stmt_info = vinfo_for_stmt (stmt);
      /* For interleaving, only the alignment of the first access
         matters.  */
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
          && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
        continue;

      /* Temporarily apply this option's effect on DR's misalignment,
	 cost the access, then restore the saved value.  */
      save_misalignment = DR_MISALIGNMENT (dr);
      vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel);
      vect_get_data_access_cost (dr, &inside_cost, &outside_cost,
				 &body_cost_vec);
      SET_DR_MISALIGNMENT (dr, save_misalignment);
    }

  auto_vec<stmt_info_for_cost> scalar_cost_vec;
  vect_get_single_scalar_iteration_cost (loop_vinfo, &scalar_cost_vec);
  outside_cost += vect_get_known_peeling_cost
    (loop_vinfo, elem->npeel, &dummy,
     &scalar_cost_vec, &prologue_cost_vec, &epilogue_cost_vec);

  /* Prologue and epilogue costs are added to the target model later.
     These costs depend only on the scalar iteration cost, the
     number of peeling iterations finally chosen, and the number of
     misaligned statements.  So discard the information found here.  */
  prologue_cost_vec.release ();
  epilogue_cost_vec.release ();

  /* Keep the cheapest option seen so far (inside cost first, outside
     cost as tie-breaker); release whichever body cost vector loses.  */
  if (inside_cost < min->inside_cost
      || (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
    {
      min->inside_cost = inside_cost;
      min->outside_cost = outside_cost;
      min->body_cost_vec.release ();
      min->body_cost_vec = body_cost_vec;
      min->peel_info.dr = elem->dr;
      min->peel_info.npeel = elem->npeel;
    }
  else
    body_cost_vec.release ();

  /* Returning nonzero continues the hash-table traversal.  */
  return 1;
}


/* Choose best peeling option by traversing peeling hash table and either
   choosing an option with the lowest cost (if cost model is enabled) or the
   option that aligns as many accesses as possible.  */

static struct data_reference *
vect_peeling_hash_choose_best_peeling (loop_vec_info loop_vinfo,
                                       unsigned int *npeel,
				       stmt_vector_for_cost *body_cost_vec)
{
   struct _vect_peel_extended_info res;

   res.peel_info.dr = NULL;
   res.body_cost_vec = stmt_vector_for_cost ();

   if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
     {
       /* Cost model enabled: pick the lowest-cost peeling option.  */
       res.inside_cost = INT_MAX;
       res.outside_cost = INT_MAX;
       LOOP_VINFO_PEELING_HTAB (loop_vinfo)
           ->traverse <_vect_peel_extended_info *,
                       vect_peeling_hash_get_lowest_cost> (&res);
     }
   else
     {
       /* Unlimited cost model: pick the option that aligns the most
	  accesses.  */
       res.peel_info.count = 0;
       LOOP_VINFO_PEELING_HTAB (loop_vinfo)
           ->traverse <_vect_peel_extended_info *,
                       vect_peeling_hash_get_most_frequent> (&res);
     }

   *npeel = res.peel_info.npeel;
   *body_cost_vec = res.body_cost_vec;
   return res.peel_info.dr;
}


/* Function vect_enhance_data_refs_alignment

   This pass will use loop versioning and loop peeling in order to enhance
   the alignment of data references in the loop.

   FOR NOW: we assume that whatever versioning/peeling takes place, only the
   original loop is to be vectorized.  Any other loops that are created by
   the transformations performed in this pass - are not supposed to be
   vectorized.  This restriction will be relaxed.
   This pass will require a cost model to guide it whether to apply peeling
   or versioning or a combination of the two.  For example, the scheme that
   intel uses when given a loop with several memory accesses, is as follows:
   choose one memory access ('p') which alignment you want to force by doing
   peeling.  Then, either (1) generate a loop in which 'p' is aligned and all
   other accesses are not necessarily aligned, or (2) use loop versioning to
   generate one loop in which all accesses are aligned, and another loop in
   which only 'p' is necessarily aligned.

   ("Automatic Intra-Register Vectorization for the Intel Architecture",
   Aart J.C. Bik, Milind Girkar, Paul M. Grey and Ximmin Tian, International
   Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)

   Devising a cost model is the most critical aspect of this work.  It will
   guide us on which access to peel for, whether to use loop versioning, how
   many versions to create, etc.  The cost model will probably consist of
   generic considerations as well as target specific considerations (on
   powerpc for example, misaligned stores are more painful than misaligned
   loads).

   Here are the general steps involved in alignment enhancements:

     -- original loop, before alignment analysis:
	for (i=0; i<N; i++){
	  x = q[i];			# DR_MISALIGNMENT(q) = unknown
	  p[i] = y;			# DR_MISALIGNMENT(p) = unknown
	}

     -- After vect_compute_data_refs_alignment:
	for (i=0; i<N; i++){
	  x = q[i];			# DR_MISALIGNMENT(q) = 3
	  p[i] = y;			# DR_MISALIGNMENT(p) = unknown
	}

     -- Possibility 1: we do loop versioning:
     if (p is aligned) {
	for (i=0; i<N; i++){	# loop 1A
	  x = q[i];			# DR_MISALIGNMENT(q) = 3
	  p[i] = y;			# DR_MISALIGNMENT(p) = 0
	}
     }
     else {
	for (i=0; i<N; i++){	# loop 1B
	  x = q[i];			# DR_MISALIGNMENT(q) = 3
	  p[i] = y;			# DR_MISALIGNMENT(p) = unaligned
	}
     }

     -- Possibility 2: we do loop peeling:
     for (i = 0; i < 3; i++){	# (scalar loop, not to be vectorized).
	x = q[i];
	p[i] = y;
     }
     for (i = 3; i < N; i++){	# loop 2A
	x = q[i];			# DR_MISALIGNMENT(q) = 0
	p[i] = y;			# DR_MISALIGNMENT(p) = unknown
     }

     -- Possibility 3: combination of loop peeling and versioning:
     for (i = 0; i < 3; i++){	# (scalar loop, not to be vectorized).
	x = q[i];
	p[i] = y;
     }
     if (p is aligned) {
	for (i = 3; i<N; i++){	# loop 3A
	  x = q[i];			# DR_MISALIGNMENT(q) = 0
	  p[i] = y;			# DR_MISALIGNMENT(p) = 0
	}
     }
     else {
	for (i = 3; i<N; i++){	# loop 3B
	  x = q[i];			# DR_MISALIGNMENT(q) = 0
	  p[i] = y;			# DR_MISALIGNMENT(p) = unaligned
	}
     }

     These loops are later passed to loop_transform to be vectorized.  The
     vectorizer will use the alignment information to guide the
     transformation (whether to generate regular loads/stores, or with
     special handling for misalignment).  */

bool
vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
{
  vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  enum dr_alignment_support supportable_dr_alignment;
  struct data_reference *dr0 = NULL, *first_store = NULL;
  struct data_reference *dr;
  unsigned int i, j;
  bool do_peeling = false;
  bool do_versioning = false;
  bool stat;
  gimple stmt;
  stmt_vec_info stmt_info;
  unsigned int npeel = 0;
  bool all_misalignments_unknown = true;
  unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  unsigned possible_npeel_number = 1;
  tree vectype;
  unsigned int nelements, mis, same_align_drs_max = 0;
  stmt_vector_for_cost body_cost_vec = stmt_vector_for_cost ();

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_enhance_data_refs_alignment ===\n");

  /* While cost model enhancements are expected in the future, the high level
     view of the code at this time is as follows:

     A) If there is a misaligned access then see if peeling to align
        this access can make all data references satisfy
        vect_supportable_dr_alignment.  If so, update data structures
        as needed and return true.

     B) If peeling wasn't possible and there is a data reference with an
        unknown misalignment that does not satisfy
        vect_supportable_dr_alignment then see if loop versioning checks can
        be used to make all data references satisfy
        vect_supportable_dr_alignment.  If so, update data structures as
        needed and return true.

     C) If neither peeling nor versioning were successful then return false
        if any data reference does not satisfy
        vect_supportable_dr_alignment.

     D) Return true (all data references satisfy
        vect_supportable_dr_alignment).

     Note, Possibility 3 above (which is peeling and versioning together) is
     not being done at this time.  */

  /* (1) Peeling to force alignment.  */

  /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
     Considerations:
     + How many accesses will become aligned due to the peeling
     - How many accesses will become unaligned due to the peeling,
       and the cost of misaligned accesses.
     - The cost of peeling (the extra runtime checks, the increase
       in code size).  */

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    {
      stmt = DR_STMT (dr);
      stmt_info = vinfo_for_stmt (stmt);

      if (!STMT_VINFO_RELEVANT_P (stmt_info))
	continue;

      /* For interleaving, only the alignment of the first access
         matters.  */
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
          && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
        continue;

      /* For invariant accesses there is nothing to enhance.  */
      if (integer_zerop (DR_STEP (dr)))
	continue;

      /* Strided loads perform only component accesses, alignment is
	 irrelevant for them.  */
      if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
	continue;

      supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
      do_peeling = vector_alignment_reachable_p (dr);
      if (do_peeling)
        {
          if (known_alignment_for_access_p (dr))
            {
              unsigned int npeel_tmp;
	      bool negative = tree_int_cst_compare (DR_STEP (dr),
						    size_zero_node) < 0;

              /* Save info about DR in the hash table.  Lazily allocate
		 the peeling hash table on first use.  */
              if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo))
                LOOP_VINFO_PEELING_HTAB (loop_vinfo)
		  = new hash_table<peel_info_hasher> (1);

	      /* NPEEL_TMP is the element-count distance to the next
		 aligned boundary; a negative step peels in the opposite
		 direction, hence (mis - nelements) in that case.  */
              vectype = STMT_VINFO_VECTYPE (stmt_info);
              nelements = TYPE_VECTOR_SUBPARTS (vectype);
              mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE (
                                                TREE_TYPE (DR_REF (dr))));
              npeel_tmp = (negative
			   ? (mis - nelements) : (nelements - mis))
		  & (nelements - 1);

              /* For multiple types, it is possible that the bigger type access
                 will have more than one peeling option.  E.g., a loop with two
                 types: one of size (vector size / 4), and the other one of
                 size (vector size / 8).  Vectorization factor will 8.  If both
                 access are misaligned by 3, the first one needs one scalar
                 iteration to be aligned, and the second one needs 5.  But the
                 the first one will be aligned also by peeling 5 scalar
                 iterations, and in that case both accesses will be aligned.
                 Hence, except for the immediate peeling amount, we also want
                 to try to add full vector size, while we don't exceed
                 vectorization factor.
                 We do this automtically for cost model, since we calculate cost
                 for every peeling option.  */
              if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
                possible_npeel_number = vf /nelements;

              /* Handle the aligned case. We may decide to align some other
                 access, making DR unaligned.  */
              if (DR_MISALIGNMENT (dr) == 0)
                {
                  npeel_tmp = 0;
                  if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
                    possible_npeel_number++;
                }

	      /* Record every candidate peeling amount for this DR, stepping
		 by whole vectors.  */
              for (j = 0; j < possible_npeel_number; j++)
                {
                  gcc_assert (npeel_tmp <= vf);
                  vect_peeling_hash_insert (loop_vinfo, dr, npeel_tmp);
                  npeel_tmp += nelements;
                }

              all_misalignments_unknown = false;
              /* Data-ref that was chosen for the case that all the
                 misalignments are unknown is not relevant anymore, since we
                 have a data-ref with known alignment.  */
              dr0 = NULL;
            }
          else
            {
              /* If we don't know any misalignment values, we prefer
                 peeling for data-ref that has the maximum number of data-refs
                 with the same alignment, unless the target prefers to align
                 stores over load.  */
              if (all_misalignments_unknown)
                {
		  unsigned same_align_drs
		    = STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ();
                  if (!dr0
		      || same_align_drs_max < same_align_drs)
                    {
                      same_align_drs_max = same_align_drs;
                      dr0 = dr;
                    }
		  /* For data-refs with the same number of related
		     accesses prefer the one where the misalign
		     computation will be invariant in the outermost loop.  */
		  else if (same_align_drs_max == same_align_drs)
		    {
		      struct loop *ivloop0, *ivloop;
		      ivloop0 = outermost_invariant_loop_for_expr
			  (loop, DR_BASE_ADDRESS (dr0));
		      ivloop = outermost_invariant_loop_for_expr
			  (loop, DR_BASE_ADDRESS (dr));
		      if ((ivloop && !ivloop0)
			  || (ivloop && ivloop0
			      && flow_loop_nested_p (ivloop, ivloop0)))
			dr0 = dr;
		    }

                  if (!first_store && DR_IS_WRITE (dr))
                    first_store = dr;
                }

              /* If there are both known and unknown misaligned accesses in the
                 loop, we choose peeling amount according to the known
                 accesses.  */
              if (!supportable_dr_alignment)
                {
                  dr0 = dr;
                  if (!first_store && DR_IS_WRITE (dr))
                    first_store = dr;
                }
            }
        }
      else
        {
          if (!aligned_access_p (dr))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "vector alignment may not be reachable\n");
              break;
            }
        }
    }

  /* Check if we can possibly peel the loop.  */
  if (!vect_can_advance_ivs_p (loop_vinfo)
      || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
    do_peeling = false;

  /* If we don't know how many times the peeling loop will run
     assume it will run VF-1 times and disable peeling if the remaining
     iters are less than the vectorization factor.  */
  if (do_peeling
      && all_misalignments_unknown
      && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && (LOOP_VINFO_INT_NITERS (loop_vinfo)
	  < 2 * (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1))
    do_peeling = false;

  if (do_peeling
      && all_misalignments_unknown
      && vect_supportable_dr_alignment (dr0, false))
    {
      /* Check if the target requires to prefer stores over loads, i.e., if
         misaligned stores are more expensive than misaligned loads (taking
         drs with same alignment into account).  */
      if (first_store && DR_IS_READ (dr0))
        {
          unsigned int load_inside_cost = 0, load_outside_cost = 0;
          unsigned int store_inside_cost = 0, store_outside_cost = 0;
          unsigned int load_inside_penalty = 0, load_outside_penalty = 0;
          unsigned int store_inside_penalty = 0, store_outside_penalty = 0;
	  stmt_vector_for_cost dummy;
	  dummy.create (2);

          vect_get_data_access_cost (dr0, &load_inside_cost,
				     &load_outside_cost, &dummy);
          vect_get_data_access_cost (first_store, &store_inside_cost,
				     &store_outside_cost, &dummy);

	  dummy.release ();

          /* Calculate the penalty for leaving FIRST_STORE unaligned (by
             aligning the load DR0).  */
          load_inside_penalty = store_inside_cost;
          load_outside_penalty = store_outside_cost;
          for (i = 0;
	       STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
			  DR_STMT (first_store))).iterate (i, &dr);
               i++)
            if (DR_IS_READ (dr))
              {
                load_inside_penalty += load_inside_cost;
                load_outside_penalty += load_outside_cost;
              }
            else
              {
                load_inside_penalty += store_inside_cost;
                load_outside_penalty += store_outside_cost;
              }

          /* Calculate the penalty for leaving DR0 unaligned (by
             aligning the FIRST_STORE).  */
          store_inside_penalty = load_inside_cost;
          store_outside_penalty = load_outside_cost;
          for (i = 0;
	       STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
		      DR_STMT (dr0))).iterate (i, &dr);
               i++)
            if (DR_IS_READ (dr))
              {
                store_inside_penalty += load_inside_cost;
                store_outside_penalty += load_outside_cost;
              }
            else
              {
                store_inside_penalty += store_inside_cost;
                store_outside_penalty += store_outside_cost;
              }

	  /* Align the store instead of the load if it is cheaper
	     overall.  */
          if (load_inside_penalty > store_inside_penalty
              || (load_inside_penalty == store_inside_penalty
                  && load_outside_penalty > store_outside_penalty))
            dr0 = first_store;
        }

      /* In case there are only loads with different unknown misalignments,
         use peeling only if it may help to align other accesses in the
         loop.  */
      if (!first_store
	  && !STMT_VINFO_SAME_ALIGN_REFS (
		  vinfo_for_stmt (DR_STMT (dr0))).length ()
          && vect_supportable_dr_alignment (dr0, false)
              != dr_unaligned_supported)
        do_peeling = false;
    }

  if (do_peeling && !dr0)
    {
      /* Peeling is possible, but there is no data access that is not supported
         unless aligned. So we try to choose the best possible peeling.  */

      /* We should get here only if there are drs with known misalignment.  */
      gcc_assert (!all_misalignments_unknown);

      /* Choose the best peeling from the hash table.  */
      dr0 = vect_peeling_hash_choose_best_peeling (loop_vinfo, &npeel,
						   &body_cost_vec);
      if (!dr0 || !npeel)
        do_peeling = false;

      /* If peeling by npeel will result in a remaining loop not iterating
         enough to be vectorized then do not peel.  */
      if (do_peeling
	  && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
	  && (LOOP_VINFO_INT_NITERS (loop_vinfo)
	      < LOOP_VINFO_VECT_FACTOR (loop_vinfo) + npeel))
	do_peeling = false;
    }

  if (do_peeling)
    {
      stmt = DR_STMT (dr0);
      stmt_info = vinfo_for_stmt (stmt);
      vectype = STMT_VINFO_VECTYPE (stmt_info);
      nelements = TYPE_VECTOR_SUBPARTS (vectype);

      if (known_alignment_for_access_p (dr0))
        {
	  bool negative = tree_int_cst_compare (DR_STEP (dr0),
						size_zero_node) < 0;
          if (!npeel)
            {
              /* Since it's known at compile time, compute the number of
                 iterations in the peeled loop (the peeling factor) for use in
                 updating DR_MISALIGNMENT values.  The peeling factor is the
                 vectorization factor minus the misalignment as an element
                 count.  */
              mis = DR_MISALIGNMENT (dr0);
              mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
              npeel = ((negative ? mis - nelements : nelements - mis)
		       & (nelements - 1));
            }

	  /* For interleaved data access every iteration accesses all the
	     members of the group, therefore we divide the number of iterations
	     by the group size.  */
	  stmt_info = vinfo_for_stmt (DR_STMT (dr0));
	  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
	    npeel /= GROUP_SIZE (stmt_info);

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "Try peeling by %d\n", npeel);
        }

      /* Ensure that all data refs can be vectorized after the peel.  */
      FOR_EACH_VEC_ELT (datarefs, i, dr)
        {
          int save_misalignment;

	  if (dr == dr0)
	    continue;

	  stmt = DR_STMT (dr);
	  stmt_info = vinfo_for_stmt (stmt);
	  /* For interleaving, only the alignment of the first access
            matters.  */
	  if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
	      && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
	    continue;

	  /* Strided loads perform only component accesses, alignment is
	     irrelevant for them.  */
	  if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
	    continue;

	  /* Probe supportability in the post-peel state, then restore the
	     recorded misalignment.  */
	  save_misalignment = DR_MISALIGNMENT (dr);
	  vect_update_misalignment_for_peel (dr, dr0, npeel);
	  supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
	  SET_DR_MISALIGNMENT (dr, save_misalignment);

	  if (!supportable_dr_alignment)
	    {
	      do_peeling = false;
	      break;
	    }
	}

      if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
        {
          stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
          if (!stat)
            do_peeling = false;
          else
	    {
	      body_cost_vec.release ();
	      return stat;
	    }
        }

      if (do_peeling)
        {
	  /* Respect the user/target limit on the number of peeled
	     iterations.  */
          unsigned max_allowed_peel
            = PARAM_VALUE (PARAM_VECT_MAX_PEELING_FOR_ALIGNMENT);
          if (max_allowed_peel != (unsigned)-1)
            {
              unsigned max_peel = npeel;
              if (max_peel == 0)
                {
                  gimple dr_stmt = DR_STMT (dr0);
                  stmt_vec_info vinfo = vinfo_for_stmt (dr_stmt);
                  tree vtype = STMT_VINFO_VECTYPE (vinfo);
                  max_peel = TYPE_VECTOR_SUBPARTS (vtype) - 1;
                }
              if (max_peel > max_allowed_peel)
                {
                  do_peeling = false;
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                        "Disable peeling, max peels reached: %d\n", max_peel);
                }
            }
        }

      if (do_peeling)
        {
          /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
             If the misalignment of DR_i is identical to that of dr0 then set
             DR_MISALIGNMENT (DR_i) to zero.  If the misalignment of DR_i and
             dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
             by the peeling factor times the element size of DR_i (MOD the
             vectorization factor times the size).  Otherwise, the
             misalignment of DR_i must be set to unknown.  */
	  FOR_EACH_VEC_ELT (datarefs, i, dr)
	    if (dr != dr0)
	      vect_update_misalignment_for_peel (dr, dr0, npeel);

          LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
          if (npeel)
            LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
          else
            LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
	      = DR_MISALIGNMENT (dr0);
	  SET_DR_MISALIGNMENT (dr0, 0);
	  if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "Alignment of access forced using peeling.\n");
              dump_printf_loc (MSG_NOTE, vect_location,
                               "Peeling for alignment will be applied.\n");
            }
	  /* The inside-loop cost will be accounted for in vectorizable_load
	     and vectorizable_store correctly with adjusted alignments.
	     Drop the body_cst_vec on the floor here.  */
	  body_cost_vec.release ();

	  stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
	  gcc_assert (stat);
          return stat;
        }
    }

  body_cost_vec.release ();

  /* (2) Versioning to force alignment.  */

  /* Try versioning if:
     1) optimize loop for speed
     2) there is at least one unsupported misaligned data ref with an unknown
        misalignment, and
     3) all misaligned data refs with a known misalignment are supported, and
     4) the number of runtime alignment checks is within reason.  */

  do_versioning =
	optimize_loop_nest_for_speed_p (loop)
	&& (!loop->inner); /* FORNOW */

  if (do_versioning)
    {
      FOR_EACH_VEC_ELT (datarefs, i, dr)
        {
	  stmt = DR_STMT (dr);
	  stmt_info = vinfo_for_stmt (stmt);

	  /* For interleaving, only the alignment of the first access
	     matters.  */
	  if (aligned_access_p (dr)
	      || (STMT_VINFO_GROUPED_ACCESS (stmt_info)
		  && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
	    continue;

	  /* Strided loads perform only component accesses, alignment is
	     irrelevant for them.  */
	  if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
	    continue;

	  supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);

          if (!supportable_dr_alignment)
            {
              gimple stmt;
              int mask;
              tree vectype;

	      /* Versioning only helps for unknown misalignment, and only
		 up to the runtime-check budget.  */
              if (known_alignment_for_access_p (dr)
                  || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
                     >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
                {
                  do_versioning = false;
                  break;
                }

              stmt = DR_STMT (dr);
              vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
              gcc_assert (vectype);

              /* The rightmost bits of an aligned address must be zeros.
                 Construct the mask needed for this test.  For example,
                 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
                 mask must be 15 = 0xf. */
              mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;

              /* FORNOW: use the same mask to test all potentially unaligned
                 references in the loop.  The vectorizer currently supports
                 a single vector size, see the reference to
                 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
                 vectorization factor is computed.  */
              gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
                          || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
              LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
              LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (
		              DR_STMT (dr));
            }
        }

      /* Versioning requires at least one misaligned data reference.  */
      if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
        do_versioning = false;
      else if (!do_versioning)
        LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
    }

  if (do_versioning)
    {
      vec<gimple> may_misalign_stmts
        = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
      gimple stmt;

      /* It can now be assumed that the data references in the statements
         in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
         of the loop being vectorized.  */
      FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt)
        {
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
          dr = STMT_VINFO_DATA_REF (stmt_info);
	  SET_DR_MISALIGNMENT (dr, 0);
	  if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "Alignment of access forced using versioning.\n");
        }

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Versioning for alignment will be applied.\n");

      /* Peeling and versioning can't be done together at this time.  */
      gcc_assert (! (do_peeling && do_versioning));

      stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
      gcc_assert (stat);
      return stat;
    }

  /* This point is reached if neither peeling nor versioning is being done.  */
  gcc_assert (! (do_peeling || do_versioning));

  stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
  return stat;
}


/* Function vect_find_same_alignment_drs.

   Update group and alignment relations according to the chosen
   vectorization factor.  */

static void
vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
			      loop_vec_info loop_vinfo)
{
  unsigned int i;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);
  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
  int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
  int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
  lambda_vector dist_v;
  unsigned int loop_depth;

  /* Nothing to record for independent or unanalyzable pairs, or a DR
     paired with itself.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    return;

  if (dra == drb)
    return;

  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    return;

  /* Loop-based vectorization and known data dependence.  */
  if (DDR_NUM_DIST_VECTS (ddr) == 0)
    return;

  /* Data-dependence analysis reports a distance vector of zero
     for data-references that overlap only in the first iteration
     but have different sign step (see PR45764).
     So as a sanity check require equal DR_STEP.  */
  if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
    return;

  loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
  FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
    {
      int dist = dist_v[loop_depth];

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
	                 "dependence distance = %d.\n", dist);

      /* Same loop iteration.  */
      if (dist == 0
	  || (dist % vectorization_factor == 0 && dra_size == drb_size))
	{
	  /* Two references with distance zero have the same alignment.  */
	  STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
	  STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
	                       "accesses have the same alignment.\n");
	      dump_printf (MSG_NOTE,
	                   "dependence distance modulo vf == 0 between ");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
	      dump_printf (MSG_NOTE,  " and ");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
	      dump_printf (MSG_NOTE, "\n");
	    }
	}
    }
}


/* Function vect_analyze_data_refs_alignment

   Analyze the alignment of the data-references in the loop.
   Return FALSE if a data reference is found that cannot be vectorized.  */

bool
vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
                                  bb_vec_info bb_vinfo)
{
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_data_refs_alignment ===\n");

  /* Mark groups of data references with same alignment using
     data dependence information.  */
  if (loop_vinfo)
    {
      vec<ddr_p> ddrs = LOOP_VINFO_DDRS (loop_vinfo);
      struct data_dependence_relation *ddr;
      unsigned int i;

      FOR_EACH_VEC_ELT (ddrs, i, ddr)
	vect_find_same_alignment_drs (ddr, loop_vinfo);
    }

  if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
	                 "not vectorized: can't calculate alignment "
	                 "for data ref.\n");
      return false;
    }

  return true;
}


/* Analyze groups of accesses: check that DR belongs to a group of
   accesses of legal size, step, etc.
Detect gaps, single element interleaving, and other special cases. Set grouped access info. Collect groups of strided stores for further use in SLP analysis. */ static bool vect_analyze_group_access (struct data_reference *dr) { tree step = DR_STEP (dr); tree scalar_type = TREE_TYPE (DR_REF (dr)); HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type)); gimple stmt = DR_STMT (dr); stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); HOST_WIDE_INT groupsize, last_accessed_element = 1; bool slp_impossible = false; struct loop *loop = NULL; if (loop_vinfo) loop = LOOP_VINFO_LOOP (loop_vinfo); /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the size of the interleaving group (including gaps). */ groupsize = absu_hwi (dr_step) / type_size; /* Not consecutive access is possible only if it is a part of interleaving. */ if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))) { /* Check if it this DR is a part of interleaving, and is a single element of the group that is accessed in the loop. */ /* Gaps are supported only for loads. STEP must be a multiple of the type size. The size of the group must be a power of 2. 
*/ if (DR_IS_READ (dr) && (dr_step % type_size) == 0 && groupsize > 0 && exact_log2 (groupsize) != -1) { GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt; GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize; if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Detected single element interleaving "); dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr)); dump_printf (MSG_NOTE, " step "); dump_generic_expr (MSG_NOTE, TDF_SLIM, step); dump_printf (MSG_NOTE, "\n"); } if (loop_vinfo) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Data access with gaps requires scalar " "epilogue loop\n"); if (loop->inner) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "Peeling for outer loop is not" " supported\n"); return false; } LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true; } return true; } if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not consecutive access "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } if (bb_vinfo) { /* Mark the statement as unvectorizable. */ STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false; return true; } return false; } if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt) { /* First stmt in the interleaving chain. Check the chain. */ gimple next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)); struct data_reference *data_ref = dr; unsigned int count = 1; tree prev_init = DR_INIT (data_ref); gimple prev = stmt; HOST_WIDE_INT diff, gaps = 0; unsigned HOST_WIDE_INT count_in_bytes; while (next) { /* Skip same data-refs. In case that two or more stmts share data-ref (supported only for loads), we vectorize only the first stmt, and the rest get their vectorized loads from the first one. 
*/ if (!tree_int_cst_compare (DR_INIT (data_ref), DR_INIT (STMT_VINFO_DATA_REF ( vinfo_for_stmt (next))))) { if (DR_IS_WRITE (data_ref)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "Two store stmts share the same dr.\n"); return false; } /* For load use the same data-ref load. */ GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev; prev = next; next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next)); continue; } prev = next; data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next)); /* All group members have the same STEP by construction. */ gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0)); /* Check that the distance between two accesses is equal to the type size. Otherwise, we have gaps. */ diff = (TREE_INT_CST_LOW (DR_INIT (data_ref)) - TREE_INT_CST_LOW (prev_init)) / type_size; if (diff != 1) { /* FORNOW: SLP of accesses with gaps is not supported. */ slp_impossible = true; if (DR_IS_WRITE (data_ref)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "interleaved store with gaps\n"); return false; } gaps += diff - 1; } last_accessed_element += diff; /* Store the gap from the previous member of the group. If there is no gap in the access, GROUP_GAP is always 1. */ GROUP_GAP (vinfo_for_stmt (next)) = diff; prev_init = DR_INIT (data_ref); next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next)); /* Count the number of data-refs in the chain. */ count++; } /* COUNT is the number of accesses found, we multiply it by the size of the type to get COUNT_IN_BYTES. */ count_in_bytes = type_size * count; /* Check that the size of the interleaving (including gaps) is not greater than STEP. 
*/ if (dr_step != 0 && absu_hwi (dr_step) < count_in_bytes + gaps * type_size) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "interleaving size is greater than step for "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dr)); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } /* Check that the size of the interleaving is equal to STEP for stores, i.e., that there are no gaps. */ if (dr_step != 0 && absu_hwi (dr_step) != count_in_bytes) { if (DR_IS_READ (dr)) { slp_impossible = true; /* There is a gap after the last load in the group. This gap is a difference between the groupsize and the number of elements. When there is no gap, this difference should be 0. */ GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - count; } else { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "interleaved store with gaps\n"); return false; } } /* Check that STEP is a multiple of type size. */ if (dr_step != 0 && (dr_step % type_size) != 0) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "step is not a multiple of type size: step "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, step); dump_printf (MSG_MISSED_OPTIMIZATION, " size "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, TYPE_SIZE_UNIT (scalar_type)); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } if (groupsize == 0) groupsize = count; GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Detected interleaving of size %d\n", (int)groupsize); /* SLP: create an SLP data structure for every interleaving group of stores for further analysis in vect_analyse_slp. */ if (DR_IS_WRITE (dr) && !slp_impossible) { if (loop_vinfo) LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt); if (bb_vinfo) BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt); } /* There is a gap in the end of the group. 
*/
  if (groupsize - last_accessed_element > 0 && loop_vinfo)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Data access with gaps requires scalar "
                         "epilogue loop\n");
      if (loop->inner)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Peeling for outer loop is not supported\n");
          return false;
        }

      /* Record that a scalar epilogue is needed to cover the gap.  */
      LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
    }
  }

  return true;
}


/* Analyze the access pattern of the data-reference DR.
   In case of non-consecutive accesses call vect_analyze_group_access() to
   analyze groups of accesses.

   Returns true if the access pattern of DR is supported for
   vectorization, false otherwise.  */

static bool
vect_analyze_data_ref_access (struct data_reference *dr)
{
  tree step = DR_STEP (dr);
  tree scalar_type = TREE_TYPE (DR_REF (dr));
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* In loop vectorization an unanalyzable step means the access cannot
     be reasoned about at all.  */
  if (loop_vinfo && !step)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data-ref access in loop\n");
      return false;
    }

  /* Allow invariant loads in not nested loops.  */
  if (loop_vinfo && integer_zerop (step))
    {
      GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
      if (nested_in_vect_loop_p (loop, stmt))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "zero step in inner loop of nest\n");
          return false;
        }
      /* Invariant loads are fine, invariant stores are not.  */
      return DR_IS_READ (dr);
    }

  if (loop && nested_in_vect_loop_p (loop, stmt))
    {
      /* Interleaved accesses are not yet supported within outer-loop
         vectorization for references in the inner-loop.  */
      GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;

      /* For the rest of the analysis we use the outer-loop step.  */
      step = STMT_VINFO_DR_STEP (stmt_info);
      if (integer_zerop (step))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "zero step in outer loop.\n");
          if (DR_IS_READ (dr))
            return true;
          else
            return false;
        }
    }

  /* Consecutive?  The access is consecutive when the constant step
     equals the scalar type size (or its negation for reverse
     accesses).  */
  if (TREE_CODE (step) == INTEGER_CST)
    {
      HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
      if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
          || (dr_step < 0
              && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
        {
          /* Mark that it is not interleaving.  */
          GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
          return true;
        }
    }

  if (loop && nested_in_vect_loop_p (loop, stmt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "grouped access in outer loop.\n");
      return false;
    }

  /* Assume this is a DR handled by non-constant strided load case.  */
  if (TREE_CODE (step) != INTEGER_CST)
    return STMT_VINFO_STRIDE_LOAD_P (stmt_info);

  /* Not consecutive access - check if it's a part of interleaving group.  */
  return vect_analyze_group_access (dr);
}


/* A helper function used in the comparator function to sort data
   references.  T1 and T2 are two data references to be compared.
   The function returns -1, 0, or 1 to give a qsort-style total
   ordering over trees.  */

static int
compare_tree (tree t1, tree t2)
{
  int i, cmp;
  enum tree_code code;
  char tclass;

  if (t1 == t2)
    return 0;
  if (t1 == NULL)
    return -1;
  if (t2 == NULL)
    return 1;

  /* Trees with different codes order by their code.  */
  if (TREE_CODE (t1) != TREE_CODE (t2))
    return TREE_CODE (t1) < TREE_CODE (t2) ? -1 : 1;

  code = TREE_CODE (t1);
  switch (code)
    {
    /* For const values, we can just use hash values for comparisons.  */
    case INTEGER_CST:
    case REAL_CST:
    case FIXED_CST:
    case STRING_CST:
    case COMPLEX_CST:
    case VECTOR_CST:
      {
        hashval_t h1 = iterative_hash_expr (t1, 0);
        hashval_t h2 = iterative_hash_expr (t2, 0);
        if (h1 != h2)
          return h1 < h2 ? -1 : 1;
        break;
      }

    case SSA_NAME:
      cmp = compare_tree (SSA_NAME_VAR (t1), SSA_NAME_VAR (t2));
      if (cmp != 0)
        return cmp;

      if (SSA_NAME_VERSION (t1) != SSA_NAME_VERSION (t2))
        return SSA_NAME_VERSION (t1) < SSA_NAME_VERSION (t2) ? -1 : 1;
      break;

    default:
      tclass = TREE_CODE_CLASS (code);

      /* For var-decl, we could compare their UIDs.  */
      if (tclass == tcc_declaration)
        {
          if (DECL_UID (t1) != DECL_UID (t2))
            return DECL_UID (t1) < DECL_UID (t2) ?
-1 : 1; break; } /* For expressions with operands, compare their operands recursively. */ for (i = TREE_OPERAND_LENGTH (t1) - 1; i >= 0; --i) { cmp = compare_tree (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i)); if (cmp != 0) return cmp; } } return 0; } /* Compare two data-references DRA and DRB to group them into chunks suitable for grouping. */ static int dr_group_sort_cmp (const void *dra_, const void *drb_) { data_reference_p dra = *(data_reference_p *)const_cast<void *>(dra_); data_reference_p drb = *(data_reference_p *)const_cast<void *>(drb_); int cmp; /* Stabilize sort. */ if (dra == drb) return 0; /* Ordering of DRs according to base. */ if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0)) { cmp = compare_tree (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb)); if (cmp != 0) return cmp; } /* And according to DR_OFFSET. */ if (!dr_equal_offsets_p (dra, drb)) { cmp = compare_tree (DR_OFFSET (dra), DR_OFFSET (drb)); if (cmp != 0) return cmp; } /* Put reads before writes. */ if (DR_IS_READ (dra) != DR_IS_READ (drb)) return DR_IS_READ (dra) ? -1 : 1; /* Then sort after access size. */ if (!operand_equal_p (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))), TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))), 0)) { cmp = compare_tree (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))), TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)))); if (cmp != 0) return cmp; } /* And after step. */ if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0)) { cmp = compare_tree (DR_STEP (dra), DR_STEP (drb)); if (cmp != 0) return cmp; } /* Then sort after DR_INIT. In case of identical DRs sort after stmt UID. */ cmp = tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)); if (cmp == 0) return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1; return cmp; } /* Function vect_analyze_data_ref_accesses. Analyze the access pattern of all the data references in the loop. FORNOW: the only access pattern that is considered vectorizable is a simple step 1 (consecutive) access. 
   FORNOW: handle only arrays and pointer accesses.  */

bool
vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  unsigned int i;
  vec<data_reference_p> datarefs;
  struct data_reference *dr;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_data_ref_accesses ===\n");

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  if (datarefs.is_empty ())
    return true;

  /* Sort the array of datarefs to make building the interleaving chains
     linear.  Don't modify the original vector's order, it is needed for
     determining what dependencies are reversed.  */
  vec<data_reference_p> datarefs_copy = datarefs.copy ();
  datarefs_copy.qsort (dr_group_sort_cmp);

  /* Build the interleaving chains.  Note the inner loop advances the
     shared index I, so after it breaks the outer loop resumes at the
     first dataref not grouped with DRA.  */
  for (i = 0; i < datarefs_copy.length () - 1;)
    {
      data_reference_p dra = datarefs_copy[i];
      stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
      stmt_vec_info lastinfo = NULL;
      for (i = i + 1; i < datarefs_copy.length (); ++i)
        {
          data_reference_p drb = datarefs_copy[i];
          stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));

          /* ??? Imperfect sorting (non-compatible types, non-modulo
             accesses, same accesses) can lead to a group to be artificially
             split here as we don't just skip over those.  If it really
             matters we can push those to a worklist and re-iterate
             over them.  Then we can just skip ahead to the next DR here.  */

          /* Check that the data-refs have same first location (except init)
             and they are both either store or load (not load and store,
             not masked loads or stores).  */
          if (DR_IS_READ (dra) != DR_IS_READ (drb)
              || !operand_equal_p (DR_BASE_ADDRESS (dra),
                                   DR_BASE_ADDRESS (drb), 0)
              || !dr_equal_offsets_p (dra, drb)
              || !gimple_assign_single_p (DR_STMT (dra))
              || !gimple_assign_single_p (DR_STMT (drb)))
            break;

          /* Check that the data-refs have the same constant size and
             step.  */
          tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)));
          tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)));
          if (!tree_fits_uhwi_p (sza)
              || !tree_fits_uhwi_p (szb)
              || !tree_int_cst_equal (sza, szb)
              || !tree_fits_shwi_p (DR_STEP (dra))
              || !tree_fits_shwi_p (DR_STEP (drb))
              || !tree_int_cst_equal (DR_STEP (dra), DR_STEP (drb)))
            break;

          /* Do not place the same access in the interleaving chain twice.  */
          if (tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)) == 0)
            break;

          /* Check the types are compatible.
             ??? We don't distinguish this during sorting.  */
          if (!types_compatible_p (TREE_TYPE (DR_REF (dra)),
                                   TREE_TYPE (DR_REF (drb))))
            break;

          /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb).  */
          HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra));
          HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb));
          gcc_assert (init_a <= init_b);

          /* If init_b == init_a + the size of the type * k, we have an
             interleaving, and DRA is accessed before DRB.  */
          HOST_WIDE_INT type_size_a = tree_to_uhwi (sza);
          if ((init_b - init_a) % type_size_a != 0)
            break;

          /* The step (if not zero) is greater than the difference between
             data-refs' inits.  This splits groups into suitable sizes.  */
          HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra));
          if (step != 0 && step <= (init_b - init_a))
            break;

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "Detected interleaving ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
              dump_printf (MSG_NOTE, " and ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
              dump_printf (MSG_NOTE, "\n");
            }

          /* Link the found element into the group list.  DRA's stmt is
             the head of the chain; LASTINFO tracks the current tail.  */
          if (!GROUP_FIRST_ELEMENT (stmtinfo_a))
            {
              GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra);
              lastinfo = stmtinfo_a;
            }
          GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra);
          GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb);
          lastinfo = stmtinfo_b;
        }
    }

  FOR_EACH_VEC_ELT (datarefs_copy, i, dr)
    if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
        && !vect_analyze_data_ref_access (dr))
      {
        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: complicated access pattern.\n");

        if (bb_vinfo)
          {
            /* Mark the statement as not vectorizable.  */
            STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
            continue;
          }
        else
          {
            datarefs_copy.release ();
            return false;
          }
      }

  datarefs_copy.release ();
  return true;
}


/* Operator == between two dr_with_seg_len objects.

   This equality operator is used to make sure two data refs
   are the same one so that we will consider to combine the
   aliasing checks of those two pairs of data dependent data
   refs.  */

static bool
operator == (const dr_with_seg_len& d1,
             const dr_with_seg_len& d2)
{
  return operand_equal_p (DR_BASE_ADDRESS (d1.dr),
                          DR_BASE_ADDRESS (d2.dr), 0)
           && compare_tree (d1.offset, d2.offset) == 0
           && compare_tree (d1.seg_len, d2.seg_len) == 0;
}


/* Function comp_dr_with_seg_len_pair.

   Comparison function for sorting objects of dr_with_seg_len_pair_t
   so that we can combine aliasing checks in one scan.  */

static int
comp_dr_with_seg_len_pair (const void *p1_, const void *p2_)
{
  const dr_with_seg_len_pair_t* p1 = (const dr_with_seg_len_pair_t *) p1_;
  const dr_with_seg_len_pair_t* p2 = (const dr_with_seg_len_pair_t *) p2_;

  const dr_with_seg_len &p11 = p1->first,
                        &p12 = p1->second,
                        &p21 = p2->first,
                        &p22 = p2->second;

  /* For DR pairs (a, b) and (c, d), we only consider to merge the alias
     checks if a and c have the same basic address and step, and b and d
     have the same address and step.
Therefore, if any a&c or b&d don't have the same address and step, we don't care the order of those two pairs after sorting. */ int comp_res; if ((comp_res = compare_tree (DR_BASE_ADDRESS (p11.dr), DR_BASE_ADDRESS (p21.dr))) != 0) return comp_res; if ((comp_res = compare_tree (DR_BASE_ADDRESS (p12.dr), DR_BASE_ADDRESS (p22.dr))) != 0) return comp_res; if ((comp_res = compare_tree (DR_STEP (p11.dr), DR_STEP (p21.dr))) != 0) return comp_res; if ((comp_res = compare_tree (DR_STEP (p12.dr), DR_STEP (p22.dr))) != 0) return comp_res; if ((comp_res = compare_tree (p11.offset, p21.offset)) != 0) return comp_res; if ((comp_res = compare_tree (p12.offset, p22.offset)) != 0) return comp_res; return 0; } /* Function vect_vfa_segment_size. Create an expression that computes the size of segment that will be accessed for a data reference. The functions takes into account that realignment loads may access one more vector. Input: DR: The data reference. LENGTH_FACTOR: segment length to consider. Return an expression whose value is the size of segment which will be accessed by DR. */ static tree vect_vfa_segment_size (struct data_reference *dr, tree length_factor) { tree segment_length; if (integer_zerop (DR_STEP (dr))) segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr))); else segment_length = size_binop (MULT_EXPR, fold_convert (sizetype, DR_STEP (dr)), fold_convert (sizetype, length_factor)); if (vect_supportable_dr_alignment (dr, false) == dr_explicit_realign_optimized) { tree vector_size = TYPE_SIZE_UNIT (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)))); segment_length = size_binop (PLUS_EXPR, segment_length, vector_size); } return segment_length; } /* Function vect_prune_runtime_alias_test_list. Prune a list of ddrs to be tested at run-time by versioning for alias. Merge several alias checks into one if possible. Return FALSE if resulting list of ddrs is longer then allowed by PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. 
*/

bool
vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
{
  vec<ddr_p> may_alias_ddrs = LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
  vec<dr_with_seg_len_pair_t>& comp_alias_ddrs
    = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo);
  int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);

  ddr_p ddr;
  unsigned int i;
  tree length_factor;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_prune_runtime_alias_test_list ===\n");

  if (may_alias_ddrs.is_empty ())
    return true;

  /* Basically, for each pair of dependent data refs store_ptr_0
     and load_ptr_0, we create an expression:

     ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
     || (load_ptr_0 + load_segment_length_0) <= store_ptr_0))

     for aliasing checks.  However, in some cases we can decrease
     the number of checks by combining two checks into one.  For
     example, suppose we have another pair of data refs store_ptr_0
     and load_ptr_1, and if the following condition is satisfied:

     load_ptr_0 < load_ptr_1  &&
     load_ptr_1 - load_ptr_0 - load_segment_length_0 < store_segment_length_0

     (this condition means, in each iteration of vectorized loop,
     the accessed memory of store_ptr_0 cannot be between the memory
     of load_ptr_0 and load_ptr_1.)

     we then can use only the following expression to finish the
     aliasing checks between store_ptr_0 & load_ptr_0 and
     store_ptr_0 & load_ptr_1:

     ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
     || (load_ptr_1 + load_segment_length_1 <= store_ptr_0))

     Note that we only consider that load_ptr_0 and load_ptr_1 have the
     same basic address.  */

  comp_alias_ddrs.create (may_alias_ddrs.length ());

  /* First, we collect all data ref pairs for aliasing checks.  */
  FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
    {
      struct data_reference *dr_a, *dr_b;
      gimple dr_group_first_a, dr_group_first_b;
      tree segment_length_a, segment_length_b;
      gimple stmt_a, stmt_b;

      dr_a = DDR_A (ddr);
      stmt_a = DR_STMT (DDR_A (ddr));
      /* For a grouped access, check against the first element of the
         group.  */
      dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
      if (dr_group_first_a)
        {
          stmt_a = dr_group_first_a;
          dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
        }

      dr_b = DDR_B (ddr);
      stmt_b = DR_STMT (DDR_B (ddr));
      dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
      if (dr_group_first_b)
        {
          stmt_b = dr_group_first_b;
          dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
        }

      /* With unequal steps the segment must cover the whole scalar
         iteration space; otherwise VF iterations suffice.  */
      if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0))
        length_factor = scalar_loop_iters;
      else
        length_factor = size_int (vect_factor);
      segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
      segment_length_b = vect_vfa_segment_size (dr_b, length_factor);

      dr_with_seg_len_pair_t dr_with_seg_len_pair
          (dr_with_seg_len (dr_a, segment_length_a),
           dr_with_seg_len (dr_b, segment_length_b));

      /* Canonicalize pairs by sorting the two DR members.  */
      if (compare_tree (DR_BASE_ADDRESS (dr_a), DR_BASE_ADDRESS (dr_b)) > 0)
        std::swap (dr_with_seg_len_pair.first, dr_with_seg_len_pair.second);

      comp_alias_ddrs.safe_push (dr_with_seg_len_pair);
    }

  /* Second, we sort the collected data ref pairs so that we can scan
     them once to combine all possible aliasing checks.  */
  comp_alias_ddrs.qsort (comp_dr_with_seg_len_pair);

  /* Third, we scan the sorted dr pairs and check if we can combine
     alias checks of two neighbouring dr pairs.  */
  for (size_t i = 1; i < comp_alias_ddrs.length (); ++i)
    {
      /* Deal with two ddrs (dr_a1, dr_b1) and (dr_a2, dr_b2).  */
      dr_with_seg_len *dr_a1 = &comp_alias_ddrs[i-1].first,
                      *dr_b1 = &comp_alias_ddrs[i-1].second,
                      *dr_a2 = &comp_alias_ddrs[i].first,
                      *dr_b2 = &comp_alias_ddrs[i].second;

      /* Remove duplicate data ref pairs.  */
      if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "found equal ranges ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM,
                                 DR_REF (dr_a1->dr));
              dump_printf (MSG_NOTE,  ", ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM,
                                 DR_REF (dr_b1->dr));
              dump_printf (MSG_NOTE,  " and ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM,
                                 DR_REF (dr_a2->dr));
              dump_printf (MSG_NOTE,  ", ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM,
                                 DR_REF (dr_b2->dr));
              dump_printf (MSG_NOTE, "\n");
            }

          comp_alias_ddrs.ordered_remove (i--);
          continue;
        }

      if (*dr_a1 == *dr_a2 || *dr_b1 == *dr_b2)
        {
          /* We consider the case that DR_B1 and DR_B2 are same memrefs,
             and DR_A1 and DR_A2 are two consecutive memrefs.  */
          if (*dr_a1 == *dr_a2)
            {
              std::swap (dr_a1, dr_b1);
              std::swap (dr_a2, dr_b2);
            }

          if (!operand_equal_p (DR_BASE_ADDRESS (dr_a1->dr),
                                DR_BASE_ADDRESS (dr_a2->dr),
                                0)
              || !tree_fits_shwi_p (dr_a1->offset)
              || !tree_fits_shwi_p (dr_a2->offset))
            continue;

          /* Make sure dr_a1 starts left of dr_a2.  */
          if (tree_int_cst_lt (dr_a2->offset, dr_a1->offset))
            std::swap (*dr_a1, *dr_a2);

          unsigned HOST_WIDE_INT diff
            = tree_to_shwi (dr_a2->offset) - tree_to_shwi (dr_a1->offset);

          bool do_remove = false;

          /* If the left segment does not extend beyond the start of the
             right segment the new segment length is that of the right
             plus the segment distance.  */
          if (tree_fits_uhwi_p (dr_a1->seg_len)
              && compare_tree_int (dr_a1->seg_len, diff) <= 0)
            {
              dr_a1->seg_len = size_binop (PLUS_EXPR, dr_a2->seg_len,
                                           size_int (diff));
              do_remove = true;
            }
          /* Generally the new segment length is the maximum of the
             left segment size and the right segment size plus the distance.
             ??? We can also build tree MAX_EXPR here but it's not clear this
             is profitable.  */
          else if (tree_fits_uhwi_p (dr_a1->seg_len)
                   && tree_fits_uhwi_p (dr_a2->seg_len))
            {
              unsigned HOST_WIDE_INT seg_len_a1 = tree_to_uhwi (dr_a1->seg_len);
              unsigned HOST_WIDE_INT seg_len_a2 = tree_to_uhwi (dr_a2->seg_len);
              dr_a1->seg_len = size_int (MAX (seg_len_a1, diff + seg_len_a2));
              do_remove = true;
            }
          /* Now we check if the following condition is satisfied:

             DIFF - SEGMENT_LENGTH_A < SEGMENT_LENGTH_B

             where DIFF = DR_A2->OFFSET - DR_A1->OFFSET.  However,
             SEGMENT_LENGTH_A or SEGMENT_LENGTH_B may not be constant so we
             have to make a best estimation.  We can get the minimum value
             of SEGMENT_LENGTH_B as a constant, represented by MIN_SEG_LEN_B,
             then either of the following two conditions can guarantee the
             one above:

             1: DIFF <= MIN_SEG_LEN_B
             2: DIFF - SEGMENT_LENGTH_A < MIN_SEG_LEN_B  */
          else
            {
              unsigned HOST_WIDE_INT min_seg_len_b
                = (tree_fits_uhwi_p (dr_b1->seg_len)
                   ? tree_to_uhwi (dr_b1->seg_len)
                   : vect_factor);

              if (diff <= min_seg_len_b
                  || (tree_fits_uhwi_p (dr_a1->seg_len)
                      && diff - tree_to_uhwi (dr_a1->seg_len) < min_seg_len_b))
                {
                  dr_a1->seg_len = size_binop (PLUS_EXPR,
                                               dr_a2->seg_len, size_int (diff));
                  do_remove = true;
                }
            }

          if (do_remove)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "merging ranges for ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a1->dr));
                  dump_printf (MSG_NOTE, ", ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b1->dr));
                  dump_printf (MSG_NOTE, " and ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a2->dr));
                  dump_printf (MSG_NOTE, ", ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b2->dr));
                  dump_printf (MSG_NOTE, "\n");
                }
              comp_alias_ddrs.ordered_remove (i--);
            }
        }
    }

  dump_printf_loc (MSG_NOTE, vect_location,
                   "improved number of alias checks from %d to %d\n",
                   may_alias_ddrs.length (), comp_alias_ddrs.length ());
  if ((int) comp_alias_ddrs.length () >
      PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
    return false;

  return true;
}


/* Check whether a non-affine read in stmt is suitable for gather load
   and if so, return a builtin decl for that operation.  On success the
   loop-invariant base, the loop-variant offset SSA_NAME and the scale
   are stored through BASEP, OFFP and SCALEP (each may be NULL).
   Returns NULL_TREE if the access cannot be expressed as a gather.  */

tree
vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep,
                   tree *offp, int *scalep)
{
  HOST_WIDE_INT scale = 1, pbitpos, pbitsize;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree offtype = NULL_TREE;
  tree decl, base, off;
  machine_mode pmode;
  int punsignedp, pvolatilep;

  base = DR_REF (dr);
  /* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF,
     see if we can use the def stmt of the address.  */
  if (is_gimple_call (stmt)
      && gimple_call_internal_p (stmt)
      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
          || gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
      && TREE_CODE (base) == MEM_REF
      && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
      && integer_zerop (TREE_OPERAND (base, 1))
      && !expr_invariant_in_loop_p (loop, TREE_OPERAND (base, 0)))
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
      if (is_gimple_assign (def_stmt)
          && gimple_assign_rhs_code (def_stmt) == ADDR_EXPR)
        base = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
    }

  /* The gather builtins need address of the form
     loop_invariant + vector * {1, 2, 4, 8}
     or
     loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
     Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
     of loop invariants/SSA_NAMEs defined in the loop, with casts,
     multiplications and additions in it.  To get a vector, we need
     a single SSA_NAME that will be defined in the loop and will
     contain everything that is not loop invariant and that can be
     vectorized.  The following code attempts to find such a
     preexisting SSA_NAME OFF and put the loop invariants into a tree
     BASE that can be gimplified before the loop.  */
  base = get_inner_reference (base, &pbitsize, &pbitpos, &off,
                              &pmode, &punsignedp, &pvolatilep, false);
  gcc_assert (base != NULL_TREE && (pbitpos % BITS_PER_UNIT) == 0);

  if (TREE_CODE (base) == MEM_REF)
    {
      /* Fold a constant MEM_REF offset into OFF.  */
      if (!integer_zerop (TREE_OPERAND (base, 1)))
        {
          if (off == NULL_TREE)
            {
              offset_int moff = mem_ref_offset (base);
              off = wide_int_to_tree (sizetype, moff);
            }
          else
            off = size_binop (PLUS_EXPR, off,
                              fold_convert (sizetype, TREE_OPERAND (base, 1)));
        }
      base = TREE_OPERAND (base, 0);
    }
  else
    base = build_fold_addr_expr (base);

  if (off == NULL_TREE)
    off = size_zero_node;

  /* If base is not loop invariant, either off is 0, then we start with just
     the constant offset in the loop invariant BASE and continue with base
     as OFF, otherwise give up.
     We could handle that case by gimplifying the addition of base + off
     into some SSA_NAME and use that as off, but for now punt.  */
  if (!expr_invariant_in_loop_p (loop, base))
    {
      if (!integer_zerop (off))
        return NULL_TREE;
      off = base;
      base = size_int (pbitpos / BITS_PER_UNIT);
    }
  /* Otherwise put base + constant offset into the loop invariant BASE
     and continue with OFF.  */
  else
    {
      base = fold_convert (sizetype, base);
      base = size_binop (PLUS_EXPR, base, size_int (pbitpos / BITS_PER_UNIT));
    }

  /* OFF at this point may be either a SSA_NAME or some tree expression
     from get_inner_reference.  Try to peel off loop invariants from it
     into BASE as long as possible.  */
  STRIP_NOPS (off);
  while (offtype == NULL_TREE)
    {
      enum tree_code code;
      tree op0, op1, add = NULL_TREE;

      if (TREE_CODE (off) == SSA_NAME)
        {
          gimple def_stmt = SSA_NAME_DEF_STMT (off);

          if (expr_invariant_in_loop_p (loop, off))
            return NULL_TREE;

          if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
            break;

          op0 = gimple_assign_rhs1 (def_stmt);
          code = gimple_assign_rhs_code (def_stmt);
          op1 = gimple_assign_rhs2 (def_stmt);
        }
      else
        {
          if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS)
            return NULL_TREE;
          code = TREE_CODE (off);
          extract_ops_from_tree (off, &code, &op0, &op1);
        }
      switch (code)
        {
        case POINTER_PLUS_EXPR:
        case PLUS_EXPR:
          /* Accumulate the invariant addend (scaled if a scale was
             already peeled off) into BASE and keep walking OFF.  */
          if (expr_invariant_in_loop_p (loop, op0))
            {
              add = op0;
              off = op1;
            do_add:
              add = fold_convert (sizetype, add);
              if (scale != 1)
                add = size_binop (MULT_EXPR, add, size_int (scale));
              base = size_binop (PLUS_EXPR, base, add);
              continue;
            }
          if (expr_invariant_in_loop_p (loop, op1))
            {
              add = op1;
              off = op0;
              goto do_add;
            }
          break;
        case MINUS_EXPR:
          if (expr_invariant_in_loop_p (loop, op1))
            {
              add = fold_convert (sizetype, op1);
              add = size_binop (MINUS_EXPR, size_zero_node, add);
              off = op0;
              goto do_add;
            }
          break;
        case MULT_EXPR:
          if (scale == 1 && tree_fits_shwi_p (op1))
            {
              scale = tree_to_shwi (op1);
              off = op0;
              continue;
            }
          break;
        case SSA_NAME:
          off = op0;
          continue;
        CASE_CONVERT:
          if (!POINTER_TYPE_P (TREE_TYPE (op0))
              && !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
            break;
          if (TYPE_PRECISION (TREE_TYPE (op0))
              == TYPE_PRECISION (TREE_TYPE (off)))
            {
              off = op0;
              continue;
            }
          /* A widening conversion fixes the offset type; record it and
             stop peeling after this step.  */
          if (TYPE_PRECISION (TREE_TYPE (op0))
              < TYPE_PRECISION (TREE_TYPE (off)))
            {
              off = op0;
              offtype = TREE_TYPE (off);
              STRIP_NOPS (off);
              continue;
            }
          break;
        default:
          break;
        }
      break;
    }

  /* If at the end OFF still isn't a SSA_NAME or isn't
     defined in the loop, punt.  */
  if (TREE_CODE (off) != SSA_NAME
      || expr_invariant_in_loop_p (loop, off))
    return NULL_TREE;

  if (offtype == NULL_TREE)
    offtype = TREE_TYPE (off);

  /* Ask the target for a gather builtin matching the vectype, offset
     type and scale.  */
  decl = targetm.vectorize.builtin_gather (STMT_VINFO_VECTYPE (stmt_info),
                                           offtype, scale);
  if (decl == NULL_TREE)
    return NULL_TREE;

  if (basep)
    *basep = base;
  if (offp)
    *offp = off;
  if (scalep)
    *scalep = scale;
  return decl;
}


/* Function vect_analyze_data_refs.

  Find all the data references in the loop or basic block.

   The general structure of the analysis of data refs in the vectorizer is as
   follows:
   1- vect_analyze_data_refs(loop/bb): call
      compute_data_dependences_for_loop/bb to find and analyze all data-refs
      in the loop/bb and their dependences.
   2- vect_analyze_dependences(): apply dependence testing using ddrs.
   3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
   4- vect_analyze_drs_access(): check that ref_stmt.step is ok.

*/

bool
vect_analyze_data_refs (loop_vec_info loop_vinfo,
                        bb_vec_info bb_vinfo,
                        int *min_vf, unsigned *n_stmts)
{
  struct loop *loop = NULL;
  basic_block bb = NULL;
  unsigned int i;
  vec<data_reference_p> datarefs;
  struct data_reference *dr;
  tree scalar_type;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_data_refs ===\n");

  if (loop_vinfo)
    {
      basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);

      loop = LOOP_VINFO_LOOP (loop_vinfo);
      datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
      if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: loop contains function calls"
                             " or data references that cannot be analyzed\n");
          return false;
        }

      for (i = 0; i < loop->num_nodes; i++)
        {
          gimple_stmt_iterator gsi;

          for (gsi = gsi_start_bb (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              gimple stmt = gsi_stmt (gsi);
              if (is_gimple_debug (stmt))
                continue;
              ++*n_stmts;
              if (!find_data_references_in_stmt (loop, stmt, &datarefs))
                {
                  if (is_gimple_call (stmt) && loop->safelen)
                    {
                      tree fndecl
= gimple_call_fndecl (stmt), op; if (fndecl != NULL_TREE) { struct cgraph_node *node = cgraph_node::get (fndecl); if (node != NULL && node->simd_clones != NULL) { unsigned int j, n = gimple_call_num_args (stmt); for (j = 0; j < n; j++) { op = gimple_call_arg (stmt, j); if (DECL_P (op) || (REFERENCE_CLASS_P (op) && get_base_address (op))) break; } op = gimple_call_lhs (stmt); /* Ignore #pragma omp declare simd functions if they don't have data references in the call stmt itself. */ if (j == n && !(op && (DECL_P (op) || (REFERENCE_CLASS_P (op) && get_base_address (op))))) continue; } } } LOOP_VINFO_DATAREFS (loop_vinfo) = datarefs; if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: loop contains function " "calls or data references that cannot " "be analyzed\n"); return false; } } } LOOP_VINFO_DATAREFS (loop_vinfo) = datarefs; } else { gimple_stmt_iterator gsi; bb = BB_VINFO_BB (bb_vinfo); for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple stmt = gsi_stmt (gsi); if (is_gimple_debug (stmt)) continue; ++*n_stmts; if (!find_data_references_in_stmt (NULL, stmt, &BB_VINFO_DATAREFS (bb_vinfo))) { /* Mark the rest of the basic-block as unvectorizable. */ for (; !gsi_end_p (gsi); gsi_next (&gsi)) { stmt = gsi_stmt (gsi); STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)) = false; } break; } } datarefs = BB_VINFO_DATAREFS (bb_vinfo); } /* Go through the data-refs, check that the analysis succeeded. Update pointer from stmt_vec_info struct to DR and vectype. */ FOR_EACH_VEC_ELT (datarefs, i, dr) { gimple stmt; stmt_vec_info stmt_info; tree base, offset, init; bool gather = false; bool simd_lane_access = false; int vf; again: if (!dr || !DR_REF (dr)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unhandled data-ref\n"); return false; } stmt = DR_STMT (dr); stmt_info = vinfo_for_stmt (stmt); /* Discard clobbers from the dataref vector. 
We will remove clobber stmts during vectorization. */ if (gimple_clobber_p (stmt)) { free_data_ref (dr); if (i == datarefs.length () - 1) { datarefs.pop (); break; } datarefs.ordered_remove (i); dr = datarefs[i]; goto again; } /* Check that analysis of the data-ref succeeded. */ if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr) || !DR_STEP (dr)) { bool maybe_gather = DR_IS_READ (dr) && !TREE_THIS_VOLATILE (DR_REF (dr)) && targetm.vectorize.builtin_gather != NULL; bool maybe_simd_lane_access = loop_vinfo && loop->simduid; /* If target supports vector gather loads, or if this might be a SIMD lane access, see if they can't be used. */ if (loop_vinfo && (maybe_gather || maybe_simd_lane_access) && !nested_in_vect_loop_p (loop, stmt)) { struct data_reference *newdr = create_data_ref (NULL, loop_containing_stmt (stmt), DR_REF (dr), stmt, true); gcc_assert (newdr != NULL && DR_REF (newdr)); if (DR_BASE_ADDRESS (newdr) && DR_OFFSET (newdr) && DR_INIT (newdr) && DR_STEP (newdr) && integer_zerop (DR_STEP (newdr))) { if (maybe_simd_lane_access) { tree off = DR_OFFSET (newdr); STRIP_NOPS (off); if (TREE_CODE (DR_INIT (newdr)) == INTEGER_CST && TREE_CODE (off) == MULT_EXPR && tree_fits_uhwi_p (TREE_OPERAND (off, 1))) { tree step = TREE_OPERAND (off, 1); off = TREE_OPERAND (off, 0); STRIP_NOPS (off); if (CONVERT_EXPR_P (off) && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off, 0))) < TYPE_PRECISION (TREE_TYPE (off))) off = TREE_OPERAND (off, 0); if (TREE_CODE (off) == SSA_NAME) { gimple def = SSA_NAME_DEF_STMT (off); tree reft = TREE_TYPE (DR_REF (newdr)); if (is_gimple_call (def) && gimple_call_internal_p (def) && (gimple_call_internal_fn (def) == IFN_GOMP_SIMD_LANE)) { tree arg = gimple_call_arg (def, 0); gcc_assert (TREE_CODE (arg) == SSA_NAME); arg = SSA_NAME_VAR (arg); if (arg == loop->simduid /* For now. 
*/ && tree_int_cst_equal (TYPE_SIZE_UNIT (reft), step)) { DR_OFFSET (newdr) = ssize_int (0); DR_STEP (newdr) = step; DR_ALIGNED_TO (newdr) = size_int (BIGGEST_ALIGNMENT); dr = newdr; simd_lane_access = true; } } } } } if (!simd_lane_access && maybe_gather) { dr = newdr; gather = true; } } if (!gather && !simd_lane_access) free_data_ref (newdr); } if (!gather && !simd_lane_access) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: data ref analysis " "failed "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } if (bb_vinfo) break; return false; } } if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: base addr of dr is a " "constant\n"); if (bb_vinfo) break; if (gather || simd_lane_access) free_data_ref (dr); return false; } if (TREE_THIS_VOLATILE (DR_REF (dr))) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: volatile type "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } if (bb_vinfo) break; return false; } if (stmt_can_throw_internal (stmt)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: statement can throw an " "exception "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } if (bb_vinfo) break; if (gather || simd_lane_access) free_data_ref (dr); return false; } if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1))) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: statement is bitfield " "access "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } if (bb_vinfo) break; if (gather 
|| simd_lane_access) free_data_ref (dr); return false; } base = unshare_expr (DR_BASE_ADDRESS (dr)); offset = unshare_expr (DR_OFFSET (dr)); init = unshare_expr (DR_INIT (dr)); if (is_gimple_call (stmt) && (!gimple_call_internal_p (stmt) || (gimple_call_internal_fn (stmt) != IFN_MASK_LOAD && gimple_call_internal_fn (stmt) != IFN_MASK_STORE))) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: dr in a call "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } if (bb_vinfo) break; if (gather || simd_lane_access) free_data_ref (dr); return false; } /* Update DR field in stmt_vec_info struct. */ /* If the dataref is in an inner-loop of the loop that is considered for for vectorization, we also want to analyze the access relative to the outer-loop (DR contains information only relative to the inner-most enclosing loop). We do that by building a reference to the first location accessed by the inner-loop, and analyze it relative to the outer-loop. */ if (loop && nested_in_vect_loop_p (loop, stmt)) { tree outer_step, outer_base, outer_init; HOST_WIDE_INT pbitsize, pbitpos; tree poffset; machine_mode pmode; int punsignedp, pvolatilep; affine_iv base_iv, offset_iv; tree dinit; /* Build a reference to the first location accessed by the inner-loop: *(BASE+INIT). (The first location is actually BASE+INIT+OFFSET, but we add OFFSET separately later). 
*/ tree inner_base = build_fold_indirect_ref (fold_build_pointer_plus (base, init)); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "analyze in outer-loop: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, inner_base); dump_printf (MSG_NOTE, "\n"); } outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos, &poffset, &pmode, &punsignedp, &pvolatilep, false); gcc_assert (outer_base != NULL_TREE); if (pbitpos % BITS_PER_UNIT != 0) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "failed: bit offset alignment.\n"); return false; } outer_base = build_fold_addr_expr (outer_base); if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base, &base_iv, false)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "failed: evolution of base is not affine.\n"); return false; } if (offset) { if (poffset) poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset, poffset); else poffset = offset; } if (!poffset) { offset_iv.base = ssize_int (0); offset_iv.step = ssize_int (0); } else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset, &offset_iv, false)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "evolution of offset is not affine.\n"); return false; } outer_init = ssize_int (pbitpos / BITS_PER_UNIT); split_constant_offset (base_iv.base, &base_iv.base, &dinit); outer_init = size_binop (PLUS_EXPR, outer_init, dinit); split_constant_offset (offset_iv.base, &offset_iv.base, &dinit); outer_init = size_binop (PLUS_EXPR, outer_init, dinit); outer_step = size_binop (PLUS_EXPR, fold_convert (ssizetype, base_iv.step), fold_convert (ssizetype, offset_iv.step)); STMT_VINFO_DR_STEP (stmt_info) = outer_step; /* FIXME: Use canonicalize_base_object_address (base_iv.base); */ STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base; STMT_VINFO_DR_INIT (stmt_info) = outer_init; STMT_VINFO_DR_OFFSET (stmt_info) = fold_convert (ssizetype, offset_iv.base); 
STMT_VINFO_DR_ALIGNED_TO (stmt_info) = size_int (highest_pow2_factor (offset_iv.base)); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "\touter base_address: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, STMT_VINFO_DR_BASE_ADDRESS (stmt_info)); dump_printf (MSG_NOTE, "\n\touter offset from base address: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, STMT_VINFO_DR_OFFSET (stmt_info)); dump_printf (MSG_NOTE, "\n\touter constant offset from base address: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, STMT_VINFO_DR_INIT (stmt_info)); dump_printf (MSG_NOTE, "\n\touter step: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, STMT_VINFO_DR_STEP (stmt_info)); dump_printf (MSG_NOTE, "\n\touter aligned to: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, STMT_VINFO_DR_ALIGNED_TO (stmt_info)); dump_printf (MSG_NOTE, "\n"); } } if (STMT_VINFO_DATA_REF (stmt_info)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: more than one data ref " "in stmt: "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } if (bb_vinfo) break; if (gather || simd_lane_access) free_data_ref (dr); return false; } STMT_VINFO_DATA_REF (stmt_info) = dr; if (simd_lane_access) { STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) = true; free_data_ref (datarefs[i]); datarefs[i] = dr; } /* Set vectype for STMT. 
*/ scalar_type = TREE_TYPE (DR_REF (dr)); STMT_VINFO_VECTYPE (stmt_info) = get_vectype_for_scalar_type (scalar_type); if (!STMT_VINFO_VECTYPE (stmt_info)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: no vectype for stmt: "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS, scalar_type); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } if (bb_vinfo) break; if (gather || simd_lane_access) { STMT_VINFO_DATA_REF (stmt_info) = NULL; if (gather) free_data_ref (dr); } return false; } else { if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "got vectype for stmt: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); dump_generic_expr (MSG_NOTE, TDF_SLIM, STMT_VINFO_VECTYPE (stmt_info)); dump_printf (MSG_NOTE, "\n"); } } /* Adjust the minimal vectorization factor according to the vector type. */ vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)); if (vf > *min_vf) *min_vf = vf; if (gather) { tree off; gather = 0 != vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL); if (gather && get_vectype_for_scalar_type (TREE_TYPE (off)) == NULL_TREE) gather = false; if (!gather) { STMT_VINFO_DATA_REF (stmt_info) = NULL; free_data_ref (dr); if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: not suitable for gather " "load "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } datarefs[i] = dr; STMT_VINFO_GATHER_P (stmt_info) = true; } else if (loop_vinfo && TREE_CODE (DR_STEP (dr)) != INTEGER_CST) { if (nested_in_vect_loop_p (loop, stmt) || !DR_IS_READ (dr)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: not suitable for strided " "load "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); 
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }
	  return false;
	}
      /* A strided (non-constant step) read in the considered loop is
	 still vectorizable; record that for later handling.  */
      STMT_VINFO_STRIDE_LOAD_P (stmt_info) = true;
    }
    }

  /* If we stopped analysis at the first dataref we could not analyze
     when trying to vectorize a basic-block mark the rest of the datarefs
     as not vectorizable and truncate the vector of datarefs.  That
     avoids spending useless time in analyzing their dependence.  */
  if (i != datarefs.length ())
    {
      gcc_assert (bb_vinfo != NULL);
      for (unsigned j = i; j < datarefs.length (); ++j)
	{
	  data_reference_p dr = datarefs[j];
	  STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
	  free_data_ref (dr);
	}
      datarefs.truncate (i);
    }

  return true;
}


/* Function vect_get_new_vect_var.

   Returns a name for a new variable.  The current naming scheme appends the
   prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) to the name
   of vectorizer generated variables, and appends that to NAME if provided.  */

tree
vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
{
  const char *prefix;
  tree new_vect_var;

  /* Choose the prefix from the kind of value the variable will hold.  */
  switch (var_kind)
  {
  case vect_simple_var:
    prefix = "vect";
    break;
  case vect_scalar_var:
    prefix = "stmp";
    break;
  case vect_pointer_var:
    prefix = "vectp";
    break;
  default:
    gcc_unreachable ();
  }

  if (name)
    {
      char* tmp = concat (prefix, "_", name, NULL);
      new_vect_var = create_tmp_reg (type, tmp);
      free (tmp);
    }
  else
    new_vect_var = create_tmp_reg (type, prefix);

  return new_vect_var;
}

/* Duplicate ptr info and set alignment/misalignment on NAME from DR.
   If the misalignment of DR is unknown, the alignment info on NAME is
   marked unknown as well.  */

static void
vect_duplicate_ssa_name_ptr_info (tree name, data_reference *dr,
				  stmt_vec_info stmt_info)
{
  duplicate_ssa_name_ptr_info (name, DR_PTR_INFO (dr));
  unsigned int align = TYPE_ALIGN_UNIT (STMT_VINFO_VECTYPE (stmt_info));
  int misalign = DR_MISALIGNMENT (dr);
  if (misalign == -1)
    mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (name));
  else
    set_ptr_info_alignment (SSA_NAME_PTR_INFO (name), align, misalign);
}

/* Function vect_create_addr_base_for_vector_ref.
   Create an expression that computes the address of the first memory location
   that will be accessed for a data reference.

   Input:
   STMT: The statement containing the data reference.
   NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
   OFFSET: Optional.  If supplied, it is added to the initial address.
   LOOP:    Specify relative to which loop-nest should the address be computed.
            For example, when the dataref is in an inner-loop nested in an
	    outer-loop that is now being vectorized, LOOP can be either the
	    outer-loop, or the inner-loop.  The first memory location accessed
	    by the following dataref ('in' points to short):

		for (i=0; i<N; i++)
		   for (j=0; j<M; j++)
		     s += in[i+j]

	    is as follows: if LOOP=i_loop:	&in		(relative to i_loop)
			   if LOOP=j_loop: 	&in+i*2B	(relative to j_loop)
   BYTE_OFFSET: Optional, defaulted to NULL.  If supplied, it is added to the
	    initial address.  Unlike OFFSET, which is number of elements to
	    be added, BYTE_OFFSET is measured in bytes.

   Output:
   1. Return an SSA_NAME whose value is the address of the memory location of
      the first vector of the data reference.
   2. If new_stmt_list is not NULL_TREE after return then the caller must insert
      these statement(s) which define the returned SSA_NAME.

   FORNOW: We are only handling array accesses with step 1.
*/

tree
vect_create_addr_base_for_vector_ref (gimple stmt,
				      gimple_seq *new_stmt_list,
				      tree offset,
				      struct loop *loop,
				      tree byte_offset)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree data_ref_base;
  const char *base_name;
  tree addr_base;
  tree dest;
  gimple_seq seq = NULL;
  tree base_offset;
  tree init;
  tree vect_ptr_type;
  /* STEP is the element size; OFFSET (in elements) is scaled by it below.  */
  tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);

  if (loop_vinfo && loop && loop != (gimple_bb (stmt))->loop_father)
    {
      /* Dataref is in the inner-loop but the address is requested relative
	 to the outer-loop: use the DR analysis recorded on STMT_INFO.  */
      struct loop *outer_loop = LOOP_VINFO_LOOP (loop_vinfo);

      gcc_assert (nested_in_vect_loop_p (outer_loop, stmt));

      data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
      base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info));
      init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info));
    }
  else
    {
      data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
      base_offset = unshare_expr (DR_OFFSET (dr));
      init = unshare_expr (DR_INIT (dr));
    }

  if (loop_vinfo)
    base_name = get_name (data_ref_base);
  else
    {
      base_offset = ssize_int (0);
      init = ssize_int (0);
      base_name = get_name (DR_REF (dr));
    }

  /* Create base_offset */
  base_offset = size_binop (PLUS_EXPR,
			    fold_convert (sizetype, base_offset),
			    fold_convert (sizetype, init));

  if (offset)
    {
      /* OFFSET is in elements; scale it to bytes by the element size.  */
      offset = fold_build2 (MULT_EXPR, sizetype,
			    fold_convert (sizetype, offset), step);
      base_offset = fold_build2 (PLUS_EXPR, sizetype,
				 base_offset, offset);
    }
  if (byte_offset)
    {
      /* BYTE_OFFSET is already in bytes; add it unscaled.  */
      byte_offset = fold_convert (sizetype, byte_offset);
      base_offset = fold_build2 (PLUS_EXPR, sizetype,
				 base_offset, byte_offset);
    }

  /* base + base_offset */
  if (loop_vinfo)
    addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
  else
    {
      addr_base = build1 (ADDR_EXPR,
			  build_pointer_type (TREE_TYPE (DR_REF (dr))),
			  unshare_expr (DR_REF (dr)));
    }

  vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
  addr_base = fold_convert (vect_ptr_type, addr_base);
  dest = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var, base_name);
  addr_base = force_gimple_operand (addr_base, &seq, false, dest);
  gimple_seq_add_seq (new_stmt_list, seq);

  if (DR_PTR_INFO (dr)
      && TREE_CODE (addr_base) == SSA_NAME)
    {
      vect_duplicate_ssa_name_ptr_info (addr_base, dr, stmt_info);
      /* An extra offset invalidates the recorded alignment.  */
      if (offset || byte_offset)
	mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base));
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "created ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, addr_base);
      dump_printf (MSG_NOTE, "\n");
    }

  return addr_base;
}


/* Function vect_create_data_ref_ptr.

   Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
   location accessed in the loop by STMT, along with the def-use update
   chain to appropriately advance the pointer through the loop iterations.
   Also set aliasing information for the pointer.  This pointer is used by
   the callers to this function to create a memory reference expression for
   vector load/store access.

   Input:
   1. STMT: a stmt that references memory. Expected to be of the form
         GIMPLE_ASSIGN <name, data-ref> or
	 GIMPLE_ASSIGN <data-ref, name>.
   2. AGGR_TYPE: the type of the reference, which should be either a vector
        or an array.
   3. AT_LOOP: the loop where the vector memref is to be created.
   4. OFFSET (optional): an offset to be added to the initial address accessed
        by the data-ref in STMT.
   5. BSI: location where the new stmts are to be placed if there is no loop
   6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
        pointing to the initial address.
   7. BYTE_OFFSET (optional, defaults to NULL): a byte offset to be added
	to the initial address accessed by the data-ref in STMT.  This is
	similar to OFFSET, but OFFSET is counted in elements, while BYTE_OFFSET
	in bytes.

   Output:
   1. Declare a new ptr to vector_type, and have it point to the base of the
      data reference (initial address accessed by the data reference).
      For example, for vector of type V8HI, the following code is generated:

      v8hi *ap;
      ap = (v8hi *)initial_address;

      if OFFSET is not supplied:
         initial_address = &a[init];
      if OFFSET is supplied:
         initial_address = &a[init + OFFSET];
      if BYTE_OFFSET is supplied:
	 initial_address = &a[init] + BYTE_OFFSET;

      Return the initial_address in INITIAL_ADDRESS.

   2. If ONLY_INIT is true, just return the initial pointer.  Otherwise, also
      update the pointer in each iteration of the loop.

      Return the increment stmt that updates the pointer in PTR_INCR.

   3. Set INV_P to true if the access pattern of the data reference in the
      vectorized loop is invariant.  Set it to false otherwise.

   4. Return the pointer.  */

tree
vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop,
			  tree offset, tree *initial_address,
			  gimple_stmt_iterator *gsi, gimple *ptr_incr,
			  bool only_init, bool *inv_p, tree byte_offset)
{
  const char *base_name;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  bool nested_in_vect_loop = false;
  struct loop *containing_loop = NULL;
  tree aggr_ptr_type;
  tree aggr_ptr;
  tree new_temp;
  gimple vec_stmt;
  gimple_seq new_stmt_list = NULL;
  edge pe = NULL;
  basic_block new_bb;
  tree aggr_ptr_init;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree aptr;
  gimple_stmt_iterator incr_gsi;
  bool insert_after;
  tree indx_before_incr, indx_after_incr;
  gimple incr;
  tree step;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  gcc_assert (TREE_CODE (aggr_type) == ARRAY_TYPE
	      || TREE_CODE (aggr_type) == VECTOR_TYPE);

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
      containing_loop = (gimple_bb (stmt))->loop_father;
      pe = loop_preheader_edge (loop);
    }
  else
    {
      /* Basic-block vectorization: there is no loop to update the
	 pointer in, so force ONLY_INIT.  */
      gcc_assert (bb_vinfo);
      only_init = true;
      *ptr_incr = NULL;
    }

  /* Check the step (evolution) of the load in LOOP, and record
     whether it's invariant.  */
  if (nested_in_vect_loop)
    step = STMT_VINFO_DR_STEP (stmt_info);
  else
    step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));

  if (integer_zerop (step))
    *inv_p = true;
  else
    *inv_p = false;

  /* Create an expression for the first address accessed by this load
     in LOOP.  */
  base_name = get_name (DR_BASE_ADDRESS (dr));

  if (dump_enabled_p ())
    {
      tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
      dump_printf_loc (MSG_NOTE, vect_location,
                       "create %s-pointer variable to type: ",
		       get_tree_code_name (TREE_CODE (aggr_type)));
      dump_generic_expr (MSG_NOTE, TDF_SLIM, aggr_type);
      if (TREE_CODE (dr_base_type) == ARRAY_TYPE)
        dump_printf (MSG_NOTE, "  vectorizing an array ref: ");
      else if (TREE_CODE (dr_base_type) == VECTOR_TYPE)
        dump_printf (MSG_NOTE, "  vectorizing a vector ref: ");
      else if (TREE_CODE (dr_base_type) == RECORD_TYPE)
        dump_printf (MSG_NOTE, "  vectorizing a record based array ref: ");
      else
        dump_printf (MSG_NOTE, "  vectorizing a pointer ref: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_BASE_OBJECT (dr));
      dump_printf (MSG_NOTE, "\n");
    }

  /* (1) Create the new aggregate-pointer variable.
     Vector and array types inherit the alias set of their component
     type by default so we need to use a ref-all pointer if the data
     reference does not conflict with the created aggregated data
     reference because it is not addressable.  */
  bool need_ref_all = false;
  if (!alias_sets_conflict_p (get_alias_set (aggr_type),
			      get_alias_set (DR_REF (dr))))
    need_ref_all = true;
  /* Likewise for any of the data references in the stmt group.  */
  else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1)
    {
      gimple orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info);
      do
	{
	  stmt_vec_info sinfo = vinfo_for_stmt (orig_stmt);
	  struct data_reference *sdr = STMT_VINFO_DATA_REF (sinfo);
	  if (!alias_sets_conflict_p (get_alias_set (aggr_type),
				      get_alias_set (DR_REF (sdr))))
	    {
	      need_ref_all = true;
	      break;
	    }
	  orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (sinfo);
	}
      while (orig_stmt);
    }
  aggr_ptr_type = build_pointer_type_for_mode (aggr_type, ptr_mode,
					       need_ref_all);
  aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var, base_name);


  /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
     vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
     def-use update cycles for the pointer: one relative to the outer-loop
     (LOOP), which is what steps (3) and (4) below do.  The other is relative
     to the inner-loop (which is the inner-most loop containing the dataref),
     and this is done by step (5) below.

     When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
     inner-most loop, and so steps (3),(4) work the same, and step (5) is
     redundant.  Steps (3),(4) create the following:

	vp0 = &base_addr;
	LOOP:	vp1 = phi(vp0,vp2)
		...
		...
		vp2 = vp1 + step
		goto LOOP

     If there is an inner-loop nested in loop, then step (5) will also be
     applied, and an additional update in the inner-loop will be created:

	vp0 = &base_addr;
	LOOP:   vp1 = phi(vp0,vp2)
		...
        inner:     vp3 = phi(vp1,vp4)
	           vp4 = vp3 + inner_step
	           if () goto inner
		...
		vp2 = vp1 + step
		if () goto LOOP   */

  /* (2) Calculate the initial address of the aggregate-pointer, and set
     the aggregate-pointer to point to it before the loop.  */

  /* Create: (&(base[init_val+offset]+byte_offset) in the loop preheader.  */

  new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
						   offset, loop, byte_offset);
  if (new_stmt_list)
    {
      if (pe)
        {
          new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
          gcc_assert (!new_bb);
        }
      else
        gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
    }

  *initial_address = new_temp;

  /* Create: p = (aggr_type *) initial_base  */
  if (TREE_CODE (new_temp) != SSA_NAME
      || !useless_type_conversion_p (aggr_ptr_type, TREE_TYPE (new_temp)))
    {
      vec_stmt = gimple_build_assign (aggr_ptr,
				      fold_convert (aggr_ptr_type, new_temp));
      aggr_ptr_init = make_ssa_name (aggr_ptr, vec_stmt);
      /* Copy the points-to information if it exists. */
      if (DR_PTR_INFO (dr))
	vect_duplicate_ssa_name_ptr_info (aggr_ptr_init, dr, stmt_info);
      gimple_assign_set_lhs (vec_stmt, aggr_ptr_init);
      if (pe)
	{
	  new_bb = gsi_insert_on_edge_immediate (pe, vec_stmt);
	  gcc_assert (!new_bb);
	}
      else
	gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
    }
  else
    aggr_ptr_init = new_temp;

  /* (3) Handle the updating of the aggregate-pointer inside the loop.
     This is needed when ONLY_INIT is false, and also when AT_LOOP is the
     inner-loop nested in LOOP (during outer-loop vectorization).  */

  /* No update in loop is required.  */
  if (only_init && (!loop_vinfo || at_loop == loop))
    aptr = aggr_ptr_init;
  else
    {
      /* The step of the aggregate pointer is the type size.  */
      tree iv_step = TYPE_SIZE_UNIT (aggr_type);
      /* One exception to the above is when the scalar step of the load in
	 LOOP is zero. In this case the step here is also zero.  */
      if (*inv_p)
	iv_step = size_zero_node;
      else if (tree_int_cst_sgn (step) == -1)
	iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (aggr_ptr_init,
		 fold_convert (aggr_ptr_type, iv_step),
		 aggr_ptr, loop, &incr_gsi, insert_after,
		 &indx_before_incr, &indx_after_incr);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));

      /* Copy the points-to information if it exists. */
      if (DR_PTR_INFO (dr))
	{
	  vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr, stmt_info);
	  vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr, stmt_info);
	}
      if (ptr_incr)
	*ptr_incr = incr;

      aptr = indx_before_incr;
    }

  if (!nested_in_vect_loop || only_init)
    return aptr;


  /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
     nested in LOOP, if exists.  */

  gcc_assert (nested_in_vect_loop);
  if (!only_init)
    {
      standard_iv_increment_position (containing_loop, &incr_gsi,
				      &insert_after);
      create_iv (aptr, fold_convert (aggr_ptr_type, DR_STEP (dr)), aggr_ptr,
		 containing_loop, &incr_gsi, insert_after, &indx_before_incr,
		 &indx_after_incr);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));

      /* Copy the points-to information if it exists. */
      if (DR_PTR_INFO (dr))
	{
	  vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr, stmt_info);
	  vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr, stmt_info);
	}
      if (ptr_incr)
	*ptr_incr = incr;
      return indx_before_incr;
    }
  else
    gcc_unreachable ();
}


/* Function bump_vector_ptr

   Increment a pointer (to a vector type) by vector-size. If requested,
   i.e. if PTR-INCR is given, then also connect the new increment stmt
   to the existing def-use update-chain of the pointer, by modifying
   the PTR_INCR as illustrated below:

   The pointer def-use update-chain before this function:
                        DATAREF_PTR = phi (p_0, p_2)
                        ....
        PTR_INCR:       p_2 = DATAREF_PTR + step

   The pointer def-use update-chain after this function:
                        DATAREF_PTR = phi (p_0, p_2)
                        ....
                        NEW_DATAREF_PTR = DATAREF_PTR + BUMP
                        ....
        PTR_INCR:       p_2 = NEW_DATAREF_PTR + step

   Input:
   DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
                 in the loop.
   PTR_INCR - optional. The stmt that updates the pointer in each iteration of
	      the loop.  The increment amount across iterations is expected
	      to be vector_size.
   BSI - location where the new update stmt is to be placed.
   STMT - the original scalar memory-access stmt that is being vectorized.
   BUMP - optional. The offset by which to bump the pointer. If not given,
	  the offset is assumed to be vector_size.

   Output: Return NEW_DATAREF_PTR as illustrated above.

*/

tree
bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
		 gimple stmt, tree bump)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Default bump is one vector's worth of bytes.  */
  tree update = TYPE_SIZE_UNIT (vectype);
  gassign *incr_stmt;
  ssa_op_iter iter;
  use_operand_p use_p;
  tree new_dataref_ptr;

  if (bump)
    update = bump;

  new_dataref_ptr = copy_ssa_name (dataref_ptr);
  incr_stmt = gimple_build_assign (new_dataref_ptr, POINTER_PLUS_EXPR,
				   dataref_ptr, update);
  vect_finish_stmt_generation (stmt, incr_stmt, gsi);

  /* Copy the points-to information if it exists. */
  if (DR_PTR_INFO (dr))
    {
      duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
      mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (new_dataref_ptr));
    }

  if (!ptr_incr)
    return new_dataref_ptr;

  /* Update the vector-pointer's cross-iteration increment: redirect the
     use of the old pointer in PTR_INCR to the newly bumped pointer.  */
  FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
    {
      tree use = USE_FROM_PTR (use_p);

      if (use == dataref_ptr)
        SET_USE (use_p, new_dataref_ptr);
      else
        gcc_assert (tree_int_cst_compare (use, update) == 0);
    }

  return new_dataref_ptr;
}


/* Function vect_create_destination_var.

   Create a new temporary of type VECTYPE.
*/ tree vect_create_destination_var (tree scalar_dest, tree vectype) { tree vec_dest; const char *name; char *new_name; tree type; enum vect_var_kind kind; kind = vectype ? vect_simple_var : vect_scalar_var; type = vectype ? vectype : TREE_TYPE (scalar_dest); gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME); name = get_name (scalar_dest); if (name) new_name = xasprintf ("%s_%u", name, SSA_NAME_VERSION (scalar_dest)); else new_name = xasprintf ("_%u", SSA_NAME_VERSION (scalar_dest)); vec_dest = vect_get_new_vect_var (type, kind, new_name); free (new_name); return vec_dest; } /* Function vect_grouped_store_supported. Returns TRUE if interleave high and interleave low permutations are supported, and FALSE otherwise. */ bool vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count) { machine_mode mode = TYPE_MODE (vectype); /* vect_permute_store_chain requires the group size to be equal to 3 or be a power of two. */ if (count != 3 && exact_log2 (count) == -1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "the size of the group of accesses" " is not a power of 2 or not eqaul to 3\n"); return false; } /* Check that the permutation is supported. 
*/ if (VECTOR_MODE_P (mode)) { unsigned int i, nelt = GET_MODE_NUNITS (mode); unsigned char *sel = XALLOCAVEC (unsigned char, nelt); if (count == 3) { unsigned int j0 = 0, j1 = 0, j2 = 0; unsigned int i, j; for (j = 0; j < 3; j++) { int nelt0 = ((3 - j) * nelt) % 3; int nelt1 = ((3 - j) * nelt + 1) % 3; int nelt2 = ((3 - j) * nelt + 2) % 3; for (i = 0; i < nelt; i++) { if (3 * i + nelt0 < nelt) sel[3 * i + nelt0] = j0++; if (3 * i + nelt1 < nelt) sel[3 * i + nelt1] = nelt + j1++; if (3 * i + nelt2 < nelt) sel[3 * i + nelt2] = 0; } if (!can_vec_perm_p (mode, false, sel)) { if (dump_enabled_p ()) dump_printf (MSG_MISSED_OPTIMIZATION, "permutaion op not supported by target.\n"); return false; } for (i = 0; i < nelt; i++) { if (3 * i + nelt0 < nelt) sel[3 * i + nelt0] = 3 * i + nelt0; if (3 * i + nelt1 < nelt) sel[3 * i + nelt1] = 3 * i + nelt1; if (3 * i + nelt2 < nelt) sel[3 * i + nelt2] = nelt + j2++; } if (!can_vec_perm_p (mode, false, sel)) { if (dump_enabled_p ()) dump_printf (MSG_MISSED_OPTIMIZATION, "permutaion op not supported by target.\n"); return false; } } return true; } else { /* If length is not equal to 3 then only power of 2 is supported. */ gcc_assert (exact_log2 (count) != -1); for (i = 0; i < nelt / 2; i++) { sel[i * 2] = i; sel[i * 2 + 1] = i + nelt; } if (can_vec_perm_p (mode, false, sel)) { for (i = 0; i < nelt; i++) sel[i] += nelt / 2; if (can_vec_perm_p (mode, false, sel)) return true; } } } if (dump_enabled_p ()) dump_printf (MSG_MISSED_OPTIMIZATION, "permutaion op not supported by target.\n"); return false; } /* Return TRUE if vec_store_lanes is available for COUNT vectors of type VECTYPE. */ bool vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count) { return vect_lanes_optab_supported_p ("vec_store_lanes", vec_store_lanes_optab, vectype, count); } /* Function vect_permute_store_chain. 
   Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
   a power of 2 or equal to 3, generate interleave_high/low stmts to reorder
   the data correctly for the stores.  Return the final references for stores
   in RESULT_CHAIN.

   E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
   The input is 4 vectors each containing 8 elements.  We assign a number to
   each element, the input sequence is:

   1st vec:   0  1  2  3  4  5  6  7
   2nd vec:   8  9 10 11 12 13 14 15
   3rd vec:  16 17 18 19 20 21 22 23
   4th vec:  24 25 26 27 28 29 30 31

   The output sequence should be:

   1st vec:  0  8 16 24  1  9 17 25
   2nd vec:  2 10 18 26  3 11 19 27
   3rd vec:  4 12 20 28  5 13 21 29
   4th vec:  6 14 22 30  7 15 23 31

   i.e., we interleave the contents of the four vectors in their order.

   We use interleave_high/low instructions to create such output.  The input of
   each interleave_high/low operation is two vectors:
   1st vec    2nd vec
   0 1 2 3    4 5 6 7
   the even elements of the result vector are obtained left-to-right from the
   high/low elements of the first vector.  The odd elements of the result are
   obtained left-to-right from the high/low elements of the second vector.
   The output of interleave_high will be:   0 4 1 5
   and of interleave_low:                   2 6 3 7


   The permutation is done in log LENGTH stages.  In each stage interleave_high
   and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
   where the first argument is taken from the first half of DR_CHAIN and the
   second argument from its second half.
   In our example,

   I1: interleave_high (1st vec, 3rd vec)
   I2: interleave_low (1st vec, 3rd vec)
   I3: interleave_high (2nd vec, 4th vec)
   I4: interleave_low (2nd vec, 4th vec)

   The output for the first stage is:

   I1:  0 16  1 17  2 18  3 19
   I2:  4 20  5 21  6 22  7 23
   I3:  8 24  9 25 10 26 11 27
   I4: 12 28 13 29 14 30 15 31

   The output of the second stage, i.e. the final result is:

   I1:  0  8 16 24  1  9 17 25
   I2:  2 10 18 26  3 11 19 27
   I3:  4 12 20 28  5 13 21 29
   I4:  6 14 22 30  7 15 23 31.
*/

void
vect_permute_store_chain (vec<tree> dr_chain,
			  unsigned int length,
			  gimple stmt,
			  gimple_stmt_iterator *gsi,
			  vec<tree> *result_chain)
{
  tree vect1, vect2, high, low;
  gimple perm_stmt;
  tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
  tree perm_mask_low, perm_mask_high;
  tree data_ref;
  tree perm3_mask_low, perm3_mask_high;
  unsigned int i, n, log_length = exact_log2 (length);
  unsigned int j, nelt = TYPE_VECTOR_SUBPARTS (vectype);
  unsigned char *sel = XALLOCAVEC (unsigned char, nelt);

  /* Start from the unpermuted input; the stages below rewrite in place.  */
  result_chain->quick_grow (length);
  memcpy (result_chain->address (), dr_chain.address (),
	  length * sizeof (tree));

  if (length == 3)
    {
      unsigned int j0 = 0, j1 = 0, j2 = 0;

      for (j = 0; j < 3; j++)
        {
	  int nelt0 = ((3 - j) * nelt) % 3;
	  int nelt1 = ((3 - j) * nelt + 1) % 3;
	  int nelt2 = ((3 - j) * nelt + 2) % 3;

	  for (i = 0; i < nelt; i++)
	    {
	      if (3 * i + nelt0 < nelt)
		sel[3 * i + nelt0] = j0++;
	      if (3 * i + nelt1 < nelt)
		sel[3 * i + nelt1] = nelt + j1++;
	      if (3 * i + nelt2 < nelt)
		sel[3 * i + nelt2] = 0;
	    }
	  perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);

	  for (i = 0; i < nelt; i++)
	    {
	      if (3 * i + nelt0 < nelt)
		sel[3 * i + nelt0] = 3 * i + nelt0;
	      if (3 * i + nelt1 < nelt)
		sel[3 * i + nelt1] = 3 * i + nelt1;
	      if (3 * i + nelt2 < nelt)
		sel[3 * i + nelt2] = nelt + j2++;
	    }
	  perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);

	  vect1 = dr_chain[0];
	  vect2 = dr_chain[1];

	  /* Create interleaving stmt:
	     low = VEC_PERM_EXPR <vect1, vect2,
				  {j, nelt, *, j + 1, nelt + j + 1, *,
				   j + 2, nelt + j + 2, *, ...}>  */
	  data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
	  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
					   vect2, perm3_mask_low);
	  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

	  vect1 = data_ref;
	  vect2 = dr_chain[2];
	  /* Create interleaving stmt:
	     low = VEC_PERM_EXPR <vect1, vect2,
				  {0, 1, nelt + j, 3, 4, nelt + j + 1,
				   6, 7, nelt + j + 2, ...}>  */
	  data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
	  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
					   vect2, perm3_mask_high);
	  vect_finish_stmt_generation (stmt, perm_stmt, gsi);
	  (*result_chain)[j] = data_ref;
	}
    }
  else
    {
      /* If length is not equal to 3 then only power of 2 is supported.  */
      gcc_assert (exact_log2 (length) != -1);

      for (i = 0, n = nelt / 2; i < n; i++)
	{
	  sel[i * 2] = i;
	  sel[i * 2 + 1] = i + nelt;
	}
	perm_mask_high = vect_gen_perm_mask_checked (vectype, sel);

	for (i = 0; i < nelt; i++)
	  sel[i] += nelt / 2;
	perm_mask_low = vect_gen_perm_mask_checked (vectype, sel);

	/* log2(LENGTH) stages; each stage interleaves the two halves of
	   the previous stage's result.  */
	for (i = 0, n = log_length; i < n; i++)
	  {
	    for (j = 0; j < length/2; j++)
	      {
		vect1 = dr_chain[j];
		vect2 = dr_chain[j+length/2];

		/* Create interleaving stmt:
		   high = VEC_PERM_EXPR <vect1, vect2,
					 {0, nelt, 1, nelt+1, ...}>  */
		high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
		perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1,
						 vect2, perm_mask_high);
		vect_finish_stmt_generation (stmt, perm_stmt, gsi);
		(*result_chain)[2*j] = high;

		/* Create interleaving stmt:
		   low = VEC_PERM_EXPR <vect1, vect2,
					{nelt/2, nelt*3/2, nelt/2+1,
					 nelt*3/2+1, ...}>  */
		low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
		perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1,
						 vect2, perm_mask_low);
		vect_finish_stmt_generation (stmt, perm_stmt, gsi);
		(*result_chain)[2*j+1] = low;
	      }
	    memcpy (dr_chain.address (), result_chain->address (),
		    length * sizeof (tree));
	  }
    }
}

/* Function vect_setup_realignment

   This function is called when vectorizing an unaligned load using
   the dr_explicit_realign[_optimized] scheme.
   This function generates the following code at the loop prolog:

      p = initial_addr;
   x  msq_init = *(floor(p));   # prolog load
      realignment_token = call target_builtin;
    loop:
   x  msq = phi (msq_init, ---)

   The stmts marked with x are generated only for the case of
   dr_explicit_realign_optimized.

   The code above sets up a new (vector) pointer, pointing to the first
   location accessed by STMT, and a "floor-aligned" load using that pointer.
   It also generates code to compute the "realignment-token"
   (if the relevant target hook was defined), and creates a phi-node at the
   loop-header bb whose arguments are the result of the prolog-load (created
   by this function) and the result of a load that takes place in the loop
   (to be created by the caller to this function).

   For the case of dr_explicit_realign_optimized:
   The caller to this function uses the phi-result (msq) to create the
   realignment code inside the loop, and sets up the missing phi argument,
   as follows:
    loop:
      msq = phi (msq_init, lsq)
      lsq = *(floor(p'));        # load in loop
      result = realign_load (msq, lsq, realignment_token);

   For the case of dr_explicit_realign:
    loop:
      msq = *(floor(p));        # load in loop
      p' = p + (VS-1);
      lsq = *(floor(p'));       # load in loop
      result = realign_load (msq, lsq, realignment_token);

   Input:
   STMT - (scalar) load stmt to be vectorized. This load accesses
          a memory location that may be unaligned.
   BSI - place where new code is to be inserted.
   ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
                              is used.

   Output:
   REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
                       target hook, if defined.
   Return value - the result of the loop-header phi node.  */

tree
vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi,
                        tree *realignment_token,
                        enum dr_alignment_support alignment_support_scheme,
                        tree init_addr,
                        struct loop **at_loop)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  struct loop *loop = NULL;
  edge pe = NULL;
  tree scalar_dest = gimple_assign_lhs (stmt);
  tree vec_dest;
  gimple inc;
  tree ptr;
  tree data_ref;
  basic_block new_bb;
  tree msq_init = NULL_TREE;
  tree new_temp;
  gphi *phi_stmt;
  tree msq = NULL_TREE;
  gimple_seq stmts = NULL;
  bool inv_p;
  bool compute_in_loop = false;
  bool nested_in_vect_loop = false;
  struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
  struct loop *loop_for_initial_load = NULL;

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
    }

  gcc_assert (alignment_support_scheme == dr_explicit_realign
              || alignment_support_scheme == dr_explicit_realign_optimized);

  /* We need to generate three things:
     1. the misalignment computation
     2. the extra vector load (for the optimized realignment scheme).
     3. the phi node for the two vectors from which the realignment is
      done (for the optimized realignment scheme).  */

  /* 1. Determine where to generate the misalignment computation.

     If INIT_ADDR is NULL_TREE, this indicates that the misalignment
     calculation will be generated by this function, outside the loop (in the
     preheader).  Otherwise, INIT_ADDR had already been computed for us by the
     caller, inside the loop.

     Background: If the misalignment remains fixed throughout the iterations
     of the loop, then both realignment schemes are applicable, and also the
     misalignment computation can be done outside LOOP.  This is because we
     are vectorizing LOOP, and so the memory accesses in LOOP advance in steps
     that are a multiple of VS (the Vector Size), and therefore the
     misalignment in different vectorized LOOP iterations is always the same.
     The problem arises only if the memory access is in an inner-loop nested
     inside LOOP, which is now being vectorized using outer-loop
     vectorization.  This is the only case when the misalignment of the
     memory access may not remain fixed throughout the iterations of the
     inner-loop (as explained in detail in vect_supportable_dr_alignment).
     In this case, not only is the optimized realignment scheme not
     applicable, but also the misalignment computation (and generation of the
     realignment token that is passed to REALIGN_LOAD) have to be done inside
     the loop.

     In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
     or not, which in turn determines if the misalignment is computed inside
     the inner-loop, or outside LOOP.  */

  if (init_addr != NULL_TREE || !loop_vinfo)
    {
      compute_in_loop = true;
      gcc_assert (alignment_support_scheme == dr_explicit_realign);
    }

  /* 2. Determine where to generate the extra vector load.

     For the optimized realignment scheme, instead of generating two vector
     loads in each iteration, we generate a single extra vector load in the
     preheader of the loop, and in each iteration reuse the result of the
     vector load from the previous iteration.  In case the memory access is
     in an inner-loop nested inside LOOP, which is now being vectorized using
     outer-loop vectorization, we need to determine whether this initial
     vector load should be generated at the preheader of the inner-loop, or
     can be generated at the preheader of LOOP.  If the memory access has no
     evolution in LOOP, it can be generated in the preheader of LOOP.
     Otherwise, it has to be generated inside LOOP (in the preheader of the
     inner-loop).  */

  if (nested_in_vect_loop)
    {
      tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
      bool invariant_in_outerloop =
            (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
      loop_for_initial_load =
          (invariant_in_outerloop ? loop : loop->inner);
    }
  else
    loop_for_initial_load = loop;
  if (at_loop)
    *at_loop = loop_for_initial_load;

  if (loop_for_initial_load)
    pe = loop_preheader_edge (loop_for_initial_load);

  /* 3. For the case of the optimized realignment, create the first vector
      load at the loop preheader.  */

  if (alignment_support_scheme == dr_explicit_realign_optimized)
    {
      /* Create msq_init = *(floor(p1)) in the loop preheader.  */
      gassign *new_stmt;

      gcc_assert (!compute_in_loop);
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
                                      NULL_TREE, &init_addr, NULL, &inc,
                                      true, &inv_p);
      /* Floor-align the pointer by masking off the low address bits.  */
      new_temp = copy_ssa_name (ptr);
      new_stmt = gimple_build_assign
                   (new_temp, BIT_AND_EXPR, ptr,
                    build_int_cst (TREE_TYPE (ptr),
                                   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
      new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
      gcc_assert (!new_bb);
      data_ref
        = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
                  build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
      new_stmt = gimple_build_assign (vec_dest, data_ref);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
      if (pe)
        {
          new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
          gcc_assert (!new_bb);
        }
      else
        gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);

      msq_init = gimple_assign_lhs (new_stmt);
    }

  /* 4. Create realignment token using a target builtin, if available.
      It is done either inside the containing loop, or before LOOP (as
      determined above).  */

  if (targetm.vectorize.builtin_mask_for_load)
    {
      gcall *new_stmt;
      tree builtin_decl;

      /* Compute INIT_ADDR - the initial addressed accessed by this memref.  */
      if (!init_addr)
        {
          /* Generate the INIT_ADDR computation outside LOOP.  */
          init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
                                                            NULL_TREE, loop);
          if (loop)
            {
              pe = loop_preheader_edge (loop);
              new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
              gcc_assert (!new_bb);
            }
          else
            gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
        }

      builtin_decl = targetm.vectorize.builtin_mask_for_load ();
      new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
      vec_dest =
        vect_create_destination_var (scalar_dest,
                                     gimple_call_return_type (new_stmt));
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);

      if (compute_in_loop)
        gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
      else
        {
          /* Generate the misalignment computation outside LOOP.  */
          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
          gcc_assert (!new_bb);
        }

      *realignment_token = gimple_call_lhs (new_stmt);

      /* The result of the CALL_EXPR to this builtin is determined from
         the value of the parameter and no global variables are touched
         which makes the builtin a "const" function.  Requiring the
         builtin to have the "const" attribute makes it unnecessary
         to call mark_call_clobbered.  */
      gcc_assert (TREE_READONLY (builtin_decl));
    }

  if (alignment_support_scheme == dr_explicit_realign)
    return msq;

  gcc_assert (!compute_in_loop);
  gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);

  /* 5. Create msq = phi <msq_init, lsq> in loop.  The lsq argument is
     filled in later by the caller (see the function comment above).  */

  pe = loop_preheader_edge (containing_loop);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);
  msq = make_ssa_name (vec_dest);
  phi_stmt = create_phi_node (msq, containing_loop->header);
  add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);

  return msq;
}

/* Function vect_grouped_load_supported.

   Returns TRUE if even and odd permutations are supported,
   and FALSE otherwise.
*/

bool
vect_grouped_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
  machine_mode mode = TYPE_MODE (vectype);

  /* vect_permute_load_chain requires the group size to be equal to 3 or
     be a power of two.  */
  if (count != 3 && exact_log2 (count) == -1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "the size of the group of accesses"
                         " is not a power of 2 or not equal to 3\n");
      return false;
    }

  /* Check that the permutation is supported.  */
  if (VECTOR_MODE_P (mode))
    {
      unsigned int i, j, nelt = GET_MODE_NUNITS (mode);
      unsigned char *sel = XALLOCAVEC (unsigned char, nelt);

      if (count == 3)
        {
          /* Probe the same two selectors per K that
             vect_permute_load_chain would use for a group of 3.  */
          unsigned int k;
          for (k = 0; k < 3; k++)
            {
              for (i = 0; i < nelt; i++)
                if (3 * i + k < 2 * nelt)
                  sel[i] = 3 * i + k;
                else
                  sel[i] = 0;
              if (!can_vec_perm_p (mode, false, sel))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "shuffle of 3 loads is not supported by"
                                     " target\n");
                  return false;
                }
              for (i = 0, j = 0; i < nelt; i++)
                if (3 * i + k < 2 * nelt)
                  sel[i] = i;
                else
                  sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
              if (!can_vec_perm_p (mode, false, sel))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "shuffle of 3 loads is not supported by"
                                     " target\n");
                  return false;
                }
            }
          return true;
        }
      else
        {
          /* If length is not equal to 3 then only power of 2 is supported.  */
          gcc_assert (exact_log2 (count) != -1);
          /* Probe extract-even then extract-odd selectors.  */
          for (i = 0; i < nelt; i++)
            sel[i] = i * 2;
          if (can_vec_perm_p (mode, false, sel))
            {
              for (i = 0; i < nelt; i++)
                sel[i] = i * 2 + 1;
              if (can_vec_perm_p (mode, false, sel))
                return true;
            }
        }
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                     "extract even/odd not supported by target\n");
  return false;
}

/* Return TRUE if vec_load_lanes is available for COUNT vectors of
   type VECTYPE.
*/

bool
vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
  /* Thin wrapper: defer entirely to the optab query.  */
  return vect_lanes_optab_supported_p ("vec_load_lanes",
                                       vec_load_lanes_optab,
                                       vectype, count);
}

/* Function vect_permute_load_chain.

   Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
   a power of 2 or equal to 3, generate extract_even/odd stmts to reorder
   the input data correctly.  Return the final references for loads in
   RESULT_CHAIN.

   E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
   The input is 4 vectors each containing 8 elements. We assign a number to each
   element, the input sequence is:

   1st vec:   0  1  2  3  4  5  6  7
   2nd vec:   8  9 10 11 12 13 14 15
   3rd vec:  16 17 18 19 20 21 22 23
   4th vec:  24 25 26 27 28 29 30 31

   The output sequence should be:

   1st vec:  0 4  8 12 16 20 24 28
   2nd vec:  1 5  9 13 17 21 25 29
   3rd vec:  2 6 10 14 18 22 26 30
   4th vec:  3 7 11 15 19 23 27 31

   i.e., the first output vector should contain the first elements of each
   interleaving group, etc.

   We use extract_even/odd instructions to create such output.  The input of
   each extract_even/odd operation is two vectors
   1st vec    2nd vec
   0 1 2 3    4 5 6 7

   and the output is the vector of extracted even/odd elements.  The output of
   extract_even will be:   0 2 4 6
   and of extract_odd:     1 3 5 7

   The permutation is done in log LENGTH stages.  In each stage extract_even
   and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
   their order.  In our example,

   E1: extract_even (1st vec, 2nd vec)
   E2: extract_odd (1st vec, 2nd vec)
   E3: extract_even (3rd vec, 4th vec)
   E4: extract_odd (3rd vec, 4th vec)

   The output for the first stage will be:

   E1:  0  2  4  6  8 10 12 14
   E2:  1  3  5  7  9 11 13 15
   E3: 16 18 20 22 24 26 28 30
   E4: 17 19 21 23 25 27 29 31

   In order to proceed and create the correct sequence for the next stage (or
   for the correct output, if the second stage is the last one, as in our
   example), we first put the output of extract_even operation and then the
   output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
   The input for the second stage is:

   1st vec (E1):  0  2  4  6  8 10 12 14
   2nd vec (E3): 16 18 20 22 24 26 28 30
   3rd vec (E2):  1  3  5  7  9 11 13 15
   4th vec (E4): 17 19 21 23 25 27 29 31

   The output of the second stage:

   E1:  0 4  8 12 16 20 24 28
   E2:  2 6 10 14 18 22 26 30
   E3:  1 5  9 13 17 21 25 29
   E4:  3 7 11 15 19 23 27 31

   And RESULT_CHAIN after reordering:

   1st vec (E1):  0 4  8 12 16 20 24 28
   2nd vec (E3):  1 5  9 13 17 21 25 29
   3rd vec (E2):  2 6 10 14 18 22 26 30
   4th vec (E4):  3 7 11 15 19 23 27 31.
*/

static void
vect_permute_load_chain (vec<tree> dr_chain,
                         unsigned int length,
                         gimple stmt,
                         gimple_stmt_iterator *gsi,
                         vec<tree> *result_chain)
{
  tree data_ref, first_vect, second_vect;
  tree perm_mask_even, perm_mask_odd;
  tree perm3_mask_low, perm3_mask_high;
  gimple perm_stmt;
  tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
  unsigned int i, j, log_length = exact_log2 (length);
  unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
  unsigned char *sel = XALLOCAVEC (unsigned char, nelt);

  /* Seed RESULT_CHAIN with the inputs; entries are overwritten below.  */
  result_chain->quick_grow (length);
  memcpy (result_chain->address (), dr_chain.address (),
          length * sizeof (tree));

  if (length == 3)
    {
      /* Group size 3: two chained 3-input shuffles per output vector,
         one K per output position.  */
      unsigned int k;

      for (k = 0; k < 3; k++)
        {
          for (i = 0; i < nelt; i++)
            if (3 * i + k < 2 * nelt)
              sel[i] = 3 * i + k;
            else
              sel[i] = 0;
          perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);

          for (i = 0, j = 0; i < nelt; i++)
            if (3 * i + k < 2 * nelt)
              sel[i] = i;
            else
              sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
          perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);

          first_vect = dr_chain[0];
          second_vect = dr_chain[1];

          /* Create interleaving stmt (low part of):
             low = VEC_PERM_EXPR <first_vect, second_vect2, {k, 3 + k, 6 + k,
                                                             ...}>  */
          data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
          perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
                                           second_vect, perm3_mask_low);
          vect_finish_stmt_generation (stmt, perm_stmt, gsi);

          /* Create interleaving stmt (high part of):
             high = VEC_PERM_EXPR <first_vect, second_vect2, {k, 3 + k, 6 + k,
                                                              ...}>  */
          first_vect = data_ref;
          second_vect = dr_chain[2];
          data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
          perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
                                           second_vect, perm3_mask_high);
          vect_finish_stmt_generation (stmt, perm_stmt, gsi);
          (*result_chain)[k] = data_ref;
        }
    }
  else
    {
      /* If length is not equal to 3 then only power of 2 is supported.  */
      gcc_assert (exact_log2 (length) != -1);

      /* Extract-even selector {0, 2, 4, ...}.  */
      for (i = 0; i < nelt; ++i)
        sel[i] = i * 2;
      perm_mask_even = vect_gen_perm_mask_checked (vectype, sel);

      /* Extract-odd selector {1, 3, 5, ...}.  */
      for (i = 0; i < nelt; ++i)
        sel[i] = i * 2 + 1;
      perm_mask_odd = vect_gen_perm_mask_checked (vectype, sel);

      /* log2(LENGTH) stages; evens land in the first half of
         RESULT_CHAIN, odds in the second half (see function comment).  */
      for (i = 0; i < log_length; i++)
        {
          for (j = 0; j < length; j += 2)
            {
              first_vect = dr_chain[j];
              second_vect = dr_chain[j+1];

              /* data_ref = permute_even (first_data_ref, second_data_ref);  */
              data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_even");
              perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
                                               first_vect, second_vect,
                                               perm_mask_even);
              vect_finish_stmt_generation (stmt, perm_stmt, gsi);
              (*result_chain)[j/2] = data_ref;

              /* data_ref = permute_odd (first_data_ref, second_data_ref);  */
              data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_odd");
              perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
                                               first_vect, second_vect,
                                               perm_mask_odd);
              vect_finish_stmt_generation (stmt, perm_stmt, gsi);
              (*result_chain)[j/2+length/2] = data_ref;
            }
          memcpy (dr_chain.address (), result_chain->address (),
                  length * sizeof (tree));
        }
    }
}

/* Function vect_shift_permute_load_chain.

   Given a chain of loads in DR_CHAIN of LENGTH 2 or 3, generate
   sequence of stmts to reorder the input data accordingly.
   Return the final references for loads in RESULT_CHAIN.
   Return true if successed, false otherwise.

   E.g., LENGTH is 3 and the scalar type is short, i.e., VF is 8.
   The input is 3 vectors each containing 8 elements.  We assign a
   number to each element, the input sequence is:

   1st vec:   0  1  2  3  4  5  6  7
   2nd vec:   8  9 10 11 12 13 14 15
   3rd vec:  16 17 18 19 20 21 22 23

   The output sequence should be:

   1st vec:  0 3 6  9 12 15 18 21
   2nd vec:  1 4 7 10 13 16 19 22
   3rd vec:  2 5 8 11 14 17 20 23

   We use 3 shuffle instructions and 3 * 3 - 1 shifts to create such output.
   First we shuffle all 3 vectors to get correct elements order:

   1st vec:  ( 0  3  6) ( 1  4  7) ( 2  5)
   2nd vec:  ( 8 11 14) ( 9 12 15) (10 13)
   3rd vec:  (16 19 22) (17 20 23) (18 21)

   Next we unite and shift vector 3 times:

   1st step:
     shift right by 6 the concatenation of:
     "1st vec" and  "2nd vec"
       ( 0  3  6) ( 1  4  7) |( 2  5) _ ( 8 11 14) ( 9 12 15)| (10 13)
     "2nd vec" and  "3rd vec"
       ( 8 11 14) ( 9 12 15) |(10 13) _ (16 19 22) (17 20 23)| (18 21)
     "3rd vec" and  "1st vec"
       (16 19 22) (17 20 23) |(18 21) _ ( 0  3  6) ( 1  4  7)| ( 2  5)
                             | New vectors                   |

     So that now new vectors are:

     1st vec:  ( 2  5) ( 8 11 14) ( 9 12 15)
     2nd vec:  (10 13) (16 19 22) (17 20 23)
     3rd vec:  (18 21) ( 0  3  6) ( 1  4  7)

   2nd step:
     shift right by 5 the concatenation of:
     "1st vec" and  "3rd vec"
       ( 2  5) ( 8 11 14) |( 9 12 15) _ (18 21) ( 0  3  6)| ( 1  4  7)
     "2nd vec" and  "1st vec"
       (10 13) (16 19 22) |(17 20 23) _ ( 2  5) ( 8 11 14)| ( 9 12 15)
     "3rd vec" and  "2nd vec"
       (18 21) ( 0  3  6) |( 1  4  7) _ (10 13) (16 19 22)| (17 20 23)
                          | New vectors                   |

     So that now new vectors are:

     1st vec:  ( 9 12 15) (18 21) ( 0  3  6)
     2nd vec:  (17 20 23) ( 2  5) ( 8 11 14)
     3rd vec:  ( 1  4  7) (10 13) (16 19 22)  READY

   3rd step:
     shift right by 5 the concatenation of:
     "1st vec" and  "1st vec"
       ( 9 12 15) (18 21) |( 0  3  6) _ ( 9 12 15) (18 21)| ( 0  3  6)
     shift right by 3 the concatenation of:
     "2nd vec" and  "2nd vec"
               (17 20 23) |( 2  5) ( 8 11 14) _ (17 20 23)| ( 2  5) ( 8 11 14)
                          | New vectors                   |

     So that now all vectors are READY:

     1st vec:  ( 0  3  6) ( 9 12 15) (18 21)
     2nd vec:  ( 2  5) ( 8 11 14) (17 20 23)
     3rd vec:  ( 1  4  7) (10 13) (16 19 22)

   This algorithm is faster than one in vect_permute_load_chain if:
     1.  "shift of a concatination" is faster than general permutation.
         This is usually so.
     2.  The TARGET machine can't execute vector instructions in
         parallel.  This is because each step of the algorithm depends
         on previous.  The algorithm in vect_permute_load_chain is
         much more parallel.

   The algorithm is applicable only for LOAD CHAIN LENGTH less than VF.  */

static bool
vect_shift_permute_load_chain (vec<tree> dr_chain,
                               unsigned int length,
                               gimple stmt,
                               gimple_stmt_iterator *gsi,
                               vec<tree> *result_chain)
{
  tree vect[3], vect_shift[3], data_ref, first_vect, second_vect;
  tree perm2_mask1, perm2_mask2, perm3_mask;
  tree select_mask, shift1_mask, shift2_mask, shift3_mask, shift4_mask;
  gimple perm_stmt;

  tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
  unsigned int i;
  unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
  unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);

  /* Seed RESULT_CHAIN with the inputs; entries are overwritten below.  */
  result_chain->quick_grow (length);
  memcpy (result_chain->address (), dr_chain.address (),
          length * sizeof (tree));

  /* Power-of-two group sizes (in practice LENGTH 2 here — see the
     function comment); requires VF > 4 so the shifts make sense.  */
  if (exact_log2 (length) != -1 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4)
    {
      unsigned int j, log_length = exact_log2 (length);
      for (i = 0; i < nelt / 2; ++i)
        sel[i] = i * 2;
      for (i = 0; i < nelt / 2; ++i)
        sel[nelt / 2 + i] = i * 2 + 1;
      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "shuffle of 2 fields structure is not \
supported by target\n");
          return false;
        }
      perm2_mask1 = vect_gen_perm_mask_checked (vectype, sel);

      for (i = 0; i < nelt / 2; ++i)
        sel[i] = i * 2 + 1;
      for (i = 0; i < nelt / 2; ++i)
        sel[nelt / 2 + i] = i * 2;
      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "shuffle of 2 fields structure is not \
supported by target\n");
          return false;
        }
      perm2_mask2 = vect_gen_perm_mask_checked (vectype, sel);

      /* Generating permutation constant to shift all elements.
         For vector length 8 it is {4 5 6 7 8 9 10 11}.  */
      for (i = 0; i < nelt; i++)
        sel[i] = nelt / 2 + i;
      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "shift permutation is not supported by target\n");
          return false;
        }
      shift1_mask = vect_gen_perm_mask_checked (vectype, sel);

      /* Generating permutation constant to select vector from 2.
         For vector length 8 it is {0 1 2 3 12 13 14 15}.  */
      for (i = 0; i < nelt / 2; i++)
        sel[i] = i;
      for (i = nelt / 2; i < nelt; i++)
        sel[i] = nelt + i;
      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "select is not supported by target\n");
          return false;
        }
      select_mask = vect_gen_perm_mask_checked (vectype, sel);

      for (i = 0; i < log_length; i++)
        {
          for (j = 0; j < length; j += 2)
            {
              first_vect = dr_chain[j];
              second_vect = dr_chain[j + 1];

              data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
              perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
                                               first_vect, first_vect,
                                               perm2_mask1);
              vect_finish_stmt_generation (stmt, perm_stmt, gsi);
              vect[0] = data_ref;

              data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
              perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
                                               second_vect, second_vect,
                                               perm2_mask2);
              vect_finish_stmt_generation (stmt, perm_stmt, gsi);
              vect[1] = data_ref;

              data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift");
              perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
                                               vect[0], vect[1], shift1_mask);
              vect_finish_stmt_generation (stmt, perm_stmt, gsi);
              (*result_chain)[j/2 + length/2] = data_ref;

              data_ref = make_temp_ssa_name (vectype, NULL, "vect_select");
              perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
                                               vect[0], vect[1], select_mask);
              vect_finish_stmt_generation (stmt, perm_stmt, gsi);
              (*result_chain)[j/2] = data_ref;
            }
          memcpy (dr_chain.address (), result_chain->address (),
                  length * sizeof (tree));
        }
      return true;
    }
  if (length == 3 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 2)
    {
      unsigned int k = 0, l = 0;

      /* Generating permutation constant to get all elements in rigth order.
         For vector length 8 it is {0 3 6 1 4 7 2 5}.  */
      for (i = 0; i < nelt; i++)
        {
          if (3 * k + (l % 3) >= nelt)
            {
              k = 0;
              l += (3 - (nelt % 3));
            }
          sel[i] = 3 * k + (l % 3);
          k++;
        }
      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "shuffle of 3 fields structure is not \
supported by target\n");
          return false;
        }
      perm3_mask = vect_gen_perm_mask_checked (vectype, sel);

      /* Generating permutation constant to shift all elements.
         For vector length 8 it is {6 7 8 9 10 11 12 13}.  */
      for (i = 0; i < nelt; i++)
        sel[i] = 2 * (nelt / 3) + (nelt % 3) + i;
      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "shift permutation is not supported by target\n");
          return false;
        }
      shift1_mask = vect_gen_perm_mask_checked (vectype, sel);

      /* Generating permutation constant to shift all elements.
         For vector length 8 it is {5 6 7 8 9 10 11 12}.  */
      for (i = 0; i < nelt; i++)
        sel[i] = 2 * (nelt / 3) + 1 + i;
      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "shift permutation is not supported by target\n");
          return false;
        }
      shift2_mask = vect_gen_perm_mask_checked (vectype, sel);

      /* Generating permutation constant to shift all elements.
         For vector length 8 it is {3 4 5 6 7 8 9 10}.  */
      for (i = 0; i < nelt; i++)
        sel[i] = (nelt / 3) + (nelt % 3) / 2 + i;
      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "shift permutation is not supported by target\n");
          return false;
        }
      shift3_mask = vect_gen_perm_mask_checked (vectype, sel);

      /* Generating permutation constant to shift all elements.
         For vector length 8 it is {5 6 7 8 9 10 11 12}.  */
      for (i = 0; i < nelt; i++)
        sel[i] = 2 * (nelt / 3) + (nelt % 3) / 2 + i;
      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "shift permutation is not supported by target\n");
          return false;
        }
      shift4_mask = vect_gen_perm_mask_checked (vectype, sel);

      /* Step 0 of the function comment: per-vector 3-field shuffle.  */
      for (k = 0; k < 3; k++)
        {
          data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3");
          perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
                                           dr_chain[k], dr_chain[k],
                                           perm3_mask);
          vect_finish_stmt_generation (stmt, perm_stmt, gsi);
          vect[k] = data_ref;
        }

      /* 1st shift step (pairs of distinct vectors).  */
      for (k = 0; k < 3; k++)
        {
          data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift1");
          perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
                                           vect[k % 3], vect[(k + 1) % 3],
                                           shift1_mask);
          vect_finish_stmt_generation (stmt, perm_stmt, gsi);
          vect_shift[k] = data_ref;
        }

      /* 2nd shift step.  */
      for (k = 0; k < 3; k++)
        {
          data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift2");
          perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
                                           vect_shift[(4 - k) % 3],
                                           vect_shift[(3 - k) % 3],
                                           shift2_mask);
          vect_finish_stmt_generation (stmt, perm_stmt, gsi);
          vect[k] = data_ref;
        }

      (*result_chain)[3 - (nelt % 3)] = vect[2];

      /* 3rd shift step: each remaining vector shifted against itself.  */
      data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift3");
      perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[0],
                                       vect[0], shift3_mask);
      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
      (*result_chain)[nelt % 3] = data_ref;

      data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift4");
      perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[1],
                                       vect[1], shift4_mask);
      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
      (*result_chain)[0] = data_ref;
      return true;
    }
  return false;
}

/* Function vect_transform_grouped_load.

   Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
   to perform their permutation and ascribe the result vectorized statements to
   the scalar statements.
*/

void
vect_transform_grouped_load (gimple stmt, vec<tree> dr_chain, int size,
                             gimple_stmt_iterator *gsi)
{
  machine_mode mode;
  vec<tree> result_chain = vNULL;

  /* DR_CHAIN contains input data-refs that are a part of the interleaving.
     RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted
     vectors, that are ready for vector computation.  */
  result_chain.create (size);

  /* If reassociation width for vector type is 2 or greater target machine can
     execute 2 or more vector instructions in parallel.  Otherwise try to
     get chain for loads group using vect_shift_permute_load_chain.  */
  mode = TYPE_MODE (STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)));
  if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1
      || exact_log2 (size) != -1
      || !vect_shift_permute_load_chain (dr_chain, size, stmt,
                                         gsi, &result_chain))
    vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
  vect_record_grouped_load_vectors (stmt, result_chain);
  result_chain.release ();
}

/* RESULT_CHAIN contains the output of a group of grouped loads that were
   generated as part of the vectorization of STMT.  Assign the statement
   for each vector to the associated scalar statement.  */

void
vect_record_grouped_load_vectors (gimple stmt, vec<tree> result_chain)
{
  gimple first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
  gimple next_stmt, new_stmt;
  unsigned int i, gap_count;
  tree tmp_data_ref;

  /* Put a permuted data-ref in the VECTORIZED_STMT field.
     Since we scan the chain starting from it's first node, their order
     corresponds the order of data-refs in RESULT_CHAIN.  */
  next_stmt = first_stmt;
  gap_count = 1;
  FOR_EACH_VEC_ELT (result_chain, i, tmp_data_ref)
    {
      if (!next_stmt)
        break;

      /* Skip the gaps.  Loads created for the gaps will be removed by dead
         code elimination pass later.  No need to check for the first stmt in
         the group, since it always exists.
         GROUP_GAP is the number of steps in elements from the previous
         access (if there is no gap GROUP_GAP is 1).  We skip loads that
         correspond to the gaps.  */
      if (next_stmt != first_stmt
          && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
        {
          gap_count++;
          continue;
        }

      while (next_stmt)
        {
          new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
          /* We assume that if VEC_STMT is not NULL, this is a case of multiple
             copies, and we put the new vector statement in the first available
             RELATED_STMT.  */
          if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
            STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
          else
            {
              if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
                {
                  /* Walk to the end of the RELATED_STMT chain and append
                     the new vector statement there.  */
                  gimple prev_stmt =
                    STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
                  gimple rel_stmt =
                    STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
                  while (rel_stmt)
                    {
                      prev_stmt = rel_stmt;
                      rel_stmt =
                        STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
                    }

                  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
                    new_stmt;
                }
            }

          next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
          gap_count = 1;
          /* If NEXT_STMT accesses the same DR as the previous statement,
             put the same TMP_DATA_REF as its vectorized statement; otherwise
             get the next data-ref from RESULT_CHAIN.  */
          if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
            break;
        }
    }
}

/* Function vect_force_dr_alignment_p.

   Returns whether the alignment of a DECL can be forced to be aligned
   on ALIGNMENT bit boundary.  */

bool
vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
{
  if (TREE_CODE (decl) != VAR_DECL)
    return false;

  /* Symbols visible to the symbol table must also allow their alignment
     to be increased (e.g. not pre-allocated in another TU).  */
  if (decl_in_symtab_p (decl)
      && !symtab_node::get (decl)->can_increase_alignment_p ())
    return false;

  if (TREE_STATIC (decl))
    return (alignment <= MAX_OFILE_ALIGNMENT);
  else
    return (alignment <= MAX_STACK_ALIGNMENT);
}

/* Return whether the data reference DR is supported with respect to its
   alignment.
   If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
   it is aligned, i.e., check if it is possible to vectorize it with different
   alignment.
*/

enum dr_alignment_support
vect_supportable_dr_alignment (struct data_reference *dr,
                               bool check_aligned_accesses)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  machine_mode mode = TYPE_MODE (vectype);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  bool nested_in_vect_loop = false;

  if (aligned_access_p (dr) && !check_aligned_accesses)
    return dr_aligned;

  /* For now assume all conditional loads/stores support unaligned
     access without any special code.  */
  if (is_gimple_call (stmt)
      && gimple_call_internal_p (stmt)
      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
          || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
    return dr_unaligned_supported;

  if (loop_vinfo)
    {
      vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
      nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
    }

  /* Possibly unaligned access.  */

  /* We can choose between using the implicit realignment scheme (generating
     a misaligned_move stmt) and the explicit realignment scheme (generating
     aligned loads with a REALIGN_LOAD).  There are two variants to the
     explicit realignment scheme: optimized, and unoptimized.
     We can optimize the realignment only if the step between consecutive
     vector loads is equal to the vector size.  Since the vector memory
     accesses advance in steps of VS (Vector Size) in the vectorized loop, it
     is guaranteed that the misalignment amount remains the same throughout the
     execution of the vectorized loop.  Therefore, we can create the
     "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
     at the loop preheader.

     However, in the case of outer-loop vectorization, when vectorizing a
     memory access in the inner-loop nested within the LOOP that is now being
     vectorized, while it is guaranteed that the misalignment of the
     vectorized memory access will remain the same in different outer-loop
     iterations, it is *not* guaranteed that is will remain the same throughout
     the execution of the inner-loop.  This is because the inner-loop advances
     with the original scalar step (and not in steps of VS).  If the inner-loop
     step happens to be a multiple of VS, then the misalignment remains fixed
     and we can use the optimized realignment scheme.  For example:

      for (i=0; i<N; i++)
        for (j=0; j<M; j++)
          s += a[i+j];

     When vectorizing the i-loop in the above example, the step between
     consecutive vector loads is 1, and so the misalignment does not remain
     fixed across the execution of the inner-loop, and the realignment cannot
     be optimized (as illustrated in the following pseudo vectorized loop):

      for (i=0; i<N; i+=4)
        for (j=0; j<M; j++){
          vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
                         // when j is {0,1,2,3,4,5,6,7,...} respectively.
                         // (assuming that we start from an aligned address).
          }

     We therefore have to use the unoptimized realignment scheme:

      for (i=0; i<N; i+=4)
          for (j=k; j<M; j+=4)
          vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
                           // that the misalignment of the initial address is
                           // 0).

     The loop can then be vectorized as follows:

      for (k=0; k<4; k++){
        rt = get_realignment_token (&vp[k]);
        for (i=0; i<N; i+=4){
          v1 = vp[i+k];
          for (j=k; j<M; j+=4){
            v2 = vp[i+j+VS-1];
            va = REALIGN_LOAD <v1,v2,rt>;
            vs += va;
            v1 = v2;
          }
        }
    }  */

  if (DR_IS_READ (dr))
    {
      bool is_packed = false;
      tree type = (TREE_TYPE (DR_REF (dr)));

      if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
          && (!targetm.vectorize.builtin_mask_for_load
              || targetm.vectorize.builtin_mask_for_load ()))
        {
          tree vectype = STMT_VINFO_VECTYPE (stmt_info);
          /* The optimized scheme needs the load step to equal the vector
             size (see the big comment above); otherwise, or outside a
             loop, fall back to the unoptimized explicit realignment.  */
          if ((nested_in_vect_loop
               && (TREE_INT_CST_LOW (DR_STEP (dr))
                   != GET_MODE_SIZE (TYPE_MODE (vectype))))
              || !loop_vinfo)
            return dr_explicit_realign;
          else
            return dr_explicit_realign_optimized;
        }
      if (!known_alignment_for_access_p (dr))
        is_packed = not_size_aligned (DR_REF (dr));

      if ((TYPE_USER_ALIGN (type) && !is_packed)
          || targetm.vectorize.
               support_vector_misalignment (mode, type,
                                            DR_MISALIGNMENT (dr), is_packed))
        /* Can't software pipeline the loads, but can at least do them.  */
        return dr_unaligned_supported;
    }
  else
    {
      bool is_packed = false;
      tree type = (TREE_TYPE (DR_REF (dr)));

      if (!known_alignment_for_access_p (dr))
        is_packed = not_size_aligned (DR_REF (dr));

      if ((TYPE_USER_ALIGN (type) && !is_packed)
          || targetm.vectorize.
               support_vector_misalignment (mode, type,
                                            DR_MISALIGNMENT (dr), is_packed))
        return dr_unaligned_supported;
    }

  /* Unsupported.  */
  return dr_unaligned_unsupported;
}
switch-branch-1.c
/* OpenMP offloading test for switch lowering: each switchN function is
   compiled for the device (declare target) and executed inside a target
   region; the host then re-executes the same function and asserts that
   device and host results agree over the whole [low, high) range.  The
   exact shape of each switch (case ranges, arithmetic in arms, negative
   case labels) is the feature under test.  */
#include <assert.h>

/* Result-buffer size; must cover the test range high - low = 88 < 100.  */
#define s 100

#pragma omp declare target
/* Plain case ranges; result constant within each range.  */
int switch1 (unsigned a)
{
  switch (a)
    {
    case 1 ... 11: return 11;
    case 12 ... 13: return 22;
    default: return 44;
    }
}

/* Case ranges whose arms depend on the scrutinee value.  */
int switch2 (unsigned a)
{
  switch (a)
    {
    case 1 ... 5: return 1;
    case 9 ... 11: return a + 3;
    case 12 ... 13: return a + 3;
    default: return 44;
    }
}

#define OFFSET 12
/* Case labels built from a macro offset — exercises constant folding of
   range bounds.  */
int switch3 (unsigned a)
{
  switch (a)
    {
    case (OFFSET + 0): return 1;
    case (OFFSET + 1)...(OFFSET + 11): return 11;
    case (OFFSET + 12)...(OFFSET + 13): return (OFFSET + 22);
    default: return (OFFSET + 44);
    }
}

/* Negative case labels against an unsigned scrutinee — the labels wrap to
   large unsigned values; tests that host and device wrap identically.  */
int switch4 (unsigned a)
{
  switch (a)
    {
    case -2: return 1;
    case -1: return a + 3;
    case 3: return a + 3;
    default: return 44;
    }
}
#pragma omp end declare target

/* Test range deliberately includes negative values (converted to unsigned
   at the call) to hit default and wrapped cases.  */
#define low -33
#define high 55

int main (int argc)
{
  int array[s];

  /* For each switch function: fill the array on the device, then compare
     element-wise against host execution.  */
#pragma omp target map(tofrom : array[:s])
  {
    for (int i = low; i < high; i++)
      array[i - low] = switch1 (i);
  }
  for (int i = low; i < high; i++)
    assert (array[i - low] == switch1 (i));

#pragma omp target map(tofrom : array[:s])
  {
    for (int i = low; i < high; i++)
      array[i - low] = switch2 (i);
  }
  for (int i = low; i < high; i++)
    assert (array[i - low] == switch2 (i));

#pragma omp target map(tofrom : array[:s])
  {
    for (int i = low; i < high; i++)
      array[i - low] = switch3 (i);
  }
  for (int i = low; i < high; i++)
    assert (array[i - low] == switch3 (i));

#pragma omp target map(tofrom : array[:s])
  {
    for (int i = low; i < high; i++)
      array[i - low] = switch4 (i);
  }
  for (int i = low; i < high; i++)
    assert (array[i - low] == switch4 (i));

  return 0;
}
mxBJ2d.c
#include "mex.h"
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#define max(a, b) ((a > b) ? a : b)
#define min(a, b) ((a < b) ? a : b)
#define DEBUG 0

void cal_slope_limiter(double fmean, double gx, double gy,
                       double xc, double yc,
                       double *fmask, int Nfp, int Nv,
                       double *x, double *y, int Np,
                       double *fvmax, double *fvmin, double *alpha);

void cal_gg_gradient(double *f_Q, double *x, double *y, int Np, // node values
                     double *fmask, int Nfp, int Nv, double *ws, // edge info
                     double area, double *gx, double *gy);

/*
 * Limit the node values with a Barth-Jespersen type slope limiter.
 *
 * Usages:
 *   [ f_Q ] = BJ_limit_2d(f_Q, x, y, ...        % node values
 *              c_mean, xc, yc, area, ...        % cell values
 *              vmin, vmax, fmask, EToV,
 *              Js, ws);                         % vertex info
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
    /* check input & output */
    if (nrhs != 12) mexErrMsgTxt("Wrong number of input arguments.");
    if (nlhs != 1) mexErrMsgTxt("Wrong number of output arguments");

    /* get inputs */
    double *f_Q = mxGetPr(prhs[0]);    /* node values, Np x K */
    double *x = mxGetPr(prhs[1]);      /* node x coordinates */
    double *y = mxGetPr(prhs[2]);      /* node y coordinates */
    double *f_mean = mxGetPr(prhs[3]); /* cell averages */
    double *xc = mxGetPr(prhs[4]);     /* cell centroids */
    double *yc = mxGetPr(prhs[5]);
    double *area = mxGetPr(prhs[6]);   /* cell areas */
    double *v_min = mxGetPr(prhs[7]);  /* per-vertex lower bounds */
    double *v_max = mxGetPr(prhs[8]);  /* per-vertex upper bounds */
    double *fmask = mxGetPr(prhs[9]);  /* 1-based face-node index table, Nfp x Nv */
    double *EToV = mxGetPr(prhs[10]);  /* 1-based element-to-vertex table */
    double *ws = mxGetPr(prhs[11]);    /* edge quadrature weights */

    size_t Np = mxGetM(prhs[0]);  // # of points in each element
    size_t K = mxGetN(prhs[0]);   // # of elements
    size_t Nfp = mxGetM(prhs[9]); // # of points on each edge
    size_t Nv = mxGetN(prhs[9]);  // # of vertex in each element

    /* allocation of output */
    plhs[0] = mxCreateDoubleMatrix((mwSize)Np, (mwSize)K, mxREAL);
    double *f_limt = mxGetPr(plhs[0]);

/* DG_THREADS is assumed to be supplied at compile time — TODO confirm */
#ifdef _OPENMP
#pragma omp parallel for num_threads(DG_THREADS)
#endif
    for (int k = 0; k < K; k++) {
        int flag = 0;                  /* 1 if any vertex value violates its bounds */
        double fvmax[Nv], fvmin[Nv];   /* the vertex bounds */
        for (int v = 0; v < Nv; v++) {
            int vid = (int)EToV[k * Nv + v] - 1; // convert to C type
            fvmax[v] = v_max[vid];
            fvmin[v] = v_min[vid];
            vid = (int)fmask[v * Nfp] - 1;
            double fv = f_Q[k * Np + vid];
            if ((fv > fvmax[v]) || (fv < fvmin[v])) flag = 1;
#if DEBUG
            mexPrintf("k=%d, v=%d, fv=%f, fvmax=%f, fvmin=%f, flag=%d\n",
                      k, v, fv, fvmax[v], fvmin[v], flag);
#endif
        }

        /* Untroubled cell: copy the node values through unchanged. */
        if (flag == 0) {
            for (int n = 0; n < Np; n++) {
                int ind = n + k * Np;
                f_limt[ind] = f_Q[ind];
            }
            continue; // non-troubled cell
        }

        /* Reconstruct the cell gradient via the Green-Gauss formula. */
        double gx, gy;
        cal_gg_gradient(f_Q + k * Np, x + k * Np, y + k * Np, Np,
                        fmask, Nfp, Nv, ws, area[k], &gx, &gy);
#if DEBUG
        mexPrintf("k=%d, dfdx=%f, dfdy=%f\n", k, gx, gy);
#endif

        /* Compute the limiting factor alpha in [0, 1]. */
        double alpha;
        cal_slope_limiter(f_mean[k], gx, gy, xc[k], yc[k],
                          fmask, Nfp, Nv, x + k * Np, y + k * Np, Np,
                          fvmax, fvmin, &alpha);
#if DEBUG
        mexPrintf("k=%d, alpha=%f\n", k, alpha);
#endif

        /* Rebuild the node values from the limited linear reconstruction. */
        for (int n = 0; n < Np; n++) {
            int ind = n + k * Np;
            double xp = x[ind];
            double yp = y[ind];
            f_limt[ind] = f_mean[k]
                + alpha * (gx * (xp - xc[k]) + gy * (yp - yc[k]));
#if DEBUG
            mexPrintf("f[%d]=%f, ", n, f_limt[ind]);
#endif
        }
#if DEBUG
        mexPrintf("\n");
#endif
    }
    return;
}

/**
 * Calculate the gradient by the Green-Gauss formula:
 *   gx = (1/A) * sum_faces sum_qp J * w * dy * f
 *   gy = -(1/A) * sum_faces sum_qp J * w * dx * f
 * where (dy, -dx) is the (unnormalized) outward direction of each face.
 *
 * BUG FIX: the y-component accumulation used to be written into *gx
 * ("*gx -= j * w * dx * ..."), so *gy was never accumulated and was
 * always returned as 0 while *gx received both contributions.
 */
void cal_gg_gradient(double *f_Q, double *x, double *y, int Np, // node values
                     double *fmask, int Nfp, int Nv, double *ws, // edge info
                     double area, double *gx, double *gy) {
    *gx = 0; // initialize the gradient
    *gy = 0;
    int sk = 0; /* running index into the flat weight array ws */
    for (int f = 0; f < Nv; f++) {
        /* Face endpoints (1-based in fmask, converted to 0-based). */
        int v1 = (int)fmask[f * Nfp] - 1;
        int v2 = (int)fmask[((f + 1) % Nv) * Nfp] - 1;
        double dx = x[v2] - x[v1];
        double dy = y[v2] - y[v1];
        /* 1D edge Jacobian: half the edge length; invariant per face,
           so hoisted out of the quadrature loop. */
        double j = sqrt(dx * dx + dy * dy) / 2;

        for (int n = 0; n < Nfp; n++) {
            int node_id = (int)fmask[f * Nfp + n] - 1;
            double w = ws[sk++];
            *gx += j * w * dy * f_Q[node_id];
            *gy -= j * w * dx * f_Q[node_id]; /* fixed: was "*gx -= ..." */
        }
    }
    *gx /= area;
    *gy /= area;
    return;
}

/**
 * Calculate the slope limiter from the BJ formula.
 * Reference: Kuzmin (2010), Eq. (16).
 *
 * For each vertex, the linearly reconstructed value fv is compared against
 * its admissible interval [fvmin, fvmax]; alpha is shrunk (never below 0,
 * never above 1) so the limited reconstruction stays inside the bounds.
 */
void cal_slope_limiter(double fmean, double gx, double gy,
                       double xc, double yc,
                       double *fmask, int Nfp, int Nv,
                       double *x, double *y, int Np,
                       double *fvmax, double *fvmin, double *alpha) {
    *alpha = 1.0; // initialization: no limiting
    for (int n = 0; n < Nv; n++) {
        int ind = (int)fmask[n * Nfp] - 1; // the index of the vertex
        double xv = x[ind];
        double yv = y[ind];
        /* Linear reconstruction at the vertex. */
        double fv = fmean + gx * (xv - xc) + gy * (yv - yc);
        if (fv > fvmax[n]) {
            double temp = min(1, (fvmax[n] - fmean) / (fv - fmean));
            *alpha = min(temp, *alpha);
        } else if (fv < fvmin[n]) {
            double temp = min(1, (fvmin[n] - fmean) / (fv - fmean));
            *alpha = min(temp, *alpha);
        }
#if DEBUG
        mexPrintf("n=%d, fv=%f, fvmax=%f, fvmin=%f, fmean=%f, alpha=%f\n",
                  n, fv, fvmax[n], fvmin[n], fmean, *alpha);
#endif
    }
    return;
}
mkl_quantized_conv_ops.h
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_CORE_KERNELS_MKL_QUANTIZED_CONV_OPS_H_ #define TENSORFLOW_CORE_KERNELS_MKL_QUANTIZED_CONV_OPS_H_ #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/tensor.h" #ifdef INTEL_MKL namespace tensorflow { template <class T> float MklFloatForOneQuantizedLevel(float range_min, float range_max) { int64 highest = static_cast<int64>(Eigen::NumTraits<T>::highest()); int64 lowest = static_cast<int64>(Eigen::NumTraits<T>::lowest()); // Adjusting for having a symmetric range. // for example: for 8-bit [-127, 127] as opposed to [-128, 127]. 
if (lowest < -highest) ++lowest; const float float_for_one_quantized_level = (range_max - range_min) / (highest - lowest); return float_for_one_quantized_level; } template <class T1, class T2, class T3> void MklQuantizationRangeForMultiplication(float min_a, float max_a, float min_b, float max_b, float* min_c, float* max_c) { const float a_float_for_one_quant_level = MklFloatForOneQuantizedLevel<T1>(min_a, max_a); const float b_float_for_one_quant_level = MklFloatForOneQuantizedLevel<T2>(min_b, max_b); const int64 c_highest = static_cast<int64>(Eigen::NumTraits<T3>::highest()); const int64 c_lowest = static_cast<int64>(Eigen::NumTraits<T3>::lowest()); const float c_float_for_one_quant_level = a_float_for_one_quant_level * b_float_for_one_quant_level; *min_c = c_float_for_one_quant_level * c_lowest; *max_c = c_float_for_one_quant_level * c_highest; } template <class T1, class T2, class T3> void MklQuantizationRangeForMultiplication(float min_a, float max_a, const Tensor& min_b_vector, const Tensor& max_b_vector, Tensor** min_c_vector, Tensor** max_c_vector) { DCHECK(min_b_vector.NumElements() == (*min_c_vector)->NumElements()); DCHECK(max_b_vector.NumElements() == (*max_c_vector)->NumElements()); size_t n_channel = min_b_vector.NumElements(); const int64 c_highest = static_cast<int64>(Eigen::NumTraits<T3>::highest()); const int64 c_lowest = static_cast<int64>(Eigen::NumTraits<T3>::lowest()); const float* min_b = min_b_vector.flat<float>().data(); const float* max_b = max_b_vector.flat<float>().data(); float* min_c = (*min_c_vector)->flat<float>().data(); float* max_c = (*max_c_vector)->flat<float>().data(); #pragma omp parallel for for (size_t n = 0; n < n_channel; ++n) { float a_float_for_one_quant_level = MklFloatForOneQuantizedLevel<T1>(min_a, max_a); float b_float_for_one_quant_level = MklFloatForOneQuantizedLevel<T2>(min_b[n], max_b[n]); float c_float_for_one_quant_level = a_float_for_one_quant_level * b_float_for_one_quant_level; min_c[n] = 
c_float_for_one_quant_level * c_lowest; max_c[n] = c_float_for_one_quant_level * c_highest; } } } // namespace tensorflow #endif // INTEL_MKL #endif // TENSORFLOW_CORE_KERNELS_MKL_QUANTIZED_CONV_OPS_H_
exact_parallel_minimum_cut.h
/******************************************************************************
 * exact_parallel_minimum_cut.h
 *
 * Source of VieCut.
 *
 ******************************************************************************
 * Copyright (C) 2018 Alexander Noe <alexander.noe@univie.ac.at>
 *
 * Published under the MIT license in the LICENSE file.
 *****************************************************************************/

#pragma once

#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <deque>
#include <functional>
#include <memory>
#include <unordered_map>
#include <vector>

#include "algorithms/global_mincut/minimum_cut_helpers.h"
#include "algorithms/global_mincut/noi_minimum_cut.h"
#include "algorithms/global_mincut/viecut.h"
#include "coarsening/test_wrapper.h"
#include "common/configuration.h"
#include "common/definitions.h"
#include "data_structure/graph_access.h"
#include "data_structure/priority_queues/fifo_node_bucket_pq.h"
#include "data_structure/priority_queues/maxNodeHeap.h"
#include "data_structure/priority_queues/node_bucket_pq.h"
#include "tools/random_functions.h"
#include "tools/timer.h"

#ifdef PARALLEL
#include "parallel/coarsening/contract_graph.h"
#include "parallel/coarsening/contraction_tests.h"
#include "parallel/coarsening/sparsify.h"
#include "parallel/data_structure/union_find.h"
#else
#include "coarsening/contract_graph.h"
#include "coarsening/contraction_tests.h"
#include "coarsening/sparsify.h"
#include "data_structure/union_find.h"
#endif

// Exact global minimum-cut algorithm: repeatedly identifies edges that can
// be contracted without destroying a minimum cut (capforest criterion) and
// contracts them until the graph is trivial, updating the best cut value
// along the way.  With PARALLEL defined, the capforest pass runs
// multi-threaded and a heuristic VieCut run seeds the initial cut value.
class exact_parallel_minimum_cut : public minimum_cut {
 public:
    exact_parallel_minimum_cut() { }
    ~exact_parallel_minimum_cut() { }
    static constexpr bool debug = false;
    static constexpr bool timing = true;

    // Convenience overload: run the full (non-indirect) algorithm.
    EdgeWeight perform_minimum_cut(std::shared_ptr<graph_access> G) {
        return perform_minimum_cut(G, false);
    }

    // Main driver.  Returns the minimum cut value of G; when 'indirect' is
    // false and save_cut is configured, also stores the cut side per node.
    // Returns -1 for an invalid input graph.
    EdgeWeight perform_minimum_cut(std::shared_ptr<graph_access> G,
                                   bool indirect) {
        if (!minimum_cut_helpers::graphValid(G))
            return -1;
        std::vector<std::shared_ptr<graph_access> > graphs;
        timer t;
        // Upper bound to start from: the minimum degree is always a cut.
        EdgeWeight mincut = G->getMinDegree();
#ifdef PARALLEL
        // Tighten the bound with the VieCut heuristic before contracting.
        viecut heuristic_mc;
        mincut = heuristic_mc.perform_minimum_cut(G, true);
        LOGC(timing) << "VieCut found cut " << mincut
                     << " [Time: " << t.elapsed() << "s]";
#endif

        graphs.push_back(G);

        // if PARALLEL is set, NodeInCut are already set to the result of viecut
        // This is what we want.
#ifndef PARALLEL
        minimum_cut_helpers::setInitialCutValues(graphs);
#endif

        // Contract until the graph is trivial or the cut bound hits zero.
        while (graphs.back()->number_of_nodes() > 2 && mincut > 0) {
            std::shared_ptr<graph_access> curr_g = graphs.back();
            timer ts;
#ifdef PARALLEL
            noi_minimum_cut noi;
            auto uf = parallel_modified_capforest(curr_g, mincut);

            // Parallel pass found nothing to contract: fall back to the
            // sequential capforest, which is guaranteed to make progress.
            if (uf.n() == curr_g->number_of_nodes()) {
                uf = noi.modified_capforest(curr_g, mincut);
                LOG1 << "seq capforest needed";
            }
#else
            LOG1 << "Error: Running exact_parallel_minimum_cut without PARALLEL"
                 << " Using normal noi_minimum_cut instead!";
            noi_minimum_cut noi;
            auto uf = noi.modified_capforest(curr_g, mincut);
#endif

            if (uf.n() > 1) {
                // Renumber the union-find components densely into [0, pid)
                // and contract each component into a single node.
                std::vector<NodeID> mapping(curr_g->number_of_nodes());
                std::vector<NodeID> part(
                    curr_g->number_of_nodes(), UNDEFINED_NODE);
                std::vector<std::vector<NodeID> > reverse_mapping;
                NodeID current_pid = 0;
                for (NodeID n : curr_g->nodes()) {
                    NodeID part_id = uf.Find(n);
                    if (part[part_id] == UNDEFINED_NODE) {
                        part[part_id] = current_pid++;
                    }
                    mapping[n] = part[part_id];
                    curr_g->setPartitionIndex(n, part[part_id]);
                }

                graphs.push_back(
                    contraction::contractGraph(curr_g, mapping,
                                               current_pid, reverse_mapping));
                // Contraction may reveal a smaller cut (new min degree).
                mincut = minimum_cut_helpers::updateCut(graphs, mincut);
            } else {
                break;
            }
        }

        if (!indirect && configuration::getConfig()->save_cut)
            minimum_cut_helpers::retrieveMinimumCut(graphs);

        return mincut;
    }

    // One uniformly random start node per OpenMP thread.
    std::vector<NodeID> randomStartNodes(std::shared_ptr<graph_access> G) {
        std::vector<NodeID> start_nodes;
        for (int i = 0; i < omp_get_max_threads(); ++i)
            start_nodes.push_back(
                random_functions::next() % G->number_of_nodes());
        return start_nodes;
    }

    // Alternative start-node selection: picks one random node, then each
    // further start node is chosen by BFS from the already-chosen ones so
    // the starts are spread apart in the graph.
    std::vector<NodeID> bfsStartNodes(std::shared_ptr<graph_access> G) {
        NodeID starting_node = random_functions::next() % G->number_of_nodes();
        std::vector<NodeID> start_nodes;
        start_nodes.push_back(starting_node);

        for (int i = 1; i < omp_get_max_threads(); ++i) {
            std::deque<NodeID> bfs;
            std::vector<bool> nodes(G->number_of_nodes(), false);
            size_t found = i;

            for (auto el : start_nodes) {
                bfs.push_back(el);
                nodes[el] = true;
            }

            while (!bfs.empty() && found < G->number_of_nodes()) {
                NodeID no = bfs.front();
                bfs.pop_front();
                for (EdgeID e : G->edges_of(no)) {
                    NodeID tgt = G->getEdgeTarget(e);
                    if (!nodes[tgt]) {
                        found++;
                        nodes[tgt] = true;
                        bfs.push_back(tgt);
                        // The last node reached by the BFS is the one
                        // farthest from the current start set.
                        if (found == G->number_of_nodes()) {
                            start_nodes.push_back(tgt);
                            break;
                        }
                    }
                }
            }
        }
        return start_nodes;
    }

    // Parallel variant of the Nagamochi-Ono-Ibaraki "modified capforest":
    // every thread grows its own scan from a different start node; any edge
    // whose connectivity estimate reaches 'mincut' joins its endpoints in
    // the returned union-find, marking them safe to contract.  Threads
    // share only 'visited' (uint8_t, racy by design — a node processed
    // twice costs time but not correctness; presumably why vector<bool>
    // is avoided, see comment below).
    union_find parallel_modified_capforest(
        std::shared_ptr<graph_access> G,
        const EdgeWeight mincut) {
        union_find uf(G->number_of_nodes());
        LOG << "Contract all edges with value at least " << mincut;
        timer t;

        timer timer2;

        std::vector<NodeID> start_nodes = randomStartNodes(G);

        // std::vector<bool> would be bad for thread-safety
        std::vector<uint8_t> visited(G->number_of_nodes(), false);
        std::vector<size_t> times(G->number_of_nodes(), 0);

#pragma omp parallel for
        for (int i = 0; i < omp_get_num_threads(); ++i) {
            // Per-thread state: priority queue keyed by the connectivity
            // estimate r_v (capped at mincut), plus local blacklist.
            fifo_node_bucket_pq pq(G->number_of_nodes(), mincut + 1);
            std::vector<bool> blacklisted(G->number_of_nodes(), false);
            std::vector<NodeID> r_v(G->number_of_nodes(), 0);

            NodeID starting_node = start_nodes[i];
            NodeID current_node = starting_node;

            pq.insert(current_node, 0);

            timer t;
            size_t elements = 0;

            while (!pq.empty()) {
                current_node = pq.deleteMax();
                blacklisted[current_node] = true;

                // Skip nodes already claimed by another thread's scan.
                if (visited[current_node]) {
                    continue;
                } else {
                    visited[current_node] = true;
                }

                elements++;

                for (EdgeID e : G->edges_of(current_node)) {
                    NodeID tgt = G->getEdgeTarget(e);

                    if (r_v[tgt] < mincut) {
                        // Connectivity estimate reaches the bound: the edge
                        // cannot be in a cut lighter than mincut — contract.
                        if ((r_v[tgt] + G->getEdgeWeight(e)) >= mincut) {
                            if (!blacklisted[tgt]) {
                                uf.Union(current_node, tgt);
                            }
                        }

                        if (!visited[tgt]) {
                            size_t new_rv =
                                std::min(r_v[tgt] + G->getEdgeWeight(e),
                                         mincut);
                            r_v[tgt] = new_rv;
                            if (!visited[tgt] && !blacklisted[tgt]) {
                                if (pq.contains(tgt)) {
                                    pq.increaseKey(tgt, new_rv);
                                } else {
                                    pq.insert(tgt, new_rv);
                                }
                            }
                        }
                    }
                }
            }
        }
        return uf;
    }
};
utils.c
// This program is free software: you can use, modify and/or redistribute it // under the terms of the simplified BSD License. You should have received a // copy of this license along this program. If not, see // <http://www.opensource.org/licenses/bsd-license.html>. // // Copyright (C) 2012, Coloma Ballester <coloma.ballester@upf.edu> // Copyright (C) 2013-2014 J. F. Garamendi <jf.garamendi@upf.edu> // All rights reserved. #include <stdlib.h> #include <math.h> #include <stdio.h> #include "utils.h" double *me_sgauss(double std, int n) { int i, shift; double sum, v; double *out; v = (0.5 * (double) (n - 1)) / (double) (std); v = 0.5 * v * v / log(10.); out = (double *) malloc(sizeof(double) * n); if (!out) { printf("Not enough memory for out in sgauss.\n"); exit(1); } shift = -0.5 * (double) (n - 1); if (n == 1) { out[0] = 1.0; } else { /* store Gaussian signal */ for (i = (n + 1) / 2; i--;) { v = ((double) i + (double) shift) / (double) std; out[i] = out[n - 1 - i] = exp(-0.5 * v * v); } /* normalize to get unit mass */ for (sum = 0.0, i = n; i--;) sum += (double) out[i]; for (i = n; i--;) out[i] /= sum; } return (out); } void me_sepconvol(double *in, double *out, int nx, int ny, double *filter_x, double *filter_y, int size_x, int size_y) { double *tmp; int nx1, ny1, x, y, org, i, s; double sum; nx1 = nx - 1; ny1 = ny - 1; /* Initialize temporal image */ tmp = (double *) malloc(sizeof(double) * nx * ny); /* convolution along x axis */ org = (size_x - 1) >> 1; for (y = 0; y <= ny1; y++) { for (x = 0; x <= nx1; x++) { sum = 0.0; for (i = size_x; i--;) { s = x - (i - org); while ((s < 0) || (s > nx1)) { if (s < 0) s = 0 - s - 1; if (s > nx1) s = nx1 - (s - nx1) + 1; } sum += filter_x[i] * in[y * nx + s]; } tmp[y * nx + x] = sum; } } /* convolution along y axis */ org = (size_y - 1) >> 1; for (y = 0; y <= ny1; y++) { for (x = 0; x <= nx1; x++) { sum = 0.0; for (i = size_y; i--;) { s = y - (i - org); while ((s < 0) || (s > ny1)) { if (s < 0) s = 0 - s - 1; if (s > ny1) 
s = ny1 - (s - ny1) + 1; } sum += filter_y[i] * tmp[s * nx + x]; } out[y * nx + x] = sum; } } /* Free memory */ free(tmp); } void me_save_image(double *in, int nx, int ny) { FILE *fp; fp = fopen("image.bin", "w"); fwrite(in, sizeof(double), nx * ny, fp); fclose(fp); printf("File has been written to image.bin\n"); } int me_median_compare(const void *i, const void *j) { double *val1, *val2; val1 = (double *) i; val2 = (double *) j; if (*val1 < *val2) return -1; if (*val1 > *val2) return 1; return 0; } void me_median_filtering(double *in, int nx, int ny, int wsize) { double median_vector[wsize * wsize], *out; int i, pixels, nrow, ncol, border; int x, y, xx, yy, xx0, yy0; border = wsize >> 1; out = (double *) malloc(sizeof(double) * nx * ny); nrow = ny; ncol = nx; for (x = 0; x < ncol; x++) { for (y = 0; y < nrow; y++) { i = 0; for (yy = y - border; yy <= y + border; yy++) for (xx = x - border; xx <= x + border; xx++) { xx0 = xx; yy0 = yy; /* Symmetry */ if (xx0 < 0) xx0 = -xx0 - 1; if (xx0 >= ncol) xx0 = 2 * ncol - xx0 - 1; if (yy0 < 0) yy0 = -yy0 - 1; if (yy0 >= nrow) yy0 = 2 * nrow - yy0 - 1; /* Access data */ median_vector[i++] = in[yy0 * ncol + xx0]; } qsort(median_vector, i, sizeof(double), me_median_compare); out[y * ncol + x] = median_vector[i / 2]; } } pixels = nx * ny; for (i = 0; i < pixels; i++) in[i] = out[i]; free(out); } /* BORRAR SIN COMPARARLO CON NADA void warping( const double *input, const double *u, const double *v, double *output, const int nx, const int ny ) { #pragma omp parallel { #pragma omp for schedule(dynamic) nowait for(int j = 0; j < ny; j++) for(int i = 0; i < nx; i++) { const double uu = (double) (i + u[i + nx * j]); const double vv = (double) (j + v[i + nx * j]); if ((uu < 0) || (uu > (nx - 1)) || (vv < 0) || (vv > (ny - 1))) { output[i + nx * j] = 0; } else { output[i + nx * j] = me_interpolate_bicubic(input, nx, ny, uu, vv); } } } } */ /** * * Function to normalize the images between 0 and 255 * **/ void image_normalization_3(double 
*I0, double *I1, double *I2, int size) { double max0, min0, max1, min1, max2, min2, max, min; getminmax(&min0, &max0, I0, size); getminmax(&min1, &max1, I1, size); getminmax(&min2, &max2, I2, size); max = max0; min = min0; if (max1 > max) max = max1; if (min1 < min) min = min1; if (max2 > max) max = max2; if (min2 < min) min = min2; const double den = max - min; for (int i = 0; i < size; i++) { I0[i] = 255.0 * (I0[i] - min) / den; I1[i] = 255.0 * (I1[i] - min) / den; I2[i] = 255.0 * (I2[i] - min) / den; } } void image_normalization_4(const double *I_1, // input image-1 const double *I0, // input image0 const double *I1, // input image1 const double *filtI0, //Smooth vefsion of I0 double *I_1n, // normalized output image -1 double *I0n, // normalized output image0 double *I1n, // normalized output image1 double *filtI0n, // normalized output image filtI0 int size // size of the image ) { double max_1, max0, max1, maxf0, min_1, min0, minf0, min1; // obtain the max and min of each image getminmax(&min_1, &max_1, I_1, size); getminmax(&min0, &max0, I0, size); getminmax(&min1, &max1, I1, size); getminmax(&minf0, &maxf0, filtI0, size); // obtain the max and min of images double max = (max_1 > max0) ? max_1 : max0; max = (max > max1) ? max : max1; max = (max > maxf0) ? max : maxf0; double min = (min_1 < min0) ? min_1 : min0; min = (min < min1) ? min : min1; min = (min < minf0) ? 
min : minf0; const double den = max - min; if (den > 0) // normalize both images for (int i = 0; i < size; i++) { I_1n[i] = 255.0 * (I_1[i] - min) / den; I0n[i] = 255.0 * (I0[i] - min) / den; I1n[i] = 255.0 * (I1[i] - min) / den; filtI0n[i] = 255.0 * (filtI0[i] - min) / den; } else // copy the original images for (int i = 0; i < size; i++) { I_1n[i] = I_1[i]; I0n[i] = I0[i]; I1n[i] = I1[i]; filtI0n[i] = filtI0[i]; } } /** * * Compute the max and min of an array * **/ void getminmax(double *min, // output min double *max, // output max const double *x, // input array int n // array size ) { *min = *max = x[0]; for (int i = 1; i < n; i++) { if (x[i] < *min) *min = x[i]; if (x[i] > *max) *max = x[i]; } }
GB_unaryop__abs_uint32_bool.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__abs_uint32_bool
// op(A') function: GB_tran__abs_uint32_bool

// C type: uint32_t
// A type: bool
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij

// Note: abs on a bool input is the identity, so the "unary op" here reduces
// to the bool -> uint32_t cast alone.

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint32_t z = (uint32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over the anz entries of Ax; trivially parallel, split
// statically across nthreads.
GrB_Info GB_unop__abs_uint32_bool
(
    uint32_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared via GB_unaryop_transpose.c, which is
// specialized by the GB_* macros defined above.
GrB_Info GB_tran__abs_uint32_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ceil_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 * Update: hhchen@openailab.com
 */

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>

/* Element-wise ceil for fp32 tensors.  Returns 0 on success, -1 for an
 * unsupported dim_num.
 * BUG FIX: the dim_num < 4 path used to compute
 * input_data[i] = ceilf(out_data[i]) — i.e. it read the *uninitialized
 * output* buffer and clobbered the *input*.  Direction is now
 * out = ceil(in), matching the 4-dim path. */
int ref_ceil_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    // dims size = 2 or 3
    if (input_tensor->dim_num < 4)
    {
        float* input_data = (float*)input_tensor->data;
        float* out_data = (float*)output_tensor->data;
        int total_size = input_tensor->elem_num;

        for (int i = 0; i < total_size; i++)
        {
            out_data[i] = ceilf(input_data[i]); /* fixed: was reversed */
        }

        return 0;
    }
    // dims size 4
    else if (input_tensor->dim_num == 4)
    {
        /* NOTE(review): h comes from the output dims while w comes from the
         * input dims, and dims[0] (batch) is ignored — presumably shapes
         * match and batch is 1; confirm against callers. */
        int w = input_tensor->dims[3];
        int h = output_tensor->dims[2];
        int channels = input_tensor->dims[1];
        int size = h * w;
        int c_step = h * w;

        float* input_data = (float*)input_tensor->data;
        float* out_data = (float*)output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* src = input_data + c_step * q;
            float* dst = out_data + c_step * q;

            for (int i = 0; i < size; i++)
            {
                dst[i] = ceilf(src[i]);
            }
        }

        return 0;
    }

    return -1;
}

/* Element-wise ceil for uint8 tensors: dequantize, ceil in float, then
 * requantize with saturation to [0, 255].
 * BUG FIX: same reversed assignment as ref_ceil_fp32 in the dim_num < 4
 * path — it wrote ceil of the uninitialized work buffer back into the
 * dequantized input, and the final quantization then read garbage. */
int ref_ceil_uint8(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    /* dequant */
    uint8_t* input_uint8 = (uint8_t*)input_tensor->data;
    uint8_t* output_uint8 = (uint8_t*)output_tensor->data;
    float input_scale = input_tensor->scale;
    float output_scale = output_tensor->scale;
    int32_t input_zero = input_tensor->zero_point;
    int32_t output_zero = output_tensor->zero_point;
    int input_size = input_tensor->elem_num;
    int output_size = output_tensor->elem_num;

    float* input_data = ( float* )sys_malloc(input_size * sizeof(float));
    float* out_data = ( float* )sys_malloc(output_size * sizeof(float));

    for (int i = 0; i < input_size; i++)
    {
        input_data[i] = (( float )input_uint8[i] - ( float )input_zero) * input_scale;
    }

    // dims size = 2 or 3
    if (input_tensor->dim_num < 4)
    {
        int total_size = input_tensor->elem_num;

        for (int i = 0; i < total_size; i++)
        {
            out_data[i] = ceil(input_data[i]); /* fixed: was reversed */
        }
    }
    // dims size 4
    else if (input_tensor->dim_num == 4)
    {
        int w = input_tensor->dims[3];
        int h = output_tensor->dims[2];
        int channels = input_tensor->dims[1];
        int size = h * w;
        int c_step = h * w;

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* src = input_data + c_step * q;
            float* dst = out_data + c_step * q;

            for (int i = 0; i < size; i++)
            {
                dst[i] = ceil(src[i]);
            }
        }
    }

    /* quant back to uint8 with saturation */
    for (int i = 0; i < output_size; i++)
    {
        int udata = round(out_data[i] / output_scale + output_zero);
        if (udata > 255)
            udata = 255;
        else if (udata < 0)
            udata = 0;
        output_uint8[i] = udata;
    }

    sys_free(input_data);
    sys_free(out_data);

    return 0;
}

/* No per-node state to set up or tear down for this reference op. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Dispatch on the input tensor's data type (fp32 or uint8). */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int ret = -1;
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_ceil_fp32(input_tensor, output_tensor, exec_graph->num_thread);
    else if (input_tensor->data_type == TENGINE_DT_UINT8)
        ret = ref_ceil_uint8(input_tensor, output_tensor, exec_graph->num_thread);
    else
        TLOG_ERR("Input data type %d not to be supported.\n", input_tensor->data_type);

    return ret;
}

static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_CANDO;
}

static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

int register_ceil_ref_op()
{
    return register_builtin_node_ops(OP_CEIL, &hcl_node_ops);
}

int unregister_ceil_ref_op()
{
    return unregister_builtin_node_ops(OP_CEIL, &hcl_node_ops);
}
gemm_symm_int8.h
// chgemm is pleased to support the open source community by supporting ncnn available. // // author:tpoisonooo (https://github.com/tpoisonooo/chgemm) implement symmetric int8 GEMM on aarch64. // // Copyright (C) 2019 tpoisonooo. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #pragma once #if __aarch64__ #define DECOMPOSE_K\ int ktmp = k;\ int k8 = k >> 3;\ int k8_even = (k8 % 2 == 0) ? 0: 1;\ k -= (k8 << 3);\ int k4 = k >> 2;\ k -= (k4 << 2);\ int k2 = k >> 1;\ k -= (k2 << 1);\ int k1 = k;\ k = ktmp; #define DECOMPOSE_N\ int ntmp = n;\ int n4 = n >> 2;\ n -= (n4 << 2);\ int n2 = n >> 1;\ n -= (n2 << 1);\ int n1 = n;\ n = ntmp; #define PRINT_MATRIX 0 #if PRINT_MATRIX static void print_int8_matrix(char* name, const int8_t *a, int m, int k, int ldx) { fprintf(stdout, "------------- %s \n", name); for (int i = 0; i < m; ++i) { for (int j = 0; j < k; ++j) { fprintf(stdout, "%d \t", a[i * ldx + j]); } fprintf(stdout, "\n\n"); } } static void print_int32_matrix(char* name, const int32_t *a, int m, int k, int ldx) { fprintf(stdout, "------------- %s \n", name); for (int i = 0; i < m; ++i) { for (int j = 0; j < k; ++j) { fprintf(stdout, "%d \t", a[i * ldx + j]); } fprintf(stdout, "\n\n"); } } #endif #undef PRINT_MATRIX static void reorder_b(const int8_t* b, int8_t* sb, const int k, const int n, const int ldx) { int i = 0; for (; i+3 < n; i += 4) { const int8_t *p0 = b + i; const int8_t *p1 = b + 1 * ldx + i; const int8_t *p2 = b + 2 * ldx + i; const int8_t *p3 = b + 
3 * ldx + i; const int8_t *p4 = b + 4 * ldx + i; const int8_t *p5 = b + 5 * ldx + i; const int8_t *p6 = b + 6 * ldx + i; const int8_t *p7 = b + 7 * ldx + i; int j = 0; for (; j+7 < k; j += 8) { sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p2[0]; sb[3] = p3[0]; sb[4] = p4[0]; sb[5] = p5[0]; sb[6] = p6[0]; sb[7] = p7[0]; sb[8] = p0[1]; sb[9] = p1[1]; sb[10] = p2[1]; sb[11] = p3[1]; sb[12] = p4[1]; sb[13] = p5[1]; sb[14] = p6[1]; sb[15] = p7[1]; sb[16] = p0[2]; sb[17] = p1[2]; sb[18] = p2[2]; sb[19] = p3[2]; sb[20] = p4[2]; sb[21] = p5[2]; sb[22] = p6[2]; sb[23] = p7[2]; sb[24] = p0[3]; sb[25] = p1[3]; sb[26] = p2[3]; sb[27] = p3[3]; sb[28] = p4[3]; sb[29] = p5[3]; sb[30] = p6[3]; sb[31] = p7[3]; sb += 32; p0 += 8 * ldx; p1 += 8 * ldx; p2 += 8 * ldx; p3 += 8 * ldx; p4 += 8 * ldx; p5 += 8 * ldx; p6 += 8 * ldx; p7 += 8 * ldx; } if (j+3 < k) { j += 4; sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p2[0]; sb[3] = p3[0]; sb[4] = p0[1]; sb[5] = p1[1]; sb[6] = p2[1]; sb[7] = p3[1]; sb[8] = p0[2]; sb[9] = p1[2]; sb[10] = p2[2]; sb[11] = p3[2]; sb[12] = p0[3]; sb[13] = p1[3]; sb[14] = p2[3]; sb[15] = p3[3]; sb += 16; p0 += 4 * ldx; p1 += 4 * ldx; p2 += 4 * ldx; p3 += 4 * ldx; } if (j+1 < k) { j += 2; sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p0[1]; sb[3] = p1[1]; sb[4] = p0[2]; sb[5] = p1[2]; sb[6] = p0[3]; sb[7] = p1[3]; sb += 8; p0 += 2 * ldx; p1 += 2 * ldx; } if (j < k) { sb[0] = p0[0]; sb[1] = p0[1]; sb[2] = p0[2]; sb[3] = p0[3]; sb += 4; p0 += ldx; } } if (i+1 < n) { const int8_t *p0 = b + i; const int8_t *p1 = b + 1 * ldx + i; const int8_t *p2 = b + 2 * ldx + i; const int8_t *p3 = b + 3 * ldx + i; const int8_t *p4 = b + 4 * ldx + i; const int8_t *p5 = b + 5 * ldx + i; const int8_t *p6 = b + 6 * ldx + i; const int8_t *p7 = b + 7 * ldx + i; int j = 0; for (; j+7 < k; j += 8) { sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p2[0]; sb[3] = p3[0]; sb[4] = p4[0]; sb[5] = p5[0]; sb[6] = p6[0]; sb[7] = p7[0]; sb[8] = p0[1]; sb[9] = p1[1]; sb[10] = p2[1]; sb[11] = p3[1]; sb[12] = p4[1]; sb[13] = p5[1]; 
sb[14] = p6[1]; sb[15] = p7[1]; sb += 16; p0 += 8 * ldx; p1 += 8 * ldx; p2 += 8 * ldx; p3 += 8 * ldx; p4 += 8 * ldx; p5 += 8 * ldx; p6 += 8 * ldx; p7 += 8 * ldx; } if (j+3 < k) { j += 4; sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p2[0]; sb[3] = p3[0]; sb[4] = p0[1]; sb[5] = p1[1]; sb[6] = p2[1]; sb[7] = p3[1]; sb += 8; p0 += 4 * ldx; p1 += 4 * ldx; p2 += 4 * ldx; p3 += 4 * ldx; } if (j+1 < k) { j += 2; sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p0[1]; sb[3] = p1[1]; sb += 4; p0 += 2 * ldx; p1 += 2 * ldx; } if (j < k) { sb[0] = p0[0]; sb[1] = p0[1]; sb += 2; p0 += ldx; } i += 2; } if (i < n) { const int8_t *p0 = b + i; const int8_t *p1 = b + 1 * ldx + i; const int8_t *p2 = b + 2 * ldx + i; const int8_t *p3 = b + 3 * ldx + i; const int8_t *p4 = b + 4 * ldx + i; const int8_t *p5 = b + 5 * ldx + i; const int8_t *p6 = b + 6 * ldx + i; const int8_t *p7 = b + 7 * ldx + i; int j = 0; for (; j+7 < k; j += 8) { sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p2[0]; sb[3] = p3[0]; sb[4] = p4[0]; sb[5] = p5[0]; sb[6] = p6[0]; sb[7] = p7[0]; sb += 8; p0 += 8 * ldx; p1 += 8 * ldx; p2 += 8 * ldx; p3 += 8 * ldx; p4 += 8 * ldx; p5 += 8 * ldx; p6 += 8 * ldx; p7 += 8 * ldx; } if (j+3 < k) { j += 4; sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p2[0]; sb[3] = p3[0]; sb += 4; p0 += 4 * ldx; p1 += 4 * ldx; p2 += 4 * ldx; p3 += 4 * ldx; } if (j+1 < k) { j += 2; sb[0] = p0[0]; sb[1] = p1[0]; sb += 2; p0 += 2 * ldx; p1 += 2 * ldx; } if (j < k) { sb[0] = p0[0]; sb += 1; p0 += ldx; } } } static void reorder_a(int8_t* a, int8_t* sa, int m, const int k, const int ldx) { int i = 0; for (; i + 3 < m; i += 4) { int8_t *p0 = a; int8_t *p1 = a + ldx; int8_t *p2 = a + 2 * ldx; int8_t *p3 = a + 3 * ldx; int j = 0; for (; j + 7 < k; j += 8) { asm volatile ( "ld1 {v0.8b}, [%0], #8 \n" "ld1 {v1.8b}, [%1], #8 \n" "ld1 {v2.8b}, [%2], #8 \n" "ld1 {v3.8b}, [%3], #8 \n" "st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [%4], #32\n" : "=r"(p0), "=r"(p1), "=r"(p2), "=r"(p3), "=r"(sa) : "0"(p0), "1"(p1), "2"(p2), "3"(p3), "4"(sa) : "cc", "memory", "v0", 
"v1", "v2", "v3" ); } if (j + 3 < k) { j += 4; asm volatile ( "ld1 {v0.8b}, [%0] \n" "add %0, %0, #4 \n" "ld1 {v1.8b}, [%1] \n" "add %1, %1, #4 \n" "ld1 {v2.8b}, [%2] \n" "add %2, %2, #4 \n" "ld1 {v3.8b}, [%3] \n" "add %3, %3, #4 \n" "trn1 v0.2s, v0.2s, v1.2s \n" "st1 {v0.8b}, [%4], #8 \n" "trn1 v2.2s, v2.2s, v3.2s \n" "st1 {v2.8b}, [%4], #8 \n" : "=r"(p0), "=r"(p1), "=r"(p2), "=r"(p3), "=r"(sa) : "0"(p0), "1"(p1), "2"(p2), "3"(p3), "4"(sa) : "cc", "memory", "v0", "v1", "v2", "v3" ); } if (j + 1 < k) { j += 2; asm volatile ( "ld1 {v0.8b}, [%0] \n" "add %0, %0, #2 \n" "ld1 {v1.8b}, [%1] \n" "add %1, %1, #2 \n" "ld1 {v2.8b}, [%2] \n" "add %2, %2, #2 \n" "ld1 {v3.8b}, [%3] \n" "add %3, %3, #2 \n" "trn1 v0.4h, v0.4h, v1.4h \n" "trn1 v2.4h, v2.4h, v3.4h \n" "trn1 v0.2s, v0.2s, v2.2s \n" "st1 {v0.8b}, [%4], #8 \n" : "=r"(p0), "=r"(p1), "=r"(p2), "=r"(p3), "=r"(sa) : "0"(p0), "1"(p1), "2"(p2), "3"(p3), "4"(sa) : "cc", "memory", "v0", "v1", "v2", "v3" ); } if (j < k) { *sa++ = *p0; *sa++ = *p1; *sa++ = *p2; *sa++ = *p3; } a += 4 * ldx; } if (i + 1 < m) { i += 2; int8_t *p0 = a; int8_t *p1 = a + ldx; int j = 0; for (; j + 7 < k; j += 8) { asm volatile ( "ld1 {v0.8b}, [%0], #8 \n" "ld1 {v1.8b}, [%1], #8 \n" "st1 {v0.8b, v1.8b}, [%2], #16\n" : "=r"(p0), "=r"(p1), "=r"(sa) : "0"(p0), "1"(p1), "2"(sa) : "cc", "memory", "v0", "v1" ); } if (j + 3 < k) { j += 4; asm volatile ( "ld1 {v0.8b}, [%0] \n" "add %0, %0, #4 \n" "ld1 {v1.8b}, [%1] \n" "add %1, %1, #4 \n" "trn1 v0.2s, v0.2s, v1.2s \n" "st1 {v0.8b}, [%2], #8 \n" : "=r"(p0), "=r"(p1), "=r"(sa) : "0"(p0), "1"(p1), "2"(sa) : "cc", "memory", "v0", "v1" ); } if (j + 1 < k) { j += 2; sa[0] = p0[0]; sa[1] = p0[1]; sa[2] = p1[0]; sa[3] = p1[1]; sa += 4; p0 += 2; p1 += 2; } if (j < k) { sa[0] = p0[0]; sa[1] = p1[0]; sa += 2; } a += 2 * ldx; } if (i < m) { memcpy(sa, a, sizeof(int8_t) * ldx); } } void int8kernel_m1(void* dst, int8_t* sa, int8_t* sb, int, int k, int n, int, float* scales, float* bias) { void *pc = dst; int8_t *pa = sa; 
int8_t *pb = sb; DECOMPOSE_K DECOMPOSE_N // int8_t* pTmp = (int8_t*)fastMalloc(16); if (n4 > 0) { asm volatile( "9: \n" " eor v8.16b, v8.16b, v8.16b \n" " eor v9.16b, v9.16b, v9.16b \n" " eor v10.16b, v10.16b, v10.16b\n" " eor v11.16b, v11.16b, v11.16b\n" " mov x8, %0 // PanelA\n" " cmp %w4, #0 \n" " beq 1f \n" " mov w19, %w4 \n" " cmp %w3, #0 \n" " beq 2f// loop number is even \n" " // start loopm1_kd8_nd4\n" " subs w19, w19, #1 \n" " ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 // load four lines of B\n" " ld1 {v2.8b}, [%0], #8 // load two lines of PanelA\n" " smull v0.8h, v4.8b, v2.8b \n" " saddlp v8.4s, v0.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " saddlp v9.4s, v0.8h \n" " smull v0.8h, v6.8b, v2.8b \n" " saddlp v10.4s, v0.8h \n" " smull v0.8h, v7.8b, v2.8b \n" " saddlp v11.4s, v0.8h \n" " cmp w19, #0 \n" " beq 3f \n" " 2: \n" " ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 \n" " ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [%1], #32\n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v2.8b, v4.8b \n" " smlal v0.8h, v3.8b, v12.8b \n" " sadalp v8.4s, v0.8h \n" " smull v1.8h, v2.8b, v5.8b \n" " smlal v1.8h, v3.8b, v13.8b \n" " sadalp v9.4s, v1.8h \n" " smull v0.8h, v2.8b, v6.8b \n" " smlal v0.8h, v3.8b, v14.8b \n" " sadalp v10.4s, v0.8h \n" " smull v1.8h, v2.8b, v7.8b \n" " smlal v1.8h, v3.8b, v15.8b \n" " sadalp v11.4s, v1.8h \n" " subs w19, w19, #2 \n" " bne 2b \n" " 3: \n" " addp v8.4s, v8.4s, v9.4s \n" " addp v10.4s, v10.4s, v11.4s\n" " addp v8.4s, v8.4s, v10.4s \n" " // start process kd4 kd2 kd1 cases\n" " 1: \n" " cmp %w5, #0 \n" " beq 4f \n" " // start subkernel_m1n4k4 \n" " ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n" " sxtl v4.8h, v4.8b \n" " sxtl v5.8h, v5.8b \n" " mov v6.d[0], v4.d[1] \n" " mov v7.d[0], v5.d[1] \n" " ld1 {v2.8b}, [%0] // load A1x4\n" " add %0, %0, #4 \n" " sxtl v2.8h, v2.8b \n" " smull v12.4s, v2.4h, v4.4h \n" " smull v13.4s, v2.4h, v6.4h \n" " smull v14.4s, v2.4h, v5.4h \n" " smull v15.4s, v2.4h, v7.4h \n" " addp v12.4s, v12.4s, v13.4s\n" " 
addp v14.4s, v14.4s, v15.4s\n" " addp v12.4s, v12.4s, v14.4s\n" " add v8.4s, v8.4s, v12.4s \n" " 4: \n" " cmp %w6, #0 \n" " beq 5f \n" " // start subkernel_m1n4k2\n" " ld1 {v4.8b}, [%0] // load A1x2 \n" " add %0, %0, #2 \n" " ld1 {v0.8b}, [%1], #8 // load B2x4 \n" " mov v4.h[1], v4.h[0] \n" " mov v4.s[1], v4.s[0] \n" " smull v0.8h, v0.8b, v4.8b \n" " sadalp v8.4s, v0.8h \n" " 5: \n" " cmp %w7, #0 \n" " beq 6f \n" " // start subkernel_m1n4k1 \n" " ld1 {v4.8b}, [%1] // load B1x4\n" " add %1, %1, #4 \n" " ld1 {v2.8b}, [%0] // load A1x1\n" " add %0, %0, #1 \n" " sxtl v4.8h, v4.8b \n" " sxtl v2.8h, v2.8b \n" " smlal v8.4s, v4.4h, v2.h[0]\n" " 6: \n" " cmp %9, #0 \n" " beq 7f \n" " ldr w24, [%9] \n" " // int32 => fp32 \n" " scvtf v8.4s, v8.4s \n" " // fp32 *= scale_tm \n" " mov v12.s[0], w24 \n" " fmul v8.4s, v8.4s, v12.s[0]\n" " cmp %10, #0 \n" " beq 8f \n" " // fp32 += bias_tm \n" " ldr w24, [%10] \n" " dup v15.4s, w24 \n" " fadd v8.4s, v8.4s, v15.4s \n" " 8: \n" " // fp32 -> int32 \n" " fcvtas v8.4s, v8.4s\n" " // int32 -> int16 \n" " sqxtn v8.4h, v8.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v8.8h \n" " // save \n" " st1 {v8.s}[0], [%2]\n" " add %2, %2, #4 \n" " b m1_loopnd4_finish\n" " 7: \n" " st1 {v8.4s}, [%2], #16 \n" " m1_loopnd4_finish: \n" " subs %w8, %w8, #1 \n" " mov %0, x8 \n" " bne 9b \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc), // %2 "=r"(k8_even),// %3 "=r"(k8), // %4 "=r"(k4), // %5 "=r"(k2), // %6 "=r"(k1), // %7 "=r"(n4), // %8 "=r"(scales), // %9 "=r"(bias) // %10 : "0"(pa), "1"(pb), "2"(pc), "3"(k8_even), "4"(k8), "5"(k4), "6"(k2), "7"(k1), "8"(n4), "9"(scales), "10"(bias) : "cc", "memory", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } if (n2 > 0) { asm volatile( "m1_nd2_start: \n" " eor v8.16b, v8.16b, v8.16b \n" " eor v9.16b, v9.16b, v9.16b \n" " eor 
v10.16b, v10.16b, v10.16b\n" " eor v11.16b, v11.16b, v11.16b\n" " mov x8, %0 // PanelA\n" " cmp %w4, #0 \n" " beq 1f // k <= 7\n" " mov w19, %w4\n" " cmp %w3, #0 \n" " beq 2f // loop number is even \n" " // start loopmd1_kd8_nd2 \n" " subs w19, w19, #1 \n" " ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n" " ld1 {v2.8b}, [%0], #8 // load two lines of PanelA\n" " smull v0.8h, v4.8b, v2.8b \n" " saddlp v8.4s, v0.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " saddlp v9.4s, v0.8h \n" " cmp w19, #0 \n" " beq 3f \n" " 2: \n" " ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32\n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v2.8b, v4.8b \n" " smlal v0.8h, v3.8b, v6.8b \n" " sadalp v8.4s, v0.8h \n" " smull v1.8h, v2.8b, v5.8b \n" " smlal v1.8h, v3.8b, v7.8b \n" " sadalp v9.4s, v1.8h \n" " subs w19, w19, #2 \n" " bne 2b \n" " 3: \n" " addp v8.4s, v8.4s, v9.4s \n" " addp v8.4s, v8.4s, v8.4s \n" " // start process kd4 kd2 kd1 cases \n" " 1: \n" " cmp %w5, 0 \n" " beq 4f \n" " // start subkernel_m1n2k4 \n" " ld1 {v4.8b}, [%1], #8 // load B4x2\n" " sxtl v4.8h, v4.8b \n" " mov v6.d[0], v4.d[1] \n" " ld1 {v2.8b}, [%0] // load A1x4\n" " add %0, %0, #4 \n" " sxtl v2.8h, v2.8b \n" " smull v9.4s, v2.4h, v4.4h \n" " smull v10.4s, v2.4h, v6.4h \n" " addp v9.4s, v9.4s, v10.4s \n" " addp v9.4s, v9.4s, v9.4s \n" " add v8.4s, v8.4s, v9.4s \n" " 4: \n" " cmp %w6, 0 \n" " beq 5f \n" " // start subkernel_m1n2k2 \n" " ld1 {v4.8b}, [%0] // load A1x2\n" " add %0, %0, #2 \n" " ld1 {v0.8b}, [%1] // load B2x2\n" " add %1, %1, #4 \n" " mov v4.h[1], v4.h[0] \n" " smull v0.8h, v4.8b, v0.8b \n" " saddlp v0.4s, v0.8h \n" " add v8.4s, v8.4s, v0.4s \n" " 5: \n" " cmp %w7, 0 \n" " beq 6f \n" " // start subkernel_m1n2k1 \n" " ld1 {v4.8b}, [%1] // load B1x2\n" " add %1, %1, #2 \n" " ld1 {v2.8b}, [%0] // load A1x1\n" " add %0, %0, #2 \n" " sxtl v4.8h, v4.8b \n" " sxtl v2.8h, v2.8b \n" " smlal v8.4s, v4.4h, v2.h[0]\n" " 6: \n" " cmp %9, #0 \n" " beq 7f \n" " // v12: s0 s1 \n" " ldr w24, [%9] \n" " mov 
v12.s[0], w24 \n" " mov v12.s[1], v12.s[0] \n" " // int32 => fp32 \n" " scvtf v8.2s, v8.2s \n" " // fp32 *= scale_tm \n" " fmul v8.2s, v8.2s, v12.2s \n" " cmp %10, #0 \n" " beq 8f \n" " // fp32 += bias_tm \n" " ldr w24, [%10] \n" " mov v12.s[0], w24 \n" " mov v12.s[1], v12.s[0] \n" " fadd v8.2s, v8.2s, v12.2s \n" " 8:\n" " // fp32 -> int32 \n" " fcvtas v8.2s, v8.2s\n" " // int32 -> int16 \n" " sqxtn v8.4h, v8.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v8.8h \n" " // save \n" " st1 {v8.h}[0], [%2]\n" " add %2, %2, #2 \n" " b m1_loopnd2_finish\n" " 7: \n" " st1 {v8.2s}, [%2], #8 \n" " m1_loopnd2_finish: \n" " mov %0, x8 \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc), // %2 "=r"(k8_even),// %3 "=r"(k8), // %4 "=r"(k4), // %5 "=r"(k2), // %6 "=r"(k1), // %7 "=r"(n4), // %8 "=r"(scales), // %9 "=r"(bias) // %10 : "0"(pa), "1"(pb), "2"(pc), "3"(k8_even), "4"(k8), "5"(k4), "6"(k2), "7"(k1), "8"(n4), "9"(scales), "10"(bias) : "cc", "memory", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } if (n1 > 0) { asm volatile ( "m1_nd1_start: \n" " eor v8.16b, v8.16b, v8.16b \n" " eor v9.16b, v9.16b, v9.16b \n" " eor v10.16b, v10.16b, v10.16b\n" " eor v11.16b, v11.16b, v11.16b\n" " cmp %w4, #0 \n" " beq 1f // k <= 7 \n" " mov w19, %w4\n" " cmp %w3, #0 \n" " beq 2f // loop number is even \n" " // start loopkd8_nd1 \n" " subs w19, w19, #1 \n" " ld1 {v4.8b}, [%1], #8 // load B line \n" " ld1 {v2.8b}, [%0], #8 // load A line \n" " smull v0.8h, v4.8b, v2.8b \n" " saddlp v8.4s, v0.8h \n" " cmp w19, #0 \n" " beq 3f \n" " 2: \n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " ld1 {v24.8b, v25.8b}, [%0], #16\n" " smull v0.8h, v24.8b, v4.8b \n" " smlal v0.8h, v25.8b, v5.8b \n" " sadalp v8.4s, v0.8h \n" " subs w19, w19, #2 \n" " bne 2b \n" " 3: \n" " addp v8.4s, v8.4s, v8.4s \n" " addp v8.4s, v8.4s, v8.4s \n" " // 
start process kd4 kd2 kd1 cases\n" " 1: \n" " cmp %w5, 0 \n" " beq 4f \n" " // start subkernel_m1n1k4 \n" " ld1 {v4.8b}, [%1] // load B4x1\n" " add %1, %1, #4 \n" " sxtl v4.8h, v4.8b // extend B4x1 to v4\n" " ld1 {v2.8b}, [%0] // load A1x4\n" " add %0, %0, #4 \n" " sxtl v2.8h, v2.8b \n" " smull v9.4s, v2.4h, v4.4h \n" " addp v9.4s, v9.4s, v9.4s \n" " addp v9.4s, v9.4s, v9.4s \n" " add v8.4s, v8.4s, v9.4s \n" " 4: \n" " cmp %w6, 0 \n" " beq 5f \n" " // start subkernel_m1n1k2 \n" " ld1 {v4.8b}, [%0] // load A1x2\n" " add %0, %0, #2 \n" " ld1 {v0.8b}, [%1] // load B2x1\n" " add %1, %1, #2 \n" " smull v0.8h, v0.8b, v4.8b \n" " saddlp v0.4s, v0.8h \n" " add v8.4s, v8.4s, v0.4s \n" " 5: \n" " cmp %w7, 0 \n" " beq 6f \n" " // start subkernel_m1n1k1 \n" " ld1 {v0.8b}, [%1] // load B1x1 \n" " add %1, %1, #1 \n" " ld1 {v1.8b}, [%0] // load A1x1 \n" " add %0, %0, #1 \n" " sxtl v1.8h, v1.8b \n" " sxtl v0.8h, v0.8b \n" " smull v0.4s, v1.4h, v0.h[0] \n" " add v8.4s, v8.4s, v0.4s \n" " 6: \n" " cmp %9, #0 \n" " beq 7f \n" " // int32 => fp32 \n" " scvtf v8.2s, v8.2s \n" " // fp32 *= scale_tm\n" " ldr w24, [%9] \n" " mov v12.s[0], w24 \n" " fmul v8.2s, v8.2s, v12.2s \n" " cmp %10, #0 \n" " beq 8f \n" " // fp32 += bias_tm \n" " ldr w24, [%10] \n" " mov v12.s[0], w24 \n" " fadd v8.2s, v8.2s, v12.2s \n" " 8: \n" " // fp32 -> int32 \n" " fcvtas v8.2s, v8.2s\n" " // int32 -> int16 \n" " sqxtn v8.4h, v8.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v8.8h \n" " // save \n" " st1 {v8.b}[0], [%2]\n" " b m1_finish \n" " 7: \n" " st1 {v8.s}[0], [%2] \n" " m1_finish: \n" " mov x0, #0 \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc), // %2 "=r"(k8_even),// %3 "=r"(k8), // %4 "=r"(k4), // %5 "=r"(k2), // %6 "=r"(k1), // %7 "=r"(n4), // %8 "=r"(scales), // %9 "=r"(bias) // %10 : "0"(pa), "1"(pb), "2"(pc), "3"(k8_even), "4"(k8), "5"(k4), "6"(k2), "7"(k1), "8"(n4), "9"(scales), "10"(bias) : "cc", "memory", "x0", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", 
"v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } } void int8kernel_m2(void* dst, int8_t* sa, int8_t* sb, int, int k, int n, int ldc, float* scales, float* bias) { void *pc0, *pc1; if (scales == nullptr) { pc0 = (int32_t*)dst; pc1 = ((int32_t*)pc0) + ldc; } else { pc0 = dst; pc1 = ((int8_t*)pc0) + ldc; } int8_t *pa = sa; int8_t *pb = sb; DECOMPOSE_K DECOMPOSE_N if (n4 > 0) { asm volatile( "9: \n" " eor v8.16b, v8.16b, v8.16b \n" " eor v9.16b, v9.16b, v9.16b \n" " eor v10.16b, v10.16b, v10.16b \n" " eor v11.16b, v11.16b, v11.16b \n" " eor v12.16b, v12.16b, v12.16b \n" " eor v13.16b, v13.16b, v13.16b \n" " eor v14.16b, v14.16b, v14.16b \n" " eor v15.16b, v15.16b, v15.16b \n" " eor v16.16b, v16.16b, v16.16b \n" " eor v17.16b, v17.16b, v17.16b \n" " eor v18.16b, v18.16b, v18.16b \n" " eor v19.16b, v19.16b, v19.16b \n" " eor v20.16b, v20.16b, v20.16b \n" " eor v21.16b, v21.16b, v21.16b \n" " eor v22.16b, v22.16b, v22.16b \n" " eor v23.16b, v23.16b, v23.16b \n" " mov x8, %0 // PanelA \n" " cmp %w5, #0 \n" " beq 1f \n" " mov w17, %w5 \n" " cmp %w4, #0 \n" " beq 2f // loop number is even \n" " // start loopm2_kd8_nd4\n" " subs w17, w17, #1 \n" " ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 // load four lines of B\n" " ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v8.4s, v0.8h \n" " saddlp v12.4s, v1.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " saddlp v9.4s, v0.8h \n" " saddlp v13.4s, v1.8h \n" " smull v0.8h, v6.8b, v2.8b \n" " smull v1.8h, v6.8b, v3.8b \n" " saddlp v10.4s, v0.8h \n" " saddlp v14.4s, v1.8h \n" " smull v0.8h, v7.8b, v2.8b \n" " smull v1.8h, v7.8b, v3.8b \n" " saddlp v11.4s, v0.8h \n" " saddlp v15.4s, v1.8h \n" " cmp w17, #0 \n" " beq 3f \n" " 2: \n" " add x12, %1, #32 \n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " 
smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v5.8b, v2.8b \n" " ld1 {v6.8b, v7.8b}, [x12], #16 \n" " ld1 {v24.8b, v25.8b}, [%0], #16\n" " smlal v0.8h, v6.8b, v24.8b \n" " smlal v1.8h, v7.8b, v24.8b \n" " sadalp v8.4s, v0.8h\n" " sadalp v9.4s, v1.8h\n" " smull v0.8h, v4.8b, v3.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " smlal v0.8h, v6.8b, v25.8b \n" " smlal v1.8h, v7.8b, v25.8b \n" " sadalp v12.4s, v0.8h\n" " sadalp v13.4s, v1.8h\n" " // start v10v11, v14v15, v18v19, v22v23, error here!\n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v5.8b, v2.8b \n" " ld1 {v6.8b, v7.8b}, [x12], #16 \n" " smlal v0.8h, v6.8b, v24.8b \n" " smlal v1.8h, v7.8b, v24.8b \n" " sadalp v10.4s, v0.8h \n" " sadalp v11.4s, v1.8h \n" " smull v0.8h, v4.8b, v3.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " smlal v0.8h, v6.8b, v25.8b \n" " smlal v1.8h, v7.8b, v25.8b \n" " sadalp v14.4s, v0.8h \n" " sadalp v15.4s, v1.8h \n" " add %1, %1, #32 \n" " subs w17, w17, #2 \n" " bne 2b \n" " 3: \n" " addp v8.4s, v8.4s, v9.4s \n" " addp v10.4s, v10.4s, v11.4s\n" " addp v12.4s, v12.4s, v13.4s\n" " addp v14.4s, v14.4s, v15.4s\n" " addp v8.4s, v8.4s, v10.4s \n" " addp v9.4s, v12.4s, v14.4s \n" " // start process kd4 kd2 kd1 cases \n" " 1: \n" " cmp %w6, #0 \n" " beq 4f \n" " // start subkernel_m2n4k4 \n" " ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n" " sxtl v4.8h, v4.8b \n" " sxtl v5.8h, v5.8b \n" " mov v6.d[0], v4.d[1] \n" " mov v7.d[0], v5.d[1] \n" " ld1 {v2.8b}, [%0], #8 // load A2x4\n" " sxtl v2.8h, v2.8b \n" " mov v3.d[0], v2.d[1] \n" " smull v12.4s, v2.4h, v4.4h \n" " smull v13.4s, v2.4h, v6.4h \n" " smull v14.4s, v2.4h, v5.4h \n" " smull v15.4s, v2.4h, v7.4h \n" " addp v12.4s, v12.4s, v13.4s\n" " addp v14.4s, v14.4s, v15.4s\n" " addp v12.4s, v12.4s, v14.4s\n" " add v8.4s, v8.4s, v12.4s \n" " smull v16.4s, v3.4h, v4.4h \n" " smull v17.4s, v3.4h, v6.4h \n" " smull v18.4s, v3.4h, v5.4h \n" " smull v19.4s, v3.4h, v7.4h \n" " addp v16.4s, v16.4s, v17.4s\n" " addp v18.4s, v18.4s, 
v19.4s\n" " addp v16.4s, v16.4s, v18.4s\n" " add v9.4s, v9.4s, v16.4s \n" " 4: \n" " cmp %w7, #0 \n" " beq 5f \n" " // start subkernel_m2n4k2 \n" " ld1 {v4.8b}, [%0] // load A2x2 \n" " add %0, %0, #4 \n" " ld1 {v0.8b}, [%1], #8 // load B2x4 \n" " // 00 11 22 33 \n" " rev32 v1.4h, v0.4h // 11 00 33 22 \n" " rev64 v2.2s, v0.2s // 22 33 00 11 \n" " rev64 v3.4h, v0.4h // 33 22 11 00 \n" " smull v12.8h, v4.8b, v0.8b \n" " smull v13.8h, v4.8b, v1.8b \n" " smull v14.8h, v4.8b, v2.8b \n" " smull v15.8h, v4.8b, v3.8b \n" " saddlp v12.4s, v12.8h \n" " saddlp v13.4s, v13.8h \n" " saddlp v14.4s, v14.8h \n" " saddlp v15.4s, v15.8h \n" " mov v16.s[0], v12.s[0] \n" " mov v16.s[1], v13.s[0] \n" " mov v16.s[2], v14.s[0] \n" " mov v16.s[3], v15.s[0] \n" " mov v17.s[0], v13.s[1] \n" " mov v17.s[1], v12.s[1] \n" " mov v17.s[2], v15.s[1] \n" " mov v17.s[3], v14.s[1] \n" " add v8.4s, v8.4s, v16.4s \n" " add v9.4s, v9.4s, v17.4s \n" " 5: \n" " cmp %w8, #0 \n" " beq 6f \n" " // start subkernel_m2n4k1 \n" " ld1 {v4.8b}, [%1] // load B1x4\n" " add %1, %1, #4 \n" " ld1 {v2.8b}, [%0] // load A2x1\n" " add %0, %0, #2 \n" " sxtl v4.8h, v4.8b \n" " sxtl v2.8h, v2.8b \n" " smlal v8.4s, v4.4h, v2.h[0]\n" " smlal v9.4s, v4.4h, v2.h[1]\n" " 6: \n" " cmp %10, #0 \n" " beq 7f \n" " ld1 {v12.2s}, [%10] \n" " // int32 => fp32 \n" " scvtf v8.4s, v8.4s \n" " scvtf v9.4s, v9.4s \n" " // fp32 *= scale_tm \n" " fmul v8.4s, v8.4s, v12.s[0]\n" " fmul v9.4s, v9.4s, v12.s[1]\n" " cmp %11, #0 \n" " beq 8f \n" " // fp32 += scales_tm \n" " ld1 {v14.2s}, [%11] \n" " dup v15.4s, v14.s[0] \n" " fadd v8.4s, v8.4s, v15.4s \n" " dup v15.4s, v14.s[1] \n" " fadd v9.4s, v9.4s, v15.4s \n" " 8: \n" " // fp32 -> int32 \n" " fcvtas v8.4s, v8.4s\n" " fcvtas v9.4s, v9.4s\n" " // int32 -> int16 \n" " sqxtn v6.4h, v8.4s \n" " sqxtn2 v6.8h, v9.4s\n" " // int16 -> int8 \n" " sqxtn v8.8b, v6.8h \n" " // save \n" " st1 {v8.s}[0], [%2] \n" " add %2, %2, #4 \n" " st1 {v8.s}[1], [%3] \n" " add %3, %3, #4 \n" " b m2_loopnd4_finish \n" " 7: 
\n" " st1 {v8.4s}, [%2], #16 \n" " st1 {v9.4s}, [%3], #16 \n" " m2_loopnd4_finish: \n" " subs %w9, %w9, #1 \n" " mov %0, x8 \n" " bne 9b \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc0), // %2 "=r"(pc1), // %3 "=r"(k8_even),// %4 "=r"(k8), // %5 "=r"(k4), // %6 "=r"(k2), // %7 "=r"(k1), // %8 "=r"(n4), // %9 "=r"(scales), // %10 "=r"(bias) // %11 : "0"(pa), "1"(pb), "2"(pc0), "3"(pc1), "4"(k8_even), "5"(k8), "6"(k4), "7"(k2), "8"(k1), "9"(n4), "10"(scales), "11"(bias) : "cc", "memory", "x8", "w17", "x12", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } if (n2 > 0) { asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "m2_nd2_start: \n" " mov x8, %0 // PanelA \n" " cmp %w5, #0 \n" " beq 1f \n" " mov w17, %w5 \n" " cmp %w4, #0 \n" " beq 2f // loop number is even \n" " // start loopmd2_kd8_nd2 \n" " subs w17, w17, #1 \n" " ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n" " ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v8.4s, v0.8h \n" " saddlp v12.4s, v1.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " saddlp v9.4s, v0.8h \n" " saddlp v13.4s, v1.8h \n" " cmp w17, #0 \n" " beq 3f \n" " 2: \n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, 
v4.8b, v2.8b \n" " ld1 {v6.8b, v7.8b}, [%1], #16 \n" " smull v1.8h, v5.8b, v2.8b \n" " ld1 {v24.8b, v25.8b}, [%0], #16\n" " smlal v0.8h, v6.8b, v24.8b \n" " smlal v1.8h, v7.8b, v24.8b \n" " sadalp v8.4s, v0.8h\n" " sadalp v9.4s, v1.8h\n" " smull v0.8h, v4.8b, v3.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " smlal v0.8h, v6.8b, v25.8b \n" " smlal v1.8h, v7.8b, v25.8b \n" " sadalp v12.4s, v0.8h \n" " sadalp v13.4s, v1.8h \n" " subs w17, w17, #2 \n" " bne 2b \n" " 3: \n" " addp v8.4s, v8.4s, v9.4s \n" " addp v12.4s, v12.4s, v13.4s\n" " addp v8.4s, v8.4s, v8.4s \n" " addp v12.4s, v12.4s, v12.4s\n" " // start process kd4 kd2 kd1 cases\n" " 1: \n" " cmp %w6, #0 \n" " beq 4f \n" " // start subkernel_m2n2k4 \n" " ld1 {v4.8b}, [%1], #8 // load B4x2\n" " sxtl v4.8h, v4.8b \n" " mov v6.d[0], v4.d[1] \n" " ld1 {v2.8b}, [%0], #8 // load first A2x4\n" " sxtl v2.8h, v2.8b \n" " mov v3.d[0], v2.d[1] \n" " smull v9.4s, v2.4h, v4.4h \n" " smull v10.4s, v2.4h, v6.4h \n" " addp v9.4s, v9.4s, v10.4s \n" " addp v9.4s, v9.4s, v9.4s \n" " add v8.4s, v8.4s, v9.4s \n" " smull v13.4s, v3.4h, v4.4h \n" " smull v14.4s, v3.4h, v6.4h \n" " addp v13.4s, v13.4s, v14.4s\n" " addp v13.4s, v13.4s, v13.4s\n" " add v12.4s, v12.4s, v13.4s \n" " 4: \n" " cmp %w7, 0 \n" " beq 5f \n" " // start subkernel_m2n2k2 \n" " ld1 {v4.8b}, [%0] // load A2x2\n" " add %0, %0, #4 \n" " ld1 {v0.8b}, [%1] // load B2x2\n" " add %1, %1, #4 \n" " // 00 11\n" " rev32 v1.4h, v0.4h // 11 00\n" " smull v21.8h, v4.8b, v0.8b \n" " smull v22.8h, v4.8b, v1.8b \n" " saddlp v21.4s, v21.8h \n" " saddlp v22.4s, v22.8h \n" " mov v9.s[0], v21.s[0] \n" " mov v9.s[1], v22.s[0] \n" " add v8.4s, v8.4s, v9.4s \n" " mov v13.s[0], v22.s[1] \n" " mov v13.s[1], v21.s[1] \n" " add v12.4s, v12.4s, v13.4s \n" " 5: \n" " cmp %w8, #0 \n" " beq 6f \n" " // start subkernel_m2n2k1 \n" " ld1 {v4.8b}, [%1] // load B1x2\n" " add %1, %1, #2 \n" " ld1 {v2.8b}, [%0] // load A4x1\n" " add %0, %0, #2 \n" " sxtl v4.8h, v4.8b \n" " sxtl v2.8h, v2.8b \n" " smlal v8.4s, 
v4.4h, v2.h[0]\n" " smlal v12.4s, v4.4h, v2.h[1] \n" " 6: \n" " cmp %9, #0 \n" " beq 7f \n" " mov v8.d[1], v12.d[0] \n" " // v12: 0 1 \n" " ld1 {v12.2s}, [%9] \n" " zip1 v12.4s, v12.4s, v12.4s\n" " // v12: 0 0 1 1 \n" " // int32 => fp32 \n" " scvtf v8.4s, v8.4s \n" " // fp32 *= scale_tm \n" " fmul v8.4s, v8.4s, v12.4s \n" " cmp %10, #0 \n" " beq 8f \n" " // fp32 += bias_tm \n" " ld1 {v12.2s}, [%10] \n" " zip1 v12.4s, v12.4s, v12.4s\n" " fadd v8.4s, v8.4s, v12.4s \n" " 8: \n" " // fp32 -> int32 \n" " fcvtas v8.4s, v8.4s \n" " // int32 -> int16 \n" " sqxtn v8.4h, v8.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v8.8h \n" " // save \n" " st1 {v8.h}[0], [%2] \n" " add %2, %2, #2 \n" " st1 {v8.h}[1], [%3] \n" " add %3, %3, #2 \n" " b m2_loopnd2_finish \n" " 7:" " st1 {v8.2s}, [%2], #8 \n" " st1 {v12.2s}, [%3], #8 \n" " m2_loopnd2_finish: \n" " mov %0, x8 \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc0), // %2 "=r"(pc1), // %3 "=r"(k8_even),// %4 "=r"(k8), // %5 "=r"(k4), // %6 "=r"(k2), // %7 "=r"(k1), // %8 "=r"(scales), // %9 "=r"(bias) // %10 : "0"(pa), "1"(pb), "2"(pc0), "3"(pc1), "4"(k8_even), "5"(k8), "6"(k4), "7"(k2), "8"(k1), "9"(scales), "10"(bias) : "cc", "memory", "x8", "x12", "w17", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } if (n1 > 0) { asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, 
v23.16b \n" "m2_nd1_start: \n" " cmp %w5, #0 \n" " beq 1f // k <=7\n" " mov w17, %w5\n" " cmp %w4, #0 \n" " beq 2f // loop number is even \n" " // start loopkd8_nd1 \n" " subs w17, w17, #1 \n" " ld1 {v4.8b}, [%1], #8 // load four lines of B\n" " ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v8.4s, v0.8h \n" " saddlp v12.4s, v1.8h \n" " cmp w17, #0 \n" " beq 3f \n" " 2: \n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " ld1 {v24.8b, v25.8b, v26.8b, v27.8b}, [%0], #32\n" " smull v0.8h, v24.8b, v4.8b \n" " smlal v0.8h, v26.8b, v5.8b \n" " sadalp v8.4s, v0.8h \n" " smull v1.8h, v25.8b, v4.8b \n" " smlal v1.8h, v27.8b, v5.8b \n" " sadalp v12.4s, v1.8h \n" " subs w17, w17, #2 \n" " bne 2b \n" " 3: \n" " addp v8.4s, v8.4s, v8.4s \n" " addp v8.4s, v8.4s, v8.4s \n" " addp v12.4s, v12.4s, v12.4s\n" " addp v12.4s, v12.4s, v12.4s\n" " // start process kd4 kd2 kd1 cases\n" " 1: \n" " cmp %w6, #0 \n" " beq 4f \n" " // start subkernel_m2n1k2 \n" " ld1 {v4.8b}, [%1] // load B4x1\n" " add %1, %1, #4 \n" " sxtl v4.8h, v4.8b // extend B4x1 to v4\n" " ld1 {v2.8b}, [%0], #8 // load A2x4 \n" " sxtl v2.8h, v2.8b \n" " mov v5.d[0], v2.d[1] \n" " smull v9.4s, v2.4h, v4.4h \n" " addp v9.4s, v9.4s, v9.4s \n" " addp v9.4s, v9.4s, v9.4s \n" " add v8.4s, v8.4s, v9.4s \n" " smull v13.4s, v5.4h, v4.4h \n" " addp v13.4s, v13.4s, v13.4s\n" " addp v13.4s, v13.4s, v13.4s\n" " add v12.4s, v12.4s, v13.4s \n" " 4: \n" " cmp %w7, 0 \n" " beq 5f \n" " // start subkernel_m2n1k2 \n" " ld1 {v4.8b}, [%0] // load A2x2\n" " add %0, %0, #4 \n" " ld1 {v0.8b}, [%1] // load B2x1\n" " add %1, %1, #2 \n" " mov v0.h[1], v0.h[0] \n" " smull v0.8h, v0.8b, v4.8b \n" " saddlp v0.4s, v0.8h \n" " mov v9.s[0], v0.s[0] \n" " add v8.4s, v8.4s, v9.4s \n" " mov v13.s[0], v0.s[1] \n" " add v12.4s, v12.4s, v13.4s \n" " 5: \n" " cmp %w8, 0 \n" " beq 6f \n" " // start subkernel_m2n1k1 \n" " ld1 {v0.8b}, [%1] // load B1x1\n" " add %1, %1, #1 \n" " ld1 
{v1.8b}, [%0] // load A2x1\n" " add %0, %0, #2 \n" " sxtl v1.8h, v1.8b \n" " sxtl v0.8h, v0.8b \n" " smull v0.4s, v1.4h, v0.h[0]\n" " mov v1.s[0], v0.s[1] \n" " add v8.4s, v8.4s, v0.4s \n" " add v12.4s, v12.4s, v1.4s \n" " 6: \n" " cmp %w9, #0 \n" " beq 7f \n" " mov v8.s[1], v12.s[0] \n" " // v12: s0 s1 \n" " ld1 {v12.2s}, [%9] \n" " // int32 => fp32 \n" " scvtf v8.2s, v8.2s \n" " // fp32 *= scale_tm \n" " fmul v8.2s, v8.2s, v12.2s \n" " cmp %10, #0 \n" " beq 8f \n" " // fp32 += bias_tm \n" " ld1 {v12.2s}, [%10] \n" " fadd v8.2s, v8.2s, v12.2s \n" " 8: \n" " // fp32 -> int32 \n" " fcvtas v8.2s, v8.2s \n" " // int32 -> int16 \n" " sqxtn v8.4h, v8.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v8.8h \n" " // save \n" " st1 {v8.b}[0], [%2] \n" " st1 {v8.b}[1], [%3] \n" " b m2_finish \n" " 7: \n" " st1 {v8.s}[0], [%2] \n" " st1 {v12.s}[0], [%3] \n" " m2_finish: \n" " mov x0, #0 \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc0), // %2 "=r"(pc1), // %3 "=r"(k8_even),// %4 "=r"(k8), // %5 "=r"(k4), // %6 "=r"(k2), // %7 "=r"(k1), // %8 "=r"(scales), // %9 "=r"(bias) // %10 : "0"(pa), "1"(pb), "2"(pc0), "3"(pc1), "4"(k8_even), "5"(k8), "6"(k4), "7"(k2), "8"(k1), "9"(scales), "10"(bias) : "cc", "memory", "x0", "x8", "w17", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } } void int8kernel_m4(void* dst, int8_t* sa, int8_t* sb, int, int k, int n, int ldc, float* scales, float* bias) { void *pc0, *pc1, *pc2, *pc3; if (scales == nullptr) { pc0 = (int32_t*)dst; pc1 = ((int32_t*)pc0) + ldc; pc2 = ((int32_t*)pc1) + ldc; pc3 = ((int32_t*)pc2) + ldc; } else { pc0 = dst; pc1 = ((int8_t*)pc0) + ldc; pc2 = ((int8_t*)pc1) + ldc; pc3 = ((int8_t*)pc2) + ldc; } int8_t *pa = sa; int8_t *pb = sb; DECOMPOSE_K DECOMPOSE_N if (n4 > 0) { asm volatile( "8: \n" " eor v8.8b, v8.8b, v8.8b \n" " eor v9.8b, v9.8b, v9.8b \n" " eor v10.8b, 
v10.8b, v10.8b \n" " eor v11.8b, v11.8b, v11.8b \n" " eor v12.8b, v12.8b, v12.8b \n" " eor v13.8b, v13.8b, v13.8b \n" " eor v14.8b, v14.8b, v14.8b \n" " eor v15.8b, v15.8b, v15.8b \n" " eor v16.8b, v16.8b, v16.8b \n" " eor v17.8b, v17.8b, v17.8b \n" " eor v18.8b, v18.8b, v18.8b \n" " eor v19.8b, v19.8b, v19.8b \n" " eor v20.8b, v20.8b, v20.8b \n" " eor v21.8b, v21.8b, v21.8b \n" " eor v22.8b, v22.8b, v22.8b \n" " eor v23.8b, v23.8b, v23.8b \n" " mov x8, %0 \n" " cmp %w7, #0 \n" " beq 1f \n" " mov w20, %w7 \n" " cmp %w6, #0 \n" " beq 2f \n" " subs w20, w20, #1 \n" " ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v8.4s, v0.8h \n" " saddlp v12.4s, v1.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " saddlp v9.4s, v0.8h \n" " saddlp v13.4s, v1.8h \n" " smull v0.8h, v6.8b, v2.8b \n" " smull v1.8h, v6.8b, v3.8b \n" " saddlp v10.4s, v0.8h \n" " saddlp v14.4s, v1.8h \n" " smull v0.8h, v7.8b, v2.8b \n" " smull v1.8h, v7.8b, v3.8b \n" " saddlp v11.4s, v0.8h \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " saddlp v15.4s, v1.8h \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v16.4s, v0.8h \n" " saddlp v20.4s, v1.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " saddlp v17.4s, v0.8h \n" " saddlp v21.4s, v1.8h \n" " smull v0.8h, v6.8b, v2.8b \n" " smull v1.8h, v6.8b, v3.8b \n" " saddlp v18.4s, v0.8h \n" " saddlp v22.4s, v1.8h \n" " smull v0.8h, v7.8b, v2.8b \n" " smull v1.8h, v7.8b, v3.8b \n" " saddlp v19.4s, v0.8h \n" " saddlp v23.4s, v1.8h \n" " cmp w20, #0 \n" " beq 3f \n" " 2: \n" " add x15, %x1, #32 \n" " add x14, %x0, #32 \n" " ld1 {v4.8b, v5.8b}, [%1], #16\n" " ld1 {v2.8b, v3.8b}, [%0], #16\n" " smull v0.8h, v4.8b, v2.8b \n" " ld1 {v6.8b, v7.8b}, [x15], #16 \n" " smull v1.8h, v5.8b, v2.8b \n" " ld1 {v24.8b, v25.8b}, [x14], #16\n" " smlal v0.8h, v6.8b, v24.8b\n" " smlal v1.8h, v7.8b, v24.8b\n" " 
sadalp v8.4s, v0.8h\n" " sadalp v9.4s, v1.8h\n" " smull v0.8h, v4.8b, v3.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " smlal v0.8h, v6.8b, v25.8b \n" " smlal v1.8h, v7.8b, v25.8b \n" " sadalp v12.4s, v0.8h\n" " sadalp v13.4s, v1.8h\n" " // finish v8v9 v12v13, start proc v16v17,v20v21\n" " ld1 {v28.8b, v29.8b}, [%0], #16 \n" " smull v0.8h, v4.8b, v28.8b \n" " smull v1.8h, v5.8b, v28.8b \n" " ld1 {v26.8b, v27.8b}, [x14], #16\n" " smlal v0.8h, v6.8b, v26.8b \n" " smlal v1.8h, v7.8b, v26.8b \n" " sadalp v16.4s, v0.8h \n" " sadalp v17.4s, v1.8h \n" " smull v0.8h, v4.8b, v29.8b \n" " smull v1.8h, v5.8b, v29.8b \n" " smlal v0.8h, v6.8b, v27.8b \n" " smlal v1.8h, v7.8b, v27.8b \n" " sadalp v20.4s, v0.8h \n" " sadalp v21.4s, v1.8h \n" " // start v10v11, v14v15, v18v19, v22v23\n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v5.8b, v2.8b \n" " ld1 {v6.8b, v7.8b}, [x15], #16 \n" " smlal v0.8h, v6.8b, v24.8b \n" " smlal v1.8h, v7.8b, v24.8b \n" " sadalp v10.4s, v0.8h \n" " sadalp v11.4s, v1.8h \n" " smull v0.8h, v4.8b, v3.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " smlal v0.8h, v6.8b, v25.8b \n" " smlal v1.8h, v7.8b, v25.8b \n" " sadalp v14.4s, v0.8h \n" " sadalp v15.4s, v1.8h \n" " smull v0.8h, v4.8b, v28.8b \n" " smull v1.8h, v5.8b, v28.8b \n" " smlal v0.8h, v6.8b, v26.8b \n" " smlal v1.8h, v7.8b, v26.8b \n" " sadalp v18.4s, v0.8h \n" " sadalp v19.4s, v1.8h \n" " smull v0.8h, v4.8b, v29.8b \n" " smull v1.8h, v5.8b, v29.8b \n" " smlal v0.8h, v6.8b, v27.8b \n" " smlal v1.8h, v7.8b, v27.8b \n" " sadalp v22.4s, v0.8h \n" " sadalp v23.4s, v1.8h \n" " add %0, %0, #32 \n" " add %1, %1, #32 \n" " subs w20, w20, #2 \n" " bne 2b \n" // start nd2 " 3: \n" " addp v8.4s, v8.4s, v9.4s \n" " addp v10.4s, v10.4s, v11.4s\n" " addp v12.4s, v12.4s, v13.4s\n" " addp v14.4s, v14.4s, v15.4s\n" " addp v16.4s, v16.4s, v17.4s\n" " addp v18.4s, v18.4s, v19.4s\n" " addp v20.4s, v20.4s, v21.4s\n" " addp v22.4s, v22.4s, v23.4s\n" " addp v8.4s, v8.4s, v10.4s \n" " addp v9.4s, 
v12.4s, v14.4s \n" " addp v10.4s, v16.4s, v18.4s\n" " addp v11.4s, v20.4s, v22.4s\n" " // start process kd4 kd2 kd1 cases\n" " 1: \n" " cmp %w8, #0 \n" " beq 4f \n" " // start subkernel_m4n4k4\n" " ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n" " sxtl v4.8h, v4.8b \n" " mov v6.d[0], v4.d[1] \n" " sxtl v5.8h, v5.8b \n" " mov v7.d[0], v5.d[1] \n" " ld1 {v2.8b}, [%0], #8 // load A2x4\n" " sxtl v2.8h, v2.8b \n" " mov v3.d[0], v2.d[1] \n" " smull v12.4s, v2.4h, v4.4h \n" " smull v13.4s, v2.4h, v6.4h \n" " smull v14.4s, v2.4h, v5.4h \n" " addp v12.4s, v12.4s, v13.4s\n" " smull v15.4s, v2.4h, v7.4h \n" " addp v14.4s, v14.4s, v15.4s\n" " addp v12.4s, v12.4s, v14.4s\n" " smull v16.4s, v3.4h, v4.4h \n" " add v8.4s, v8.4s, v12.4s \n" " smull v17.4s, v3.4h, v6.4h \n" " smull v18.4s, v3.4h, v5.4h \n" " addp v16.4s, v16.4s, v17.4s\n" " smull v19.4s, v3.4h, v7.4h \n" " addp v18.4s, v18.4s, v19.4s\n" " addp v16.4s, v16.4s, v18.4s\n" " add v9.4s, v9.4s, v16.4s \n" " ld1 {v2.8b}, [%0], #8 // load next A2x4\n" " sxtl v2.8h, v2.8b \n" " mov v3.d[0], v2.d[1] \n" " smull v12.4s, v2.4h, v4.4h \n" " smull v13.4s, v2.4h, v6.4h \n" " smull v14.4s, v2.4h, v5.4h \n" " addp v12.4s, v12.4s, v13.4s\n" " smull v15.4s, v2.4h, v7.4h \n" " addp v14.4s, v14.4s, v15.4s\n" " addp v12.4s, v12.4s, v14.4s\n" " smull v16.4s, v3.4h, v4.4h \n" " add v10.4s, v10.4s, v12.4s \n" " smull v17.4s, v3.4h, v6.4h \n" " smull v18.4s, v3.4h, v5.4h \n" " addp v16.4s, v16.4s, v17.4s\n" " smull v19.4s, v3.4h, v7.4h \n" " addp v18.4s, v18.4s, v19.4s\n" " addp v16.4s, v16.4s, v18.4s\n" " add v11.4s, v11.4s, v16.4s \n" " 4: \n" " cmp %w9, #0 \n" " beq 5f \n" " // start subkernel_m4n4k2 \n" " ld1 {v0.8b}, [%1], #8 // load B2x4 \n" " // 00 11 22 33 \n" " rev32 v1.4h, v0.4h // 11 00 33 22 \n" " rev64 v2.2s, v0.2s // 22 33 00 11 \n" " ld1 {v4.8b}, [%0], #8 // load A4x2 \n" " rev64 v3.4h, v0.4h // 33 22 11 00 \n" " smull v12.8h, v4.8b, v0.8b \n" " smull v13.8h, v4.8b, v1.8b \n" " saddlp v12.4s, v12.8h \n" " smull v14.8h, v4.8b, 
v2.8b \n" " saddlp v13.4s, v13.8h \n" " smull v15.8h, v4.8b, v3.8b \n" " saddlp v14.4s, v14.8h \n" " saddlp v15.4s, v15.8h \n" " mov v16.s[0], v12.s[0] \n" " mov v16.s[1], v13.s[0] \n" " mov v16.s[2], v14.s[0] \n" " mov v16.s[3], v15.s[0] \n" " mov v17.s[0], v13.s[1] \n" " mov v17.s[1], v12.s[1] \n" " mov v17.s[2], v15.s[1] \n" " mov v17.s[3], v14.s[1] \n" " mov v18.s[0], v14.s[2] \n" " mov v18.s[1], v15.s[2] \n" " mov v18.s[2], v12.s[2] \n" " mov v18.s[3], v13.s[2] \n" " mov v19.s[0], v15.s[3] \n" " mov v19.s[1], v14.s[3] \n" " mov v19.s[2], v13.s[3] \n" " mov v19.s[3], v12.s[3] \n" " add v8.4s, v8.4s, v16.4s \n" " add v9.4s, v9.4s, v17.4s \n" " add v10.4s, v10.4s, v18.4s \n" " add v11.4s, v11.4s, v19.4s \n" " 5: \n" " cmp %w10, #0 \n" " beq 6f \n" " // start subkernel_m4n4k1\n" " ld1 {v4.8b}, [%1] // load B1x4\n" " add %1, %1, #4 \n" " ld1 {v2.8b}, [%0] // load A4x1\n" " add %0, %0, #4 \n" " sxtl v4.8h, v4.8b \n" " sxtl v2.8h, v2.8b \n" " smlal v8.4s, v4.4h, v2.h[0] \n" " smlal v9.4s, v4.4h, v2.h[1] \n" " smlal v10.4s, v4.4h, v2.h[2] \n" " smlal v11.4s, v4.4h, v2.h[3] \n" " 6: \n" " cmp %12, #0 \n" " beq 9f \n" " ld1 {v12.4s}, [%12] \n" " // int32 => fp32 \n" " scvtf v8.4s, v8.4s \n" " scvtf v9.4s, v9.4s \n" " scvtf v10.4s, v10.4s \n" " scvtf v11.4s, v11.4s \n" " // fp32 *= scale_tm \n" " fmul v8.4s, v8.4s, v12.s[0] \n" " fmul v9.4s, v9.4s, v12.s[1] \n" " fmul v10.4s, v10.4s, v12.s[2] \n" " fmul v11.4s, v11.4s, v12.s[3] \n" " cmp %13, #0 \n" " beq 7f \n" " ld1 {v14.4s}, [%13] \n" " dup v15.4s, v14.s[0] \n" " fadd v8.4s, v8.4s, v15.4s \n" " dup v15.4s, v14.s[1] \n" " fadd v9.4s, v9.4s, v15.4s \n" " dup v15.4s, v14.s[2] \n" " fadd v10.4s, v10.4s, v15.4s\n" " dup v15.4s, v14.s[3] \n" " fadd v11.4s, v11.4s, v15.4s\n" " 7: \n" " // fp32 -> int32 \n" " fcvtas v8.4s, v8.4s \n" " fcvtas v9.4s, v9.4s \n" " fcvtas v10.4s, v10.4s \n" " fcvtas v11.4s, v11.4s \n" " // int32 -> int16 \n" " sqxtn v6.4h, v8.4s \n" " sqxtn2 v6.8h, v9.4s \n" " sqxtn v7.4h, v10.4s \n" " sqxtn2 
v7.8h, v11.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v6.8h \n" " sqxtn v9.8b, v7.8h \n" " // save \n" " st1 {v8.s}[0], [%2] \n" " add %x2, %x2, #4 \n" " st1 {v8.s}[1], [%3] \n" " add %x3, %x3, #4 \n" " st1 {v9.s}[0], [%4] \n" " add %x4, %x4, #4 \n" " st1 {v9.s}[1], [%5] \n" " add %x5, %x5, #4 \n" " b m4_loopnd4_finish \n" " 9: \n" " st1 {v8.4s}, [%x2], #16 \n" " st1 {v9.4s}, [%x3], #16 \n" " st1 {v10.4s}, [%x4], #16 \n" " st1 {v11.4s}, [%x5], #16 \n" " m4_loopnd4_finish: \n" " subs %x11, %x11, #1 \n" " mov %x0, x8 \n" " bne 8b \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc0), // %2 "=r"(pc1), // %3 "=r"(pc2), // %4 "=r"(pc3), // %5 "=r"(k8_even),// %6 "=r"(k8), // %7 "=r"(k4), // %8 "=r"(k2), // %9 "=r"(k1), // %10 "=r"(n4), // %11 "=r"(scales), // %12 "=r"(bias) // %13 : "0"(pa), "1"(pb), "2"(pc0), "3"(pc1), "4"(pc2), "5"(pc3), "6"(k8_even), "7"(k8), "8"(k4), "9"(k2), "10"(k1), "11"(n4), "12"(scales), "13"(bias) : "cc", "memory", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } if (n2 > 0) { asm volatile( " eor v8.8b, v8.8b, v8.8b \n" " eor v9.8b, v9.8b, v9.8b \n" " eor v10.8b, v10.8b, v10.8b \n" " eor v11.8b, v11.8b, v11.8b \n" " eor v12.8b, v12.8b, v12.8b \n" " eor v13.8b, v13.8b, v13.8b \n" " eor v14.8b, v14.8b, v14.8b \n" " eor v15.8b, v15.8b, v15.8b \n" " eor v16.8b, v16.8b, v16.8b \n" " eor v17.8b, v17.8b, v17.8b \n" " eor v18.8b, v18.8b, v18.8b \n" " eor v19.8b, v19.8b, v19.8b \n" " eor v20.8b, v20.8b, v20.8b \n" " eor v21.8b, v21.8b, v21.8b \n" " eor v22.8b, v22.8b, v22.8b \n" " eor v23.8b, v23.8b, v23.8b \n" "m4_nd2_start: \n" " mov x8, %x0 // PanelA \n" " cmp %w7, #0 \n" " beq 1f // k <= 7 \n" " mov w20, %w7 \n" " cmp %w6, #0 \n" " beq 2f// loop number is even \n" " // start loopkd8_nd2 \n" " subs w20, w20, #1 \n" " ld1 {v4.8b, v5.8b}, [%1], #16 // load two 
lines of B\n" " ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v8.4s, v0.8h \n" " saddlp v12.4s, v1.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " saddlp v9.4s, v0.8h \n" " saddlp v13.4s, v1.8h \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v16.4s, v0.8h \n" " saddlp v20.4s, v1.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " saddlp v17.4s, v0.8h \n" " saddlp v21.4s, v1.8h \n" " cmp w20, #0 \n" " beq 3f \n" " 2: \n" " add x15, %1, #16 \n" " add x14, %0, #32 \n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " ld1 {v6.8b, v7.8b}, [x15], #16 \n" " smull v1.8h, v5.8b, v2.8b \n" " ld1 {v24.8b, v25.8b}, [x14], #16 \n" " smlal v0.8h, v6.8b, v24.8b \n" " smlal v1.8h, v7.8b, v24.8b \n" " sadalp v8.4s, v0.8h \n" " sadalp v9.4s, v1.8h \n" " smull v0.8h, v4.8b, v3.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " smlal v0.8h, v6.8b, v25.8b \n" " smlal v1.8h, v7.8b, v25.8b \n" " sadalp v12.4s, v0.8h \n" " sadalp v13.4s, v1.8h \n" " // finish v8v9 v12v13, start proc v16v17,v20v21\n" " ld1 {v28.8b, v29.8b}, [%0], #16\n" " smull v0.8h, v4.8b, v28.8b\n" " smull v1.8h, v5.8b, v28.8b\n" " ld1 {v26.8b, v27.8b}, [x14], #16\n" " smlal v0.8h, v6.8b, v26.8b\n" " smlal v1.8h, v7.8b, v26.8b\n" " sadalp v16.4s, v0.8h\n" " sadalp v17.4s, v1.8h\n" " smull v0.8h, v4.8b, v29.8b\n" " smull v1.8h, v5.8b, v29.8b\n" " smlal v0.8h, v6.8b, v27.8b\n" " smlal v1.8h, v7.8b, v27.8b\n" " sadalp v20.4s, v0.8h\n" " sadalp v21.4s, v1.8h\n" " add %0, %0, #32 \n" " add %1, %1, #16 \n" " subs w20, w20, #2 \n" " bne 2b \n" " 3: \n" " addp v8.4s, v8.4s, v9.4s \n" " addp v12.4s, v12.4s, v13.4s\n" " addp v16.4s, v16.4s, v17.4s\n" " addp v20.4s, v20.4s, v21.4s\n" " addp v8.4s, v8.4s, v8.4s \n" " addp v12.4s, v12.4s, v12.4s\n" " addp v16.4s, v16.4s, v16.4s\n" " addp 
v20.4s, v20.4s, v20.4s\n" " // start process kd4 kd2 kd1 cases\n" " 1: \n" " cmp %w8, 0 \n" " beq 4f \n" " // start subkernel_m4n2k4 \n" " ld1 {v4.8b}, [%1], #8 // load B4x2\n" " sxtl v4.8h, v4.8b \n" " mov v6.d[0], v4.d[1] \n" " ld1 {v2.8b}, [%0], #8 // load first A2x4\n" " sxtl v2.8h, v2.8b \n" " mov v3.d[0], v2.d[1] \n" " smull v9.4s, v2.4h, v4.4h \n" " smull v10.4s, v2.4h, v6.4h \n" " addp v9.4s, v9.4s, v10.4s \n" " addp v9.4s, v9.4s, v9.4s \n" " add v8.4s, v8.4s, v9.4s \n" " smull v13.4s, v3.4h, v4.4h \n" " smull v14.4s, v3.4h, v6.4h \n" " addp v13.4s, v13.4s, v14.4s\n" " addp v13.4s, v13.4s, v13.4s\n" " add v12.4s, v12.4s, v13.4s \n" " ld1 {v2.8b}, [%0], #8 // load next A2x4\n" " sxtl v2.8h, v2.8b \n" " mov v3.d[0], v2.d[1] \n" " smull v17.4s, v2.4h, v4.4h \n" " smull v18.4s, v2.4h, v6.4h \n" " addp v17.4s, v17.4s, v18.4s\n" " addp v17.4s, v17.4s, v17.4s\n" " add v16.4s, v16.4s, v17.4s \n" " smull v21.4s, v3.4h, v4.4h \n" " smull v22.4s, v3.4h, v6.4h \n" " addp v21.4s, v21.4s, v22.4s\n" " addp v21.4s, v21.4s, v21.4s\n" " add v20.4s, v20.4s, v21.4s \n" " 4: \n" " cmp %w9, 0 \n" " beq 5f \n" " // start subkernel_m4n2k2 \n" " ld1 {v4.8b}, [%0], #8 //load A4x2\n" " ld1 {v0.8b}, [%1] // load B2x2 \n" " add %1, %1, #4 \n" " // 00 11 22 33 \n" " rev32 v1.4h, v0.4h // 11 00 33 22 \n" " rev64 v2.2s, v0.2s // 22 33 00 11 \n" " rev64 v3.4h, v0.4h // 33 22 11 00 \n" " smull v21.8h, v4.8b, v0.8b \n" " smull v22.8h, v4.8b, v1.8b \n" " smull v23.8h, v4.8b, v2.8b \n" " smull v24.8h, v4.8b, v3.8b \n" " saddlp v21.4s, v21.8h \n" " saddlp v22.4s, v22.8h \n" " saddlp v23.4s, v23.8h \n" " saddlp v24.4s, v24.8h \n" " mov v9.s[0], v21.s[0] \n" " mov v9.s[1], v22.s[0] \n" " add v8.4s, v8.4s, v9.4s\n" " mov v13.s[0], v22.s[1] \n" " mov v13.s[1], v21.s[1] \n" " add v12.4s, v12.4s, v13.4s \n" " mov v17.s[0], v23.s[2] \n" " mov v17.s[1], v24.s[2] \n" " add v16.4s, v16.4s, v17.4s \n" " mov v21.s[0], v24.s[3] \n" " mov v21.s[1], v23.s[3] \n" " add v20.4s, v20.4s, v21.4s \n" " 5: \n" " cmp 
%w10, 0 \n" " beq 6f \n" " // start subkernel_m4n2k1\n" " ld1 {v4.8b}, [%1] // load B1x2\n" " add %1, %1, #2 \n" " ld1 {v2.8b}, [%0] // load A4x1\n" " add %0, %0, #4 \n" " sxtl v4.8h, v4.8b \n" " sxtl v2.8h, v2.8b \n" " smlal v8.4s, v4.4h, v2.h[0] \n" " smlal v12.4s, v4.4h, v2.h[1] \n" " smlal v16.4s, v4.4h, v2.h[2] \n" " smlal v20.4s, v4.4h, v2.h[3] \n" " 6: \n" " cmp %11, #0 \n" " beq 7f \n" " mov v8.d[1], v12.d[0] \n" " mov v16.d[1], v20.d[0] \n" " // v12: 0 1 2 3 \n" " ld1 {v12.4s}, [%11] \n" " zip2 v13.4s, v12.4s, v12.4s \n" " zip1 v12.4s, v12.4s, v12.4s \n" " // v12: 0 0 1 1 \n" " // v13: 2 2 3 3 \n" " // int32 => fp32 \n" " scvtf v8.4s, v8.4s \n" " scvtf v16.4s, v16.4s \n" " // fp32 *= scale_tm \n" " fmul v8.4s, v8.4s, v12.4s \n" " fmul v16.4s, v16.4s, v13.4s\n" " cmp %12, #0 \n" " beq 8f // skip add scales \n" " // fp32 += scales_tm \n" " ld1 {v12.4s}, [%12] \n" " zip2 v13.4s, v12.4s, v12.4s\n" " zip1 v12.4s, v12.4s, v12.4s\n" " fadd v8.4s, v8.4s, v12.4s \n" " fadd v16.4s, v16.4s, v13.4s\n" " 8: \n" " // fp32 -> int32 \n" " fcvtas v8.4s, v8.4s \n" " fcvtas v16.4s, v16.4s \n" " // int32 -> int16 \n" " sqxtn v8.4h, v8.4s \n" " sqxtn v16.4h, v16.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v8.8h \n" " sqxtn v16.8b, v16.8h \n" " // save \n" " st1 {v8.h}[0], [%2] \n" " add %2, %2, #2 \n" " st1 {v8.h}[1], [%3] \n" " add %3, %3, #2 \n" " st1 {v16.h}[0], [%4] \n" " add %4, %4, #2 \n" " st1 {v16.h}[1], [%5] \n" " add %5, %5, #2 \n" " b m4_loopnd2_finish \n" " 7: \n" " st1 {v8.2s}, [%2], #8 \n" " st1 {v12.2s}, [%3], #8 \n" " st1 {v16.2s}, [%4], #8 \n" " st1 {v20.2s}, [%5], #8 \n" " m4_loopnd2_finish: \n" " mov %0, x8 \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc0), // %2 "=r"(pc1), // %3 "=r"(pc2), // %4 "=r"(pc3), // %5 "=r"(k8_even),// %6 "=r"(k8), // %7 "=r"(k4), // %8 "=r"(k2), // %9 "=r"(k1), // %10 "=r"(scales), // %11 "=r"(bias) // %12 : "0"(pa), "1"(pb), "2"(pc0), "3"(pc1), "4"(pc2), "5"(pc3), "6"(k8_even), "7"(k8), "8"(k4), "9"(k2), "10"(k1), "11"(scales), 
"12"(bias) : "cc", "memory", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } if (n1 > 0) { asm volatile( " eor v8.8b, v8.8b, v8.8b \n" " eor v9.8b, v9.8b, v9.8b \n" " eor v10.8b, v10.8b, v10.8b \n" " eor v11.8b, v11.8b, v11.8b \n" " eor v12.8b, v12.8b, v12.8b \n" " eor v13.8b, v13.8b, v13.8b \n" " eor v14.8b, v14.8b, v14.8b \n" " eor v15.8b, v15.8b, v15.8b \n" " eor v16.8b, v16.8b, v16.8b \n" " eor v17.8b, v17.8b, v17.8b \n" " eor v18.8b, v18.8b, v18.8b \n" " eor v19.8b, v19.8b, v19.8b \n" " eor v20.8b, v20.8b, v20.8b \n" " eor v21.8b, v21.8b, v21.8b \n" " eor v22.8b, v22.8b, v22.8b \n" " eor v23.8b, v23.8b, v23.8b \n" "m4_n1_start: \n" " cmp %w7, #0 \n" " beq 10f \n" " mov w20, %w7 \n" " cmp %w6, #0 \n" " beq 11f// loop number is even \n" " // start loopkd8_nd1 \n" " subs w20, w20, #1 \n" " ld1 {v4.8b}, [%1], #8 // load four lines of B\n" " ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v8.4s, v0.8h \n" " saddlp v12.4s, v1.8h \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v16.4s, v0.8h \n" " saddlp v20.4s, v1.8h \n" " cmp w20, #0 \n" " beq 12f \n" " 11: \n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " ld1 {v24.8b, v25.8b, v26.8b, v27.8b}, [%0], #32\n" " ld1 {v28.8b, v29.8b, v30.8b, v31.8b}, [%0], #32\n" " smull v0.8h, v24.8b, v4.8b \n" " smlal v0.8h, v28.8b, v5.8b \n" " sadalp v8.4s, v0.8h \n" " smull v1.8h, v25.8b, v4.8b \n" " smlal v1.8h, v29.8b, v5.8b \n" " sadalp v12.4s, v1.8h \n" " smull v0.8h, v26.8b, v4.8b \n" " smlal v0.8h, v30.8b, v5.8b \n" " sadalp v16.4s, v0.8h \n" " smull v1.8h, v27.8b, v4.8b \n" " smlal v1.8h, v31.8b, v5.8b \n" " sadalp v20.4s, v1.8h \n" " subs w20, w20, #2 \n" " bne 11b \n" " 12: \n" " addp v8.4s, 
v8.4s, v8.4s \n" " addp v8.4s, v8.4s, v8.4s \n" " addp v12.4s, v12.4s, v12.4s\n" " addp v12.4s, v12.4s, v12.4s\n" " addp v16.4s, v16.4s, v16.4s\n" " addp v16.4s, v16.4s, v16.4s\n" " addp v20.4s, v20.4s, v20.4s\n" " addp v20.4s, v20.4s, v20.4s\n" " // start process kd4 kd2 kd1 cases\n" " 10: \n" " cmp %w8, #0 \n" " beq 13f \n" " // start subkernel_m4n1k2 \n" " ld1 {v4.8b}, [%1] // load B4x1\n" " add %x1, %x1, #4 \n" " sxtl v4.8h, v4.8b // extend B4x1 to v4 \n" " ld1 {v2.8b, v3.8b}, [%0], #16 // load A4x4\n" " sxtl v2.8h, v2.8b \n" " mov v5.d[0], v2.d[1] \n" " sxtl v3.8h, v3.8b \n" " mov v6.d[0], v3.d[1] // extend A4x4 to v2,v5,v3,v6\n" " smull v9.4s, v2.4h, v4.4h \n" " addp v9.4s, v9.4s, v9.4s \n" " addp v9.4s, v9.4s, v9.4s \n" " add v8.4s, v8.4s, v9.4s \n" " smull v13.4s, v5.4h, v4.4h \n" " addp v13.4s, v13.4s, v13.4s\n" " addp v13.4s, v13.4s, v13.4s\n" " add v12.4s, v12.4s, v13.4s \n" " smull v17.4s, v3.4h, v4.4h \n" " addp v17.4s, v17.4s, v17.4s\n" " addp v17.4s, v17.4s, v17.4s\n" " add v16.4s, v16.4s, v17.4s \n" " smull v21.4s, v6.4h, v4.4h \n" " addp v21.4s, v21.4s, v21.4s\n" " addp v21.4s, v21.4s, v21.4s\n" " add v20.4s, v20.4s, v21.4s \n" " 13: \n" " cmp %w9, #0 \n" " beq 14f \n" " // start subkernel_m4n1k2 \n" " ld1 {v4.8b}, [%0], #8 // load A4x2 \n" " ld1 {v0.8b}, [%1] // load B2x1 \n" " add %1, %1, #2 \n" " mov v0.h[1], v0.h[0] \n" " mov v0.s[1], v0.s[0] \n" " smull v0.8h, v0.8b, v4.8b \n" " saddlp v0.4s, v0.8h \n" " mov v9.s[0], v0.s[0] \n" " add v8.4s, v8.4s, v9.4s \n" " mov v13.s[0], v0.s[1] \n" " add v12.4s, v12.4s, v13.4s \n" " mov v17.s[0], v0.s[2] \n" " add v16.4s, v16.4s, v17.4s \n" " mov v21.s[0], v0.s[3] \n" " add v20.4s, v20.4s, v21.4s \n" " 14: \n" " cmp %w10, #0 \n" " beq 15f \n" " // start subkernel_m4n1k1 \n" " ld1 {v4.8b}, [%1] // load B1x1\n" " add %1, %1, #1 \n" " ld1 {v2.8b}, [%0] // load A4x1\n" " add %0, %0, #4 \n" " sxtl v4.8h, v4.8b \n" " sxtl v2.8h, v2.8b \n" " smull v0.4s, v2.4h, v4.h[0]\n" " add v8.4s, v8.4s, v0.4s \n" " mov 
v13.s[0], v0.s[1] \n" " add v12.4s, v12.4s, v13.4s \n" " mov v17.s[0], v0.s[2] \n" " add v16.4s, v16.4s, v17.4s \n" " mov v21.s[0], v0.s[3] \n" " add v20.4s, v20.4s, v21.4s \n" " 15: \n" // REQUANT " cmp %11, #0 \n" " beq 16f \n" " mov v8.s[1], v12.s[0] \n" " mov v8.s[2], v16.s[0] \n" " mov v8.s[3], v20.s[0] \n" " // v12: s0 s1 s2 s3 \n" " ld1 {v12.4s}, [%11] \n" " // int32 => fp32 \n" " scvtf v8.4s, v8.4s \n" " // fp32 *= scale_tm \n" " fmul v8.4s, v8.4s, v12.4s \n" " cmp %12, #0 \n" " beq 17f \n" " // fp32 += bias_tm \n" " ld1 {v12.4s}, [%12] \n" " fadd v8.4s, v8.4s, v12.4s \n" " 17: \n" " // fp32 -> int32 \n" " fcvtas v8.4s, v8.4s \n" " // int32 -> int16 \n" " sqxtn v8.4h, v8.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v8.8h \n" " // save \n" " st1 {v8.b}[0], [%2] \n" " st1 {v8.b}[1], [%3] \n" " st1 {v8.b}[2], [%4] \n" " st1 {v8.b}[3], [%5] \n" " b m4_finish \n" " // no need to add the last output pointer\n" " 16: \n" " st1 {v8.s}[0], [%2] \n" " st1 {v12.s}[0], [%3] \n" " st1 {v16.s}[0], [%4] \n" " st1 {v20.s}[0], [%5] \n" " m4_finish: \n" " mov x0, #0 \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc0), // %2 "=r"(pc1), // %3 "=r"(pc2), // %4 "=r"(pc3), // %5 "=r"(k8_even),// %6 "=r"(k8), // %7 "=r"(k4), // %8 "=r"(k2), // %9 "=r"(k1), // %10 "=r"(scales), // %11 "=r"(bias) // %12 : "0"(pa), "1"(pb), "2"(pc0), "3"(pc1), "4"(pc2), "5"(pc3), "6"(k8_even), "7"(k8), "8"(k4), "9"(k2), "10"(k1), "11"(scales), "12"(bias) : "cc", "memory", "x0", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } } #undef DECOMPOSE_K #undef DECOMPOSE_N void int8kernel(void* dst, const int8_t* sa, const int8_t* sb, int m, int k, int n, int ldc, float* scales, float* bias, const Option& opt) { int8_t* pa = (int8_t*)sa; int8_t* pb = (int8_t*)sb; const int nn = (m >> 2) << 2; if (scales == nullptr) { 
// ---- int8kernel (continued): no-requant path writes raw int32 accumulators ----
int32_t* pc = (int32_t*)dst;
        // Full 4-row blocks run in parallel; each iteration owns a disjoint
        // 4-row slab of the output, so no two threads write the same rows.
#pragma omp parallel for num_threads(opt.num_threads)
        for (int i = 0; i < nn; i += 4)
        {
            // pc is int32* here, so pc + i*ldc advances i rows of int32 output.
            int8kernel_m4((void*)(pc + i * ldc), pa + i * k, pb, m, k, n, ldc, nullptr, nullptr);
        }
        // Step past the rows consumed by the m4 kernels.
        pa += nn * k;
        pc += nn * ldc;
        // Remainder rows (m - nn is 0..3) fall to the m2/m1 kernels.
        switch(m-nn)
        {
            case 3:
                int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, nullptr, nullptr);
                pc += 2 * ldc;
                pa += 2 * k;
                int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, nullptr, nullptr);
                break;
            case 2:
                int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, nullptr, nullptr);
                break;
            case 1:
                int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, nullptr, nullptr);
                break;
            case 0:
            default:
                break;
        }
    }
    else
    {
        // Requant path: per-row scales (and optional per-row bias) are applied
        // inside the kernels and the output is int8, hence int8_t row stride.
        int8_t* pc = (int8_t*)dst;
#pragma omp parallel for num_threads(opt.num_threads)
        for (int i = 0; i < nn; i += 4)
        {
            // Each m4 kernel gets the 4 scales/biases for its 4 rows.
            int8kernel_m4((void*)(pc + i * ldc), pa + i * k, pb, m, k, n, ldc, scales + i, (bias==nullptr)? nullptr: bias+i);
        }
        pa += nn * k;
        pc += nn * ldc;
        // Keep scales/bias aligned with the remaining rows (bias may be null).
        scales += nn;
        bias = (bias == nullptr)? nullptr: bias + nn;
        switch(m-nn)
        {
            case 3:
                int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, scales, bias);
                pc += 2 * ldc;
                pa += 2 * k;
                scales += 2;
                bias = (bias == nullptr)? nullptr: bias + 2;
                int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, scales, bias);
                break;
            case 2:
                int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, scales, bias);
                break;
            case 1:
                int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, scales, bias);
                break;
            case 0:
            default:
                break;
        }
    }
    return;
}

// NOTE(review): closes an include guard / conditional opened before this chunk — confirm against file head.
#endif
ocp_nlp_sqp_rti.c
/* * Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren, * Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor, * Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan, * Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl * * This file is part of acados. * * The 2-Clause BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.;
 */

#include "acados/ocp_nlp/ocp_nlp_sqp_rti.h"

// external
#include <assert.h>
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif

// blasfeo
#include "blasfeo/include/blasfeo_d_aux.h"
#include "blasfeo/include/blasfeo_d_aux_ext_dep.h"
#include "blasfeo/include/blasfeo_d_blas.h"

// acados
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h"
#include "acados/ocp_nlp/ocp_nlp_reg_common.h"
#include "acados/ocp_qp/ocp_qp_common.h"
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
#include "acados/utils/timing.h"
#include "acados/utils/types.h"



/************************************************
 * options
 ************************************************/

// Number of bytes needed to hold an ocp_nlp_sqp_rti_opts struct followed by
// its nested generic NLP options. Must stay in sync with
// ocp_nlp_sqp_rti_opts_assign below, which lays the same members out in the
// same order.
int ocp_nlp_sqp_rti_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    int size = 0;

    // the solver-specific opts struct itself
    size += sizeof(ocp_nlp_sqp_rti_opts);

    // nested generic NLP options
    size += ocp_nlp_opts_calculate_size(config, dims);

    return size;
}



// Places the opts struct at raw_memory and the nested NLP options directly
// behind it; returns the typed pointer to the opts struct.
void *ocp_nlp_sqp_rti_opts_assign(void *config_, void *dims_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    char *c_ptr = (char *) raw_memory;

    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_rti_opts);

    // nested generic NLP options live right after the struct
    opts->nlp_opts = ocp_nlp_opts_assign(config, dims, c_ptr);
    c_ptr += ocp_nlp_opts_calculate_size(config, dims);

    // bounds check: bytes consumed must not exceed the advertised size
    // (comparison continues on the following source line)
    assert((char *) raw_memory +
ocp_nlp_sqp_rti_opts_calculate_size(config, dims) >= c_ptr);

    return opts;
}



// Fills opts with the RTI defaults: generic NLP defaults first, then the
// SQP-RTI specifics, then per-module overrides (adjoint computation is
// switched off in dynamics and constraints, per the comment below).
void ocp_nlp_sqp_rti_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int ii;

    int N = dims->N;

    // this first !!!  (submodule overrides below must come after the generic defaults)
    ocp_nlp_opts_initialize_default(config, dims, nlp_opts);

    // SQP RTI opts
    opts->warm_start_first_qp = false;
    // opts->compute_dual_sol = 1;
    opts->ext_qp_res = 0;

    // overwrite default submodules opts
    // do not compute adjoint in dynamics and constraints
    int compute_adj = 0;

    // dynamics: stages 0..N-1
    for (ii = 0; ii < N; ii++)
    {
        dynamics[ii]->opts_set(dynamics[ii], opts->nlp_opts->dynamics[ii], "compute_adj", &compute_adj);
    }

    // constraints: stages 0..N (terminal stage included)
    for (ii = 0; ii <= N; ii++)
    {
        constraints[ii]->opts_set(constraints[ii], opts->nlp_opts->constraints[ii], "compute_adj", &compute_adj);
    }

    return;
}



// Re-derives any option values that depend on other options; simply forwards
// to the generic NLP opts update.
void ocp_nlp_sqp_rti_opts_update(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_nlp_opts_update(config, dims, nlp_opts);

    return;
}



// Generic string-keyed option setter. The text before the first '_' in
// `field` is treated as a module prefix; "qp"-prefixed fields go through the
// generic NLP setter (full field name forwarded, prefix NOT stripped) and
// "qp_warm_start" is additionally mirrored into opts->qp_warm_start.
// Everything else is either an SQP-RTI-level flag or falls through to the
// generic NLP setter.
void ocp_nlp_sqp_rti_opts_set(void *config_, void *opts_, const char *field, void* value)
{
    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) opts_;
    ocp_nlp_config *config = config_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    int ii;

    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;

    // extract module name (text before the first '_')
    // NOTE(review): module_length is not checked against MAX_STR_LEN before
    // the copy below — presumably all callers pass short field names; verify.
    char *char_ = strchr(field, '_');
    if (char_!=NULL)
    {
        module_length = char_-field;
        for (ii=0; ii<module_length; ii++)
            module[ii] = field[ii];
        module[module_length] = '\0'; // add end of string
        ptr_module = module;
    }

    // pass options to QP module
    if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
    {
        // config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, field+module_length+1, value);
        ocp_nlp_opts_set(config, nlp_opts, field, value);

        // keep a local copy of the warm-start setting for this solver
        if (!strcmp(field, "qp_warm_start"))
        {
            int* i_ptr = (int *) value;
            opts->qp_warm_start = *i_ptr;
        }
    }
    else // nlp opts
    {
        if (!strcmp(field, "ext_qp_res"))
        {
            int* ext_qp_res = (int *) value;
            opts->ext_qp_res = *ext_qp_res;
        }
        else if (!strcmp(field, "warm_start_first_qp"))
        {
            bool* warm_start_first_qp = (bool *) value;
            opts->warm_start_first_qp = *warm_start_first_qp;
        }
        else
        {
            // unknown fields are delegated rather than rejected
            ocp_nlp_opts_set(config, nlp_opts, field, value);
            // printf("\nerror: ocp_nlp_sqp_rti_opts_set: wrong field: %s\n", field);
            // exit(1);
        }
    }

    return;
}



// Stage-wise option setter; forwards directly to the generic NLP
// per-stage setter.
void ocp_nlp_sqp_rti_opts_set_at_stage(void *config_, void *opts_, int stage, const char *field, void* value)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_nlp_opts_set_at_stage(config, nlp_opts, stage, field, value);

    return;
}



/************************************************
 * memory
 ************************************************/

// Bytes needed for the SQP-RTI memory: the struct itself, the nested NLP
// memory, and a small statistics table. Layout must match
// ocp_nlp_sqp_rti_memory_assign.
int ocp_nlp_sqp_rti_memory_calculate_size(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    // ocp_nlp_dynamics_config **dynamics = config->dynamics;
    // ocp_nlp_cost_config **cost = config->cost;
    // ocp_nlp_constraints_config **constraints = config->constraints;

    // int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    int size = 0;

    size += sizeof(ocp_nlp_sqp_rti_memory);

    // nlp mem
    size += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);

    // stat: stat_m rows x stat_n columns of doubles; four extra columns when
    // extended QP residuals are recorded
    int stat_m = 1+1;
    int stat_n = 2;
    if (opts->ext_qp_res)
        stat_n += 4;
    size += stat_n*stat_m*sizeof(double);

    size += 8; // initial align

    make_int_multiple_of(8, &size);

    return size;
}



void
*ocp_nlp_sqp_rti_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; // ocp_nlp_dynamics_config **dynamics = config->dynamics; // ocp_nlp_cost_config **cost = config->cost; // ocp_nlp_constraints_config **constraints = config->constraints; char *c_ptr = (char *) raw_memory; // int ii; // int N = dims->N; // int *nx = dims->nx; // int *nu = dims->nu; // int *nz = dims->nz; // initial align align_char_to(8, &c_ptr); ocp_nlp_sqp_rti_memory *mem = (ocp_nlp_sqp_rti_memory *) c_ptr; c_ptr += sizeof(ocp_nlp_sqp_rti_memory); // nlp mem mem->nlp_mem = ocp_nlp_memory_assign(config, dims, nlp_opts, c_ptr); c_ptr += ocp_nlp_memory_calculate_size(config, dims, nlp_opts); // stat mem->stat = (double *) c_ptr; mem->stat_m = 1+1; mem->stat_n = 2; if (opts->ext_qp_res) mem->stat_n += 4; c_ptr += mem->stat_m*mem->stat_n*sizeof(double); mem->status = ACADOS_READY; assert((char *) raw_memory+ocp_nlp_sqp_rti_memory_calculate_size(config, dims, opts) >= c_ptr); return mem; } /************************************************ * workspace ************************************************/ int ocp_nlp_sqp_rti_workspace_calculate_size(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; int size = 0; // sqp size += sizeof(ocp_nlp_sqp_rti_workspace); // nlp size += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts); // qp in size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims); // qp out size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims); if (opts->ext_qp_res) { // qp res size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims); // qp res ws size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims); } 
    return size;
}


// Map the workspace struct members into the raw workspace buffer.
// NOTE: the layout must mirror ocp_nlp_sqp_rti_workspace_calculate_size;
// the trailing assert guards against overrunning the computed size.
static void ocp_nlp_sqp_rti_cast_workspace(ocp_nlp_config *config, ocp_nlp_dims *dims,
    ocp_nlp_sqp_rti_opts *opts, ocp_nlp_sqp_rti_memory *mem, ocp_nlp_sqp_rti_workspace *work)
{
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

    // sqp
    char *c_ptr = (char *) work;
    c_ptr += sizeof(ocp_nlp_sqp_rti_workspace);

    // nlp
    work->nlp_work = ocp_nlp_workspace_assign(config, dims, nlp_opts, nlp_mem, c_ptr);
    c_ptr += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);

    // qp in
    work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // qp out
    work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    if (opts->ext_qp_res)
    {
        // qp res
        work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);

        // qp res ws
        work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    assert((char *) work + ocp_nlp_sqp_rti_workspace_calculate_size(config, dims, opts) >= c_ptr);

    return;
}



/************************************************
 * functions
 ************************************************/

// Perform one real-time iteration: alias module memory into the QP,
// linearize, (optionally) regularize, solve a single QP and update the
// primal/dual variables. Returns the solver status (ACADOS_SUCCESS,
// ACADOS_QP_FAILURE, ...), also stored in mem->status.
int ocp_nlp_sqp_rti(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
    void *opts_, void *mem_, void *work_)
{
    acados_timer timer0, timer1;
    acados_tic(&timer0);

    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;

    ocp_nlp_sqp_rti_workspace *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work =
work->nlp_work;

// zero timers
double total_time = 0.0;
double tmp_time;
mem->time_qp_sol = 0.0;
mem->time_qp_solver_call = 0.0;
mem->time_lin = 0.0;
mem->time_reg = 0.0;
mem->time_tot = 0.0;

int N = dims->N;

int ii;

int qp_iter = 0;
int qp_status = 0;

#if defined(ACADOS_WITH_OPENMP)
// backup number of threads
int num_threads_bkp = omp_get_num_threads();
// set number of threads
omp_set_num_threads(opts->nlp_opts->num_threads);
#pragma omp parallel
{ // beginning of parallel region
#endif

// alias to dynamics_memory: point each dynamics module at the solver's
// iterate and QP matrices so no data is copied during linearization
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for nowait
#endif
for (ii = 0; ii < N; ii++)
{
    config->dynamics[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->dynamics[ii]);
    config->dynamics[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->dynamics[ii]);
    config->dynamics[ii]->memory_set_ux1_ptr(nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
    config->dynamics[ii]->memory_set_tmp_ux1_ptr(nlp_work->tmp_nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
    config->dynamics[ii]->memory_set_pi_ptr(nlp_out->pi+ii, nlp_mem->dynamics[ii]);
    config->dynamics[ii]->memory_set_tmp_pi_ptr(nlp_work->tmp_nlp_out->pi+ii, nlp_mem->dynamics[ii]);
    config->dynamics[ii]->memory_set_BAbt_ptr(nlp_mem->qp_in->BAbt+ii, nlp_mem->dynamics[ii]);
    config->dynamics[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->dynamics[ii]);
    config->dynamics[ii]->memory_set_dzduxt_ptr(nlp_mem->dzduxt+ii, nlp_mem->dynamics[ii]);
    config->dynamics[ii]->memory_set_sim_guess_ptr(nlp_mem->sim_guess+ii, nlp_mem->set_sim_guess+ii, nlp_mem->dynamics[ii]);
    config->dynamics[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->dynamics[ii]);
}

// alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for nowait
#endif
for (ii = 0; ii <= N; ii++)
{
    config->cost[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->cost[ii]);
    config->cost[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->cost[ii]);
    config->cost[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->cost[ii]);
    config->cost[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->cost[ii]);
    config->cost[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->cost[ii]);
    config->cost[ii]->memory_set_Z_ptr(nlp_mem->qp_in->Z+ii, nlp_mem->cost[ii]);
}

// alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for nowait
#endif
for (ii = 0; ii <= N; ii++)
{
    config->constraints[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->constraints[ii]);
    config->constraints[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->constraints[ii]);
    config->constraints[ii]->memory_set_lam_ptr(nlp_out->lam+ii, nlp_mem->constraints[ii]);
    config->constraints[ii]->memory_set_tmp_lam_ptr(nlp_work->tmp_nlp_out->lam+ii, nlp_mem->constraints[ii]);
    config->constraints[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->constraints[ii]);
    config->constraints[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->constraints[ii]);
    config->constraints[ii]->memory_set_DCt_ptr(nlp_mem->qp_in->DCt+ii, nlp_mem->constraints[ii]);
    config->constraints[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->constraints[ii]);
    config->constraints[ii]->memory_set_idxb_ptr(nlp_mem->qp_in->idxb[ii], nlp_mem->constraints[ii]);
    config->constraints[ii]->memory_set_idxs_ptr(nlp_mem->qp_in->idxs[ii], nlp_mem->constraints[ii]);
}

// alias to regularize memory
config->regularize->memory_set_RSQrq_ptr(dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem);
config->regularize->memory_set_rq_ptr(dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem);
config->regularize->memory_set_BAbt_ptr(dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem);
config->regularize->memory_set_b_ptr(dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem);
config->regularize->memory_set_idxb_ptr(dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem);
config->regularize->memory_set_DCt_ptr(dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem);
config->regularize->memory_set_ux_ptr(dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem);
config->regularize->memory_set_pi_ptr(dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem);
config->regularize->memory_set_lam_ptr(dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem);

// copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for nowait
#endif
// NOTE(oj): this will lead in an error for irk_gnsf, T must be set in precompute;
// -> remove here and make sure precompute is called everywhere (e.g. Python interface).
for (ii = 0; ii < N; ii++)
{
    config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
        nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
}

#if defined(ACADOS_WITH_OPENMP)
} // end of parallel region
#endif

// initialize QP
ocp_nlp_initialize_qp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);

/* SQP body */

int sqp_iter = 0;
nlp_mem->sqp_iter = &sqp_iter;

// linearizate NLP and update QP matrices
acados_tic(&timer1);
ocp_nlp_approximate_qp_matrices(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
mem->time_lin += acados_toc(&timer1);

// update QP rhs for SQP (step prim var, abs dual var)
ocp_nlp_approximate_qp_vectors_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);

// regularize Hessian
acados_tic(&timer1);
config->regularize->regularize_hessian(config->regularize, dims->regularize,
    opts->nlp_opts->regularize, nlp_mem->regularize_mem);
mem->time_reg += acados_toc(&timer1);

// printf("\n------- qp_in (sqp iter %d) --------\n", sqp_iter);
// print_ocp_qp_in(nlp_mem->qp_in);
// exit(1);

// optionally force a cold start for the very first QP solve
if (!opts->warm_start_first_qp)
{
    int tmp_int = 0;
    config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "warm_start", &tmp_int);
}

// solve qp
acados_tic(&timer1);
qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver, nlp_mem->qp_in, nlp_mem->qp_out,
    opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work);
mem->time_qp_sol += acados_toc(&timer1);

qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time);
mem->time_qp_solver_call += tmp_time;

// compute correct dual solution in case of Hessian regularization
acados_tic(&timer1);
config->regularize->correct_dual_sol(config->regularize, dims->regularize,
    opts->nlp_opts->regularize, nlp_mem->regularize_mem);
mem->time_reg += acados_toc(&timer1);

// TODO move into QP solver memory ???
qp_info *qp_info_;
ocp_qp_out_get(nlp_mem->qp_out, "qp_info", &qp_info_);
nlp_out->qp_iter = qp_info_->num_iter;
qp_iter = qp_info_->num_iter;

// compute external QP residuals (for debugging)
if (opts->ext_qp_res)
{
    ocp_qp_res_compute(nlp_mem->qp_in, nlp_mem->qp_out, work->qp_res, work->qp_res_ws);
    ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*1+2));
    // printf("\nsqp_iter %d, res %e %e %e %e\n", sqp_iter, inf_norm_qp_res[0], inf_norm_qp_res[1], inf_norm_qp_res[2], inf_norm_qp_res[3]);
}

// printf("\n------- qp_out (sqp iter %d) ---------\n", sqp_iter);
// print_ocp_qp_out(nlp_mem->qp_out);
// exit(1);

// save statistics
mem->stat[mem->stat_n*1+0] = qp_status;
mem->stat[mem->stat_n*1+1] = qp_iter;

// abort (restoring thread count) if the QP solve failed hard
if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER))
{
    // print_ocp_qp_in(mem->qp_in);

    total_time += acados_toc(&timer0);
    mem->time_tot = total_time;
    nlp_out->total_time = total_time;

    printf("QP solver returned error status %d\n", qp_status);
#if defined(ACADOS_WITH_OPENMP)
    // restore number of threads
    omp_set_num_threads(num_threads_bkp);
#endif
    mem->status = ACADOS_QP_FAILURE;
    return mem->status;
}

// apply the QP step to the NLP primal/dual variables
ocp_nlp_update_variables_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);

// ocp_nlp_dims_print(nlp_out->dims);
// ocp_nlp_out_print(nlp_out);
// exit(1);

total_time += acados_toc(&timer0);
mem->time_tot = total_time;
nlp_out->total_time = total_time;

// print_ocp_qp_in(mem->qp_in);

#if defined(ACADOS_WITH_OPENMP)
// restore number of threads
omp_set_num_threads(num_threads_bkp);
#endif
mem->status = ACADOS_SUCCESS;
return mem->status;
}


// One-time precomputations before the first solver call: checks slack
// dimension consistency, sets the sampling times "T" and runs each dynamics
// module's precompute. Returns ACADOS_SUCCESS or the first failing status.
int ocp_nlp_sqp_rti_precompute(void *config_, void *dims_, void *nlp_in_,
    void *nlp_out_, void *opts_, void *mem_, void *work_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    // ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

    ocp_nlp_sqp_rti_workspace *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    int N = dims->N;
    int status = ACADOS_SUCCESS;

    int ii;

    // TODO(giaf) flag to enable/disable checks
    // NOTE(review): the message below says "ocp_nlp_sqp_precompute" although this
    // is the sqp_rti module — looks like a copy/paste slip in the error text.
    for (ii = 0; ii <= N; ii++)
    {
        int module_val;
        config->constraints[ii]->dims_get(config->constraints[ii], dims->constraints[ii], "ns", &module_val);
        if (dims->ns[ii] != module_val)
        {
            printf("ocp_nlp_sqp_precompute: inconsistent dimension ns with constraint module.");
            exit(1);
        }
    }

    // precompute
    for (ii = 0; ii < N; ii++)
    {
        // set T
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
            nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);

        // dynamics precompute
        status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii],
            nlp_in->dynamics[ii], opts->nlp_opts->dynamics[ii],
            nlp_mem->dynamics[ii], nlp_work->dynamics[ii]);
        if (status != ACADOS_SUCCESS)
            return status;
    }

    return status;
}


// Parameter sensitivity: solve the QP with zero rhs and a unit perturbation
// on the selected initial-state bound, then copy the resulting directional
// derivatives of ux/pi/lam/t into sens_nlp_out.
// Only field "ex" at stage 0 is supported; anything else aborts.
void ocp_nlp_sqp_rti_eval_param_sens(void *config_, void *dims_, void *opts_,
    void *mem_, void *work_, char *field, int stage, int index, void *sens_nlp_out_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_nlp_out *sens_nlp_out = sens_nlp_out_;

    ocp_nlp_sqp_rti_workspace *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    // start from the last QP, with all right-hand sides zeroed
    d_ocp_qp_copy_all(nlp_mem->qp_in, work->tmp_qp_in);
    d_ocp_qp_set_rhs_zero(work->tmp_qp_in);

    double one = 1.0;

    if ((!strcmp("ex", field)) & (stage==0))
    {
        // unit perturbation of the index-th initial-state bound
        d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in);
        d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in);

        // d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in);

        config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver, work->tmp_qp_in,
            work->tmp_qp_out, opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work);

        // d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out);
        // exit(1);

        /* copy tmp_qp_out into sens_nlp_out */

        int i;

        int N = dims->N;
        int *nv = dims->nv;
        int *nx = dims->nx;
        // int *nu = dims->nu;
        int *ni = dims->ni;
        // int *nz = dims->nz;

        for (i = 0; i <= N; i++)
        {
            blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0, sens_nlp_out->ux + i, 0);

            if (i < N)
                blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0, sens_nlp_out->pi + i, 0);

            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0, sens_nlp_out->lam + i, 0);

            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0, sens_nlp_out->t + i, 0);
        }
    }
    else
    {
        printf("\nerror: field %s at stage %d not available in ocp_nlp_sqp_rti_eval_param_sens\n", field, stage);
        exit(1);
    }

    return;
}


// TODO rename memory_get ???
void ocp_nlp_sqp_rti_get(void *config_, void *dims_, void *mem_, const char *field, void *return_value_) { ocp_nlp_config *config = config_; ocp_nlp_dims *dims = dims_; ocp_nlp_sqp_rti_memory *mem = mem_; if (!strcmp("sqp_iter", field)) { int *value = return_value_; *value = 1; } else if (!strcmp("status", field)) { int *value = return_value_; *value = mem->status; } else if (!strcmp("time_tot", field) || !strcmp("tot_time", field)) { double *value = return_value_; *value = mem->time_tot; } else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field)) { double *value = return_value_; *value = mem->time_qp_sol; } else if (!strcmp("time_qp_solver_call", field)) { double *value = return_value_; *value = mem->time_qp_solver_call; } else if (!strcmp("time_lin", field)) { double *value = return_value_; *value = mem->time_lin; } else if (!strcmp("time_reg", field)) { double *value = return_value_; *value = mem->time_reg; } else if (!strcmp("stat", field)) { double **value = return_value_; *value = mem->stat; } else if (!strcmp("stat_m", field)) { int *value = return_value_; *value = mem->stat_m; } else if (!strcmp("stat_n", field)) { int *value = return_value_; *value = mem->stat_n; } else if (!strcmp("nlp_mem", field)) { void **value = return_value_; *value = mem->nlp_mem; } else if (!strcmp("qp_xcond_dims", field)) { void **value = return_value_; *value = dims->qp_solver->xcond_dims; } else if (!strcmp("qp_xcond_in", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_solver_mem->xcond_qp_in; } else if (!strcmp("qp_xcond_out", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_solver_mem->xcond_qp_out; } else if (!strcmp("qp_in", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_in; } else if (!strcmp("qp_out", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_out; } else if (!strcmp("qp_iter", field)) { config->qp_solver->memory_get(config->qp_solver, mem->nlp_mem->qp_solver_mem, "iter", 
return_value_); } else { printf("\nerror: output type %s not available in ocp_nlp_sqp_rti module\n", field); exit(1); } } void ocp_nlp_sqp_rti_config_initialize_default(void *config_) { ocp_nlp_config *config = (ocp_nlp_config *) config_; config->opts_calculate_size = &ocp_nlp_sqp_rti_opts_calculate_size; config->opts_assign = &ocp_nlp_sqp_rti_opts_assign; config->opts_initialize_default = &ocp_nlp_sqp_rti_opts_initialize_default; config->opts_update = &ocp_nlp_sqp_rti_opts_update; config->opts_set = &ocp_nlp_sqp_rti_opts_set; config->opts_set_at_stage = &ocp_nlp_sqp_rti_opts_set_at_stage; config->memory_calculate_size = &ocp_nlp_sqp_rti_memory_calculate_size; config->memory_assign = &ocp_nlp_sqp_rti_memory_assign; config->workspace_calculate_size = &ocp_nlp_sqp_rti_workspace_calculate_size; config->evaluate = &ocp_nlp_sqp_rti; config->eval_param_sens = &ocp_nlp_sqp_rti_eval_param_sens; config->config_initialize_default = &ocp_nlp_sqp_rti_config_initialize_default; config->precompute = &ocp_nlp_sqp_rti_precompute; config->get = &ocp_nlp_sqp_rti_get; return; }
/* ===== concatenated file boundary: reduce3.h ===== */
/*
 * reduce3.h
 *
 *  Created on: Dec 28, 2015
 *      Author: agibsonccc
 */

#ifndef REDUCE3_H_
#define REDUCE3_H_

#define EXTRA_PARAMS_LENGTH 10

#include <templatemath.h>
#include <helper_cuda.h>
#include <helpers/sharedmem.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <pairwise_util.h>
#include <dll.h>
#include <helpers/shape.h>
#include <ops/ops.h>
#include <op_boilerplate.h>

#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#endif

// when OpenMP is not available, stub out the thread-query functions so the
// host code paths still compile
#ifndef _OPENMP
#define omp_get_thread_num() 0
#define omp_get_max_threads() 1
#endif

#include "legacy_ops.h"

namespace functions {
    namespace reduce3 {

/**
 * Reduce involving
 * 2 arrays
 */
        template<typename T>
        class Reduce3 {

        public:

#ifdef __CUDACC__
            // pairwise combine of one x/y element pair; implemented per op
            virtual __device__ inline T opAtomic(T d1, T d2, T *extraParamsRef) = 0;
#endif

#ifdef __CUDACC__
            /**
             * Aggregate shared memory
             * Tree-reduces sPartials[0..numItems) into sPartials[0] using
             * OpType::update; callers must have populated sPartials first.
             * @param sPartialsRef pointer to the shared partials buffer
             * @param tid this thread's index within the block
             * @param numItems number of valid partials to combine
             * @param extraParams op-specific accumulator state
             */
            template<typename OpType>
            static __inline__ __device__ void aggregatePartials(T **sPartialsRef, Nd4jLong tid, Nd4jLong numItems, T *extraParamsRef) {
                // start the shared memory loop on the next power of 2 less
                // than the block size.  If block size is not a power of 2,
                // accumulate the intermediate sums in the remainder range.
                T *sPartials = *sPartialsRef;
                Nd4jLong floorPow2 = numItems;

                if (floorPow2 & (floorPow2 - 1)) {
                    // fold the non-power-of-2 remainder down first
                    while (floorPow2 & (floorPow2 - 1)) {
                        floorPow2 &= floorPow2 - 1;
                    }
                    if (tid >= floorPow2) {
                        sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParamsRef);
                    }
                    __syncthreads();
                }

                // standard halving tree reduction over the power-of-2 prefix
                for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
                    if (tid < activeThreads) {
                        sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParamsRef);
                    }
                    __syncthreads();
                }
            }

            /**
            Perform a reduction
            @param n the number of elements
            @param xOffset the starting offset
            @param dx the data to perform the reduction on
            @param incx the increment on which to perform the reduction
            @param extraParams extra parameters used for calculations
            @param result where to store the result of the reduction
            */
            virtual __inline__ __device__ void transformNoElementWiseStride(
                    T *dx, Nd4jLong *xShapeInfo,
                    T *dy, Nd4jLong *yShapeInfo,
                    T *extraParams,
                    T *result, Nd4jLong *resultShapeInfo,
                    int postProcessOrNot, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) {
                Nd4jLong n = shape::length(xShapeInfo);
                int rank = shape::rank(xShapeInfo);

                T *sPartials = (T *) manager->getSharedReductionBuffer(); //val.getPointer();
                T startingVal = this->startingValue(dx);

                // FIXME: this ugly fast fix.
                __shared__ T extraZ[2];

                if (threadIdx.x == 0) {
                    extraZ[0] = (T) 0.0;
                    extraZ[1] = (T) 0.0;
                }

                sPartials[threadIdx.x] = startingVal;
                __syncthreads();

                // NOTE(review): the initial index uses blockIdx.x * gridDim.x,
                // not blockIdx.x * blockDim.x — looks like a stride bug; confirm
                // against callers before relying on this path.
                Nd4jLong idx[MAX_RANK];
                for(Nd4jLong i = blockIdx.x * gridDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x) {
                    shape::ind2subC(rank,shape::shapeOf(xShapeInfo),i, idx);
                    auto offset = shape::getOffset(0,shape::shapeOf(xShapeInfo),shape::stride(xShapeInfo),idx,rank);
                    auto yOffset = shape::getOffset(0,shape::shapeOf(yShapeInfo),shape::stride(yShapeInfo),idx,rank);
                    sPartials[threadIdx.x] = update(sPartials[threadIdx.x], this->opAtomic(dx[offset], dy[yOffset], extraZ), extraZ);
                }

                T **sPartialsRef = (T **) &sPartials;
                aggregatePartials(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, n), extraZ);

                /**
                 * Look at something that uses the extra params
                 * and aggregates the extra values propelry.
                 *This will be used in summary stats too.
                 */
                // write result for this block to global mem
                if (threadIdx.x == 0) {
                    if (postProcessOrNot) {
                        result[blockIdx.x] = postProcess(sPartials[0], n, extraZ);
                    }
                    else {
                        result[blockIdx.x] = sPartials[0];
                    }
                }
            }

            /**
             * Scalar (full-array) reduction of dx against dy into result[0].
             * Uses a fast strided path when both inputs have compatible
             * element-wise strides, otherwise falls back to coordinate-based
             * indexing; multi-block runs are combined through reductionBuffer.
             */
            template<typename OpType>
            static inline __device__ void execScalarCuda(
                    T *dx, Nd4jLong *xShapeInfo,
                    T *dy, Nd4jLong *yShapeInfo,
                    T *extraParams,
                    T *result, Nd4jLong *resultShapeInfo,
                    int *allocationPointer, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) {
                // SharedMemory <T> val;
                T *sPartials = (T *) manager->getSharedReductionBuffer(); // val.getPointer();

                // FIXME: this ugly fast fix.
                __shared__ T extraZ[3];

                if (threadIdx.x == 0) {
                    extraZ[0] = (T) 0.0f;
                    extraZ[1] = (T) 0.0f;

                    if (extraParams != NULL)
                        extraZ[2] = extraParams[0];
                    else extraZ[2] = (T) 0.0f;
                }
                __syncthreads();

                T startingVal = OpType::startingValue(dx);
                Nd4jLong length = shape::length(xShapeInfo);
                int xElementWiseStride = shape::elementWiseStride(xShapeInfo);
                int yElementWiseStride = shape::elementWiseStride(yShapeInfo);
                int tid = blockIdx.x * blockDim.x + threadIdx.x;
                char xOrder = shape::order(xShapeInfo);
                char yOrder = shape::order(yShapeInfo);

                // fast path: both buffers traversable with a plain stride
                if(xOrder == yOrder && (xElementWiseStride > 0 && yElementWiseStride > 0) && shape::strideDescendingCAscendingF(xShapeInfo) && shape::strideDescendingCAscendingF(yShapeInfo)) {

                    if (xElementWiseStride == 1 && yElementWiseStride == 1) {
                        for(Nd4jLong i = tid; i < length; i+= gridDim.x * blockDim.x) {
                            startingVal = OpType::update(startingVal, OpType::opAtomic(dx[i], dy[i], extraZ), extraZ);
                        }
                    }
                    else {
                        for(Nd4jLong i = tid; i < length; i+= gridDim.x * blockDim.x) {
                            startingVal = OpType::update(startingVal, OpType::opAtomic(dx[i * xElementWiseStride], dy[i * yElementWiseStride], extraZ), extraZ);
                        }
                    }

                    sPartials[threadIdx.x] = startingVal;
                }
                else {
                    // slow path: recover coordinates for every linear index
                    __shared__ Nd4jLong *xShape;
                    __shared__ Nd4jLong *yShape;
                    __shared__ Nd4jLong *xStride;
                    __shared__ Nd4jLong *yStride;
                    __shared__ int rank;

                    if (threadIdx.x == 0) {
                        xShape = shape::shapeOf(xShapeInfo);
                        yShape = shape::shapeOf(yShapeInfo);
                        xStride = shape::stride(xShapeInfo);
                        yStride = shape::stride(yShapeInfo);
                        rank = shape::rank(xShapeInfo);
                    }
                    __syncthreads();

                    T startingVal = OpType::startingValue(dx);

                    T *sPartials = (T *) manager->getSharedReductionBuffer();

                    Nd4jLong xCoords[MAX_RANK];
                    Nd4jLong yCoords[MAX_RANK];

                    sPartials[threadIdx.x] = startingVal;

                    for(Nd4jLong i = tid ;i < length; i += gridDim.x * blockDim.x) {
                        shape::ind2subC(rank,xShape,i,xCoords);
                        shape::ind2subC(rank,yShape,i,yCoords);

                        auto offset = shape::getOffset(0, xShape, xStride, xCoords,rank);
                        auto yOffset = shape::getOffset(0,yShape, yStride, yCoords,rank);

                        sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::opAtomic(dx[offset], dy[yOffset], extraZ), extraZ);
                    }
                }

                __syncthreads();
                T **sPartialsRef = (T **) &sPartials;
                aggregatePartials<OpType>(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, length), extraZ);
                __syncthreads();

                if (gridDim.x > 1) {
                    // multi-block: spill per-block partials (and extraZ) to global
                    // memory; the last block to arrive (ticket via atomicInc on
                    // tc[16384]) performs the final combine.
                    unsigned int *tc = (unsigned int *)reductionBuffer;
                    __shared__ bool amLast;
                    int rank = shape::rank(xShapeInfo);
                    tid = threadIdx.x;
                    T *extraBuffer = (T *) allocationPointer;
                    if (threadIdx.x == 0) {
                        reductionBuffer[blockIdx.x] = sPartials[0];
                        extraBuffer[blockIdx.x] = extraZ[0];
                        extraBuffer[gridDim.x + blockIdx.x] = extraZ[1];
                    }
                    __threadfence();
                    __syncthreads();

                    if (threadIdx.x == 0) {
                        unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
                        amLast = (ticket == gridDim.x - 1);
                    }

                    sPartials[tid] = startingVal;

                    __syncthreads();

                    if (amLast) {
                        tc[16384] = 0;

                        sPartials[threadIdx.x] = OpType::startingValue(dx);

                        // TODO: later probably replace this. Right now we need extraZ sync for CosineSimilarity ONLY
                        if (tid == 0 && extraZ[0] != (T) 0.0 && extraZ[1] != (T) 0.0) {
                            extraZ[0] = 0.0;
                            extraZ[1] = 0.0;

                            for (int i = 0; i < gridDim.x; i++) {
                                extraZ[0] += extraBuffer[i];
                                extraZ[1] += extraBuffer[gridDim.x + i];
                            }
                        }

                        for (Nd4jLong i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
                            sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraZ);
                        }
                        __syncthreads();

                        aggregatePartials<OpType>(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(gridDim.x, blockDim.x), extraZ);
                        __syncthreads();

                        if (threadIdx.x == 0) {
                            result[0] = OpType::postProcess(sPartials[0], length, extraZ);
                        }
                    }
                }
                else {
                    // single block: write the final value directly
                    if (tid == 0) {
                        unsigned int *tc = (unsigned *)reductionBuffer;
                        tc[16384] = 0;
                        result[0] = OpType::postProcess(sPartials[0], length, extraZ);
                    }
                }
            }

            /**
             * All-pairs TAD reduction: reduces every TAD of dx against every
             * TAD of dy, writing one value per (xTad, yTad) pair into result.
             */
            template<typename OpType>
            __device__ static inline void transformAll(
                    T *dx, Nd4jLong *xShapeInfo,
                    T *dy, Nd4jLong *yShapeInfo,
                    T *extraParams,
                    T *result, Nd4jLong
                    *resultShapeInfo,
                    int *dimension, int dimensionLength, int postProcessOrNot, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, Nd4jLong *yTadShapeInfo, Nd4jLong *yOffsets) {

                // initialize partials first
                T *sPartials = (T *) manager->getSharedReductionBuffer();
                T startingVal = OpType::startingValue(dx);
                sPartials[threadIdx.x] = startingVal;
                // tempX caches the current x TAD in shared memory, right after sPartials
                T *tempX = sPartials + blockDim.x;

                const int maxBlock = blockDim.x;

                __shared__ T extraZ[OpType::extraParamsLen > 0 ? OpType::extraParamsLen : 1];

                __shared__ int xTadLength;
                __shared__ int yTadLength;

                __shared__ int xTads;
                __shared__ int yTads;

                __shared__ Nd4jLong *xShape;
                __shared__ Nd4jLong *xStride;
                __shared__ int xRank;

                __shared__ Nd4jLong *yShape;
                __shared__ Nd4jLong *yStride;
                __shared__ int yRank;

                //reading initial data
                if (threadIdx.x == 0) {
                    xTadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
                    yTadLength = shape::tadLength(yShapeInfo, dimension, dimensionLength);

                    xTads = shape::length(xShapeInfo) / xTadLength;
                    yTads = shape::length(yShapeInfo) / yTadLength;

                    xShape = shape::shapeOf(xTadShapeInfo);
                    xStride = shape::stride(xTadShapeInfo);
                    xRank = shape::rank(xTadShapeInfo);

                    yShape = shape::shapeOf(yTadShapeInfo);
                    yStride = shape::stride(yTadShapeInfo);
                    yRank = shape::rank(yTadShapeInfo);
                }
                __syncthreads();

                Nd4jLong xCoord[MAX_RANK];
                Nd4jLong yCoord[MAX_RANK];

                // number of blockDim-sized tiles needed to cover one x TAD
                int limit = xTadLength / maxBlock;
                if (xTadLength % maxBlock > 0)
                    limit++;

                for (int r = blockIdx.x; r < xTads; r += blockDim.x * gridDim.x) {
                    T *x = dx + xOffsets[r];

                    // preload the first tile of this x TAD into shared memory
                    if (threadIdx.x < xTadLength && threadIdx.x < maxBlock) {
                        if (shape::order(xTadShapeInfo) == 'c') {
                            shape::ind2subC(xRank, xShape, threadIdx.x, xCoord);
                        } else {
                            shape::ind2sub(xRank, xShape, threadIdx.x, xCoord);
                        }

                        auto xO = shape::getOffset(0, xShape, xStride, xCoord, xRank);
                        tempX[threadIdx.x] = x[xO];
                    }

                    for (int g = 0; g < yTads; g++) {
                        T *y = dy + yOffsets[g];
                        int ri = (r * yTads) + g; // flat output index for this (x,y) pair

                        sPartials[threadIdx.x] = startingVal;
                        if (OpType::extraParamsLen > 0 && threadIdx.x < OpType::extraParamsLen) {
                            extraZ[threadIdx.x] = (T) startingVal;
                        }
                        __syncthreads();

                        // we might have data too large for single cache block, rendering cache useless though :(
                        for (int t = 0; t < limit; t++) {

                            // we reset tempX IF we have >1 tiles
                            if (t >= 1 || (limit > 1 && g > 0))
                                if (threadIdx.x + (t * maxBlock) < xTadLength) {
                                    if (shape::order(xTadShapeInfo) == 'c') {
                                        shape::ind2subC(xRank, xShape, threadIdx.x + (t * maxBlock), xCoord);
                                    } else {
                                        shape::ind2sub(xRank, xShape, threadIdx.x + (t * maxBlock), xCoord);
                                    }

                                    Nd4jLong xO = shape::getOffset(0, xShape, xStride, xCoord, xRank);
                                    tempX[threadIdx.x] = x[xO];
                                    // tempX[threadIdx.x] = x[threadIdx.x + (t * maxBlock)];
                                }

                            for (int f = threadIdx.x + (t * maxBlock); f < xTadLength && f < threadIdx.x + ((t + 1) * maxBlock); f += blockDim.x * gridDim.x) {
                                if (shape::order(yTadShapeInfo) == 'c') {
                                    shape::ind2subC(yRank, yShape, f, yCoord);
                                } else {
                                    shape::ind2sub(yRank, yShape, f, yCoord);
                                }

                                Nd4jLong yO = shape::getOffset(0, yShape, yStride, yCoord, yRank);
                                sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::opAtomic(tempX[threadIdx.x], y[yO], extraZ), extraZ);
                            }

                            // we MUST step through this block altogether
                            __syncthreads();
                        }

                        T **sPartialsRef = (T **) &sPartials;
                        aggregatePartials<OpType>(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, xTadLength), extraZ);
                        __syncthreads();

                        if (threadIdx.x == 0) {
                            result[ri] = OpType::postProcess(sPartials[threadIdx.x],xTadLength, extraZ);
                        }

                        __syncthreads();
                    }
                }
            }

            /**
            Perform a reduction
            @param n the number of elements
            @param xOffset the starting offset
            @param dx the data to perform the reduction on
            @param incx the increment on which to perform the reduction
            @param extraParams extra parameters used for calculations
            @param result where to store the result of the reduction
            */
            template<typename OpType>
            __device__ static inline void transform(
                    T *dx, Nd4jLong *xShapeInfo,
                    T *dy, Nd4jLong *yShapeInfo,
                    T *extraParams,
                    T *result, Nd4jLong *resultShapeInfo,
                    int *dimension, int dimensionLength,
                    int postProcessOrNot, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
                /**
                 * Gpu information for the problem
                 */
                int tid = threadIdx.x + blockIdx.x * blockDim.x;

                __shared__ int resultScalar;

                __shared__ int xElementWiseStride;
                __shared__ int yElementWiseStride;

                //shared memory space for storing intermediate results
                //SharedMemory <T> val;
                T *sPartials = (T *) manager->getSharedReductionBuffer(); //val.getPointer();

                T init = OpType::startingValue(dx);
                sPartials[threadIdx.x] = init;

                __shared__ T extraZ[OpType::extraParamsLen > 0 ? OpType::extraParamsLen : 1];

                //length for the tad

                __shared__ Nd4jLong resultLength;
                __shared__ int tadLength;
                __shared__ int yLength;

                __shared__ int tadElementWiseStride;
                __shared__ int yTadElementWiseStride;

                T startingVal = OpType::startingValue(dx);

                T reduction = OpType::startingValue(dx);
                if (threadIdx.x == 0) {
                    if (resultShapeInfo != nullptr)
                        resultLength = shape::length(resultShapeInfo);
                    else resultLength = 1;

                    if (dimensionLength == 1) {
                        if (dimension == nullptr || dimension[0] == MAX_DIMENSION)
                            resultScalar = 1;
                        else resultScalar = 0;
                    }
                    else resultScalar = 0;

                    if (resultLength == 1)
                        resultScalar = 1;

                    auto xStride = shape::stride(xShapeInfo);
                    char xOrder = shape::order(xShapeInfo);

                    tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
                    tadElementWiseStride = shape::elementWiseStride(tadOnlyShapeInfo);
                    yLength = shape::length(yShapeInfo);

                    if (yTadOnlyShapeInfo != nullptr)
                        yTadElementWiseStride = shape::elementWiseStride(yTadOnlyShapeInfo);
                }
                __syncthreads();

                // code branch for TAD vs full array
                if (tadLength == yLength) {
                    // each x TAD is reduced against the whole of y
                    Nd4jLong xCoord[MAX_RANK];
                    Nd4jLong yCoord[MAX_RANK];

                    auto yShape = shape::shapeOf(yShapeInfo);
                    auto yStride = shape::stride(yShapeInfo);

                    auto xShape = shape::shapeOf(tadOnlyShapeInfo);
                    auto xStride = shape::stride(tadOnlyShapeInfo);

                    int yRank = shape::rank(yShapeInfo);
                    int xRank = shape::rank(tadOnlyShapeInfo);

                    for(int i = blockIdx.x; i < resultLength; i+= gridDim.x) {
                        int xOffsetForTad = tadOffsets[i];

                        if (OpType::extraParamsLen > 0 && threadIdx.x < OpType::extraParamsLen) {
                            extraZ[threadIdx.x] = (T) startingVal;
                        }
                        __syncthreads();

                        for(int j = threadIdx.x; j < tadLength; j += blockDim.x) {
                            shape::ind2subC(xRank,xShape, j, xCoord);
                            shape::ind2subC(yRank,yShape, j, yCoord);

                            Nd4jLong xOffset = shape::getOffset(xOffsetForTad, xShape, xStride, xCoord, xRank);
                            Nd4jLong yOffset = shape::getOffset(0, yShape, yStride, yCoord, yRank);

                            sPartials[threadIdx.x] = j < blockDim.x ? OpType::opAtomic(dx[xOffset],dy[yOffset], extraZ) : OpType::update(sPartials[threadIdx.x], OpType::opAtomic(dx[xOffset],dy[yOffset], extraZ), extraZ);
                        }
                        __syncthreads();

                        T **sPartialsRef = (T **) &sPartials;
                        aggregatePartials<OpType>(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraZ);

                        __syncthreads();
                        if (threadIdx.x == 0)
                            result[i] = OpType::postProcess(sPartials[threadIdx.x],tadLength, extraZ);

                        __syncthreads();
                    }
                }
                else if (!resultScalar) {
                    // paired TAD reduction: x TAD i against y TAD i
                    if(tadElementWiseStride >= 1 && yTadElementWiseStride) {
                        // strided fast path
                        for(int i = blockIdx.x; i < resultLength; i+= gridDim.x) {
                            int xOffsetForTad = tadOffsets[i];
                            int yOffsetForTad = yTadOffsets[i];

                            if (OpType::extraParamsLen > 0 && threadIdx.x < OpType::extraParamsLen) {
                                extraZ[threadIdx.x] = (T) startingVal;
                            }
                            __syncthreads();

                            if (threadIdx.x < tadLength)
                                sPartials[threadIdx.x] = OpType::op(dx[xOffsetForTad + tadElementWiseStride * threadIdx.x],dy[yOffsetForTad + yTadElementWiseStride * threadIdx.x], extraZ);

                            for(int j = threadIdx.x + blockDim.x; j < tadLength; j += blockDim.x) {
                                sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[xOffsetForTad + tadElementWiseStride * j],dy[yOffsetForTad + yTadElementWiseStride * j], extraZ), extraZ);
                            }
                            __syncthreads();

                            T **sPartialsRef = (T **) &sPartials;
                            aggregatePartials<OpType>(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraZ);

                            __syncthreads();
                            if (threadIdx.x == 0)
                                result[i] = OpType::postProcess(sPartials[threadIdx.x],tadLength, extraZ);

                            __syncthreads();
                        }
                    }
                    else {
/*
                        // DO NOT REMOVE THIS COMMENTED BLOCK PLEASE

                        for (int r = blockIdx.x; r < tad->numTads; r += gridDim.x) {
                            if (threadIdx.x == 0)
                                tad->createOffsetForBlock(r);
                            __syncthreads();

                            int tadOffsetForBlock = tad->tadOffsetForBlock;
                            T *xVal = dx + tadOffsetForBlock;


                            sPartials[threadIdx.x] = this->startingValue(xVal);
                            for(int i = threadIdx.x; i < tad->tadLength; i+= blockDim.x) {
                                int xOffsetForTad = shape::tadOffset(i, xShapeInfo, dimension, dimensionLength, nullptr);
                                int yOffsetForTad = shape::tadOffset(i, yShapeInfo, dimension, dimensionLength, nullptr);

                                sPartials[threadIdx.x] = this->update(sPartials[threadIdx.x],dx[tadOffsetForBlock + i * tad->tadElementWiseStride], extraParams);
                            }
                            __syncthreads();

                            // aggregate. do NOT reduce for elements > tadLength
                            T **sPartialsRef = (T **) &sPartials;
                            aggregatePartials(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tad->tadLength), extraParams);


                            __syncthreads();
                            if (threadIdx.x == 0)
                                result[r] = this->postProcess(sPartials[threadIdx.x], tad->tadLength, extraParams);
                        }
*/
                        // coordinate-based fallback for non-strided TADs
                        Nd4jLong xCoord[MAX_RANK];
                        Nd4jLong yCoord[MAX_RANK];

                        auto yShape = shape::shapeOf(yTadOnlyShapeInfo);
                        auto yStride = shape::stride(yTadOnlyShapeInfo);

                        auto xShape = shape::shapeOf(tadOnlyShapeInfo);
                        auto xStride = shape::stride(tadOnlyShapeInfo);

                        int yRank = shape::rank(yTadOnlyShapeInfo);
                        int xRank = shape::rank(tadOnlyShapeInfo);

                        for(int i = blockIdx.x; i < resultLength; i+= gridDim.x) {
                            auto xOffsetForTad = tadOffsets[i];
                            auto yOffsetForTad = yTadOffsets[i];

                            if (OpType::extraParamsLen > 0 && threadIdx.x < OpType::extraParamsLen) {
                                extraZ[threadIdx.x] = (T) startingVal;
                            }
                            __syncthreads();

                            for(int j = threadIdx.x; j < tadLength; j += blockDim.x) {
                                shape::ind2subC(xRank,xShape, j, xCoord);
                                shape::ind2subC(yRank,yShape, j, yCoord);

                                auto xOffset = shape::getOffset(xOffsetForTad, xShape, xStride, xCoord, xRank);
                                auto yOffset = shape::getOffset(yOffsetForTad, yShape, yStride, yCoord, yRank);

                                sPartials[threadIdx.x] = j < blockDim.x ? OpType::opAtomic(dx[xOffset],dy[yOffset], extraZ) : OpType::update(sPartials[threadIdx.x], OpType::opAtomic(dx[xOffset],dy[yOffset], extraZ), extraZ);
                            }
                            __syncthreads();

                            T **sPartialsRef = (T **) &sPartials;
                            aggregatePartials<OpType>(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraZ);

                            __syncthreads();
                            if (threadIdx.x == 0)
                                result[i] = OpType::postProcess(sPartials[threadIdx.x],tadLength, extraZ);

                            __syncthreads();
                        }
                    }
                }
            }
#endif

#ifdef __CUDACC__
            // op-number dispatch wrapper for transform (paired-TAD reduction)
            __device__ static inline void exec(
                    const int opNum,
                    T *dx,
                    Nd4jLong *xShapeInfo,
                    T *dy,
                    Nd4jLong *yShapeInfo,
                    T *extraParams,
                    T *result,
                    Nd4jLong *resultShapeInfo,
                    int *dimension,
                    int dimensionLength,
                    int postProcessOrNot,
                    int *allocationPointer,
                    UnifiedSharedMemory *manager,
                    Nd4jLong *tadOnlyShapeInfo,
                    Nd4jLong *tadOffsets,
                    Nd4jLong *yTadOnlyShapeInfo,
                    Nd4jLong *yTadOffsets) {
                DISPATCH_BY_OPNUM(transform, PARAMS(dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationPointer, manager, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), REDUCE3_OPS);
            }

            // op-number dispatch wrapper for transformAll (all-pairs reduction)
            __device__ static inline void execAllCuda(
                    const int opNum,
                    T *dx,
                    Nd4jLong *xShapeInfo,
                    T *dy,
                    Nd4jLong *yShapeInfo,
                    T *extraParams,
                    T *result,
                    Nd4jLong *resultShapeInfo,
                    int *dimension,
                    int dimensionLength,
                    int postProcessOrNot,
                    int *allocationPointer,
                    UnifiedSharedMemory *manager,
                    Nd4jLong *tadOnlyShapeInfo,
                    Nd4jLong *tadOffsets,
                    Nd4jLong *yTadOnlyShapeInfo,
                    Nd4jLong *yTadOffsets) {
                DISPATCH_BY_OPNUM(transformAll, PARAMS(dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationPointer, manager, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo,
yTadOffsets), REDUCE3_OPS); } // tail of execAllCuda: closes the DISPATCH_BY_OPNUM argument list begun on the previous line

            // Device-side op-number dispatcher for the scalar (whole-array) reduce3 path.
            // Expands into a switch over REDUCE3_OPS and forwards all arguments to the
            // selected OpType's execScalarCuda.
            __device__ static inline void execScalarCuda(
                    const int opNum,
                    T *dx, Nd4jLong *xShapeInfo,
                    T *dy, Nd4jLong *yShapeInfo,
                    T *extraParams,
                    T *result, Nd4jLong *resultShapeInfo,
                    int * allocationPointer, T *reductionBuffer,
                    UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) {
                DISPATCH_BY_OPNUM(execScalarCuda, PARAMS(dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, allocationPointer, reductionBuffer, manager, tadOnlyShapeInfo), REDUCE3_OPS);
            }
#endif

            // Host-side scalar dispatcher: reduces the whole x/y pair to a single value
            // and returns it. RETURNING_DISPATCH_BY_OPNUM selects the OpType by opNum.
#ifdef __CUDACC__
            __host__
#endif
            static T execScalar(
                    const int opNum,
                    T *x, Nd4jLong *xShapeInfo,
                    T *extraParamsVals,
                    T *y, Nd4jLong *yShapeInfo) {
                RETURNING_DISPATCH_BY_OPNUM(execScalar, PARAMS(x, xShapeInfo, extraParamsVals, y, yShapeInfo), REDUCE3_OPS);
            }

            // Host-side dimensional reduce dispatcher (TAD info computed internally).
            static void exec(
                    const int opNum,
                    T *x, Nd4jLong *xShapeInfo,
                    T *extraParamsVals,
                    T *y, Nd4jLong *yShapeInfo,
                    T *result, Nd4jLong *resultShapeInfoBuffer,
                    int *dimension, int dimensionLength) {
                DISPATCH_BY_OPNUM(exec, PARAMS(x, xShapeInfo, extraParamsVals, y, yShapeInfo, result, resultShapeInfoBuffer, dimension, dimensionLength), REDUCE3_OPS);
            }

            // Host-side dimensional reduce dispatcher with caller-supplied (precomputed)
            // TAD shape info and offsets.
            static void exec(
                    const int opNum,
                    T *x, Nd4jLong *xShapeInfo,
                    T *extraParamsVals,
                    T *y, Nd4jLong *yShapeInfo,
                    T *result, Nd4jLong *resultShapeInfoBuffer,
                    int *dimension, int dimensionLength,
                    Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
                DISPATCH_BY_OPNUM(exec, PARAMS(x, xShapeInfo, extraParamsVals, y, yShapeInfo, result, resultShapeInfoBuffer, dimension, dimensionLength, tadShapeInfo, tadOffsets), REDUCE3_OPS);
            }

            // Host-side all-pairs dispatcher: reduces every x TAD against every y TAD,
            // using precomputed TAD shape info/offsets for both sides.
            static void execAll(
                    const int opNum,
                    T *x, Nd4jLong *xShapeInfo,
                    T *extraParamsVals,
                    T *y, Nd4jLong *yShapeInfo,
                    T *result, Nd4jLong *resultShapeInfoBuffer,
                    int *dimension, int dimensionLength,
                    Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets,
                    Nd4jLong *yTadShapeInfo, Nd4jLong *yOffsets) {
                DISPATCH_BY_OPNUM(execAll, PARAMS(x, xShapeInfo, extraParamsVals, y, yShapeInfo, result, resultShapeInfoBuffer, dimension, dimensionLength, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets), REDUCE3_OPS);
            }

            // Scalar reduce3 implementation for a concrete OpType: folds every x[i]/y[i]
            // pair through OpType::op/update and finishes with OpType::postProcess.
            // Fast paths cover matching orders with positive element-wise strides;
            // otherwise falls back to full coordinate translation per element.
            template<typename OpType>
#ifdef __CUDACC__
            __host__
#endif
            static T execScalar(
                    T *x, Nd4jLong *xShapeInfo,
                    T *extraParams,
                    T *y, Nd4jLong *yShapeInfo) {
                T startingVal = OpType::startingValue(x);
                Nd4jLong length = shape::length(xShapeInfo);
                Nd4jLong xElementWiseStride = shape::elementWiseStride(xShapeInfo);
                Nd4jLong yElementWiseStride = shape::elementWiseStride(yShapeInfo);

                T extraParamsVals[3] = {(T) 0.0, (T) 0.0, (T) 0.0};
                // it's possible case for EqualsWithEps op
                if (extraParams != nullptr) {
                    extraParamsVals[2] = extraParams[0];
                }

                char xOrder = shape::order(xShapeInfo);
                char yOrder = shape::order(yShapeInfo);

                // Fast path: both buffers iterable linearly in the same order.
                if(xOrder == yOrder && (xElementWiseStride >=1 && yElementWiseStride >= 1) && shape::strideDescendingCAscendingF(xShapeInfo) && shape::strideDescendingCAscendingF(yShapeInfo)) {
                    if (xElementWiseStride == 1 && yElementWiseStride == 1) {
                        // TODO:: proper reduction required here
                        for(int i = 0; i < length; i++) {
                            startingVal = OpType::update(startingVal, OpType::op(x[i],y[i], extraParamsVals), extraParamsVals);
                        }

                        return OpType::postProcess(startingVal, length, extraParamsVals);
                    } else {
                        // Strided but still linear access.
                        // TODO:: proper reduction required here
                        for(Nd4jLong i = 0; i < length; i++) {
                            startingVal = OpType::update(startingVal, OpType::op(x[i * xElementWiseStride],y[i * yElementWiseStride], extraParamsVals), extraParamsVals);
                        }

                        return OpType::postProcess(startingVal, length, extraParamsVals);
                    }
                } else {
                    // General path: translate each linear index to coordinates for both
                    // arrays, then to buffer offsets.
                    Nd4jLong xCoords[MAX_RANK];
                    Nd4jLong yCoords[MAX_RANK];

                    int xRank = shape::rank(xShapeInfo);
                    int yRank = shape::rank(yShapeInfo);

                    Nd4jLong *xShape = shape::shapeOf(xShapeInfo);
                    Nd4jLong *xStride = shape::stride(xShapeInfo);

                    Nd4jLong *yShape = shape::shapeOf(yShapeInfo);
                    Nd4jLong *yStride = shape::stride(yShapeInfo);

                    for(unsigned int i = 0 ;i < length; i++) {
                        shape::ind2subC(xRank, xShape, i, xCoords);
                        shape::ind2subC(yRank, yShape, i, yCoords);

                        Nd4jLong offset = shape::getOffset(0, xShape, xStride, xCoords, xRank);
                        Nd4jLong yOffset =
shape::getOffset(0, yShape, yStride, yCoords, yRank); startingVal = OpType::update(startingVal, OpType::op(x[offset], y[yOffset], extraParamsVals), extraParamsVals); } } return OpType::postProcess(startingVal, length, extraParamsVals);; } template<typename OpType> static void execAll( T *x, Nd4jLong *xShapeInfo, T *extraParams, T *y, Nd4jLong *yShapeInfo, T *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, Nd4jLong *yTadShapeInfo, Nd4jLong *yOffsets) { auto xTadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); auto yTadLength = shape::tadLength(yShapeInfo, dimension, dimensionLength); auto xTads = shape::length(xShapeInfo) / xTadLength; auto yTads = shape::length(yShapeInfo) / yTadLength; auto xShape = shape::shapeOf(xTadShapeInfo); auto xStride = shape::stride(xTadShapeInfo); int xRank = shape::rank(xTadShapeInfo); auto yShape = shape::shapeOf(yTadShapeInfo); auto yStride = shape::stride(yTadShapeInfo); int yRank = shape::rank(yTadShapeInfo); Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; T startingVal = OpType::startingValue(x); #pragma omp parallel for proc_bind(AFFINITY) default(shared) private(xCoord, yCoord) for (Nd4jLong r = 0; r < xTads; r++) { Nd4jLong xOffset = xOffsets[r]; T *lX = x + xOffset; for (Nd4jLong g = 0; g < yTads; g++) { auto yOffset = yOffsets[g]; T *lY = y + yOffset; auto ri = (r * yTads) + g; T *localExtraParams = nullptr; if (OpType::extraParamsLen > 0) localExtraParams = new T[OpType::extraParamsLen]; for (int extraParamsIdx = 0; extraParamsIdx < OpType::extraParamsLen; extraParamsIdx++) { localExtraParams[extraParamsIdx] = startingVal; } for (int f = 0; f < xTadLength; f++) { if (shape::order(yTadShapeInfo) == 'c') { shape::ind2subC(yRank, yShape, f, yCoord); } else { shape::ind2sub(yRank, yShape, f, yCoord); } if (shape::order(xTadShapeInfo) == 'c') { shape::ind2subC(xRank, xShape, f, xCoord); } else { shape::ind2sub(xRank, xShape, f, 
xCoord); } Nd4jLong xO = shape::getOffset(0, xShape, xStride, xCoord, xRank); Nd4jLong yO = shape::getOffset(0, yShape, yStride, yCoord, yRank); result[ri] = OpType::update(result[ri], OpType::op(lX[xO], lY[yO], localExtraParams), localExtraParams); } result[ri] = OpType::postProcess(result[ri], xTadLength, localExtraParams); if (localExtraParams != nullptr) delete[] localExtraParams; } } } template<typename OpType> static void exec( T *x, Nd4jLong *xShapeInfo, T *extraParams, T *y, Nd4jLong *yShapeInfo, T *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { /* nd4j_printf("Xp: [%p]; Yp: [%p]; Zp: [%p];\n", (void *) x, (void *) y, (void *) result); nd4j_printf("XSp: [%p]; YSp: [%p]; ZSp: [%p];\n", (void *) xShapeInfo, (void *) yShapeInfo, (void *) resultShapeInfoBuffer); nd4j_printf("Ep: [%p]; Dp: [%p]\n", (void *) extraParams, (void *) dimension); nd4j_printf("TSp: [%p]; TOp: [%p]\n", (void *) tadShapeInfo, (void *) tadOffsets); nd4j_printf("X[0]: %f\n", x[0]); nd4j_printf("Y[0]: %f\n", y[0]); nd4j_printf("Z[0]: %f\n", result[0]); nd4j_printf("XS[0]: %i\n", xShapeInfo[0]); nd4j_printf("YS[0]: %i\n", yShapeInfo[0]); nd4j_printf("ZS[0]: %i\n", resultShapeInfoBuffer[0]); nd4j_printf("E[0]: %f\n", extraParams[0]); nd4j_printf("D[0]: %i\n", dimension[0]); nd4j_printf("TS[0]: %i\n", tadShapeInfo[0]); nd4j_printf("TO[0]: %lld\n", tadOffsets[0]); nd4j_printf("dimLength: %i\n", dimensionLength); */ T startingVal = OpType::startingValue(x); auto tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); auto tads = shape::length(xShapeInfo) / tadLength; auto *xShape = shape::shapeOf(tadShapeInfo); auto *xStride = shape::stride(tadShapeInfo); int xRank = shape::rank(tadShapeInfo); auto *yShape = shape::shapeOf(yShapeInfo); auto *yStride = shape::stride(yShapeInfo); int yRank = shape::rank(yShapeInfo); //shape::printShapeInfoLinear(xShapeInfo); //shape::printShapeInfoLinear(yShapeInfo); 
//shape::printShapeInfoLinear(resultShapeInfoBuffer); //shape::printShapeInfoLinear(tadShapeInfo); Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; //#pragma omp parallel for proc_bind(AFFINITY) default(shared) for (Nd4jLong r = 0; r < tads; r++) { Nd4jLong offset = tadOffsets[r]; T *localExtraParams = nullptr; if (OpType::extraParamsLen > 0) localExtraParams = new T[OpType::extraParamsLen]; for (int extraParamsIdx = 0; extraParamsIdx < OpType::extraParamsLen; extraParamsIdx++) { localExtraParams[extraParamsIdx] = startingVal; } for (Nd4jLong f = 0; f < tadLength; f++) { if (shape::order(tadShapeInfo) == 'c') { shape::ind2subC(xRank, xShape, f, xCoord); shape::ind2subC(yRank, yShape, f, yCoord); } else { shape::ind2sub(xRank, xShape, f, xCoord); shape::ind2sub(yRank, yShape, f, yCoord); } Nd4jLong xOffset = shape::getOffset(offset, xShape, xStride, xCoord, xRank); Nd4jLong yOffset = shape::getOffset(0, yShape, yStride, yCoord, yRank); result[r] = OpType::update(result[r], OpType::op(x[xOffset], y[yOffset], localExtraParams), localExtraParams); } result[r] = OpType::postProcess(result[r], tadLength, localExtraParams); if (localExtraParams != nullptr) delete[] localExtraParams; } } template<typename OpType> static void exec( T *x, Nd4jLong *xShapeInfo, T *extraParams, T *y, Nd4jLong *yShapeInfo, T *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength) { /* nd4j_printf("Xp: [%p]; Yp: [%p]; Zp: [%p];\n", (void *) x, (void *) y, (void *) result); nd4j_printf("XSp: [%p]; YSp: [%p]; ZSp: [%p];\n", (void *) xShapeInfo, (void *) yShapeInfo, (void *) resultShapeInfoBuffer); nd4j_printf("Ep: [%p]; Dp: [%p]\n", (void *) extraParams, (void *) dimension); nd4j_printf("X[0]: %f\n", x[0]); nd4j_printf("Y[0]: %f\n", y[0]); nd4j_printf("Z[0]: %f\n", result[0]); nd4j_printf("XS[0]: %i\n", xShapeInfo[0]); nd4j_printf("YS[0]: %i\n", yShapeInfo[0]); nd4j_printf("ZS[0]: %i\n", resultShapeInfoBuffer[0]); nd4j_printf("E[0]: %f\n", extraParams[0]); 
nd4j_printf("D[0]: %i\n", dimension[0]); nd4j_printf("dimLength: %i\n", dimensionLength); */ T extraParamsVals[3] = {(T) 0.0, (T) 0.0, (T) 0.0}; if(shape::isScalar(resultShapeInfoBuffer)) { result[0] = execScalar<OpType>( x, xShapeInfo, extraParamsVals, y, yShapeInfo); return; } char xOrder = shape::order(xShapeInfo); char yOrder = shape::order(yShapeInfo); if(xOrder != yOrder) { Nd4jLong shapeIter[MAX_RANK]; Nd4jLong coord[MAX_RANK]; int dim; Nd4jLong xStridesIter[MAX_RANK]; Nd4jLong yStridesIter[MAX_RANK]; auto xShape = shape::shapeOf(xShapeInfo); auto xStride = shape::stride(xShapeInfo); auto yStride = shape::stride(yShapeInfo); int rank = shape::rank(xShapeInfo); if(PrepareTwoRawArrayIter<T>(rank, xShape, x, xStride, y, yStride, &rank, shapeIter, &x, xStridesIter, &y, yStridesIter) >= 0) { Nd4jLong resultLength = shape::length(resultShapeInfoBuffer); Nd4jLong tadLength = shape::tadLength(xShapeInfo,dimension,dimensionLength); ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); { Nd4jLong xOffset = shape::getOffset(0,xShape,xStride,coord,rank); auto reductionIndex = xOffset / resultLength; result[reductionIndex] = OpType::update(result[reductionIndex], OpType::op(x[0],y[0], extraParamsVals), extraParamsVals); } ND4J_RAW_ITER_TWO_NEXT(dim, rank, coord, shapeIter, x, xStridesIter, y, yStridesIter); //#pragma omp parallel for proc_bind(AFFINITY) default(shared) for(Nd4jLong i = 0; i < resultLength ;i++) { result[i] = OpType::postProcess(result[i],tadLength, extraParamsVals); } } else { printf("Unable to prepare array\n"); } } else { T startingVal = OpType::startingValue(x); Nd4jLong resultLength = shape::length(resultShapeInfoBuffer); shape::TAD xTad(xShapeInfo, dimension, dimensionLength); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); shape::TAD yTad(yShapeInfo, dimension, dimensionLength); yTad.createTadOnlyShapeInfo(); yTad.createOffsets(); /** * The element wise stride belong longs to a reduction index. 
* When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along long arr * we can use arr.stride(1) as a representation * along long which to iterate. */ int largerElementWiseStride; int smallerElementWiseStride; auto xElementWiseStride = shape::elementWiseStride(xTad.tadOnlyShapeInfo); auto yElementWiseStride = shape::elementWiseStride(yTad.tadOnlyShapeInfo); int tadLength; Nd4jLong xModLength; Nd4jLong yModLength; Nd4jLong *iterationTadInfo; bool xTadBigger; if(shape::length(xShapeInfo) > shape::length(yShapeInfo)) { tadLength = shape::length(xTad.tadOnlyShapeInfo); iterationTadInfo = xTad.tadOnlyShapeInfo; largerElementWiseStride = shape::elementWiseStride(xShapeInfo); smallerElementWiseStride = shape::elementWiseStride(yShapeInfo); xModLength = 1; yModLength = tadLength; xTadBigger = true; } else { tadLength = shape::length(yTad.tadOnlyShapeInfo); iterationTadInfo = yTad.tadOnlyShapeInfo; largerElementWiseStride = shape::elementWiseStride(yShapeInfo); smallerElementWiseStride = shape::elementWiseStride(xShapeInfo); xModLength = tadLength; yModLength = 1; xTadBigger = false; } if (largerElementWiseStride >= 1 && smallerElementWiseStride >= 1 && xElementWiseStride >= 1 && yElementWiseStride >= 1) { if(shape::length(xShapeInfo) == shape::length(yShapeInfo)) { //#pragma omp parallel for proc_bind(AFFINITY) default(shared) for (Nd4jLong i = 0; i < resultLength; i++) { T *localExtraParams = nullptr; if (OpType::extraParamsLen > 0) localExtraParams = new T[OpType::extraParamsLen]; for (int extraParamsIdx = 0; extraParamsIdx < OpType::extraParamsLen; extraParamsIdx++) { localExtraParams[extraParamsIdx] = startingVal; } Nd4jLong offset = xTad.tadOffsets[i]; Nd4jLong yOffset = yTad.tadOffsets[i]; result[i] = OpType::op(x[offset], y[yOffset], localExtraParams); for (int j = 1; j < tadLength; j++) { int xIdx = (offset + xElementWiseStride * j); int yIdx = (yOffset + 
yElementWiseStride * j); result[i] = OpType::update(result[i], OpType::op(x[xIdx], y[yIdx], localExtraParams), localExtraParams); } result[i] = OpType::postProcess(result[i], tadLength, localExtraParams); if (localExtraParams != nullptr) delete[] localExtraParams; } } else { int tadsPerThread = resultLength / TAD_THRESHOLD; int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread); num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads()); //#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared) for (int i = 0; i < resultLength; i++) { Nd4jLong xOffset = xTadBigger ? xTad.tadOffsets[i] : 0; Nd4jLong yOffset = !xTadBigger ? yTad.tadOffsets[i] : 0; auto xShape = xTadBigger ? xTad.tadShape : shape::shapeOf(xShapeInfo); auto yShape = !xTadBigger ? yTad.tadShape : shape::shapeOf(yShapeInfo); auto xStride = xTadBigger ? xTad.tadStride : shape::stride(xShapeInfo); auto yStride = !xTadBigger ? yTad.tadStride : shape::stride(yShapeInfo); int xRank = xTadBigger ? shape::rank(xTad.tadOnlyShapeInfo) : shape::rank(xShapeInfo); int yRank = !xTadBigger ? 
shape::rank(yTad.tadOnlyShapeInfo) : shape::rank(yShapeInfo); Nd4jLong coord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; T start = 0.0; for (int j = 0; j < tadLength; j++) { if(xTadBigger) { shape::ind2subC(shape::rank(xTad.tadOnlyShapeInfo), xTad.tadStride, j, coord); shape::ind2subC(shape::rank(yShapeInfo), shape::shapeOf(yShapeInfo), j, yCoord); } else { shape::ind2subC(shape::rank(xShapeInfo), shape::shapeOf(xShapeInfo), j, coord); shape::ind2subC(shape::rank(yTad.tadOnlyShapeInfo), yTad.tadShape, j, yCoord); } int xOffset2 = shape::getOffset(xOffset,xShape,xStride,coord,xRank); int yOffset2 = shape::getOffset(yOffset,yShape,yStride,yCoord,yRank); start = OpType::update(start, OpType::op(x[xOffset2], y[yOffset2],extraParams), extraParamsVals); } result[i] = OpType::postProcess(start, shape::length(iterationTadInfo), extraParamsVals); } } } else { shape::TAD xTad(xShapeInfo, dimension, dimensionLength); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); shape::TAD yTad(yShapeInfo, dimension, dimensionLength); yTad.createTadOnlyShapeInfo(); yTad.createOffsets(); int tadsPerThread = resultLength / TAD_THRESHOLD; int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread); num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads()); Nd4jLong coord[MAX_RANK]; //#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared) private(coord) for (int i = 0; i < resultLength; i++) { Nd4jLong xOffset = xTad.tadOffsets[i]; Nd4jLong yOffset = yTad.tadOffsets[i]; T start = OpType::startingValue(x + xOffset); for (int j = 0; j < tadLength; j++) { shape::ind2subC(shape::rank(iterationTadInfo), shape::shapeOf(iterationTadInfo), j, coord); Nd4jLong xOffset2 = shape::getOffset(xOffset,shape::shapeOf(xTad.tadOnlyShapeInfo),shape::stride(xTad.tadOnlyShapeInfo),coord,shape::rank(xTad.tadOnlyShapeInfo)); Nd4jLong yOffset2 = 
shape::getOffset(yOffset,shape::shapeOf(yTad.tadOnlyShapeInfo),shape::stride(yTad.tadOnlyShapeInfo),coord,shape::rank(yTad.tadOnlyShapeInfo)); // completes the yOffset2 assignment begun on the previous line
                            start = OpType::update(start, OpType::op(x[xOffset2], y[yOffset2],extraParamsVals), extraParamsVals);
                        }

                        result[i] = OpType::postProcess(start, shape::length(iterationTadInfo), extraParamsVals);
                    }
                }    // end non-ews fallback branch
            }        // end same-order branch
        }            // end exec<OpType>
};                   // end class Reduce3
}                    // end namespace reduce3
}                    // end namespace functions

#ifdef __CUDACC__

/**
 * Device driver for the dimensional reduce3 path.
 * Places a UnifiedSharedMemory manager into dynamic shared memory (thread 0 only,
 * then a barrier so every thread sees it) and forwards to Reduce3<T>::exec.
 *
 * @param opNum operation number selecting the concrete reduce3 op
 * @param dx the input data
 * @param xShapeInfo the shape information for x
 * @param dy the pair-wise input
 * @param yShapeInfo the shape information for y
 * @param extraParams the extra parameters for the operation
 * @param result where to store the result
 * @param resultShapeInfo the shape information for the result
 * @param dimension the dimension(s) to reduce along
 * @param dimensionLength the number of dimensions
 * @param postProcessOrNot whether to post-process
 */
template <typename T>
__device__ void reduce3Generic(
        const int opNum,
        T *dx, Nd4jLong *xShapeInfo,
        T *dy, Nd4jLong *yShapeInfo,
        T *extraParams,
        T *result, Nd4jLong *resultShapeInfo,
        int *dimension, int dimensionLength,
        int postProcessOrNot,
        int *allocationPointer,
        Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
        Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {

    __shared__ UnifiedSharedMemory *manager;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        // Placement-new the manager into the block's dynamic shared memory.
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::reduce3::Reduce3<T>), sizeof(shape::TAD), shape::rank(xShapeInfo));
    }
    __syncthreads();

    functions::reduce3::Reduce3<T>::exec(
            opNum,
            dx,
            xShapeInfo,
            dy,
            yShapeInfo,
            extraParams,
            result,
            resultShapeInfo,
            dimension,
            dimensionLength,
            postProcessOrNot,
            allocationPointer,
            manager,
            tadOnlyShapeInfo,
            tadOffsets,
            yTadOnlyShapeInfo,
            yTadOffsets);
}

/**
 * Device driver for the all-pairs reduce3 path (every x TAD vs every y TAD).
 * Same shared-memory setup as reduce3Generic, then forwards to
 * Reduce3<T>::execAllCuda.
 */
template <typename T>
__device__ void reduce3AllGeneric(
        const int opNum,
        T *dx, Nd4jLong *xShapeInfo,
        T *dy, Nd4jLong *yShapeInfo,
        T *extraParams,
        T *result, Nd4jLong *resultShapeInfo,
        int *dimension, int dimensionLength,
        int postProcessOrNot,
        int *allocationPointer,
        Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
        Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {

    __shared__ UnifiedSharedMemory *manager;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::reduce3::Reduce3<T>), sizeof(shape::TAD), shape::rank(xShapeInfo));
    }
    __syncthreads();

    functions::reduce3::Reduce3<T>::execAllCuda(
            opNum,
            dx,
            xShapeInfo,
            dy,
            yShapeInfo,
            extraParams,
            result,
            resultShapeInfo,
            dimension,
            dimensionLength,
            postProcessOrNot,
            allocationPointer,
            manager,
            tadOnlyShapeInfo,
            tadOffsets,
            yTadOnlyShapeInfo,
            yTadOffsets);
}

/**
 * Device driver for the scalar reduce3 path (whole x vs whole y -> one value).
 * Note: tadOffsets and both yTad* arguments are accepted but not forwarded;
 * execScalarCuda only takes tadOnlyShapeInfo.
 */
template <typename T>
__device__ void reduce3ScalarGeneric(
        int opNum,
        T *dx, Nd4jLong *xShapeInfo,
        T *dy, Nd4jLong *yShapeInfo,
        T *extraParams,
        T *result, Nd4jLong *resultShapeInfo,
        int *allocationPointer,
        T *reductionBuffer,
        Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
        Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {

    __shared__ UnifiedSharedMemory *manager;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::reduce3::Reduce3<T>), sizeof(shape::TAD), shape::rank(xShapeInfo));
    }
    __syncthreads();

    functions::reduce3::Reduce3<T>::execScalarCuda(
            opNum,
            dx,
            xShapeInfo,
            dy,
            yShapeInfo,
            extraParams,
            result,
            resultShapeInfo,
            allocationPointer,
            reductionBuffer,
            manager,
            tadOnlyShapeInfo);
}

/**
 * The driver api
 * @param opNum the operation number
 * @param n the length of the reduce
 * @param dx the input data
 * @param xShapeInfo the shape information
 * @param dy the pair wise reduce
 * @param yShapeInfo the shape information for y
 * @param extraParams the extra parameters in the operation
 * @param result where to store the result
 * @param resultShapeInfo the shape information
 * @param dimension the dimension to reduce along
 * @param dimensionLength the dimension length
 * @param postProcessOrNot whether to post-process
 */
// Concrete kernel entry points: thin extern "C" __global__ wrappers that
// instantiate the templated device drivers for double / float / float16.
extern "C" __global__ void reduce3Double(
        int opNum,
        double *dx, Nd4jLong *xShapeInfo,
        double *dy, Nd4jLong *yShapeInfo,
        double *extraParams,
        double *result, Nd4jLong *resultShapeInfo,
        int *dimension, int dimensionLength,
        int postProcessOrNot,
        int *allocationPointer,
        Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
        Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    reduce3Generic<double>(
            opNum,
            dx, xShapeInfo,
            dy, yShapeInfo,
            extraParams,
            result, resultShapeInfo,
            dimension, dimensionLength,
            postProcessOrNot,
            allocationPointer,
            tadOnlyShapeInfo, tadOffsets,
            yTadOnlyShapeInfo, yTadOffsets);
}

extern "C" __global__ void reduce3AllDouble(
        int opNum,
        double *dx, Nd4jLong *xShapeInfo,
        double *dy, Nd4jLong *yShapeInfo,
        double *extraParams,
        double *result, Nd4jLong *resultShapeInfo,
        int *dimension, int dimensionLength,
        int postProcessOrNot,
        int *allocationPointer,
        Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
        Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    reduce3AllGeneric<double>(
            opNum,
            dx, xShapeInfo,
            dy, yShapeInfo,
            extraParams,
            result, resultShapeInfo,
            dimension, dimensionLength,
            postProcessOrNot,
            allocationPointer,
            tadOnlyShapeInfo, tadOffsets,
            yTadOnlyShapeInfo, yTadOffsets);
}

/**
 * Float kernel entry points — same contract as the double variants above:
 * forward every launch argument to the matching templated device driver.
 */
extern "C" __global__ void reduce3Float(
        int opNum,
        float *dx, Nd4jLong *xShapeInfo,
        float *dy, Nd4jLong *yShapeInfo,
        float *extraParams,
        float *result, Nd4jLong *resultShapeInfo,
        int *dimension, int dimensionLength,
        int postProcessOrNot,
        int *allocationPointer,
        Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
        Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    reduce3Generic<float>(
            opNum,
            dx, xShapeInfo,
            dy, yShapeInfo,
            extraParams,
            result, resultShapeInfo,
            dimension, dimensionLength,
            postProcessOrNot,
            allocationPointer,
            tadOnlyShapeInfo, tadOffsets,
            yTadOnlyShapeInfo, yTadOffsets);
}

extern "C" __global__ void reduce3AllFloat(
        int opNum,
        float *dx, Nd4jLong *xShapeInfo,
        float *dy, Nd4jLong *yShapeInfo,
        float *extraParams,
        float *result, Nd4jLong *resultShapeInfo,
        int *dimension, int dimensionLength,
        int postProcessOrNot,
        int *allocationPointer,
        Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
        Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    reduce3AllGeneric<float>(
            opNum,
            dx, xShapeInfo,
            dy, yShapeInfo,
            extraParams,
            result, resultShapeInfo,
            dimension, dimensionLength,
            postProcessOrNot,
            allocationPointer,
            tadOnlyShapeInfo, tadOffsets,
            yTadOnlyShapeInfo, yTadOffsets);
}

extern "C" __global__ void reduce3Half(
        int opNum,
        float16 *dx, Nd4jLong *xShapeInfo,
        float16 *dy, Nd4jLong *yShapeInfo,
        float16 *extraParams,
        float16 *result, Nd4jLong *resultShapeInfo,
        int *dimension, int dimensionLength,
        int postProcessOrNot,
        int *allocationPointer,
        Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
        Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    reduce3Generic<float16>(
            opNum,
            dx, xShapeInfo,
            dy, yShapeInfo,
            extraParams,
            result, resultShapeInfo,
            dimension, dimensionLength,
            postProcessOrNot,
            allocationPointer,
            tadOnlyShapeInfo, tadOffsets,
            yTadOnlyShapeInfo, yTadOffsets);
}

extern "C" __global__ void reduce3AllHalf(
        int opNum,
        float16 *dx, Nd4jLong *xShapeInfo,
        float16 *dy, Nd4jLong *yShapeInfo,
        float16 *extraParams,
        float16 *result, Nd4jLong *resultShapeInfo,
        int *dimension, int dimensionLength,
        int postProcessOrNot,
        int *allocationPointer,
        Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
        Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    reduce3AllGeneric<float16>(
            opNum,
            dx, xShapeInfo,
            dy, yShapeInfo,
            extraParams,
            result, resultShapeInfo,
            dimension, dimensionLength,
            postProcessOrNot,
            allocationPointer,
            tadOnlyShapeInfo, tadOffsets,
            yTadOnlyShapeInfo, yTadOffsets);
}

// Scalar kernel entry points. Note: dimension, dimensionLength and
// postProcessOrNot are accepted (keeping the launch signature uniform) but are
// not forwarded — reduce3ScalarGeneric does not take them.
extern "C" __global__ void reduce3ScalarFloat(
        int opNum,
        float *dx, Nd4jLong *xShapeInfo,
        float *dy, Nd4jLong *yShapeInfo,
        float *extraParams,
        float *result, Nd4jLong *resultShapeInfo,
        int *dimension, int dimensionLength,
        int postProcessOrNot,
        int *allocationPointer,
        float *reductionBuffer,
        Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
        Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    reduce3ScalarGeneric<float>(
            opNum,
            dx, xShapeInfo,
            dy, yShapeInfo,
            extraParams,
            result, resultShapeInfo,
            allocationPointer,
            reductionBuffer,
            tadOnlyShapeInfo, tadOffsets,
            yTadOnlyShapeInfo, yTadOffsets);
}

extern "C" __global__ void reduce3ScalarHalf(
        int opNum,
        float16 *dx, Nd4jLong *xShapeInfo,
        float16 *dy, Nd4jLong *yShapeInfo,
        float16 *extraParams,
        float16 *result, Nd4jLong *resultShapeInfo,
        int *dimension, int dimensionLength,
        int postProcessOrNot,
        int *allocationPointer,
        float16 *reductionBuffer,
        Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
        Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    reduce3ScalarGeneric<float16>(
            opNum,
            dx, xShapeInfo,
            dy, yShapeInfo,
            extraParams,
            result, resultShapeInfo,
            allocationPointer,
            reductionBuffer,
            tadOnlyShapeInfo, tadOffsets,
            yTadOnlyShapeInfo, yTadOffsets);
}

extern "C" __global__ void reduce3ScalarDouble(
        int opNum,
        double *dx, Nd4jLong *xShapeInfo,
        double *dy, Nd4jLong *yShapeInfo,
        double *extraParams,
        double *result, Nd4jLong *resultShapeInfo,
        int *dimension, int dimensionLength,
        int postProcessOrNot,
        int *allocationPointer,
        double *reductionBuffer,
        Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets,
        Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) {
    reduce3ScalarGeneric<double>(
            opNum,
            dx, xShapeInfo,
            dy, yShapeInfo,
            extraParams,
            result, resultShapeInfo,
            allocationPointer,
            reductionBuffer,
            tadOnlyShapeInfo, tadOffsets,
            yTadOnlyShapeInfo, yTadOffsets);
}

#endif

#endif /* REDUCE3_H_ */
pagerank_openmp.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include <string.h>
#include <memory.h>
#include <sys/time.h>

#ifndef REAL
#define REAL float
#endif

#define ALPHA 0.85              /* damping factor of the PageRank iteration */
#define EPSILON 0.01            /* per-component absolute convergence tolerance */
#define ARRAY_LENGTH 600000000  /* max number of matrix cells kept in memory at once */
#define MAX_TIMES 1000          /* hard cap on the number of iterations */
#define MICRO_IN_SEC 1000000.00

/* Global timing accumulators shared with the (mostly commented-out)
   instrumentation scattered through caculate(). */
double begin_time, end_time, serial_time=0, parallel_time=0;

/* Wall-clock time in seconds with microsecond resolution.
   NOTE(review): tv_sec, tv_usec and time are declared but never used. */
double microtime(){
    int tv_sec,tv_usec;
    double time;
    struct timeval tv;
    struct timezone tz;
    gettimeofday(&tv,&tz);
    return tv.tv_sec+tv.tv_usec/MICRO_IN_SEC;
}

/* One directed edge nodei -> nodej carrying transition probability p. */
typedef struct{
    int nodei;
    int nodej;
    REAL p;
} EDGE;

int caculate(char *ifn);
int check_result(REAL *r, REAL *rtmp, int noden);

/* Entry point: the single command-line argument is the edge-list input file. */
int main(int argc,char * argv[])
{
    char *ifn=NULL;
    if(argc<2)
    {
        printf("wrong command format! usage:parallel_pagerank INPUTFILENAME\n");
        return 0;
    }
    else
    {
        ifn=argv[1];
        caculate(ifn);
        return 0;
    }
}

/*
 * Reads the graph from ifn (header: node count, edge count; then one
 * "i j p" triple per edge), iterates PageRank until check_result()
 * reports convergence or MAX_TIMES iterations elapse, and writes the
 * final rank vector to CaculateResult.txt.
 *
 * Two code paths: if the whole noden x noden dense matrix fits in the
 * ARRAY_LENGTH budget it is built once (fast path); otherwise the matrix
 * is rebuilt block-by-block (linen rows at a time) on every iteration.
 *
 * NOTE(review): edge, foffset and topi are declared but unused.
 * NOTE(review): the "%f" in fscanf assumes REAL == float; it would be
 * wrong if REAL were redefined as double — confirm before changing REAL.
 */
int caculate(char *ifn)
{
    begin_time = microtime();
    FILE *ifp=NULL,*ofp=NULL;
    EDGE edge,*array_edge=NULL;
    char *ofn="CaculateResult.txt";
    int noden,edgen,foffset,linen,i,j,begin,topi,counter,index_i,edge_i;
    REAL *r=NULL,*rtmp=NULL,*tmp,*array=NULL;

    if((ifp=fopen(ifn,"r"))==NULL)
    {
        printf("%s file open error!\n",ifn);
        exit(0);
    }
    else
    {
        printf("%s file opened success!\n",ifn);
    }
    if((ofp=fopen(ofn,"w"))==NULL)
    {
        printf("%s file open error!\n",ofn);
        fclose(ifp);
        exit(0);
    }
    else
    {
        printf("%s file opened success!\n",ofn);
    }

    fscanf(ifp,"%d%d",&noden,&edgen);
    foffset=ftell(ifp);
    printf("Allocing Memory!\n");
    if((array_edge=(EDGE *)malloc(edgen*sizeof(EDGE)))==NULL)
    {
        printf("Memory alloc ERROR !\n");
        fclose(ifp);
        fclose(ofp);
        exit(0);
    }
    /* linen = how many full matrix rows fit in the ARRAY_LENGTH budget. */
    linen=ARRAY_LENGTH/noden;
    if(linen<=0)
    {
        printf("ArrayLength is too short for this caculate!\nPlase change the value of ARRAYLENGTH\n");
        free(array_edge);
        fclose(ifp);
        fclose(ofp);
        exit(0);
    }
    if((array=(REAL *)malloc(linen*noden*sizeof(REAL)))==NULL)
    {
        printf("Memory alloc ERROR !\n");
        fclose(ifp);
        fclose(ofp);
        free(array_edge);
        exit(0);
    }
    if((r=(REAL *)malloc(noden*sizeof(REAL)))==NULL)
    {
        printf("Memory alloc ERROR !\n");
        fclose(ifp);
        fclose(ofp);
        free(array);
        exit(0);
    }
    if((rtmp=(REAL *)malloc(noden*sizeof(REAL)))==NULL)
    {
        fclose(ifp);
        fclose(ofp);
        free(array);
        free(r);
        exit(0);
    }
    /* Initial rank vector: all ones (unnormalized). */
    for(i=0;i<noden;i++)
    {
        *(r+i)=1.0;
    }
    printf("Memory Alloc done!\n");
    printf("Caculating pagerank!\n");
    printf("Loding Data!\n");
    for(i=0;i<edgen;i++)
    {
        fscanf(ifp,"%d%d%f",&((array_edge+i)->nodei),&((array_edge+i)->nodej),&((array_edge+i)->p));
    }
    printf("Data loaded!\n");
    end_time = microtime();
    printf("read file and alloc memory time consuming:%fs\n",end_time-begin_time);
    begin_time = end_time;
    printf("Begin Caculate!\n");
    counter=MAX_TIMES;
    /* NOTE(review): pr_tmp and matrix_probablity are function-scope and thus
       shared by default inside the parallel j-loops below. pr_tmp is safe
       (reduction variable), but matrix_probablity is written by every thread
       concurrently — a data race; it should be private/loop-local. Confirm
       and fix in a behavior change, not here. */
    REAL pr_tmp=0.0;
    REAL matrix_probablity = 0;
    begin_time = microtime();

    if(noden<=linen)
    {
        /* Fast path: the full dense matrix fits in memory. */
        /* end_time = microtime(); printf("read file and alloc memory time consuming:%fs\n",end_time-begin_time); begin_time = end_time; */
        #pragma omp parallel for
        for(i=0;i<noden*noden;i++)
        {
            array[i]=0;
        }
        /* end_time = microtime(); parallel_time += end_time-begin_time; begin_time = end_time; */
        /* Scatter the edges into the dense transition matrix. */
        for(i=0;i<edgen;i++)
        {
            *(array+(((array_edge+i)->nodei)*noden+(array_edge+i)->nodej))=(array_edge+i)->p;
        }
        /* end_time = microtime(); serial_time += end_time-begin_time; begin_time= end_time; */
        do
        {
            /* Swap current and previous rank vectors. */
            tmp=rtmp;
            rtmp=r;
            r=tmp;
            //caculate PageRank
            //#pragma omp parallel for
            for(i=0;i<noden;i++)
            {
                pr_tmp = 0.0;
                /* end_time = microtime(); serial_time += end_time - begin_time; begin_time = end_time; */
                #pragma omp parallel for reduction(+:pr_tmp)
                for(j=0;j<noden;j++)
                {
                    matrix_probablity = array[i*noden+j];
                    pr_tmp += ( ( ALPHA * matrix_probablity ) + ( 1.0 - ALPHA ) / ( REAL ) noden ) * rtmp[j];
                }
                /* end_time = microtime(); parallel_time += end_time - begin_time; begin_time = end_time; */
                *(r+i) = pr_tmp;
            }
            /* end_time = microtime(); serial_time += end_time-begin_time; begin_time = end_time; */
            printf("parallel part time consuming:%fs\n",microtime()-begin_time);
            counter--;
            printf("counter = %d ", counter);
            printf(" first pagerank = %f, noden= %d linen= %d \n",r[0], noden, linen);
        }
        while((!check_result(r,rtmp,noden)) && counter);
    }
    else
    {
        /* Blocked path: rebuild linen rows of the matrix per block, every
           iteration, walking the edge list once per sweep (edges must be
           sorted by nodei for the break below to be correct). */
        int block_counter=0;
        int ii=0;
        do
        {
            tmp=rtmp;
            rtmp=r;
            r=tmp;
            begin=0;
            edge_i=0;
            block_counter=0;
            /* end_time = microtime(); serial_time += end_time-begin_time; begin_time = end_time; */
            for(ii=0;ii<noden/linen;ii++)
            {
                /* end_time = microtime(); serial_time += end_time-begin_time; begin_time = end_time; */
                #pragma omp parallel for
                for(i=0;i<linen*noden;i++)
                {
                    array[i]=0;
                }
                /* end_time = microtime(); parallel_time += end_time - begin_time; begin_time = end_time; */
                /* Fill this block's rows from the edge list. */
                do{
                    if((array_edge+edge_i)->nodei>=begin+linen)
                    {
                        break;
                    }
                    else
                    {
                        *(array+(((array_edge+edge_i)->nodei%linen)*noden+(array_edge+edge_i)->nodej))=(array_edge+edge_i)->p;
                        edge_i++;
                    }
                }
                while(edge_i<edgen);
                //#pragma omp parallel for
                for(index_i=begin;index_i<begin+linen;index_i++)
                {
                    pr_tmp = 0.0;
                    #pragma omp parallel for reduction(+:pr_tmp)
                    for(j=0;j<noden;j++)
                    {
                        matrix_probablity = array[(index_i%linen)*noden+j];
                        pr_tmp += ( ( ALPHA * matrix_probablity ) + ( 1.0 - ALPHA ) / ( REAL ) noden ) * rtmp[j];
                    }
                    *(r+index_i) = pr_tmp;
                    // r[index_i] = __sec_reduce_add ( (array+(index_i%linen) *noden)[0:noden] * rtmp[0:noden]);
                }
                begin+=linen;
                block_counter++;
                if(block_counter%1000 == 0)
                    printf("block_counter:%d\n",block_counter);
                /* NOTE(review): benchmark shortcut — aborts the whole run at a
                   fixed block count instead of finishing; presumably left over
                   from a timing experiment. */
                if(block_counter == 6000 || block_counter == (noden/linen - 1)){
                    /* end_time = microtime(); serial_time += end_time - begin_time; begin_time = end_time; */
                    printf("block_counter:%d\n parallel part time consuming:%fs\n",block_counter,microtime()-begin_time);
                    exit(0);
                }
            }
            /* Remainder block: the last noden%linen rows. */
            if(noden%linen != 0)
            {
                /* end_time = microtime(); serial_time += end_time - begin_time; begin_time = end_time; */
                #pragma omp parallel for
                for(i=0;i<(noden%linen)*noden;i++)
                {
                    array[i]=0;
                }
                /* end_time = microtime(); parallel_time += end_time - begin_time; begin_time = end_time; */
                do{
                    if((array_edge+edge_i)->nodei>=begin+linen)
                    {
                        break;
                    }
                    else
                    {
                        *(array+(((array_edge+edge_i)->nodei%linen)*noden+(array_edge+edge_i)->nodej))=(array_edge+edge_i)->p;
                        edge_i++;
                    }
                }
                while(edge_i<edgen);
                //#pragma omp parallel for
                for(index_i=begin;index_i<noden;index_i++)
                {
                    pr_tmp = 0.0;
                    /* end_time = microtime(); serial_time += end_time - begin_time; begin_time = end_time; */
                    #pragma omp parallel for reduction(+:pr_tmp)
                    for(j=0;j<noden;j++)
                    {
                        matrix_probablity = array[(index_i%linen)*noden+j];
                        pr_tmp += ( ( ALPHA * matrix_probablity ) + ( 1.0 - ALPHA ) / ( REAL ) noden ) * rtmp[j];
                    }
                    /* end_time = microtime(); parallel_time += end_time - begin_time; begin_time = end_time; */
                    *(r+index_i) = pr_tmp;
                }
                block_counter++;
                if(block_counter%100 == 0)
                    printf("block_counter=%d begin=%d\n",block_counter,begin);
            }
            counter--;
            printf("counter = %d ", counter);
            printf(" first pagerank = %f, noden= %d linen= %d \n",r[0], noden, linen);
        }
        while((!check_result(r,rtmp,noden)) && counter);
    }

    printf("caculate done !\n");
    printf("outputing result to %s\n",ofn);
    for(i=0;i<noden;i++)
    {
        fprintf(ofp,"%d\t%f\n",i,*(r+i));
    }
    printf("output done!,counter times:%d\n",counter);
    fclose(ifp);
    fclose(ofp);
    free(array);
    free(array_edge);
    free(rtmp);
    free(r);
    return 0;
}

/* Returns 1 when every component of r and rtmp differs by less than
   EPSILON in absolute value (converged), 0 otherwise. */
int check_result(REAL *r, REAL *rtmp, int noden)
{
    int i;
    for(i=0;i<noden;i++)
    {
        if(!(*(r+i)-*(rtmp+i)<EPSILON && *(rtmp+i)-*(r+i)<EPSILON))
        {
            return 0;
        }
    }
    return 1;
}
crop_and_resize.c
#include <TH/TH.h>
#include <stdio.h>
#include <math.h>

/*
 * Bilinearly crops/resizes boxes out of a batch of images (NCHW layout).
 * For each box b in [start_box, limit_box): box coordinates are normalized
 * [y1, x1, y2, x2]; the source image is image_data[box_index_data[b]].
 * Sample points that fall outside the image are filled with
 * extrapolation_value. Output layout is [box, channel, crop_y, crop_x]
 * in corps_data (sic — the misspelling is part of the existing interface).
 *
 * NOTE(review): exit(-1) is called from inside an OpenMP parallel region
 * on a bad batch index — aborting from a parallel region is unsafe;
 * confirm whether a pre-validation pass is preferable.
 */
void CropAndResizePerBox(
    const float * image_data,
    const int batch_size,
    const int depth,
    const int image_height,
    const int image_width,

    const float * boxes_data,
    const int * box_index_data,
    const int start_box,
    const int limit_box,

    float * corps_data,
    const int crop_height,
    const int crop_width,
    const float extrapolation_value
) {
    /* Element strides for the NCHW input and output tensors. */
    const int image_channel_elements = image_height * image_width;
    const int image_elements = depth * image_channel_elements;

    const int channel_elements = crop_height * crop_width;
    const int crop_elements = depth * channel_elements;

    int b;
    #pragma omp parallel for
    for (b = start_box; b < limit_box; ++b) {
        const float * box = boxes_data + b * 4;
        const float y1 = box[0];
        const float x1 = box[1];
        const float y2 = box[2];
        const float x2 = box[3];

        const int b_in = box_index_data[b];
        if (b_in < 0 || b_in >= batch_size) {
            printf("Error: batch_index %d out of range [0, %d)\n", b_in, batch_size);
            exit(-1);
        }

        /* Map crop pixel steps to source-image steps; degenerate 1-pixel
           crops sample the box center instead. */
        const float height_scale = (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1) : 0;
        const float width_scale = (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1) : 0;

        for (int y = 0; y < crop_height; ++y) {
            const float in_y = (crop_height > 1)
                ? y1 * (image_height - 1) + y * height_scale
                : 0.5 * (y1 + y2) * (image_height - 1);

            if (in_y < 0 || in_y > image_height - 1) {
                /* Whole output row is outside the image. */
                for (int x = 0; x < crop_width; ++x) {
                    for (int d = 0; d < depth; ++d) {
                        // crops(b, y, x, d) = extrapolation_value;
                        corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = extrapolation_value;
                    }
                }
                continue;
            }

            const int top_y_index = floorf(in_y);
            const int bottom_y_index = ceilf(in_y);
            const float y_lerp = in_y - top_y_index;

            for (int x = 0; x < crop_width; ++x) {
                const float in_x = (crop_width > 1)
                    ? x1 * (image_width - 1) + x * width_scale
                    : 0.5 * (x1 + x2) * (image_width - 1);
                if (in_x < 0 || in_x > image_width - 1) {
                    for (int d = 0; d < depth; ++d) {
                        corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = extrapolation_value;
                    }
                    continue;
                }

                const int left_x_index = floorf(in_x);
                const int right_x_index = ceilf(in_x);
                const float x_lerp = in_x - left_x_index;

                for (int d = 0; d < depth; ++d) {
                    const float *pimage = image_data + b_in * image_elements + d * image_channel_elements;

                    /* Standard 4-tap bilinear interpolation. */
                    const float top_left = pimage[top_y_index * image_width + left_x_index];
                    const float top_right = pimage[top_y_index * image_width + right_x_index];
                    const float bottom_left = pimage[bottom_y_index * image_width + left_x_index];
                    const float bottom_right = pimage[bottom_y_index * image_width + right_x_index];

                    const float top = top_left + (top_right - top_left) * x_lerp;
                    const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;

                    corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = top + (bottom - top) * y_lerp;
                }
            }   // end for x
        }   // end for y
    }   // end for b
}

/*
 * Torch (TH) entry point for the forward pass: resizes `crops` to
 * [num_boxes, depth, crop_height, crop_width], zeroes it, and delegates
 * the per-box work to CropAndResizePerBox over all boxes.
 */
void crop_and_resize_forward(
    THFloatTensor * image,
    THFloatTensor * boxes,      // [y1, x1, y2, x2]
    THIntTensor * box_index,    // range in [0, batch_size)
    const float extrapolation_value,
    const int crop_height,
    const int crop_width,
    THFloatTensor * crops
) {
    // const int batch_size = image->size[0];
    // const int depth = image->size[1];
    // const int image_height = image->size[2];
    // const int image_width = image->size[3];
    const int batch_size = THFloatTensor_size(image, 0);
    const int depth = THFloatTensor_size(image, 1);
    const int image_height = THFloatTensor_size(image, 2);
    const int image_width = THFloatTensor_size(image, 3);

    // const int num_boxes = boxes->size[0];
    const int num_boxes = THFloatTensor_size(boxes, 0);

    // init output space
    THFloatTensor_resize4d(crops, num_boxes, depth, crop_height, crop_width);
    THFloatTensor_zero(crops);

    // crop_and_resize for each box
    CropAndResizePerBox(
        THFloatTensor_data(image),
        batch_size,
        depth,
        image_height,
        image_width,

        THFloatTensor_data(boxes),
        THIntTensor_data(box_index),
        0,
        num_boxes,

        THFloatTensor_data(crops),
        crop_height,
        crop_width,
        extrapolation_value
    );
}

/*
 * Backward pass: scatters each crop-pixel gradient back onto the four
 * source pixels used by the forward bilinear sample, weighted by the same
 * lerp factors. grads_image must already have the image shape; it is
 * zeroed here before accumulation.
 *
 * NOTE(review): this loop is serial (no omp) — presumably because the
 * `+=` scatter into grads_image would race across boxes sharing a batch
 * image; confirm before parallelizing.
 */
void crop_and_resize_backward(
    THFloatTensor * grads,
    THFloatTensor * boxes,      // [y1, x1, y2, x2]
    THIntTensor * box_index,    // range in [0, batch_size)
    THFloatTensor * grads_image // resize to [bsize, c, hc, wc]
)
{
    // shape
    // const int batch_size = grads_image->size[0];
    // const int depth = grads_image->size[1];
    // const int image_height = grads_image->size[2];
    // const int image_width = grads_image->size[3];
    const int batch_size = THFloatTensor_size(grads_image, 0);
    const int depth = THFloatTensor_size(grads_image, 1);
    const int image_height = THFloatTensor_size(grads_image, 2);
    const int image_width = THFloatTensor_size(grads_image, 3);

    // const int num_boxes = grads->size[0];
    // const int crop_height = grads->size[2];
    // const int crop_width = grads->size[3];
    const int num_boxes = THFloatTensor_size(grads, 0);
    const int crop_height = THFloatTensor_size(grads, 2);
    const int crop_width = THFloatTensor_size(grads, 3);

    // n_elements
    const int image_channel_elements = image_height * image_width;
    const int image_elements = depth * image_channel_elements;

    const int channel_elements = crop_height * crop_width;
    const int crop_elements = depth * channel_elements;

    // init output space
    THFloatTensor_zero(grads_image);

    // data pointer
    const float * grads_data = THFloatTensor_data(grads);
    const float * boxes_data = THFloatTensor_data(boxes);
    const int * box_index_data = THIntTensor_data(box_index);
    float * grads_image_data = THFloatTensor_data(grads_image);

    for (int b = 0; b < num_boxes; ++b) {
        const float * box = boxes_data + b * 4;
        const float y1 = box[0];
        const float x1 = box[1];
        const float y2 = box[2];
        const float x2 = box[3];

        const int b_in = box_index_data[b];
        if (b_in < 0 || b_in >= batch_size) {
            printf("Error: batch_index %d out of range [0, %d)\n", b_in, batch_size);
            exit(-1);
        }

        const float height_scale = (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1) : 0;
        const float width_scale = (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1) : 0;

        for (int y = 0; y < crop_height; ++y) {
            const float in_y = (crop_height > 1)
                ? y1 * (image_height - 1) + y * height_scale
                : 0.5 * (y1 + y2) * (image_height - 1);
            if (in_y < 0 || in_y > image_height - 1) {
                /* Extrapolated pixels contribute no gradient. */
                continue;
            }

            const int top_y_index = floorf(in_y);
            const int bottom_y_index = ceilf(in_y);
            const float y_lerp = in_y - top_y_index;

            for (int x = 0; x < crop_width; ++x) {
                const float in_x = (crop_width > 1)
                    ? x1 * (image_width - 1) + x * width_scale
                    : 0.5 * (x1 + x2) * (image_width - 1);
                if (in_x < 0 || in_x > image_width - 1) {
                    continue;
                }

                const int left_x_index = floorf(in_x);
                const int right_x_index = ceilf(in_x);
                const float x_lerp = in_x - left_x_index;

                for (int d = 0; d < depth; ++d) {
                    float *pimage = grads_image_data + b_in * image_elements + d * image_channel_elements;
                    const float grad_val = grads_data[crop_elements * b + channel_elements * d + y * crop_width + x];

                    /* Distribute grad_val to the four forward taps. */
                    const float dtop = (1 - y_lerp) * grad_val;
                    pimage[top_y_index * image_width + left_x_index] += (1 - x_lerp) * dtop;
                    pimage[top_y_index * image_width + right_x_index] += x_lerp * dtop;

                    const float dbottom = y_lerp * grad_val;
                    pimage[bottom_y_index * image_width + left_x_index] += (1 - x_lerp) * dbottom;
                    pimage[bottom_y_index * image_width + right_x_index] += x_lerp * dbottom;
                }   // end d
            }   // end x
        }   // end y
    }   // end b
}
utils.c
#include <stdio.h> #include <stdarg.h> #include <stdint.h> #include <stddef.h> #include <math.h> #include <omp.h> #include "utils.h" #include "bench.h" void fun3d_printf(const uint32_t c, const char *format, ...) { uint32_t val = 0; switch(c) { case 0: /* ANSI_COLOR_RED */ val = 31; break; case 1: /* ANSI_COLOR_GREEN */ val = 32; break; case 2: /* ANSI_COLOR_YELLOW */ val = 33; break; case 3: /* ANSI_COLOR_BLUE */ val = 34; break; case 4: /* ANSI_COLOR_MAGENTA */ val = 35; break; case 5: /* ANSI_COLOR_CYAN */ val = 36; break; default: val = 0; break; } char color[20]; sprintf(color, "\x1b[%dm", val); va_list arg; va_start(arg, format); fprintf(stdout, "%s", color); vfprintf(stdout, format, arg); fprintf(stdout, "\x1b[0m"); va_end(arg); } double Compute2ndNorm(const size_t sz, const double *v) { BENCH start_bench = rdbench(); double norm = 0.f; uint32_t i; #pragma omp parallel for reduction(+: norm) for(i = 0; i < sz; i++) norm += v[i] * v[i]; fun3d_log(start_bench, KERNEL_BLAS); return(sqrt(norm)); } void ComputeAXPY(const size_t sz, const double a, const double *x, double *y) { BENCH start_bench = rdbench(); uint32_t i; #pragma omp parallel for for(i = 0; i < sz; i++) { /* AXPY */ const double ax = a * x[i]; const double axpy = ax + y[i]; /* Update the vector component */ y[i] = axpy; } fun3d_log(start_bench, KERNEL_BLAS); } void ComputeNewAXPY(const size_t sz, const double a, const double *x, const double *y, double *w) { BENCH start_bench = rdbench(); uint32_t i; #pragma omp parallel for for(i = 0; i < sz; i++) { /* AXPY */ const double ax = a * x[i]; const double axpy = ax + y[i]; /* Update the vector component */ w[i] = axpy; } fun3d_log(start_bench, KERNEL_BLAS); } double Normalize(const size_t sz, double *x) { BENCH start_bench = rdbench(); double norm = Compute2ndNorm(sz, x); uint32_t i; #pragma omp parallel for for(i = 0; i < sz; i++) x[i] *= (1.f / norm); fun3d_log(start_bench, KERNEL_BLAS); return norm; }
ordered.c
#include <stdio.h> #include <omp.h> int main (void) { int i,myval; #pragma omp parallel for private(myval) ordered for(i=1; i<=100; i++){ myval=i*i; #pragma omp ordered { printf("%d %d\n", i, myval); } } return 0; }
array_init.c
// Test the handling of two loops under omp for // watch the loop index replacement (private by default) int main(void) { int i, j; float u[500][500]; #pragma omp parallel for for (i=0; i<500; i++) for (j=0; j<500; j++) { u[i][j] = 0.0; } return 0; }
omp_reduce_bad2.c
#include <assert.h> #include <omp.h> #include <stdio.h> int main () { int n = 5; int arr[5] = {5,3,9,1,7}; // Reduction combiners int res = 0; #pragma omp parallel for reduction(#:res) for (int i=0; i<n; i++) max = max < arr[i] ? arr[i] : max; assert(max == 9); }
serial_mt_tree_learner.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_TREELEARNER_SERIAL_MT_TREE_LEARNER_H_ #define LIGHTGBM_TREELEARNER_SERIAL_MT_TREE_LEARNER_H_ #include <LightGBM/dataset.h> #include <LightGBM/tree.h> #include <LightGBM/tree_learner.h> #include <LightGBM/cuda/vector_cudahost.h> #include <LightGBM/utils/array_args.h> #include <LightGBM/utils/json11.h> #include <LightGBM/utils/random.h> #include <string> #include <cmath> #include <cstdio> #include <memory> #include <random> #include <vector> #include <set> #include "col_sampler.hpp" #include "data_partition.hpp" #include "feature_histogram.hpp" #include "leaf_splits.hpp" #include "monotone_constraints.hpp" #include "split_info.hpp" #ifdef USE_GPU // Use 4KBytes aligned allocator for ordered gradients and ordered Hessians when GPU is enabled. // This is necessary to pin the two arrays in memory and make transferring faster. #include <boost/align/aligned_allocator.hpp> #endif namespace LightGBM { using json11::Json; /*! \brief forward declaration */ class CostEfficientMTGradientBoosting; /*! 
* \brief Used for learning a tree by single machine
*/
class SerialMTTreeLearner: public TreeLearner {
 public:
  // CEGB needs direct access to the learner's internals (histograms, splits).
  friend CostEfficientMTGradientBoosting;
  explicit SerialMTTreeLearner(const Config* config);

  ~SerialMTTreeLearner();

  void Init(const Dataset* train_data, bool is_constant_hessian) override;

  // Re-binds the learner to new training data and rebuilds the multi-value bin.
  void ResetTrainingData(const Dataset* train_data,
                         bool is_constant_hessian) override {
    ResetTrainingDataInner(train_data, is_constant_hessian, true);
  }

  void ResetIsConstantHessian(bool is_constant_hessian) override {
    share_state_->is_constant_hessian = is_constant_hessian;
  }

  virtual void ResetTrainingDataInner(const Dataset* train_data,
                                      bool is_constant_hessian,
                                      bool reset_multi_val_bin);

  void ResetConfig(const Config* config) override;

  // Stores (or clears) the user-supplied forced-split JSON; a null or
  // JSON-null input disables forced splitting.
  inline void SetForcedSplit(const Json* forced_split_json) override {
    if (forced_split_json != nullptr && !forced_split_json->is_null()) {
      forced_split_json_ = forced_split_json;
    } else {
      forced_split_json_ = nullptr;
    }
  }

  Tree* Train(const score_t* gradients, const score_t *hessians, bool is_first_tree) override;

  void Train_serial2(Tree* tree, const score_t* gradients, const score_t* hessians, bool is_first_tree) override;

  Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;

  Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
                          const score_t* gradients, const score_t* hessians) const override;

  // subset == nullptr: bagging reuses the full dataset through index lists;
  // otherwise the learner is re-bound to the subset dataset (multi-value bin
  // kept) and subrow copying bookkeeping is reset.
  void SetBaggingData(const Dataset* subset, const data_size_t* used_indices,
                      data_size_t num_data) override {
    if (subset == nullptr) {
      data_partition_->SetUsedDataIndices(used_indices, num_data);
      share_state_->SetUseSubrow(false);
    } else {
      ResetTrainingDataInner(subset, share_state_->is_constant_hessian, false);
      share_state_->SetUseSubrow(true);
      share_state_->SetSubrowCopied(false);
      share_state_->bagging_use_indices = used_indices;
      share_state_->bagging_indices_cnt = num_data;
    }
  }

  // Variant that only updates the partition's index list.
  // NOTE(review): the `subset` parameter is unused here — presumably kept for
  // signature symmetry with SetBaggingData; confirm against the base class.
  void SetBaggingData2(const Dataset* subset, const data_size_t* used_indices,
                       data_size_t num_data) override {
    data_partition_->SetUsedDataIndices2(used_indices, num_data);
  }

  // Adds each leaf's output to the scores of the rows assigned to that leaf.
  // Parallel over leaves; leaves partition the rows, so writes do not overlap.
  void AddPredictionToScore(const Tree* tree, double* out_score) const override {
    CHECK_LE(tree->num_leaves(), data_partition_->num_leaves());
    if (tree->num_leaves() <= 1) {
      return;
    }
    #pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < tree->num_leaves(); ++i) {
      double output = static_cast<double>(tree->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
        out_score[tmp_idx[j]] += output;
      }
    }
  }

  void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj,
                       std::function<double(const label_t*, int)> residual_getter,
                       data_size_t total_num_data,
                       const data_size_t* bag_indices, data_size_t bag_cnt) const override;

  /*! \brief Get output of parent node, used for path smoothing */
  double GetParentOutput(const Tree* tree, const LeafSplits* leaf_splits) const;

 protected:
  void ComputeBestSplitForFeature(FeatureHistogram* histogram_array_,
                                  int feature_index, int real_fidx,
                                  int8_t is_feature_used, int num_data,
                                  const LeafSplits* leaf_splits,
                                  SplitInfo* best_split, double parent_output);

  void GetShareStates(const Dataset* dataset, bool is_constant_hessian,
                      bool is_first_time);

  void RecomputeBestSplitForLeaf(Tree* tree, int leaf, SplitInfo* split);

  /*!
  * \brief Some initial works before training
  */
  virtual void BeforeTrain();

  /*!
  * \brief Some initial works before FindBestSplit
  */
  virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);

  virtual void FindBestSplits(const Tree* tree);

  virtual void FindBestSplits(const Tree* tree, const std::set<int>* force_features);

  virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);

  virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract, const Tree*);

  /*!
  * \brief Partition tree and data according best split.
  * \param tree Current tree, will be splitted on this function.
  * \param best_leaf The index of leaf that will be splitted.
  * \param left_leaf The index of left leaf after splitted.
  * \param right_leaf The index of right leaf after splitted.
  */
  inline virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf) {
    SplitInner(tree, best_leaf, left_leaf, right_leaf, true);
  }

  void SplitInner(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf,
                  bool update_cnt);

  /* Force splits with forced_split_json dict and then return num splits forced.*/
  int32_t ForceSplits(Tree* tree, int* left_leaf, int* right_leaf,
                      int* cur_depth);

  std::set<int> FindAllForceFeatures(Json force_split_leaf_setting);

  /*!
  * \brief Get the number of data in a leaf
  * \param leaf_idx The index of leaf
  * \return The number of data in the leaf_idx leaf
  */
  inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;

  /*! \brief number of data */
  data_size_t num_data_;
  /*! \brief number of features */
  int num_features_;
  /*! \brief training data */
  const Dataset* train_data_;
  /*! \brief gradients of current iteration */
  const score_t* gradients_;
  /*! \brief hessians of current iteration */
  const score_t* hessians_;
  /*! \brief training data partition on leaves */
  std::unique_ptr<DataPartition> data_partition_;
  /*! \brief pointer to histograms array of parent of current leaves */
  FeatureHistogram* parent_leaf_histogram_array_;
  /*! \brief pointer to histograms array of smaller leaf */
  FeatureHistogram* smaller_leaf_histogram_array_;
  /*! \brief pointer to histograms array of larger leaf */
  FeatureHistogram* larger_leaf_histogram_array_;

  /*! \brief store best split points for all leaves */
  std::vector<SplitInfo> best_split_per_leaf_;
  /*! \brief store best split per feature for all leaves */
  std::vector<SplitInfo> splits_per_leaf_;
  /*! \brief stores minimum and maximum constraints for each leaf */
  std::unique_ptr<LeafConstraintsBase> constraints_;

  /*! \brief stores best thresholds for all feature for smaller leaf */
  std::unique_ptr<LeafSplits> smaller_leaf_splits_;
  /*! \brief stores best thresholds for all feature for larger leaf */
  std::unique_ptr<LeafSplits> larger_leaf_splits_;
#ifdef USE_GPU
  /*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#elif USE_CUDA
  /* NOTE(review): `#elif USE_CUDA` evaluates an (possibly undefined) macro;
     `#elif defined(USE_CUDA)` would be the robust spelling — confirm the
     build always defines USE_CUDA with a numeric value. */
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t, CHAllocator<score_t>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t, CHAllocator<score_t>> ordered_hessians_;
#else
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_hessians_;
#endif

  /*! \brief used to cache historical histogram to speed up*/
  HistogramPool histogram_pool_;
  /*! \brief config of tree learner*/
  const Config* config_;
  ColSampler col_sampler_;
  const Json* forced_split_json_;
  std::unique_ptr<TrainingShareStates> share_state_;
  std::unique_ptr<CostEfficientMTGradientBoosting> cegb_;
};

inline data_size_t SerialMTTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
  if (leaf_idx >= 0) {
    return data_partition_->leaf_count(leaf_idx);
  } else {
    return 0;
  }
}

}  // namespace LightGBM
#endif   // LightGBM_TREELEARNER_SERIAL_MT_TREE_LEARNER_H_
search_core.c
/* This is a rather cumbersome solution to two problems I have with
   OpenMP.

   1) multiple threads and OpenMP don't mix on a Mac. It segfaults during
   the first openmp call. I want people to be able to use chemfp in
   multi-threaded environments, even with diminished performance, so the
   single thread version should not go through the OpenMP path.

   2) I measured a roughly 5% performance penalty hit with a single
   thread using OpenMP vs. the code compiled without OpenMP.

   My solution is to compile the core code twice, one for each path.
   The RENAME macro rewrites
       int RENAME(chemfp_count_tanimoto_arena)
   to one of:
      static int chemfp_count_tanimoto_arena_single
            -- single-threaded, compiler supports OpenMP
      static int chemfp_count_tanimoto_arena_openmp
            -- multiple OpenMP threads
      int chemfp_count_tanimoto_arena
            -- single-threaded, compiler does not support OpenMP

  depending on the circumstances. In a normal build, where OpenMP is
  available, then this file will be #include'd twice.

*/

/* count code */
/*
 * Counts, for every query fingerprint, how many target fingerprints have
 * Tanimoto similarity >= threshold; one count per query is written into
 * result_counts (indexed from 0, i.e. by query_index - query_start).
 * When target_popcount_indices is non-NULL the targets are grouped by
 * popcount and the Swamidass/Baldi popcount bounds prune the search.
 */
int RENAME(chemfp_count_tanimoto_arena)(
    /* Count all matches within the given threshold */
    double threshold,

    /* Number of bits in the fingerprint */
    int num_bits,

    /* Query arena, start and end indices */
    int query_storage_size, const unsigned char *query_arena,
    int query_start, int query_end,

    /* Target arena, start and end indices */
    int target_storage_size, const unsigned char *target_arena,
    int target_start, int target_end,

    /* Target popcount distribution information */
    int *target_popcount_indices,

    /* Results go into these arrays  */
    int *result_counts
    ) {
  int query_index, target_index;
  const unsigned char *query_fp, *target_fp;
  int start, end;
  int count;
  int fp_size = (num_bits+7) / 8;
  double score, popcount_sum;
  int query_popcount, start_target_popcount, end_target_popcount;
  int target_popcount;
  int intersect_popcount;
  chemfp_popcount_f calc_popcount;
  chemfp_intersect_popcount_f calc_intersect_popcount;

  if (query_start >= query_end) {
    /* No queries */
    return CHEMFP_OK;
  }

  /* Prevent overflow if someone uses a threshold of, say, 1E-80 */
  /* (Not really needed unless you trap IEEE 754 overflow errors) */
  if (threshold > 0.0 && threshold < 1.0/num_bits) {
    threshold = 0.5 / num_bits;
  }
  if ((target_start >= target_end) || threshold > 1.0) {
    /* No possible targets: every count is zero. */
    for (query_index = 0; query_index < (query_end-query_start); query_index++) {
      /* No possible targets */
      result_counts[query_index] = 0;
    }
    return CHEMFP_OK;
  }
  if (threshold <= 0.0) {
    /* Everything will match, so there's no need to figure that out */
    for (query_index = 0; query_index < (query_end-query_start); query_index++) {
      result_counts[query_index] = (target_end - target_start);
    }
    return CHEMFP_OK;
  }

  if (target_popcount_indices == NULL) {
    /* Handle the case when precomputed targets aren't available. */
    /* This is a slower algorithm because it tests everything. */
#if USE_OPENMP == 1
    #pragma omp parallel for private(query_fp, target_fp, count, target_index, score) schedule(dynamic)
#endif
    for (query_index = 0; query_index < (query_end-query_start); query_index++) {
      query_fp = query_arena + (query_start + query_index) * query_storage_size;
      target_fp = target_arena + (target_start * target_storage_size);
      /* Handle the popcount(query) == 0 special case? */
      count = 0;
      for (target_index = target_start; target_index < target_end;
           target_index++, target_fp += target_storage_size) {
        score = chemfp_byte_tanimoto(fp_size, query_fp, target_fp);
        if (score >= threshold) {
          count++;
        }
      }
      result_counts[query_index] = count;
    }
    return CHEMFP_OK;
  }

  /* Choose popcounts optimized for this case */
  calc_popcount = chemfp_select_popcount(num_bits, query_storage_size, query_arena);
  calc_intersect_popcount = chemfp_select_intersect_popcount(
        num_bits, query_storage_size, query_arena,
        target_storage_size, target_arena);

  /* This uses the limits from Swamidass and Baldi */
  /* It doesn't use the search ordering because it's supposed to find everything */
#if USE_OPENMP == 1
  #pragma omp parallel for \
      private(query_fp, query_popcount, start_target_popcount, end_target_popcount, \
              count, target_popcount, start, end, target_fp, popcount_sum, target_index, intersect_popcount, score) \
      schedule(dynamic)
#endif
  for (query_index = 0; query_index < (query_end-query_start); query_index++) {
    query_fp = query_arena + (query_start + query_index) * query_storage_size;
    query_popcount = calc_popcount(fp_size, query_fp);
    /* Special case when popcount(query) == 0; everything has a score of 0.0 */
    if (query_popcount == 0) {
      if (threshold == 0.0) {
        result_counts[query_index] = (target_end - target_start);
      }
      continue;
    }
    /* Figure out which fingerprints to search */
    if (threshold == 0.0) {
      start_target_popcount = 0;
      end_target_popcount = num_bits;
    } else {
      /* Swamidass/Baldi bounds: a target must have popcount in
         [query*threshold, query/threshold] to possibly reach threshold. */
      start_target_popcount = (int)(query_popcount * threshold);
      end_target_popcount = (int)(ceil(query_popcount / threshold));
      if (end_target_popcount > num_bits) {
        end_target_popcount = num_bits;
      }
    }

    count = 0;
    for (target_popcount = start_target_popcount; target_popcount <= end_target_popcount;
         target_popcount++) {
      start = target_popcount_indices[target_popcount];
      end = target_popcount_indices[target_popcount+1];
      if (start < target_start) {
        start = target_start;
      }
      if (end > target_end) {
        end = target_end;
      }

      target_fp = target_arena + (start * target_storage_size);
      popcount_sum = query_popcount + target_popcount;
      for (target_index = start; target_index < end;
           target_index++, target_fp += target_storage_size) {
        intersect_popcount = calc_intersect_popcount(fp_size, query_fp, target_fp);
        /* Tanimoto = |A&B| / (|A| + |B| - |A&B|) */
        score = intersect_popcount / (popcount_sum - intersect_popcount);
        if (score >= threshold) {
          count++;
        }
      }
    }
    result_counts[query_index] = count;
  } /* went through each of the queries */
  return CHEMFP_OK;
}

/*
 * Like the count version above but records every hit (target index +
 * score) through chemfp_add_hit; hit insertion is serialized with an
 * OpenMP critical section. (Function continues past this chunk.)
 */
int RENAME(chemfp_threshold_tanimoto_arena)(
    /* Within the given threshold */
    double threshold,

    /* Number of bits in the fingerprint */
    int num_bits,

    /* Query arena, start and end indices */
    int query_storage_size, const unsigned char *query_arena,
    int query_start, int query_end,

    /* Target arena, start and end indices */
    int target_storage_size, const unsigned char *target_arena,
    int target_start, int target_end,

    /* Target popcount distribution information */
    /*  (must have at least num_bits+1 elements) */
    int *target_popcount_indices,

    /* Results go here */
    chemfp_search_result *results) {

  int query_index, target_index;
  const unsigned char *query_fp, *target_fp;
  int start, end;
  int fp_size = (num_bits+7) / 8;
  double score;
  int query_popcount, start_target_popcount, end_target_popcount;
  int target_popcount;
  int intersect_popcount, popcount_sum;
  int numerator, denominator;
  int add_hit_error = 0;
  chemfp_popcount_f calc_popcount;
  chemfp_intersect_popcount_f calc_intersect_popcount;

  if (query_start >= query_end) {
    /* No queries */
    return CHEMFP_OK;
  }

  /* Prevent overflow if someone uses a threshold of, say, 1E-80 */
  /* (Not really needed unless you trap IEEE 754 overflow errors) */
  if (threshold > 0.0 && threshold < 1.0/num_bits) {
    threshold = 0.5 / num_bits;
  }

  if ((target_start >= target_end) || threshold > 1.0) {
    return CHEMFP_OK;
  }

  if (target_popcount_indices == NULL) {
    /* Handle the case when precomputed targets aren't available. */
    /* This is a slower algorithm because it tests everything. */
#if USE_OPENMP == 1
    #pragma omp parallel for private(query_fp, target_fp, target_index, score) schedule(dynamic)
#endif
    for (query_index = query_start; query_index < query_end; query_index++) {
      query_fp = query_arena + (query_index * query_storage_size);
      target_fp = target_arena + (target_start * target_storage_size);
      /* Handle the popcount(query) == 0 special case? */
      for (target_index = target_start; target_index < target_end;
           target_index++, target_fp += target_storage_size) {
        score = chemfp_byte_tanimoto(fp_size, query_fp, target_fp);
        if (score >= threshold) {
#if USE_OPENMP == 1
          #pragma omp critical (add_hit_threshold)
#endif
          if (!chemfp_add_hit(results+(query_index-query_start), target_index, score)) {
            add_hit_error = 1;
          }
        }
      }
    }
    if (add_hit_error) {
      return CHEMFP_NO_MEM;
    }
    return CHEMFP_OK;
  }

  calc_popcount = chemfp_select_popcount(num_bits, query_storage_size, query_arena);
  calc_intersect_popcount = chemfp_select_intersect_popcount(
        num_bits, query_storage_size, query_arena,
        target_storage_size, target_arena);

  /* Fixed-point form of the threshold so the inner loop can compare
     integers instead of doubles (see the timing comment below). */
  denominator = num_bits * 10;
  numerator = (int)(threshold * denominator);

  /* This uses the limits from Swamidass and Baldi */
  /* It doesn't use the search ordering because it's supposed to find everything */
#if USE_OPENMP == 1
  #pragma omp parallel for \
      private(query_fp, query_popcount, target_index, target_fp, start_target_popcount, \
              end_target_popcount, target_popcount, start, end, popcount_sum, intersect_popcount, score) \
      schedule(dynamic)
#endif
  for (query_index = query_start; query_index < query_end; query_index++) {
    query_fp = query_arena + (query_index * query_storage_size);
    query_popcount = calc_popcount(fp_size, query_fp);
    /* Special case when popcount(query) == 0; everything has a score of 0.0 */
    if (query_popcount == 0) {
      if (threshold == 0.0) {
#if USE_OPENMP == 1
        #pragma omp critical (add_hit_threshold)
#endif
        for (target_index = target_start; target_index < target_end; target_index++) {
          if (!chemfp_add_hit(results+(query_index-query_start), target_index, 0.0)) {
            add_hit_error = 1;
          }
        }
      }
      continue;
    }
    /* Figure out which fingerprints to search */
    if (threshold == 0.0) {
      start_target_popcount = 0;
      end_target_popcount = num_bits;
    } else {
      start_target_popcount = (int)(query_popcount * threshold);
      end_target_popcount = (int)(ceil(query_popcount / threshold));
      if (end_target_popcount > num_bits) {
        end_target_popcount = num_bits;
      }
    }

    for (target_popcount=start_target_popcount; target_popcount<=end_target_popcount;
         target_popcount++) {
      start = target_popcount_indices[target_popcount];
      end = target_popcount_indices[target_popcount+1];
      if (start < target_start) {
        start = target_start;
      }
      if (end > target_end) {
        end = target_end;
      }

      target_fp = target_arena + (start * target_storage_size);
      popcount_sum = query_popcount + target_popcount;
      for (target_index = start; target_index < end;
           target_index++, target_fp += target_storage_size) {
        intersect_popcount = calc_intersect_popcount(fp_size, query_fp, target_fp);
        /* In my timings (on a Mac), the comparison against a double was a hotspot, */
        /* but division is not. I switch to integer math and gained a 3-4% performance, */
        /* at the cost of slightly more complicated code.
*/ if (denominator * intersect_popcount >= numerator * (popcount_sum - intersect_popcount)) { score = ((double) intersect_popcount) / (popcount_sum - intersect_popcount); #if USE_OPENMP == 1 #pragma omp critical (add_hit_threshold) #endif if (!chemfp_add_hit(results+(query_index-query_start), target_index, score)) { add_hit_error = 1; } } } } } /* went through each of the queries */ if (add_hit_error) { return CHEMFP_NO_MEM; } return CHEMFP_OK; } static int RENAME(knearest_tanimoto_arena_no_popcounts)( /* Find the 'k' nearest items */ int k, /* Within the given threshold */ double threshold, /* Fingerprint size in bits */ int num_bits, /* Query arena, start and end indices */ int query_storage_size, const unsigned char *query_arena, int query_start, int query_end, /* Target arena, start and end indices */ int target_storage_size, const unsigned char *target_arena, int target_start, int target_end, /* Results go into these arrays */ chemfp_search_result *results ) { int query_index, target_index; int fp_size = (num_bits+7)/8; const unsigned char *query_fp, *target_fp; double query_threshold, score; chemfp_search_result *result; for (query_index = 0; query_index < (query_end-query_start); query_index++) { query_fp = query_arena + (query_start+query_index) * query_storage_size; result = results+query_index; query_threshold = threshold; target_fp = target_arena + (target_start * query_storage_size); target_index = target_start; for (; target_index < target_end; target_index++, target_fp += target_storage_size) { score = chemfp_byte_tanimoto(fp_size, query_fp, target_fp); if (score >= query_threshold) { chemfp_add_hit(result, target_index, score); if (result->num_hits == k) { chemfp_heapq_heapify(k, result, (chemfp_heapq_lt) double_score_lt, (chemfp_heapq_swap) double_score_swap); query_threshold = result->scores[0]; /* Since we leave the loop early, I need to advance the pointers */ target_index++; target_fp += target_storage_size; break; } } } /* Either we've reached 
the end of the fingerprints or the heap is full */ if (result->num_hits == k) { /* Continue scanning through the fingerprints */ for (; target_index < target_end; target_index++, target_fp += target_storage_size) { score = chemfp_byte_tanimoto(fp_size, query_fp, target_fp); /* We need to be strictly *better* than what's in the heap */ if (score > query_threshold) { result->indices[0] = target_index; result->scores[0] = score; chemfp_heapq_siftup(k, result, 0, (chemfp_heapq_lt) double_score_lt, (chemfp_heapq_swap) double_score_swap); query_threshold = result->scores[0]; } /* heapreplaced the old smallest item with the new item */ } /* End of the fingerprint scan */ } else { /* The heap isn't full, so we haven't yet heapified it. */ chemfp_heapq_heapify(result->num_hits, result, (chemfp_heapq_lt) double_score_lt, (chemfp_heapq_swap) double_score_swap); } } /* Loop through the queries */ return query_index-query_start; } int RENAME(chemfp_knearest_tanimoto_arena)( /* Find the 'k' nearest items */ int k, /* Within the given threshold */ double threshold, /* Size of the fingerprints and size of the storage block */ int num_bits, /* Query arena, start and end indices */ int query_storage_size, const unsigned char *query_arena, int query_start, int query_end, /* Target arena, start and end indices */ int target_storage_size, const unsigned char *target_arena, int target_start, int target_end, /* Target popcount distribution information */ int *target_popcount_indices, /* Results go into these arrays */ chemfp_search_result *results ) { int fp_size; int query_popcount, target_popcount, intersect_popcount; double score, best_possible_score, popcount_sum, query_threshold; const unsigned char *query_fp, *target_fp; int query_index, target_index; int start, end; PopcountSearchOrder popcount_order; chemfp_search_result *result; chemfp_popcount_f calc_popcount; chemfp_intersect_popcount_f calc_intersect_popcount; /* This is C. We don't check for illegal input values. 
*/ if (query_start >= query_end) { return 0; } /* k == 0 is a valid input, and of course the result is no matches */ if (k == 0) { return CHEMFP_OK; } fp_size = (num_bits+7)/8; if (target_popcount_indices == NULL) { /* precomputed targets aren't available. Use the slower algorithm. */ return RENAME(knearest_tanimoto_arena_no_popcounts)( k, threshold, num_bits, query_storage_size, query_arena, query_start, query_end, target_storage_size, target_arena, target_start, target_end, results); } /* Choose popcounts optimized for this case */ calc_popcount = chemfp_select_popcount(num_bits, query_storage_size, query_arena); calc_intersect_popcount = chemfp_select_intersect_popcount( num_bits, query_storage_size, query_arena, target_storage_size, target_arena); /* Loop through the query fingerprints */ for (query_index=0; query_index < (query_end-query_start); query_index++) { result = results+query_index; query_fp = query_arena + (query_start+query_index) * query_storage_size; query_threshold = threshold; query_popcount = calc_popcount(fp_size, query_fp); if (query_popcount == 0) { /* By definition this will never return hits. Even if threshold == 0.0. */ /* (I considered returning the first k hits, but that's chemically meaningless.) */ /* XXX change this. 
Make it returns the first k hits */ continue; } /* Search the bins using the ordering from Swamidass and Baldi.*/ init_search_order(&popcount_order, query_popcount, num_bits); /* Look through the sections of the arena in optimal popcount order */ while (next_popcount(&popcount_order, query_threshold)) { target_popcount = popcount_order.popcount; best_possible_score = popcount_order.score; /* If we can't beat the query threshold then we're done with the targets */ if (best_possible_score < query_threshold) { break; } /* Scan through the targets which have the given popcount */ start = target_popcount_indices[target_popcount]; end = target_popcount_indices[target_popcount+1]; if (!check_bounds(&popcount_order, &start, &end, target_start, target_end)) { continue; } /* Iterate over the target fingerprints */ target_fp = target_arena + start*target_storage_size; popcount_sum = (double)(query_popcount + target_popcount); target_index = start; /* There are fewer than 'k' elements in the heap*/ if (result->num_hits < k) { for (; target_index<end; target_index++, target_fp += target_storage_size) { intersect_popcount = calc_intersect_popcount(fp_size, query_fp, target_fp); score = intersect_popcount / (popcount_sum - intersect_popcount); /* The heap isn't full; only check if we're at or above the query threshold */ if (score >= query_threshold) { chemfp_add_hit(result, target_index, score); if (result->num_hits == k) { chemfp_heapq_heapify(k, result, (chemfp_heapq_lt) double_score_lt, (chemfp_heapq_swap) double_score_swap); query_threshold = result->scores[0]; /* We're going to jump to the "heap is full" section */ /* Since we leave the loop early, I need to advance the pointers */ target_index++; target_fp += target_storage_size; goto heap_replace; } } /* Added to heap */ } /* Went through target fingerprints */ /* If we're here then the heap did not fill up. 
Try the next popcount */ continue; } heap_replace: /* We only get here if the heap contains k element */ /* Earlier we tested for "best_possible_score<query_threshold". */ /* The test to replace an element in the heap is more stringent. */ if (query_threshold >= best_possible_score) { /* Can't do better. Might as well give up. */ break; } /* Scan through the target fingerprints; can we improve over the threshold? */ for (; target_index<end; target_index++, target_fp += target_storage_size) { intersect_popcount = calc_intersect_popcount(fp_size, query_fp, target_fp); score = intersect_popcount / (popcount_sum - intersect_popcount); /* We need to be strictly *better* than what's in the heap */ if (score > query_threshold) { result->indices[0] = target_index; result->scores[0] = score; chemfp_heapq_siftup(k, result, 0, (chemfp_heapq_lt) double_score_lt, (chemfp_heapq_swap) double_score_swap); query_threshold = result->scores[0]; if (query_threshold >= best_possible_score) { /* we can't do any better in this section (or in later ones) */ break; } } /* heapreplaced the old smallest item with the new item */ } /* looped over fingerprints */ } /* Went through all the popcount regions */ /* We have scanned all the fingerprints. Is the heap full? */ if (result->num_hits < k) { /* Not full, so need to heapify it. */ chemfp_heapq_heapify(result->num_hits, result, (chemfp_heapq_lt) double_score_lt, (chemfp_heapq_swap) double_score_swap); } } /* looped over all queries */ return CHEMFP_OK; } /***** Special support for the NxN symmetric case ******/ /* TODO: implement the k-nearest variant. It's harder because a k-nearest search, combined with the Swamidass and Baldi search limits, is not reflexive. 
*/ int RENAME(chemfp_count_tanimoto_hits_arena_symmetric)( /* Count all matches within the given threshold */ double threshold, /* Number of bits in the fingerprint */ int num_bits, /* Fingerprint arena */ int storage_size, const unsigned char *arena, /* Row start and end indices */ int query_start, int query_end, /* Column start and end indices */ int target_start, int target_end, /* Target popcount distribution information */ int *target_popcount_indices, /* Results _increment_ existing values in the array - remember to initialize! */ int *result_counts ) { int fp_size = (num_bits+7) / 8; int query_index, target_index; int start, end; int query_popcount, target_popcount; int start_target_popcount, end_target_popcount, intersect_popcount; int count; double popcount_sum, score; const unsigned char *query_fp, *target_fp; chemfp_popcount_f calc_popcount; chemfp_intersect_popcount_f calc_intersect_popcount; #if USE_OPENMP == 1 /* Reduce contention by using a per-thread counts array. For details see: */ /* http://www.dalkescientific.com/writings/diary/archive/2012/01/17/I_parallelize_an_algorithm.html */ int i; int num_threads; int *parallel_counts; int *per_thread_counts; int per_thread_size; #endif /* Check that we're not obviously in the lower triangle */ if (query_start >= target_end) { /* No possible hits */ return CHEMFP_OK; } /* Shift the target towards the upper triangle, if needed */ if (target_start < query_start) { target_start = query_start; } /* Check for edge cases */ if ((query_start >= query_end) || (target_start >= target_end) || (threshold > 1.0)) { return CHEMFP_OK; } if (threshold <= 0.0) { /* By definition, everything matches */ /* FIXME: this is inelegant. 
I'm finding the symmetry and boundary conditions a bit tricky */ for (query_index=query_start; query_index<query_end; query_index++) { for (target_index=MAX(query_index+1, target_start); target_index<target_end; target_index++) { result_counts[query_index] += 1; result_counts[target_index] += 1; } } return CHEMFP_OK; } /* Prevent overflow if someone uses a threshold of, say, 1E-80 */ /* (Not really needed unless you trap IEEE 754 overflow errors) */ if (threshold > 0.0 && threshold < 1.0/num_bits) { threshold = 0.5 / num_bits; } /* target_popcount_indices must exist; if you don't care for the factor */ /* of two performance increase by precomputing/presorting based on popcount */ /* then why are you interested in the factor of two based on symmetry? */ /* Choose popcount methods optimized for this case */ calc_popcount = chemfp_select_popcount(num_bits, storage_size, arena); calc_intersect_popcount = chemfp_select_intersect_popcount( num_bits, storage_size, arena, storage_size, arena); /* This uses the limits from Swamidass and Baldi */ #if USE_OPENMP == 1 num_threads = omp_get_max_threads(); per_thread_size = MAX(query_end, target_end); parallel_counts = (int *) calloc(num_threads * per_thread_size, sizeof(int)); if (!parallel_counts) { return CHEMFP_NO_MEM; } #pragma omp parallel for \ private(query_fp, query_popcount, start_target_popcount, end_target_popcount, \ count, target_popcount, start, end, target_fp, popcount_sum, target_index, \ intersect_popcount, score, per_thread_counts) \ schedule(dynamic) #endif for (query_index = query_start; query_index < query_end; query_index++) { query_fp = arena + (query_index * storage_size); query_popcount = calc_popcount(fp_size, query_fp); #if USE_OPENMP == 1 per_thread_counts = parallel_counts+(omp_get_thread_num() * per_thread_size); #endif /* Special case when popcount(query) == 0; everything has a score of 0.0 */ if (query_popcount == 0) { continue; } /* Figure out which fingerprints to search */ 
start_target_popcount = (int)(query_popcount * threshold); end_target_popcount = (int)(ceil(query_popcount / threshold)); if (end_target_popcount > num_bits) { end_target_popcount = num_bits; } count = 0; for (target_popcount = start_target_popcount; target_popcount <= end_target_popcount; target_popcount++) { start = target_popcount_indices[target_popcount]; end = target_popcount_indices[target_popcount+1]; if (start < target_start) { start = target_start; } start = MAX(query_index+1, start); if (end > target_end) { end = target_end; } target_fp = arena + (start * storage_size); popcount_sum = query_popcount + target_popcount; for (target_index = start; target_index < end; target_index++, target_fp += storage_size) { intersect_popcount = calc_intersect_popcount(fp_size, query_fp, target_fp); score = intersect_popcount / (popcount_sum - intersect_popcount); if (score >= threshold) { /* Can accumulate the score for the row. This is likely a register */ /* instead of a memory location so should be slightly faster. */ count++; /* I can't use the same technique for the symmetric match */ #if USE_OPENMP == 1 per_thread_counts[target_index]++; #else result_counts[target_index]++; #endif } } } /* Save the accumulated row counts */ #if USE_OPENMP == 1 if (count) { per_thread_counts[query_index] += count; } #else result_counts[query_index] += count; #endif } /* went through each of the queries */ #if USE_OPENMP == 1 /* Merge the per-thread results into the counts array */ /* TODO: start from MIN(query_start, query_end) */ /* TODO: parallelize? 
*/ for (query_index = 0; query_index < per_thread_size; query_index++) { count = 0; for (i=0; i<num_threads; i++) { count += parallel_counts[per_thread_size * i + query_index]; } result_counts[query_index] += count; } free(parallel_counts); #endif return CHEMFP_OK; } int RENAME(chemfp_threshold_tanimoto_arena_symmetric)( /* Within the given threshold */ double threshold, /* Number of bits in the fingerprint */ int num_bits, /* Arena */ int storage_size, const unsigned char *arena, /* start and end indices for the rows and columns */ int query_start, int query_end, int target_start, int target_end, /* Target popcount distribution information */ /* (must have at least num_bits+1 elements) */ int *popcount_indices, /* Results go here */ /* NOTE: This must have enough space for all of the fingerprints! */ chemfp_search_result *results) { int fp_size = (num_bits+7) / 8; int query_index, target_index; int start, end; const unsigned char *query_fp, *target_fp; int query_popcount, target_popcount; int start_target_popcount, end_target_popcount; chemfp_popcount_f calc_popcount; chemfp_intersect_popcount_f calc_intersect_popcount; int numerator, denominator, popcount_sum, intersect_popcount; double score; int add_hit_error = 0; /* Check that we're not obviously in the lower triangle */ if (query_start >= target_end) { /* No possible hits */ return CHEMFP_OK; } /* Shift the target towards the upper triangle, if needed */ if (target_start < query_start) { target_start = query_start; } /* Corner cases where I don't need to do anything */ if ((query_start >= query_end) || (target_start >= target_end) || (threshold < 0)) { return CHEMFP_OK; } /* if (threshold == 0.0) { */ /* TODO: Optimize this case */ /* Prevent overflow if someone uses a threshold of, say, 1E-80 */ if (threshold > 0.0 && threshold < 1.0/num_bits) { threshold = 0.5 / num_bits; } if (threshold > 1.0) { return CHEMFP_OK; } calc_popcount = chemfp_select_popcount(num_bits, storage_size, arena); 
calc_intersect_popcount = chemfp_select_intersect_popcount( num_bits, storage_size, arena, storage_size, arena); denominator = num_bits * 10; numerator = (int)(threshold * denominator); /* This uses the limits from Swamidass and Baldi */ /* It doesn't use the search ordering because it's supposed to find everything */ #if USE_OPENMP == 1 #pragma omp parallel for \ private(query_fp, query_popcount, start_target_popcount, end_target_popcount, \ target_popcount, start, end, target_fp, popcount_sum, target_index, intersect_popcount, score) \ schedule(dynamic) #endif for (query_index = query_start; query_index < query_end; query_index++) { query_fp = arena + (query_index * storage_size); query_popcount = calc_popcount(fp_size, query_fp); /* Special case when popcount(query) == 0; everything has a score of 0.0 */ if (query_popcount == 0) { if (threshold == 0.0) { /* Only populate the upper triangle */ target_index = MAX(query_index+1, target_start); for (;target_index < target_end; target_index++) { if (!chemfp_add_hit(results+query_index, target_index, 0.0)) { add_hit_error = 1; } } } continue; } /* Figure out which fingerprints to search, based on the popcount */ if (threshold == 0.0) { start_target_popcount = 0; end_target_popcount = num_bits; } else { start_target_popcount = (int)(query_popcount * threshold); end_target_popcount = (int)(ceil(query_popcount / threshold)); if (end_target_popcount > num_bits) { end_target_popcount = num_bits; } } for (target_popcount=start_target_popcount; target_popcount<=end_target_popcount; target_popcount++) { start = popcount_indices[target_popcount]; end = popcount_indices[target_popcount+1]; if (start < target_start) { start = target_start; } if (end > target_end) { end = target_end; } popcount_sum = query_popcount + target_popcount; for (target_index = MAX(query_index+1, start); target_index < end; target_index++) { target_fp = arena + (target_index * storage_size); intersect_popcount = calc_intersect_popcount(fp_size, query_fp, 
target_fp); if (denominator * intersect_popcount >= numerator * (popcount_sum - intersect_popcount)) { /* Add to the upper triangle */ score = ((double) intersect_popcount) / (popcount_sum - intersect_popcount); if (!chemfp_add_hit(results+query_index, target_index, score)) { add_hit_error = 1; } } } } } /* went through each of the queries */ if (add_hit_error) { return CHEMFP_NO_MEM; } return CHEMFP_OK; } /* I couldn't figure out a way to take advantage of symmetry */ /* This is the same as the NxM algorithm except that it excludes self-matches */ int RENAME(chemfp_knearest_tanimoto_arena_symmetric)( /* Find the 'k' nearest items */ int k, /* Within the given threshold */ double threshold, /* Number of bits in the fingerprint */ int num_bits, /* Arena */ int storage_size, const unsigned char *arena, /* start and end indices for the rows and columns */ int query_start, int query_end, int target_start, int target_end, /* Target popcount distribution information */ /* (must have at least num_bits+1 elements) */ int *popcount_indices, /* Results go into these arrays */ chemfp_search_result *results ) { int fp_size; int query_popcount, target_popcount, intersect_popcount; double score, best_possible_score, popcount_sum, query_threshold; const unsigned char *query_fp, *target_fp; int query_index, target_index; int start, end; PopcountSearchOrder popcount_order; chemfp_search_result *result; chemfp_popcount_f calc_popcount; chemfp_intersect_popcount_f calc_intersect_popcount; if (query_start >= query_end) { return 0; } /* k == 0 is a valid input, and of course the result is no matches */ if (k == 0) { return CHEMFP_OK; } fp_size = (num_bits+7)/8; /* Choose popcounts optimized for this case */ calc_popcount = chemfp_select_popcount(num_bits, storage_size, arena); calc_intersect_popcount = chemfp_select_intersect_popcount( num_bits, storage_size, arena, storage_size, arena); /* Loop through the query fingerprints */ for (query_index=0; query_index < 
(query_end-query_start); query_index++) { result = results+query_index; query_fp = arena + (query_start+query_index) * storage_size; query_threshold = threshold; query_popcount = calc_popcount(fp_size, query_fp); if (query_popcount == 0) { /* By definition this will never return hits. Even if threshold == 0.0. */ /* (I considered returning the first k hits, but that's chemically meaningless.) */ /* XXX change this. Make it returns the first k hits */ continue; } /* Search the bins using the ordering from Swamidass and Baldi.*/ init_search_order(&popcount_order, query_popcount, num_bits); /* Look through the sections of the arena in optimal popcount order */ while (next_popcount(&popcount_order, query_threshold)) { target_popcount = popcount_order.popcount; best_possible_score = popcount_order.score; /* If we can't beat the query threshold then we're done with the targets */ if (best_possible_score < query_threshold) { break; } /* Scan through the targets which have the given popcount */ start = popcount_indices[target_popcount]; end = popcount_indices[target_popcount+1]; if (!check_bounds(&popcount_order, &start, &end, target_start, target_end)) { continue; } /* Iterate over the target fingerprints */ target_fp = arena + start*storage_size; popcount_sum = (double)(query_popcount + target_popcount); target_index = start; /* There are fewer than 'k' elements in the heap*/ if (result->num_hits < k) { for (; target_index<end; target_index++, target_fp += storage_size) { intersect_popcount = calc_intersect_popcount(fp_size, query_fp, target_fp); score = intersect_popcount / (popcount_sum - intersect_popcount); /* The heap isn't full; only check if we're at or above the query threshold */ if (score >= query_threshold) { if (query_index == target_index) { continue; /* Don't match self */ } chemfp_add_hit(result, target_index, score); if (result->num_hits == k) { chemfp_heapq_heapify(k, result, (chemfp_heapq_lt) double_score_lt, (chemfp_heapq_swap) double_score_swap); 
query_threshold = result->scores[0]; /* We're going to jump to the "heap is full" section */ /* Since we leave the loop early, I need to advance the pointers */ target_index++; target_fp += storage_size; goto heap_replace; } } /* Added to heap */ } /* Went through target fingerprints */ /* If we're here then the heap did not fill up. Try the next popcount */ continue; } heap_replace: /* We only get here if the heap contains k element */ /* Earlier we tested for "best_possible_score<query_threshold". */ /* The test to replace an element in the heap is more stringent. */ if (query_threshold >= best_possible_score) { /* Can't do better. Might as well give up. */ break; } /* Scan through the target fingerprints; can we improve over the threshold? */ for (; target_index<end; target_index++, target_fp += storage_size) { intersect_popcount = calc_intersect_popcount(fp_size, query_fp, target_fp); score = intersect_popcount / (popcount_sum - intersect_popcount); /* We need to be strictly *better* than what's in the heap */ if (score > query_threshold) { if (query_index == target_index) { continue; /* Don't match self */ } result->indices[0] = target_index; result->scores[0] = score; chemfp_heapq_siftup(k, result, 0, (chemfp_heapq_lt) double_score_lt, (chemfp_heapq_swap) double_score_swap); query_threshold = result->scores[0]; if (query_threshold >= best_possible_score) { /* we can't do any better in this section (or in later ones) */ break; } } /* heapreplaced the old smallest item with the new item */ } /* looped over fingerprints */ } /* Went through all the popcount regions */ /* We have scanned all the fingerprints. Is the heap full? */ if (result->num_hits < k) { /* Not full, so need to heapify it. */ chemfp_heapq_heapify(result->num_hits, result, (chemfp_heapq_lt) double_score_lt, (chemfp_heapq_swap) double_score_swap); } } /* looped over all queries */ return CHEMFP_OK; }
SE1P_direct_fd.c
#include "mex.h" #include "SE_direct.h" #include "mathint.h" #define IDX prhs[0] #define X prhs[1] // Source locations #define Q prhs[2] // Source strengths #define OPT prhs[3] // Parameters #define PHI plhs[0] // Output #ifndef VERBOSE #define VERBOSE 0 #endif /* common option-unpacking */ void unpack_opt(ewald_opts* opt, const mxArray* mx_opt) { // mandatory options -- will trigger core dump if missing opt->xi = mxGetScalar(mxGetField(mx_opt,0,"xi")); if(opt->xi==0) mexErrMsgTxt("xi cannot be zero"); double* box = mxGetPr(mxGetField(mx_opt,0,"box")); opt->box[0] = box[0]; // layers: mandatory for ewald sums that are truncated const mxArray* mx_layers = mxGetField(mx_opt,0,"layers"); if(mx_layers) opt->layers = (int)mxGetScalar(mx_layers); else opt->layers = -1; } // MATLAB (one-based, doubles) to C (zero-based, integers) index translation void index_translation(int* idx, const double* idx_d, int N) { for(int i=0; i<N; i++) idx[i] = (int)idx_d[i] - 1; } #ifdef FORCE void SE1P_direct_fd(double* restrict force, const int* restrict idx, int nidx, const double* restrict x, const double* restrict q, int N, const ewald_opts opt) { const double xi = opt.xi; double xi2 = xi*xi; double TwoPiOverL = 2.*PI/opt.box[0]; #ifdef _OPENMP #pragma omp parallel for #endif for(int m=0; m<nidx; m++) { double xm[] = {x[idx[m]],x[idx[m]+N],x[idx[m]+2*N]}; double f[] = {0, 0, 0}; for(int n = 0; n<N; n++) { double rvec[] = {xm[0]-x[n],xm[1]-x[n+N], xm[2]-x[n+2*N]}; double rho2 = rvec[1]*rvec[1] + rvec[2]*rvec[2]; double b = rho2*xi2; double qn = q[n]; for(int j0 = -opt.layers; j0<=opt.layers; j0++) { if(j0 == 0) continue; double k = TwoPiOverL*j0; double kr = -k*rvec[0]; double a = k*k/(4.*xi2); double K0; K0 = computeINCBK0(a,b,0); f[0] += -qn*k*sin(kr)*K0; K0 = computeINCBK0(a,b,1); f[1] += 2.*qn*xi2*cos(kr)*rvec[1]*K0; f[2] += 2.*qn*xi2*cos(kr)*rvec[2]*K0; } } force[m ] = -f[0]/(opt.box[0]); force[m+ nidx] = -f[1]/(opt.box[0]); force[m+2*nidx] = -f[2]/(opt.box[0]); } /* 
gsl_integration_workspace_free (w); */ } #else void SE1P_direct_fd(double* restrict phi, const int* restrict idx, int nidx, const double* restrict x, const double* restrict q, int N, const ewald_opts opt) { double p; const double xi = opt.xi; double xi2 = xi*xi; double TwoPiOverL = 2.*PI/opt.box[0]; // int rep; #ifdef _OPENMP #pragma omp parallel for private(p) #endif for(int m=0; m<nidx; m++) { double xm[3] = {x[idx[m] ], x[idx[m]+N ], x[idx[m]+2*N]}; p = 0; for(int j0 = 1; j0<=opt.layers; j0++) { double k = TwoPiOverL*j0; double a = k*k/(4.*xi2); for(int n = 0; n<N; n++) { double r = xm[0]-x[n]; double rho2= ( (xm[1]-x[n+N ])*(xm[1]-x[n+N ])+ (xm[2]-x[n+2*N])*(xm[2]-x[n+2*N]) ); double b = rho2*xi2; double qn = q[n]; double K0 = computeK0(a,b); double kr = -k*r; p += 2*qn*cos(kr)*K0; } } phi[m] = p/(opt.box[0]); } } #endif /* no input checking is done */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] ) { // input dims const int N = mxGetM(X); const int num_eval = mxGetN(IDX); // FIXME: indices assumed to be row vec const double* idx_d = mxGetPr(IDX); int* idx = mxMalloc(num_eval*sizeof(int)); index_translation(idx, idx_d, num_eval); const double* x = mxGetPr(X); const double* q = mxGetPr(Q); #ifndef FORCE PHI = mxCreateDoubleMatrix(num_eval, 1, mxREAL); double* restrict phi = mxGetPr(PHI); #else /* This is to allocate 3 vectors for the force. * (FIXME) Note that the variable is still called PHI.*/ PHI = mxCreateDoubleMatrix(num_eval, 3, mxREAL); double* restrict phi = mxGetPr(PHI); #endif ewald_opts opt; unpack_opt(&opt, OPT); if(VERBOSE) { mexPrintf("[EWALD (%s)] MEX N=(%d,%d) ","FD1P",N,num_eval); mexPrintf("xi = %.2f, layers=%d\n", opt.xi,opt.layers); } // call kernel SE1P_direct_fd(phi, idx, num_eval, x, q, N, opt); mxFree(idx); }
for-2.c
void bar (short *);

/* NOTE(review): this looks like a compiler testcase (OpenMP worksharing
 * loops over raw pointers) — the specific spellings of the test and the
 * increment expressions appear deliberate, so they must not be "cleaned
 * up". Each loop calls bar(p) for every pointer p in the half-open range,
 * walking forward from q up to r, or backward from s down to r. */
void foo (short *q, short *r, short *s)
{
  short *p;
  /* != test with post-increment. */
#pragma omp for
  for (p = q; p != r; p++)
    bar (p);
  /* != test with post-decrement (backward walk). */
#pragma omp for
  for (p = s; p != r; p--)
    bar (p);
  /* Increment written as p = p + 1. */
#pragma omp for
  for (p = q; p != r; p = p + 1)
    bar (p);
  /* Decrement written as p = p - 1. */
#pragma omp for
  for (p = s; p != r; p = p - 1)
    bar (p);
  /* Increment with the constant on the left: p = 1 + p. */
#pragma omp for
  for (p = q; p != r; p = 1 + p)
    bar (p);
  /* Decrement with the constant on the left: p = -1 + p. */
#pragma omp for
  for (p = s; p != r; p = -1 + p)
    bar (p);
  /* Compound assignment: p += 1. */
#pragma omp for
  for (p = q; p != r; p += 1)
    bar (p);
  /* Compound assignment: p -= 1. */
#pragma omp for
  for (p = s; p != r; p -= 1)
    bar (p);
}
shape.h
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ /* * shape.h * * Created on: Dec 28, 2015 * Author: agibsonccc */ #ifndef SHAPE_H_ #define SHAPE_H_ #include <cstring> #include <cstdio> #include "../dll.h" #include "../nd4jmalloc.h" #include "../templatemath.h" #include "../helpers/logger.h" #include "../pointercast.h" #include "../cnpy/cnpy.h" #include <op_boilerplate.h> #define MAX_DIMENSION 0x7fffffff #define MAX_NUM_THREADS 1024 #define MAX_RANK 32 #define MAX_SHAPEINFOLENGTH 2*MAX_RANK+4 #define MAX_COORD 3 #define PREALLOC_SIZE 33554432 #ifdef __CUDACC__ #include <cuda.h> #include <cuda_runtime.h> #endif #ifdef __CUDACC__ #define INLINEDEF inline #else #define INLINEDEF inline #endif #include "../pairwise_util.h" #include <stdint.h> #include <array/ArrayOptions.h> typedef unsigned int uint; namespace shape { /** * Shape information approximating * the information on an ndarray */ struct ND4J_EXPORT ShapeInformation { _CUDA_HD ShapeInformation(Nd4jLong *shape_ = nullptr, Nd4jLong *stride_ = nullptr, char order_ = 0, int rank_ = 0, int offset_ = 0, int elementWiseStride_ = 0) : shape(shape_), stride(stride_), order(order_), rank(rank_), offset(offset_), elementWiseStride(elementWiseStride_) {} Nd4jLong *shape; Nd4jLong *stride; char order; int rank; int offset; int 
elementWiseStride; }; /** * Indexing information * for bounds checking */ struct ND4J_EXPORT CurrentIndexing { int numElementsPerThread; int blockStartingIndex; int startingThreadIndex; int endingThreadIndex; }; ND4J_EXPORT _CUDA_HD bool shapeEquals(const int shape1Rank, const Nd4jLong *shape1, const int shape2Rank, const Nd4jLong *shape2); ND4J_EXPORT _CUDA_HD Nd4jLong* detachShape(Nd4jLong *originalShape); ND4J_EXPORT _CUDA_HD Nd4jLong* copyShape(Nd4jLong *originalShape); ND4J_EXPORT _CUDA_HD bool shapeEquals(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2); ND4J_EXPORT _CUDA_HD bool shapeEquals(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2, const Nd4jLong *shapeInfo3); ND4J_EXPORT _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2); ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2); ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *stride1,int rank1,Nd4jLong *stride2,int rank2); ND4J_EXPORT _CUDA_HD bool equalsSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB); ND4J_EXPORT _CUDA_HD bool equalsTypesAndShapesSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB); ND4J_EXPORT _CUDA_HD bool equalsStrict(const Nd4jLong *shapeA, const Nd4jLong *shapeB); // returns true if ranks, shapes and strides are the same ND4J_EXPORT _CUDA_HD bool haveSameShapeAndStrides(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2); ND4J_EXPORT _CUDA_HD bool haveSameShapeAndStrides(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2, const Nd4jLong *shapeInfo3); ND4J_EXPORT _CUDA_HD int sizeAt(const Nd4jLong *shape, const int dim); template <typename T> ND4J_EXPORT _CUDA_HD void fill(T* buffer, T value, Nd4jLong length); ND4J_EXPORT _CUDA_HD void traceNew(int id); ND4J_EXPORT _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength); ND4J_EXPORT _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength); ND4J_EXPORT _CUDA_HD bool canReshape(const int oldRank, Nd4jLong* 
oldShape, const int newRank, Nd4jLong* newShape, bool isFOrder); ND4J_EXPORT _CUDA_HD bool reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo, const int newRank, const Nd4jLong* newShape, Nd4jLong* newShapeInfo); /** * Get the shape info buffer * for the given rank and shape. */ ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape); ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *buffer); /** * Get the shape info buffer * for the given rank and shape. */ ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape); ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *output); #ifdef __CUDACC__ __device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size); #endif /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank); ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret); /** * Computes the standard packed array strides for a given shape. 
* * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank); ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret); ND4J_EXPORT _CUDA_HD void updateStrides(Nd4jLong *shape, const char order); ND4J_EXPORT _CUDA_HD void updateStrides(const int rank, const Nd4jLong *shapeOnly, Nd4jLong *stridesOnly, const char order); // check whether input dimensions are permuted, not permuted dimensions order have to be 0,....,rank-1 template <typename T> ND4J_EXPORT _CUDA_HD bool isDimPermuted(const T* dimensions, const int dimSize); /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum); ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret); /** * Computes the standard packed array strides for a given shape. 
* * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum); ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret); /** * @param toCopy the shape to copy * @return a copy of the original struct */ ND4J_EXPORT _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy); ND4J_EXPORT _CUDA_HD bool strideDescendingCAscendingF(const Nd4jLong *shapeBuffer); ND4J_EXPORT _CUDA_HD bool isContiguous(const Nd4jLong* shapeInfo); /** * copy-past from java hasDefaultStridesForShape function * check whether array is not permuted and has contiguous elements in memory */ ND4J_EXPORT _CUDA_HD bool areStridesDefault(const Nd4jLong* shapeInfo); /** * Compute the element wise stride * for a given shape/stride configuration * @param rank the rank of the shape/stride * @param shape the shape * @param stride the stride * @param isFOrder 0 or 1 for whether the array is f * ordered or not * @return 0 if there is no element wise stride the * element wise stride of reshape(1,length) otherwise */ ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder); /** * Compute the element wise stride * for a given shape/stride configuration * @param rank the rank of the shape/stride * @param shape the shape * @param stride the stride * @param isFOrder 0 or 1 for whether the array is f * ordered or not * @return 0 if there is no element wise stride the * element wise stride of reshape(1,length) otherwise */ ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder, Nd4jLong *dimension, int dimensionLength); ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride); ND4J_EXPORT _CUDA_HD Nd4jLong 
*shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer); /** * * @param length * @param shape * @param rearrange * @return */ ND4J_EXPORT _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int* rearrange); /** * In place permute swap * @param length * @param shape * @param rearrange */ ND4J_EXPORT _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int* rearrange); ND4J_EXPORT _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange); ND4J_EXPORT _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *out); ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const int *rearrange, Nd4jLong len = -1); /** * Rearrange the permute indexes * according to which dimensions are specified. * * For example, dimension is implicitly: * 0,1,2 * * If you want to do a reduce along dimensions 0 and 1, * you need to permute the indexes to be: * 2,0,1 * * which will give us the ability to ierate along an element * wise stride. 
*/ ND4J_EXPORT _CUDA_HD Nd4jLong* createPermuteIndexes(int originalRank, int *dimension,int dimensionLength); ND4J_EXPORT _CUDA_HD Nd4jLong* computeResultShape(Nd4jLong *originalShapeBuffer, int *dimension,int dimensionLength); /** * This method does inplace transpose of given shapeBuffer * * @param shapeBuffer */ ND4J_EXPORT _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer); /** * Get the ordering for the device * @param length * @param shape * @param stride * @param elementStride * @return */ ND4J_EXPORT _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride); /** * Ensure that every value in the re arrange * array is unique * @param arr * @param shape * @param arrLength * @param shapeLength * @return */ template <typename T> ND4J_EXPORT _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength); /** * Permute the shape information * @param info the shape information to permute * @param rearrange the order to re arrange * @param rank the rank of the rearrange array */ ND4J_EXPORT _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank); /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param rank the rank of cthe shape */ ND4J_EXPORT _CUDA_HD int isVector(Nd4jLong *shape, int rank); /** * When 1 dimension is the whole length of the * array */ ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank); ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD int isVector(const Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim); ND4J_EXPORT _CUDA_HD bool isCommonVector(const Nd4jLong *shapeInfo, int& posOfNonUnityDim); ND4J_EXPORT _CUDA_HD bool isRowVector(const Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo); /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param rank 
the rank of the shape */ ND4J_EXPORT _CUDA_HD int isMatrix(Nd4jLong *shape, int rank); INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo); /** * Returns the shape portion of an information * buffer */ ND4J_EXPORT _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer); /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ template <typename T> ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy); template <typename T> ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret); /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ template <typename T> ND4J_EXPORT _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to); /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ ND4J_EXPORT _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes); /** * Permute the given strides * in the given rearrange order * @param toPermute the buffer to permute * @param shapeRank the length of the buffer to permute * @param rearrange the rearrange order (must be 0 based indexes * and all must be filled in) * @return the rearranged array */ //ND4J_EXPORT _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, Nd4jLong *rearrange); /** * Return the slice (shape + 1 in pointer arithmetic) * @param shape the shape to take the slice of * @return the shape array - the first entry */ ND4J_EXPORT _CUDA_HD Nd4jLong *slice(Nd4jLong *shape); ND4J_EXPORT _CUDA_HD int slices(Nd4jLong *shapeBuffer); ND4J_EXPORT _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer); /** * Returns the length of the * shape information buffer: * rank * 2 + 3 * @param rank the rank to get the shape * info length for * @return rank * 2 + 4 */ ND4J_EXPORT _CUDA_HD int shapeInfoLength(int rank); ND4J_EXPORT _CUDA_HD int shapeInfoLength(Nd4jLong* shapeInfo); ND4J_EXPORT _CUDA_HD int shapeInfoLength(const Nd4jLong* 
shapeInfo); ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(int rank); ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(const Nd4jLong* shapeInfo); ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(const Nd4jLong* shapeInfo); /** * Returns the rank portion of * an information buffer */ ND4J_EXPORT _CUDA_HD int rank(const Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD int rank(const int *shapeInfo); ND4J_EXPORT _CUDA_HD int rank(const unsigned int *shapeInfo); // returns pointer on elementWiseStride ND4J_EXPORT _CUDA_HD Nd4jLong* ews(Nd4jLong* shapeInfo); /** * returns pointer on elementWiseStride */ ND4J_EXPORT _CUDA_HD Nd4jLong* ews(Nd4jLong* shapeInfo); /** * Converts a raw int buffer of the layout: * rank * shape * stride * offset * elementWiseStride * * where shape and stride are both straight int pointers */ ND4J_EXPORT _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer); /** * Returns the stride portion of an information * buffer */ ND4J_EXPORT _CUDA_HD Nd4jLong *stride(Nd4jLong *buffer); ND4J_EXPORT _CUDA_HD Nd4jLong *stride(const Nd4jLong *buffer); /** * Compute the length of the given shape */ ND4J_EXPORT _CUDA_HD bool isEmpty(const Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD Nd4jLong length(const Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape); ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape); /*** * Returns the offset portion of an information buffer */ ND4J_EXPORT _CUDA_HD Nd4jLong offset(Nd4jLong *buffer); ND4J_EXPORT _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer); /** * Returns the ordering * for this shape information buffer */ ND4J_EXPORT _CUDA_HD char order(const Nd4jLong *buffer); /** * Returns the type */ ND4J_EXPORT _CUDA_HD Nd4jLong type(const Nd4jLong* shapeInfo); /** * Returns the element wise stride for this information * buffer */ ND4J_EXPORT _CUDA_HD Nd4jLong elementWiseStride(const Nd4jLong *buffer); /** * Returns the element wise stride for this information * buffer * 
relative to a dimension and ordering for a reduction index */ ND4J_EXPORT _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong *buffer, int *dimension, int dimensionLength); /** * Returns whether * the given shape info buffer * represents a scalar shape */ ND4J_EXPORT _CUDA_HD int isScalar(Nd4jLong *info); /** * Returns whether * the given shape information * represents a scalar * shape or not */ ND4J_EXPORT _CUDA_HD int isScalar(volatile ShapeInformation *info); /** * Return a copy of this array with the * given index omitted * * @param data the data to copy * @param indexes the index of the item to remove * @param dataLength the length of the data array * @param indexesLength the length of the data array * @return the new array with the omitted * * item */ template <typename T1, typename T2> ND4J_EXPORT _CUDA_HD void removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *out); /** * Return a copy of this array with the * given index omitted * * @param data the data to copy * @param indexes the index of the item to remove * @param dataLength the length of the data array * @param indexesLength the length of the data array * @return the new array with the omitted * * item */ template <typename T1, typename T2> ND4J_EXPORT _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength); /** * Iterate over a given set of indexes * the begin and end indexes are 0 based. * 1 padding is automatically assumed for the ending. * * For example if you want to iterate over 0 to 4 * it will go to 4 rather than 3. * * indexes should be the indexes to exclude * indexes length should be the length of indexes */ ND4J_EXPORT _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end); /** * Computes the offset for accessing * a global element given the shape information * and the offset to be read. 
*/ //#ifdef __CUDACC__ // __device__ //#endif // ND4J_EXPORT int tadOffset(shape::ShapeInformation *xInfo, int offset); /** * Returns a shape * forces the given length to be 2. * @param shape the shape to modify * @param dimension the dimension (row or column) * for the shape to be returned as * @return the new shape */ ND4J_EXPORT _CUDA_HD Nd4jLong* ensureVectorShape(Nd4jLong *shape); ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo(); ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret); /** * Generate an int buffer * up to the given length * at the specified increment * */ template <typename T> ND4J_EXPORT _CUDA_HD T* range(int from, int to, int increment); /** * Range between from and two with an * increment of 1 */ template <typename T> ND4J_EXPORT _CUDA_HD T* range(int from, int to); /** * Keep the given indexes * in the data */ ND4J_EXPORT _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength); /** * Generate reverse copy of the data * @param data * @param length * @return */ template <typename T> ND4J_EXPORT _CUDA_HD T* reverseCopy(T *data, Nd4jLong length); template <typename T> ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length); template <typename T> ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length); template <typename T1, typename T2> ND4J_EXPORT _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length); /** * * @param arr1 * @param arr1Length * @param arr2 * @param arr2Length * @return */ template <typename T> ND4J_EXPORT _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length); /** * * @param numArrays * @param numTotalElements * @param arr * @param lengths * @return */ template <typename T> ND4J_EXPORT _CUDA_HD T* concat(int numArrays, int numTotalElements, Nd4jLong **arr, Nd4jLong *lengths); /** * Get the length per slice of the * given shape and the dimension * @param rank the rank of the shape * @param shape 
the shape of to get * the length per slice for * @param dimension the dimension to * get the length per slice for * @param dimensionLength the length of the dimension array * @return the length per slice of the given shape * along the given dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int *dimension, int dimensionLength); /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank, int index, Nd4jLong *shape, Nd4jLong *tensorShape, int tensorShapeLength, int *dimension, int dimensionLength); /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2); /** * Computes the tensor along dimension * offset * @param index the index to get the offset for the tad for * @param rank the rank of the shapes and strides * @param info the shape information to use for tad * @param dimension the dimensions to use for computing the tensor along dimensions */ // ND4J_EXPORT _CUDA_HD int offset(int index, // int rank, // shape::ShapeInformation *info, // Nd4jLong *dimension, // int dimensionLength); /** * Computes the number * of tensors along * a given dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(int rank, volatile int length, volatile Nd4jLong *shape, int *dimension, int dimensionLength); /** * Computes the number * of tensors along * a given dimension */ ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength); /** * Returns the tensor along dimension * for the given block index * @param blockSize * @param blockIdx * @param i * @return */ ND4J_EXPORT _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i); /** * Computes the number of tads per block * */ ND4J_EXPORT _CUDA_HD int tadsPerBlock(int blockSize, int tads); // 
ND4J_EXPORT _CUDA_HD Nd4jLong *tadShapeInfo(int index, Nd4jLong *xShapeInfo, Nd4jLong *dimension, // int dimensionLength); /** * Returns a shape buffer * for the shape information metadata. */ ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info); ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret); /** * Returns the number of elements per thread */ //#ifdef __CUDACC__ // __device__ //#endif // int numElementsPerThread(int N); /** * Returns the block starting index */ //#ifdef __CUDACC__ // __device__ //#endif // int blockStartingIndex(int N); /** * Returns the thread starting index */ //#ifdef __CUDACC__ // __device__ //#endif // int threadStartingIndex(int N, int stride, int offset); /** * Returns the thread ending index */ //#ifdef __CUDACC__ // __device__ //#endif // int threadEndingIndex(int N, int stride, int offset); /** * Returns indexing information * for the current kernel invocation */ //#ifdef __CUDACC__ // __device__ //#endif // CurrentIndexing *currentIndex(int N, int offset, int stride); /** Given an linear index, element wise stride * and the length of each tad * map a linear index to a tad * @param i the index to map * @param the element wise stride for the tads * @param numElementsPerTad the number of elements * per tad */ ND4J_EXPORT _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad); /** * Map a tad to a * reduction index. * @param tadIndexForOriginal the original tad index for the * split up problem (eg: split is dimension 3 mapping to a 2,3 problem) * @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3) * @param tadsForOriginal the number of tads for the smaller problem (eg: 3) */ ND4J_EXPORT _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced, int tadsForOriginal); /** * Computes the number of tads * per reduce index for the * reduction tad. 
*/ ND4J_EXPORT _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal); /** * Maps a linear index to a reduction index * @param i the linear index to map * @param elementWiseStride the element wise stride * for the multiple problem * @param tadNum the number of tads for the shrunken problem * @param originalTadNum the tad number for the reduced version of the problem */ ND4J_EXPORT _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad, int tadNum, int originalTadNum); /** * Returns the prod of the data * up to the given length */ ND4J_EXPORT _CUDA_HD int prod(Nd4jLong *data, int length); ND4J_EXPORT _CUDA_HD Nd4jLong prodLong(const Nd4jLong *data, int length); /** * Returns the rear most left over item not present in * the dimension array. This assumes that the dimension array is sorted. * * For example, given a dimension array of: * 0,2 * * and * * 12,4,2,1 in data * * You end up with 1 (data[3]) * since the first item won't match * the last item of the dimension array */ // ND4J_EXPORT _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data,int length,Nd4jLong *dimension,int dimensionLength); /** * Get an offset for retrieval * from a data buffer * based on the given * shape stride and given indices * @param baseOffset the offset to start from * @param shape the shape of the array * @param stride the stride of the array * @param indices the indices to iterate over * @return the double at the specified index */ ND4J_EXPORT _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, const Nd4jLong *shape, const Nd4jLong *stride, const Nd4jLong *indices,int rank); ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank); ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer); /** * Convert a linear index to the corresponding coordinates * for example if shape is {2, 4}, then index 5 corresponds to following coordinates * -> [1, 1] in case of c 
order * -> [1, 2] in case of f order */ ND4J_EXPORT _CUDA_HD void index2coords(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong arrLen, Nd4jLong *coords, const char order = 'c'); ND4J_EXPORT _CUDA_HD void index2coords(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong *coords, const char order = 'c'); /** * Convert coordinates to the corresponding linear index (sequence number in other words) * for example if shape is {2, 4}, then: * in case of c order and coordinates [1, 1] index 5 is returned * in case of f order and coordinates [1, 2] index 5 is returned */ ND4J_EXPORT _CUDA_HD Nd4jLong coords2index(const int rank, const Nd4jLong *shape, const Nd4jLong *coords, const char order = 'c'); /** * increment n-dimensional array by one iteration by changing coord appropriately * for example we have array with shape {2, 3}: * - if input coord = {0,1}, then output coord = {0,2} * - if input coord = {0,2}, then output coord = {1,0} * so the aim is to produce following subsequence of coord: {0,0}, {0,1}, {0,2}, {1,0}, {1,1}, {1,2} */ /* calculates an array buffer offset for given "index" using following formula: offset = coord_0*stride_0 + coord_1*stride_1 + ... + coord_{rank-1}*stride_{rank-1} * arrLen - array length */ ND4J_EXPORT _CUDA_HD uint getIndexOffset(uint index, const uint *shapeInfo, uint arrLen); ND4J_EXPORT _CUDA_HD Nd4jLong getIndexOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen); ND4J_EXPORT _CUDA_HD Nd4jLong getIndexOrderOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen, const char order); ND4J_EXPORT _CUDA_HD Nd4jLong indexOffset(Nd4jLong index, const Nd4jLong* lShapeInfo, const uint* uShapeInfo, Nd4jLong arrLen, const bool useUnsigned); /** * Compute the real linear indices for the given shape and stride */ ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride); /** * Compute the real linear indices for the * given shape buffer. 
Shape,stride and rank are derived * from the buffer */ ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices( Nd4jLong *shapeBuffer); ND4J_EXPORT _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, const Nd4jLong *shapeInfo); ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, const Nd4jLong *shape, const Nd4jLong *strides); ND4J_EXPORT _CUDA_HD void printIntArray(const Nd4jLong *arr, const int length); ND4J_EXPORT _CUDA_HD void printIntArray(const int *arr, const int length); ND4J_EXPORT _CUDA_HD void printArray(float *arr,int length); template<typename T> ND4J_EXPORT _CUDA_HD void printArray(T *arr,int length, const char *message); ND4J_EXPORT _CUDA_HD Nd4jLong* shapeBufferOfNpy(int rank, unsigned int *shape,bool fortranOrder); ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr); // ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer); // this function checks the consistence of dimensions with array rank (negative dimensions, too large dimensions, too big number of dimensions) // also sort input array of dimensions, this operation is also necessary for creating TAD object ND4J_EXPORT _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions); // function calculates linear index of array min, min is sub-array of max, index to be returned is min-array's index and corresponds to maxIdx of max array // dimsToExclude - should be sorted in increasing order ND4J_EXPORT _CUDA_HD Nd4jLong subArrayIndex(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr, const int dimsLen = -1); // function calculates absolute offset of min array, min is sub-array of max, offset to be returned corresponds to maxIdx of max array // dimsToExclude - should be sorted in increasing order ND4J_EXPORT _CUDA_HD Nd4jLong 
subArrayOffset(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr, const int dimsLen = -1); // max array is outer for min array, min array is sub-array of max array // function calculates the coordinates of min array (and saves them into minIdxs) given coordinates of max array (already stored in maxIdxs) // dimsToExclude - should be sorted in increasing order // dimsLen - length of dimsToExclude, if not set (= -1), then it is calculated as maxRank - minRank ND4J_EXPORT _CUDA_HD void maxIndToMinInd(Nd4jLong* maxIdxs, Nd4jLong* minIdxs, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr, const int dimsLen = -1); // calculate indexes of max-array, these output indexes correspond to one minIdx index of min-array which is sub-array of max-array // dimsToExclude - should be sorted in increasing order ND4J_EXPORT _CUDA_HD int outerArrayIndexes(Nd4jLong* maxIdxs, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr); // calculate offsets of max-array, these output offsets correspond to one minIdx index of min-array which is sub-array of max-array // dimsToExclude - should be sorted in increasing order ND4J_EXPORT _CUDA_HD int outerArrayOffsets(Nd4jLong* maxOffsets, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr); // calculates offsets for entities (elements or sub-arrays), shape in context of sub-array means dimensions excluded from outer array // rank is equal to size of shape ND4J_EXPORT void calcOffsets(const int rank, const Nd4jLong* shape, const Nd4jLong* strides, Nd4jLong* offsets, const char order = 'c'); ND4J_EXPORT void calcOffsets(const Nd4jLong* shapeInfo, Nd4jLong* offsets, const char order = 'c'); ND4J_EXPORT void calcOffsets(const Nd4jLong *xShapeInfo, Nd4jLong*& xOffsets, const Nd4jLong *yShapeInfo, Nd4jLong*& 
yOffsets, const char order = 'c'); ND4J_EXPORT void calcOffsets(const Nd4jLong *xShapeInfo, Nd4jLong*& xOffsets, const Nd4jLong *yShapeInfo, Nd4jLong*& yOffsets, const Nd4jLong* zShapeInfo, Nd4jLong*& zOffsets, const char order = 'c'); ND4J_EXPORT _CUDA_HD void shapeOldScalar(nd4j::DataType dtype, Nd4jLong* const buffer, const char order); // deduce element-wise stride // if array is scalar or unit length vector then ews = 1 // if array is common vector then ews = stride of non-unity dimension // if strides are normal set ews = 1, otherwise ews = 0 ND4J_EXPORT _CUDA_HD void setEws(Nd4jLong* shapeInfo, Nd4jLong len); // deduce order and element-wise stride // if array is scalar or unit length vector then ews = 1 and order is preserved // if array is common vector then ews = stride of non-unity dimension and order is preserved // if strides are normal/contiguous then ews = 1 and corresponding order is set, otherwise ews = 0 and order is preserved ND4J_EXPORT _CUDA_HD void setOrderAndEws(Nd4jLong* shapeInfo, Nd4jLong len = -1); /** * processes whole set of sub-arrays * evaluates shapeInfo of sub-arrays (all sub-arrays have the same shapeInfo) and their buffer offsets (each sub-array has its own unique offset from original this-buffer) * arguments: * wholeShapeInfo - original shapeInfo of whole array * numOfSubArrs - number of sub-arrays, size of subArrOffsets is equal to numOfSubArrs * dimsSize - size of dimsToExclude, if dimsSize = array rank or dimsSize = 0 it means sub-array is whole array, copy of wholeShapeInfo and one zero offset will be returned * dimsToExclude - MUST BE SORTED, dimensions to evaluate sub-array along, i.e. 
when shape is [2,3,4,5] and dimsToExclude={0,2}, then there will be 8 sub-arrays with shape [3,5] * subArrShapeInfo - output argument, contains shapeInfo common for all sub-arrays * subArrOffsets - output argument, contains successive sub-arrays offsets from original this-buffer * keepUnitiesInShape - if false then eliminate unities from sub-array shapeInfo, for example {1,a,1,b} -> {a,b} */ ND4J_EXPORT _CUDA_HD void calcSubArrShapeAndOffsets(const Nd4jLong* wholeShapeInfo, const Nd4jLong numOfSubArrs, const int dimsSize, const int* dimsToExclude, Nd4jLong* subArrShapeInfo, Nd4jLong* subArrOffsets, bool keepUnitiesInShape = false); /** * insert dimension at shape[axis] position * 1) for example: for given rank = 3, shape = {2,4,5}, axis = 1, dimension = 10 result is -> shape = {2,10,4,5} * 2) for example: for given rank = 3, shape = {2,4,5}, axis = 3, dimension = 10 result is -> shape = {2,4,5,10} * so be careful and provide shape buffer with enough (at least rank+1) length * axis should be within [0, rank] range */ ND4J_EXPORT _CUDA_HD void insertDimension(const int rank, Nd4jLong *shape, const Nd4jLong axis, const Nd4jLong dimension); /** * erase dimension at shape[axis] position * 1) for example: for given rank = 3, shape = {2,4,5}, axis = 1, result is -> shape = {2,5} * 2) for example: for given rank = 3, shape = {2,4,5}, axis = 2, result is -> shape = {2,4} * axis should be within [0, rank-1] range */ ND4J_EXPORT _CUDA_HD void eraseDimension(const int rank, Nd4jLong *shape, const Nd4jLong axis); //END HEADERS //BEGIN IMPLEMENTATIONS #ifdef __CUDACC__ /** * BEWARE: THIS METHOD DOES NOT CHECKS ALLOCATION BOUNDARIES */ __device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size) { Nd4jLong *ret = buffer; ret += (threadIdx.x * size); return ret; } #endif /** * Length of a tad given * the shape information */ INLINEDEF _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) { if(dimensionLength == 1) { return 
shape::shapeOf(shapeInfo)[dimension[0]]; } else { int ret = 1; for(int i = 0; i < shape::rank(shapeInfo); i++) { for(int j = 0; j < dimensionLength; j++) { if(i == dimension[j]) ret *= shape::shapeOf(shapeInfo)[dimension[j]]; } } return ret; } } /** * Tad element wise stride: * given the inner most dimension (the sorted dimension of the last) * the element wise stride of the tad (disregarding order) is the * last dimension's stride. * * For a given singular dimension this will just be the only entry. * For example, given the following c order shape/stride: * 2,2,3,2 * 12,6,2,1 * * The tad element wise stride for 3 will be 1. * For zero it wil be 12 * * For 2,3 it's 1 * * Note here that the multi dimensional 2,3 case * is equivalent to the singular 3 case. * * * Note that this is for the dimension that ultimately * ends up removed. * * Again: this may not preserve ordering of the tad * but maybe used for reductions. */ INLINEDEF _CUDA_HD int tadElementWiseStride(Nd4jLong *shapeInfo, int *dimension,int dimensionLength) { return reductionIndexElementWiseStride(shapeInfo,dimension,dimensionLength); } INLINEDEF _CUDA_HD bool shapeEquals(const int shape1Rank, const Nd4jLong *shape1, const int shape2Rank, const Nd4jLong *shape2) { if(shape1Rank != shape2Rank) return false; //rank not equals for(int i = 0; i < shape1Rank; i++) { if(shape1[i] != shape2[i]) return false; } return true; } INLINEDEF _CUDA_HD bool shapeEquals(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2) { return shape::shapeEquals(shape::rank(shapeInfo1), shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo1)), shape::rank(shapeInfo2), shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo2))); } INLINEDEF _CUDA_HD bool shapeEquals(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2, const Nd4jLong *shapeInfo3) { return shape::shapeEquals(shapeInfo1, shapeInfo2) && shape::shapeEquals(shapeInfo1, shapeInfo3); } INLINEDEF _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2) 
{ if(shape1Rank != shape2Rank) return false; //rank not equals for(int i = 0; i < shape1Rank; i++) { if(shape1[i] != shape2[i]) return false; } return true; } INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2) { return shape::strideEquals(shape::rank(shapeInfo1),shape::stride(shapeInfo1),shape::rank(shapeInfo2),shape::stride(shapeInfo2)); } INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *stride1,int rank1 , Nd4jLong *stride2, int rank2) { if(rank1 != rank2) return false; for(int i = 0; i < rank1; i++) { if(stride1[i] != stride2[i]) return false; } return true; } INLINEDEF _CUDA_HD Nd4jLong *computeResultShape(Nd4jLong *originalShapeBuffer, int* dimension,int dimensionLength) { Nd4jLong *retShape; int retShapeLength; if(dimensionLength == 1 && dimension[0] == 2147483647) { retShape = new Nd4jLong[2]; retShape[0] = 1; retShape[1] = 1; retShapeLength = 2; } else { retShape = shape::removeIndex<Nd4jLong, int>(shape::shapeOf(originalShapeBuffer), dimension, shape::shapeInfoLength(shape::rank(originalShapeBuffer)), dimensionLength); retShapeLength = shape::rank(originalShapeBuffer) - dimensionLength; } //ensure vector is proper shape if (retShapeLength == 1) { if (dimension[0] == 0) { auto newRetShape = new Nd4jLong[2]{1, retShape[0]}; delete[] retShape; retShape = newRetShape; retShapeLength = 2; } else { auto newRetShape = new Nd4jLong[2]{retShape[0], 1}; delete[] retShape; retShape = newRetShape; retShapeLength = 2; } } else if (retShapeLength == 0) { auto newRetShape = new Nd4jLong[2]{1, 1}; delete[] retShape; retShape = newRetShape; retShapeLength = 2; } auto ret = shape::shapeBuffer(retShapeLength, nd4j::ArrayOptions::dataType(originalShapeBuffer), retShape); delete[] retShape; return ret; } INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer) { Nd4jLong *theShape = shape::shapeOf(shapeInfo); Nd4jLong *theStride = 
shape::stride(shapeInfo); int rank = dimensionLength == 1 ? 2 : dimensionLength; Nd4jLong *ret = buffer; //set the rank ret[0] = rank; Nd4jLong *retShape = shape::shapeOf(ret); Nd4jLong *retStride = shape::stride(ret); int len = rank; if(dimensionLength == 1) { if(shape::isMatrix(theShape,shape::rank(shapeInfo))) { if(dimension[0] == 0) { Nd4jLong newStride[2] = {theStride[dimension[0]],1}; Nd4jLong newShape[2] = {theShape[dimension[0]],1}; retShape[0] = newShape[0]; retShape[1] = newShape[1]; retStride[0] = newStride[0]; retStride[1] = newStride[1]; } else { Nd4jLong newStride[2] = {theStride[dimension[0]],1}; Nd4jLong newShape[2] = {theShape[dimension[0]],1}; retShape[0] = newShape[0]; retShape[1] = newShape[1]; retStride[0] = newStride[0]; retStride[1] = newStride[1]; } } else { Nd4jLong newStride[2] = {1,theStride[dimension[0]]}; Nd4jLong newShape[2] = {1,theShape[dimension[0]]}; retShape[0] = newShape[0]; retShape[1] = newShape[1]; retStride[0] = newStride[0]; retStride[1] = newStride[1]; } } else { Nd4jLong *newIndexes = dimension; if(reverseCopyStride) shape::reverseCopyTo(theStride, retStride, newIndexes, len); else shape::copyTo(len, theStride, retStride, newIndexes); shape::copyTo(len, theShape, retShape, newIndexes); } ret[shape::shapeInfoLength(rank) - 1] = shape::order(shapeInfo); return ret; } INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride) { int rank = dimensionLength == 1 ? 
2 : dimensionLength; traceNew(4); Nd4jLong *ret = new Nd4jLong[shape::shapeInfoLength(rank)]; return shapeInfoOnlyShapeAndStride(shapeInfo, dimension, dimensionLength, reverseCopyStride, ret); } INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank) { traceNew(5); Nd4jLong *ret = new Nd4jLong[shape::shapeInfoLength(rank)]; return createShapeInfo(shape, stride, rank, ret); } INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer) { buffer[0] = rank; Nd4jLong *retShape = shape::shapeOf(buffer); Nd4jLong *retStride = shape::stride(buffer); for(int i = 0;i < rank; i++) { retShape[i] = shape[i]; retStride[i] = stride[i]; } return buffer; } /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum) { if (isVector(shape, rank)) { traceNew(5); Nd4jLong *ret = new Nd4jLong[2]; for (int i = 0; i < 2; i++) ret[i] = 1; return ret; } int dimensions = rank; traceNew(6); Nd4jLong *stride = new Nd4jLong[dimensions]; int st = startNum; for (int j = 0; j < rank; j++) { stride[j] = st; st *= shape[j]; } return stride; } INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong *ret) { if (isVector(shape, rank)) { for (int i = 0; i < rank; i++) ret[i] = 1; return ret; } //int dimensions = rank; int st = startNum; for (int j = 0; j < rank; j++) { ret[j] = st; st *= shape[j]; } return ret; } /** * Computes the standard packed array strides for a given shape. 
* * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum) { traceNew(7); Nd4jLong *stride = new Nd4jLong[rank]; if (rank == 1) { stride[0] = 1; return stride; } // if (shape::isVector(shape, rank)) { // for (int i = 0; i < 2; i++) // stride[i] = 1; // return stride; // } int st = startNum; for (int j = rank - 1; j >= 0; j--) { stride[j] = st; st *= shape[j]; } return stride; } INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret) { if (rank == 1) { ret[0] = 1; return ret; } // if (shape::isVector(shape, rank)) { // for (int i = 0; i < 2; i++) // ret[i] = 1; // return ret; // } int st = startNum; for (int j = rank - 1; j >= 0; j--) { ret[j] = st; st *= shape[j]; } return ret; } /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank) { return calcStridesFortran(shape, rank, 1); } INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret) { return calcStridesFortran(shape, rank, 1, ret); } /** * Computes the standard packed array strides for a given shape. 
* * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank) { return calcStrides(shape, rank, 1); } INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret) { return calcStrides(shape, rank, 1, ret); } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD void updateStrides(Nd4jLong *shapeInfo, const char order) { int rank = shapeInfo[0]; int doubleRank = 2*rank; if (rank > 0) { if (order == 'c') { shapeInfo[doubleRank] = 1; // set unity as last stride for c order for (int j = 1; j < rank; ++j) { shapeInfo[doubleRank - j] = shapeInfo[doubleRank - j + 1] * shapeInfo[rank + 1 - j]; } } else { shapeInfo[rank + 1] = 1; // set unity as first stride for f order for (int j = rank + 1; j < doubleRank; ++j) { shapeInfo[j + 1] = shapeInfo[j] * shapeInfo[j - rank]; } } } // set last 2 elements in shapeInfo shapeInfo[doubleRank + 2] = 1; shapeInfo[doubleRank + 3] = (int)order; } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD void updateStrides(const int rank, const Nd4jLong *shapeOnly, Nd4jLong *stridesOnly, const char order) { if (rank > 0) { if (order == 'c') { stridesOnly[rank - 1] = 1; // set unity as last stride for c order for (int j = 1; j < rank; ++j) stridesOnly[rank - 1 - j] = stridesOnly[rank - j] * shapeOnly[rank - j]; } else { stridesOnly[0] = 1; // set unity as first stride for f order for (int j = 1; j < rank; ++j) { stridesOnly[j] = stridesOnly[j - 1] * shapeOnly[j - 1]; } } } } // check whether input dimensions are permuted, not permuted dimensions order have to be 0,....,rank-1 template <typename T> INLINEDEF _CUDA_HD bool isDimPermuted(const T* dimensions, const Nd4jLong dimSize ) { for(int i=0; i<dimSize-1; ++i) if(dimensions[i] > dimensions[i+1]) return true; return false; } /** * @param toCopy the 
shape to copy * @return a copy of the original struct */ INLINEDEF _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy) { auto copy = new ShapeInformation; traceNew(8); copy->shape = new Nd4jLong[toCopy->rank]; memcpy(copy->shape, toCopy->shape, toCopy->rank * sizeof(Nd4jLong)); traceNew(9); copy->stride = new Nd4jLong[toCopy->rank]; for (int i = 0; i < toCopy->rank; i++) { copy->stride[i] = toCopy->stride[i]; } copy->order = toCopy->order; copy->rank = toCopy->rank; copy->offset = toCopy->offset; copy->elementWiseStride = toCopy->elementWiseStride; return copy; } INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder) { if (rank == 0) return 1; if(shape::isVector(shape,rank)) { return stride[rank - 1]; } else { int oldnd; Nd4jLong *oldDims = shape::copyOf(rank, shape); Nd4jLong *oldStrides = shape::copyOf(rank, stride); int np, op, last_stride; int oldStart, oldStop, ok, newStart, newStop, nk; traceNew(10); auto newStrides = new Nd4jLong[rank]; oldnd = 0; //set the shape to be 1 x length int newShapeRank = 2; auto newShape = new Nd4jLong[newShapeRank]; newShape[0] = 1; newShape[1] = shape::prodLong(shape, rank); /* * Remove axes with dimension 1 from the old array. They have no effect * but would need special cases since their strides do not matter. 
*/ for (oldStart = 0; oldStart < rank; oldStart++) { if (shape[oldStart] != 1) { oldDims[oldnd] = shape[oldStart]; oldStrides[oldnd] = stride[oldStart]; oldnd++; } } np = 1; for (newStart = 0; newStart < newShapeRank; newStart++) { np *= newShape[newStart]; } op = 1; for (oldStart = 0; oldStart < oldnd; oldStart++) { op *= oldDims[oldStart]; } if (np != op) { /* different total sizes; no hope */ delete[] newStrides; delete[] newShape; delete[] oldStrides; delete[] oldDims; return 0; } if (np == 0) { /* the current code does not handle 0-sized arrays, so give up */ delete[] newStrides; delete[] newShape; delete[] oldStrides; delete[] oldDims; return 0; } /* oldStart to oldStop and newStart to newStop give the axis ranges currently worked with */ oldStart = 0; oldStop = 1; newStart = 0; newStop = 1; while (newStart < newShapeRank && oldStart < oldnd) { np = newShape[newStart]; op = oldDims[oldStart]; while (np != op) { if (np < op) { /* Misses trailing 1s, these are handled later */ np *= newShape[newStop++]; } else { op *= oldDims[oldStop++]; } } /* Check whether the original axes can be combined */ for (ok = oldStart; ok < oldStop - 1; ok++) { if (isFOrder) { if (oldStrides[ok + 1] != oldDims[ok] * oldStrides[ok]) { /* not contiguous enough */ delete[] newStrides; delete[] newShape; delete[] oldStrides; delete[] oldDims; return 0; } } else { /* C order */ if (oldStrides[ok] != oldDims[ok + 1] * oldStrides[ok + 1]) { /* not contiguous enough */ delete[] newStrides; delete[] newShape; delete[] oldStrides; delete[] oldDims; return 0; } } } /* Calculate new strides for all axes currently worked with */ if (isFOrder) { newStrides[newStart] = oldStrides[oldStart]; for (nk = newStart + 1; nk < newStop; nk++) { newStrides[nk] = newStrides[nk - 1] * newShape[nk - 1]; } } else { /* C order */ newStrides[newStop - 1] = oldStrides[oldStop - 1]; for (nk = newStop - 1; nk > newStart; nk--) { newStrides[nk - 1] = newStrides[nk] * newShape[nk]; } } newStart = newStop++; oldStart = 
oldStop++; } /* * Set strides corresponding to trailing 1s of the new shape. */ if (newStart >= 1) { last_stride = newStrides[newStart - 1]; } else { last_stride = stride[rank - 1]; } if (isFOrder) { if (newStart >= 1) last_stride *= newShape[newStart - 1]; } for (nk = newStart; nk < newShapeRank; nk++) { newStrides[nk] = last_stride; } //returns the last element of the new stride array int ret = last_stride; delete[] newStrides; delete[] newShape; delete[] oldStrides; delete[] oldDims; return ret; } } INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder, Nd4jLong *dimension, int dimensionLength) { if(dimensionLength == 1) { return stride[dimension[0]]; } return 0; } /** * Get the shape info buffer * for the given rank and shape. */ INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape) { Nd4jLong *stride = shape::calcStrides(shape, rank); traceNew(11); auto shapeInfo = new shape::ShapeInformation(); shapeInfo->shape = shape; shapeInfo->stride = stride; shapeInfo->offset = 0; shapeInfo->rank = rank; int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo->order = 'c'; shapeInfo->elementWiseStride = elementWiseStride; auto shapeInfoBuffer = shape::toShapeBuffer(shapeInfo); delete[] stride; delete shapeInfo; nd4j::ArrayOptions::setDataType(shapeInfoBuffer, dtype); return shapeInfoBuffer; } /** * This is special method, it returns ONLY 2D shapebuffer. 
* * This method is used only for SoftMax */ INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *buffer) { Nd4jLong stride[MAX_RANK]; shape::calcStrides(shape,rank, stride); shape::ShapeInformation shapeInfo; shapeInfo.shape = shape; shapeInfo.stride = stride; shapeInfo.offset = 0; shapeInfo.rank = rank; auto elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo.order = 'c'; shapeInfo.elementWiseStride = elementWiseStride; shape::toShapeBuffer(&shapeInfo, buffer); nd4j::ArrayOptions::setDataType(buffer, dtype); return buffer; } /** * Get the shape info buffer * for the given rank and shape. */ INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape) { auto stride = shape::calcStridesFortran(shape,rank); traceNew(12); auto shapeInfo = new shape::ShapeInformation(); shapeInfo->shape = shape; shapeInfo->stride = stride; shapeInfo->offset = 0; shapeInfo->rank = rank; int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo->order = 'f'; shapeInfo->elementWiseStride = elementWiseStride; auto shapeInfoBuffer = shape::toShapeBuffer(shapeInfo); delete[] stride; delete shapeInfo; nd4j::ArrayOptions::setDataType(shapeInfoBuffer, dtype); return shapeInfoBuffer; } INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *output) { Nd4jLong stride[MAX_RANK]; shape::calcStridesFortran(shape,rank, stride); shape::ShapeInformation shapeInfo; shapeInfo.shape = shape; shapeInfo.stride = stride; shapeInfo.offset = 0; shapeInfo.rank = rank; auto elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo.order = 'f'; shapeInfo.elementWiseStride = elementWiseStride; shape::toShapeBuffer(&shapeInfo, output); nd4j::ArrayOptions::setDataType(output, dtype); return output; } /** * Compute the real linear indices for the given shape and stride */ INLINEDEF _CUDA_HD 
Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride) {
        Nd4jLong length = shape::prodLong(shape,rank);

        traceNew(13);

        Nd4jLong *ret = new Nd4jLong[length];

        // one scratch coordinate buffer reused for every element
        // (the previous version allocated and freed it on each iteration)
        Nd4jLong *idx = new Nd4jLong[rank];

        for(int i = 0; i < length; i++) {
            // 'f' -> decompose the linear index in column-major (fortran) order
            shape::index2coords(rank, shape, i, idx, 'f');
            ret[i] = shape::getOffset(0, shape, stride, idx, rank);
        }

        delete[] idx;

        return ret;
    }

/**
* Compute the real linear indices for the given shape and stride
*/
    INLINEDEF _CUDA_HD Nd4jLong *computeIndices(Nd4jLong *shapeBuffer) {
        return computeIndices(shape::rank(shapeBuffer),shape::shapeOf(shapeBuffer),shape::stride(shapeBuffer));
    }

//////////////////////////////////////////////////////////////////////
// convert coordinates to the linear index for an array with the given
// shape and ordering ('c' = row-major, anything else = column-major)
    INLINEDEF _CUDA_HD Nd4jLong coords2index(const int rank, const Nd4jLong *shape, const Nd4jLong *indices, const char order) {

        Nd4jLong index, shift = 1;

        if(order == 'c') {
            // row-major: last coordinate varies fastest
            index = indices[rank - 1];
            for(int i = rank - 2; i >= 0; --i) {
                shift *= shape[i + 1];
                index += shift * indices[i];
            }
        }
        else {
            // column-major: first coordinate varies fastest
            index = indices[0];
            for(int i = 1; i < rank; ++i) {
                shift *= shape[i - 1];
                index += shift * indices[i];
            }
        }

        return index;
    }

// fill the whole buffer with the given value
template <typename T>
    INLINEDEF _CUDA_HD void fill(T* buffer, T value, Nd4jLong length) {

        PRAGMA_OMP_SIMD
        for (int e = 0; e < length; e++)
            buffer[e] = value;
    }

//////////////////////////////////////////////////////////////////////
// map a linear index to the buffer offset; fast path when ews > 0 and
// order is 'c', otherwise decompose the index dimension by dimension
    INLINEDEF _CUDA_HD Nd4jLong getIndexOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen) {

        // ews is stored at slot 2*rank + 2 of the shapeInfo buffer
        const Nd4jLong ews = shapeInfo[shapeInfo[0] + shapeInfo[0] + 2];

        if(ews > 0 && order(shapeInfo) == 'c')
           if (ews == 1)
               return index;
           else
               return ews * index;

        Nd4jLong offset = 0;
        Nd4jLong rank = shapeInfo[0];
        for(int i = 1; i <= shapeInfo[0]; ++i) {
            arrLen /= shapeInfo[i];
            if(arrLen > 0 && shapeInfo[i] > 1) {
                // shapeInfo[i + rank] is the stride of dimension i-1
                offset += (index / arrLen) * shapeInfo[i + rank];
                index %= arrLen;
            }
        }
        return offset;
    }

    INLINEDEF _CUDA_HD uint getIndexOffset(uint index, const uint *shapeInfo, uint arrLen) {

        const uint rank = shapeInfo[0];
        const uint ews =
shapeInfo[rank + rank + 2]; if(ews > 0 && shapeInfo[rank + rank + 3] == 99) if (ews == 1) return index; else return ews * index; uint offset = 0; for(uint i = 1; i <= rank; ++i) { arrLen /= shapeInfo[i]; if(arrLen > 0 && shapeInfo[i] > 1) { offset += (index / arrLen) * shapeInfo[i + rank]; index %= arrLen; } } return offset; } INLINEDEF _CUDA_HD Nd4jLong indexOffset(Nd4jLong index, const Nd4jLong* lShapeInfo, const uint* uShapeInfo, Nd4jLong arrLen, const bool useUnsigned) { if(useUnsigned) return getIndexOffset(static_cast<uint>(index), uShapeInfo, static_cast<uint>(arrLen)); return getIndexOffset(index, lShapeInfo, arrLen); } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD Nd4jLong getIndexOrderOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen, const char order) { Nd4jLong offset = 0; if(order == 'c') { for(int i = 1; i <= *shapeInfo; ++i) { arrLen /= shapeInfo[i]; if(arrLen > 0 && shapeInfo[i] > 1) { offset += (index / arrLen) * shapeInfo[i + *shapeInfo]; index %= arrLen; } } } else { for(int i = *shapeInfo; i >= 1 ; --i) { arrLen /= shapeInfo[i]; if(arrLen > 0 && shapeInfo[i] > 1) { offset += (index / arrLen) * shapeInfo[i + *shapeInfo]; index %= arrLen; } } } return offset; } /** * * @param length * @param shape * @param rearrange * @return */ INLINEDEF _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int *rearrange) { traceNew(16); Nd4jLong *ret = new Nd4jLong[length]; for (int i = 0; i < length; i++) { ret[i] = shape[rearrange[i]]; } return ret; } /** * * @param length * @param shape * @param rearrange * @return */ INLINEDEF _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int *rearrange) { if(length == 1) { return; } else { Nd4jLong *shapeDeref = *shape; if(shape::prodLong(shapeDeref,length) < 2) { return; } } bool inOrder = true; for(int i = 0; i < length - 1; i++) { inOrder = inOrder && rearrange[i] + 1 == rearrange[i + 1]; } //all in order, nothing to do if(inOrder) return; 
Nd4jLong *shapeDeref = *shape; //we know they are just reversed, dimension length of 2 if(length == 2) { auto shapeFirst = shapeDeref[0]; auto shapeSecond = shapeDeref[1]; shapeDeref[0] = shapeSecond; shapeDeref[1] = shapeFirst; return; } else if(length == 1) { //no permute return; } auto temp = new Nd4jLong[length]; memcpy(temp,shapeDeref,sizeof(Nd4jLong) * length); for (int i = 0; i < length; i++) { shapeDeref[i] = temp[rearrange[i]]; } delete[] temp; } INLINEDEF _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *out) { if(shapeBuffer != out) memcpy(out,shapeBuffer,sizeof(Nd4jLong) * shape::shapeInfoLength(shapeBuffer)); shape::doPermuteShapeInfo(out, rearrange); } INLINEDEF _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange) { auto len = shape::shapeInfoLength(shape::rank(shapeBuffer)); Nd4jLong *copy = shape::copyOf(len, shapeBuffer); shape::doPermuteShapeInfo(copy,rearrange); return copy; } INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const int *rearrange, Nd4jLong len) { if(len == -1) // calculate array length if it is not given len = shape::length(shapeInfo); //check whether shape is like {1} or {1,1} or {1,1,1,1,...} - in this case we don't need permute if(len == 1) return; const int rank = shape::rank(shapeInfo); // check whether rearrange is like {0,1,2,3,...} - in this case we don't need permute as well bool isPermutNecessary = false; for(int i = 0; i < rank; ++i) if(rearrange[i] != i) { isPermutNecessary = true; break; } if(!isPermutNecessary) return; // check whether rearrange contains correct indexes for(int i = 0; i < rank; ++i) if(rearrange[i] >= rank || rearrange[i] < 0) { printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n"); return; } // if everything is ok then perform permute auto temp = new Nd4jLong[shape::shapeInfoLength(rank) - 3]; memcpy(temp, shapeInfo, sizeof(Nd4jLong) * (shape::shapeInfoLength(rank) - 3)); for (int i = 0; 
i < rank; ++i) { shapeInfo[i + 1] = temp[rearrange[i] + 1]; shapeInfo[i + 1 + rank] = temp[rearrange[i] + 1 + rank]; } shape::setOrderAndEws(shapeInfo, len); delete[] temp; } INLINEDEF _CUDA_HD Nd4jLong *createPermuteIndexes(int originalRank, int *dimension,int dimensionLength) { int delta = originalRank - dimensionLength; traceNew(17); Nd4jLong *ret = new Nd4jLong[originalRank]; for(int i = 0; i < delta; i++) { ret[i] = i + dimensionLength; } for(int i = delta; i < originalRank; i++) { ret[i] = i - delta; } return ret; } /** * Get the ordering for the device * @param length * @param shape * @param stride * @param elementStride * @return */ INLINEDEF _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride) { int sd = -1; int dim = -1; int i = -1; int cContiguous = 1; int isFortran = 1; sd = 1; for (i = length - 1; i >= 0; --i) { dim = shape[i]; if (stride[i] != sd) { cContiguous = 0; break; } /* contiguous, if it got this far */ if (dim == 0) { break; } sd *= dim; } /* check if fortran contiguous */ sd = elementStride; for (i = 0; i < length; ++i) { dim = shape[i]; if (stride[i] != sd) { isFortran = 0; } if (dim == 0) { break; } sd *= dim; } if (isFortran && cContiguous) return 'a'; else if (isFortran && !cContiguous) return 'f'; else if (!isFortran && !cContiguous) return 'c'; else return 'c'; } /** * Ensure that every value in the re arrange * array is unique * @param arr * @param shape * @param arrLength * @param shapeLength * @return */ template <typename T> INLINEDEF _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength) { if (arrLength != shapeLength) return -1; for (int i = 0; i < arrLength; i++) { if (arr[i] >= arrLength || arr[i] < 0) return -1; } for (int i = 0; i < arrLength; i++) { for (int j = 0; j < arrLength; j++) { if (i != j && arr[i] == arr[j]) return -1; } } return 1; } INLINEDEF _CUDA_HD void traceNew(int id) { //printf("new happened: [%i]\n", id); #ifndef __CUDACC__ //fflush(stdout); #endif } /** 
* Permute the shape information * @param info the shape information to permute * @param rearrange the order to re arrange * @param rank the rank of the rearrange array */ INLINEDEF _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank) { ShapeInformation *infoDeref = *info; checkArrangeArray(rearrange, rank, rank); shape::doPermuteSwap(rank, &infoDeref->shape, rearrange); shape::doPermuteSwap(rank, &infoDeref->stride, rearrange); char order = getOrder(rank, infoDeref->shape, infoDeref->stride, infoDeref->elementWiseStride); infoDeref->order = order; } /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param rank the rank of the shape */ INLINEDEF _CUDA_HD int isVector(Nd4jLong *shape, int rank) { if (rank == 0) return 0; if (rank == 1) return 1; if (rank > 2) return 0; else if (rank <= 2) { if (shape[0] == 1 || shape[1] == 1) return 1; } return 0; } INLINEDEF _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim) { int numOfNonUnity = 0; for(int i = 1; i <= shapeInfo[0]; ++i) { if(shapeInfo[i] != 1) { ++numOfNonUnity; posOfNonUnityDim = i-1; } } return numOfNonUnity == 1 && shapeInfo[0] > 2; } INLINEDEF _CUDA_HD bool isCommonVector(const Nd4jLong *shapeInfo, int& posOfNonUnityDim) { if(rank(shapeInfo) > 0 && length(shapeInfo) == 1) { posOfNonUnityDim = 0; return true; } int numOfNonUnity = 0; for(int i = 1; i <= shapeInfo[0]; ++i) { if(shapeInfo[i] != 1) { ++numOfNonUnity; posOfNonUnityDim = i-1; } } return numOfNonUnity == 1; } INLINEDEF _CUDA_H Nd4jLong* detachShape(Nd4jLong *originalShape) { Nd4jLong *newShape = new Nd4jLong[shape::shapeInfoLength(originalShape)]; memcpy(newShape, originalShape, shape::shapeInfoByteLength(originalShape)); return newShape; } INLINEDEF _CUDA_H Nd4jLong* copyShape(Nd4jLong *originalShape) { Nd4jLong *newShape = new Nd4jLong[shape::shapeInfoLength(originalShape)]; memcpy(newShape, originalShape, shape::shapeInfoByteLength(originalShape)); return 
newShape; } INLINEDEF _CUDA_HD int isVector(const Nd4jLong *shapeInfo) { return isVector(shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo)), shape::rank(shapeInfo)); } INLINEDEF _CUDA_HD bool isRowVector(const Nd4jLong *shapeInfo) { bool isVector = shape::isVector(shapeInfo) == 1; bool shapeFirstOne = shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo))[0] == 1; return isVector && shapeFirstOne; } INLINEDEF _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo) { bool isVector = shape::isVector(shapeInfo) == 1; bool shapeFirstOne = shape::shapeOf(shapeInfo)[0] == 1; return isVector && !shapeFirstOne; } INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank) { for(int i = 0; i < rank; i++) { if(shape[i] == shape::prod(shape,rank)) return 1; } return 0; } INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo) { return oneDimEqualToLength(shape::shapeOf(shapeInfo),shape::rank(shapeInfo)); } /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param rank the rank of the shape */ INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shape, int rank) { if (rank > 2) return 0; else if (rank <= 2) { if (shape[0] == 1 || shape[1] == 1) return 0; } return 1; } INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo) { return isMatrix(shape::shapeOf(shapeInfo),shape::rank(shapeInfo)); } /** * Returns the shape portion of an information * buffer */ INLINEDEF _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer) { return buffer + 1; } /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ template <typename T> INLINEDEF _CUDA_HD T *copyOf(Nd4jLong length, T *toCopy) { traceNew(18); T *ret = new T[length]; return copyOf(length, toCopy, ret); } template <typename T> INLINEDEF _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret) { memcpy(ret, toCopy, sizeof(T)*length); return ret; } /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. 
*/ template <typename T> INLINEDEF _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to) { memcpy(to, from, sizeof(T)*length); } /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ INLINEDEF _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes) { for(int i = 0; i < length; i++) { to[i] = from[indexes[i]]; } } /** * Permute the given strides * in the given rearrange order * @param toPermute the buffer to permute * @param shapeRank the length of the buffer to permute * @param rearrange the rearrange order (must be 0 based indexes * and all must be filled in) * @return the rearranged array */ /* INLINEDEF _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, int *rearrange) { Nd4jLong *strideCopy = copyOf(shapeRank, toPermute); checkArrangeArray(rearrange, shapeRank, shapeRank); Nd4jLong *newStride = doPermuteSwap(shapeRank, strideCopy, rearrange); delete[] strideCopy; return newStride; } */ /** * Return the slice (shape + 1 in pointer arithmetic) * @param shape the shape to take the slice of * @return the shape array - the first entry */ INLINEDEF _CUDA_HD Nd4jLong *slice(Nd4jLong *shape) { return shape + 1; } INLINEDEF _CUDA_HD int slices(Nd4jLong *shapeBuffer) { return static_cast<int>(shape::shapeOf(shapeBuffer)[0]); } INLINEDEF _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer) { int rank = shape::rank(shapeBuffer); int newRank = rank - 1; if(newRank < 2) newRank = 2; Nd4jLong *newShapeBuffer = new Nd4jLong[shape::shapeInfoLength(newRank)]; newShapeBuffer[0] = newRank; Nd4jLong *currShape = shape::shapeOf(shapeBuffer); Nd4jLong *currStride = shape::stride(shapeBuffer); //initialize new shape and stride by taking the shape and stride + 1 //and adding to the shape information //a slice is always just taking the existing shape and cutting the first index off //of the shape and stride Nd4jLong *newShape = shape::shapeOf(newShapeBuffer); Nd4jLong 
*newStride = shape::stride(newShapeBuffer); if(shape::isVector(shapeBuffer)) { Nd4jLong *currShape = shape::shapeOf(shapeBuffer); //row vector: slice index 0 is a valid index, just copy the whole thing if(currShape[0] == 1) { if(sliceIdx == 0) { memcpy(newShapeBuffer,shapeBuffer,shape::shapeInfoByteLength(shape::rank(shapeBuffer))); return newShapeBuffer; } } //column vector: this will be a scalar else { delete[] newShapeBuffer; Nd4jLong *scalar = shape::createScalarShapeInfo(); int offset = shape::offset(shapeBuffer); scalar[shape::shapeInfoLength(2) - 3] = offset + sliceIdx; return scalar; } } else if(shape::isMatrix(shapeBuffer)) { newShape[0] = 1; newShape[1] = currShape[1]; newStride[0] = 1; newStride[1] = currStride[1]; } else { for(int i = 0; i < newRank; i++) { newShape[i] = currShape[i + 1]; newStride[i] = currStride[i + 1]; } } auto indices = new Nd4jLong[rank]; memset((void *) indices,0,rank * sizeof(Nd4jLong)); indices[0] = sliceIdx; Nd4jLong offset = shape::getOffset(0,newShape,newStride,indices,rank); newShapeBuffer[shape::shapeInfoLength(newRank) - 3] = offset; // set current order and ews newShapeBuffer[2 * newRank + 2] = shape::elementWiseStride(shapeBuffer); newShapeBuffer[2 * newRank + 3] = shape::order(shapeBuffer); // correct order and ews if necessary shape::setOrderAndEws(newShapeBuffer); delete[] indices; return newShapeBuffer; } /** * Returns the length of the * shape information buffer: * rank * 2 + 3 * @param rank the rank to get the shape * info length for * @return rank * 2 + 4 */ INLINEDEF _CUDA_HD int shapeInfoLength(int rank) { //FIXME magic numbers return rank * 2 + 4; } INLINEDEF _CUDA_HD int shapeInfoLength(Nd4jLong* shape) { return shapeInfoLength(static_cast<int>(shape[0])); } INLINEDEF _CUDA_HD int shapeInfoLength(const Nd4jLong* shape) { return shapeInfoLength(static_cast<int>(shape[0])); } INLINEDEF _CUDA_HD size_t shapeInfoByteLength(int rank) { //FIXME magic numbers return (rank * 2 + 4) * sizeof(Nd4jLong); } INLINEDEF 
_CUDA_HD size_t shapeInfoByteLength(const Nd4jLong* shapeInfo) { //FIXME magic numbers return shapeInfoByteLength((int) shapeInfo[0]); } /** * Returns the rank portion of * an information buffer */ INLINEDEF _CUDA_HD int rank(const Nd4jLong *buffer) { return static_cast<int>(buffer[0]); } INLINEDEF _CUDA_HD int rank(const int *buffer) { return buffer[0]; } INLINEDEF _CUDA_HD int rank(const unsigned int *buffer) { return static_cast<int>(buffer[0]); } INLINEDEF _CUDA_HD Nd4jLong* ews(Nd4jLong* shapeInfo) { return shapeInfo + 2 * shapeInfo[0] + 2; } /** * Converts a raw int buffer of the layout: * rank * shape * stride * offset * elementWiseStride * * where shape and stride are both straight int pointers */ INLINEDEF _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer) { traceNew(19); auto info = new ShapeInformation; auto length = shapeInfoLength(rank(buffer)); auto rank = buffer[0]; //start after rank info->shape = buffer + 1; info->stride = buffer + (1 + rank); info->rank = rank; info->offset = buffer[length - 3]; info->elementWiseStride = buffer[length - 2]; Nd4jLong *stride = buffer + 1 + rank; info->stride = stride; info->order = (char) buffer[length - 1]; return info; } /** * Returns the stride portion of an information * buffer */ INLINEDEF _CUDA_HD Nd4jLong *stride(Nd4jLong *buffer) { return buffer + (1 + rank(buffer)); } INLINEDEF _CUDA_HD Nd4jLong *stride(const Nd4jLong *buffer) { return stride(const_cast<Nd4jLong *>(buffer)); } INLINEDEF _CUDA_HD bool isEmpty(const Nd4jLong *shapeInfo) { return ((shape::extra(const_cast<Nd4jLong*>(shapeInfo)) & ARRAY_EMPTY) == ARRAY_EMPTY); } /** * Compute the length of the given shape */ INLINEDEF _CUDA_HD Nd4jLong length(const Nd4jLong *shapeInfo) { const int rank = shape::rank(shapeInfo); if (rank == 0) { if (isEmpty(shapeInfo)) return 0L; return 1L; } if (rank == 1) return shapeInfo[1]; // if(shape::elementWiseStride(shapeInfo) == 1) { // contiguous // if(shape::order(shapeInfo) == 'c') // return shapeInfo[1] * 
shapeInfo[rank + 1]; // first dim * first stride // return shapeInfo[rank] * shapeInfo[2 * rank]; // last dim * last stride // } return shape::prodLong(shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo)), rank); } INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape) { Nd4jLong ret = 1; for (auto v : shape) { ret *= v; } return ret; } INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape) { Nd4jLong ret = 1; for (auto v : shape) { ret *= v; } return ret; } /*** * Returns the offset * portion of an information buffer */ INLINEDEF _CUDA_HD Nd4jLong offset(Nd4jLong *buffer) { return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3]; } INLINEDEF _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer) { return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3]; } /** * Returns the ordering * for this shape information buffer */ INLINEDEF _CUDA_HD char order(const Nd4jLong *buffer) { //FIXME magic numbers return static_cast<char>(buffer[buffer[0] * 2 + 3]); } /** * Returns type */ INLINEDEF _CUDA_HD Nd4jLong type(const Nd4jLong *shapeInfo) { return shapeInfo[2 * shapeInfo[0] + 1]; } /** * Returns the element wise stride for this information * buffer */ INLINEDEF _CUDA_HD Nd4jLong elementWiseStride(const Nd4jLong *buffer) { return buffer[shapeInfoLength(static_cast<int>(buffer[0])) - 2]; } /** * Returns the element wise stride for this information * buffer relative to a dimension and reduction index */ INLINEDEF _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong* buffer, int* dimension, int dimensionLength) { if(dimensionLength > 1) { if(shape::order(buffer) == 'f') { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. 
*/ if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) { //int tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]]; //return tadElementWiseStride; auto tadElementWiseStride = shape::stride(buffer)[dimension[0]]; return tadElementWiseStride; } return 1; } else { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. */ if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) { auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]]; return tadElementWiseStride; } return 1; } } else { if(shape::order(buffer) == 'f') { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. */ auto tadElementWiseStride = shape::stride(buffer)[dimension[0]]; return tadElementWiseStride; } else { /** * The element wise stride belongs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along arr * we can use arr.stride(1) as a representation * along which to iterate. 
*/ auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]]; return tadElementWiseStride; } } } /** * Returns whether * the given shape info buffer * represents a scalar shape */ INLINEDEF _CUDA_HD int isScalar(Nd4jLong *info) { const int rank = shape::rank(info); if(rank > 2) return 0; if(rank == 0) return 1; if(rank == 1) return shape::shapeOf(info)[0] == 1; if(rank == 2) return shape::shapeOf(info)[0] == 1 && shape::shapeOf(info)[1] == 1; return 0; } /** * Returns whether * the given shape information * represents a scalar * shape or not */ INLINEDEF _CUDA_HD int isScalar(volatile ShapeInformation *info) { const int rank = info->rank; if(rank > 2) return 0; if(rank == 1) return info->shape[0] == 1; if(rank == 2) return info->shape[0] == 1 && info->shape[1] == 1; return 0; } /** * Return a copy of this array with the * given index omitted * * @param data the data to copy * @param indexes the index of the item to remove * @param dataLength the length of the data array * @param indexesLength the length of the data array * @return the new array with the omitted * * item */ template <typename T1, typename T2> INLINEDEF _CUDA_HD void removeIndex(T1* data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *ret) { int count = 0; int absLength = dataLength - indexesLength; for (int i = 0; i < dataLength && count < absLength; i++) { int contains = 0; for (int j = 0; j < indexesLength; j++) { if (i == indexes[j]) { contains = 1; break; } } if (!contains) { ret[count] = data[i]; count++; } } } /** * Return a copy of this array with the * given index omitted * * @param data the data to copy * @param indexes the index of the item to remove * @param dataLength the length of the data array * @param indexesLength the length of the data array * @return the new array with the omitted * * item */ template <typename T1, typename T2> INLINEDEF _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength) { auto 
lengthOfArr = dataLength - indexesLength; if(lengthOfArr < 0) { printf("Remove index call created a <= 0 length array. This was likely not intended."); } auto ret = new T1[lengthOfArr]; memset(ret,0,sizeof(T1) * lengthOfArr); removeIndex<T1, T2>(data, indexes, dataLength, indexesLength, ret); return ret; } INLINEDEF _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end) { int len = end - indexesLength; traceNew(20); auto ret = new Nd4jLong[len]; int retIdx = 0; //not here that we do 0 based indexing for end - this assumes things like: //0 to 4 are specified for(int i = begin; i < end ; i++) { bool found = false; for(int j = 0; j < indexesLength; j++) { if(indexes[j] == i) { found = true; break; } } if(!found) { ret[retIdx++] = i; } } return ret; } /** * Computes the offset for accessing * a global element given the shape information * and the offset to be read. */ #ifdef __CUDACC__ INLINEDEF __device__ int tadOffset(ShapeInformation *xInfo, int offset) { return offset + threadIdx.x * xInfo->elementWiseStride; } #endif /** * Returns a shape * forces the given length to be 2. * @param shape the shape to modify * @param dimension the dimension (row or column) * for the shape to be returned as * @return the new shape */ INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape, int dimension) { traceNew(21); Nd4jLong *ret = new Nd4jLong[2]; if (dimension == 0) { ret[0] = 1; ret[1] = shape[0]; } else { ret[0] = shape[0]; ret[1] = 1; } return ret; } /** * Returns a shape * forces the given length to be 2. 
* @param shape the shape to modify * @param dimension the dimension (row or column) * for the shape to be returned as * @return the new shape */ INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape) { return ensureVectorShape(shape, 0); } /** * This method does STRICT comparison for two shape buffers * * @param shape * @return */ INLINEDEF _CUDA_HD bool equalsStrict(const Nd4jLong *shapeA, const Nd4jLong *shapeB) { if (shapeA[0] != shapeB[0]) return false; if (shapeA[0] == 0) return true; // we do full comparison here int length = shape::shapeInfoLength(shapeA[0]); for (int e = 1; e < length; e++) if (shapeA[e] != shapeB[e]) return false; return true; } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD bool haveSameShapeAndStrides(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2) { if (shapeInfo1[0] != shapeInfo2[0]) return false; if (shapeInfo1[0] == 0) return true; int range = 2 * shapeInfo1[0]; for (int e = 1; e <= range; e++) if (shapeInfo1[e] != shapeInfo2[e]) return false; return true; } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD bool haveSameShapeAndStrides(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2, const Nd4jLong *shapeInfo3) { return shape::haveSameShapeAndStrides(shapeInfo1, shapeInfo2) && shape::haveSameShapeAndStrides(shapeInfo1, shapeInfo3); } INLINEDEF _CUDA_HD int sizeAt(const Nd4jLong *shape, const int dim) { if (0 == rank(shape)) return 1; if (dim >= 0) return shape[1+dim]; else return shape[1+(rank(shape) + dim)]; } /** * This method does SOFT comparison for two shape buffers, we compare only rank & shapes * * @param shape * @return */ INLINEDEF _CUDA_HD bool equalsSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB) { if (shapeA[0] != shapeB[0]) return false; if (shapeA[0] == 0) return true; // we compare only shapes, and ignoring stride & ews auto length = shapeA[0]; for (int e = 1; e <= length; e++) if (shapeA[e] != shapeB[e]) 
return false; return true; } INLINEDEF _CUDA_HD bool equalsTypesAndShapesSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB) { return equalsSoft(shapeA, shapeB) && shapeA[shapeInfoLength(shapeA) - 3] == shapeB[shapeInfoLength(shapeB) - 3]; } /** * Generate an int buffer * up to the given length * at the specified increment * */ template <typename T> INLINEDEF _CUDA_HD T* range(int from, int to, int increment) { int diff = nd4j::math::nd4j_abs<int>(from - to); int retLength = diff / increment; T *ret; traceNew(22); if(diff / increment < 1) ret = new T[1]; else ret = new T[diff / increment]; if (from < to) { int count = 0; for (int i = from; i < to; i += increment) { if (count >= retLength) break; ret[count++] = i; } } else if (from > to) { int count = 0; for (int i = from - 1; i >= to; i -= increment) { if (count >= retLength) break; ret[count++] = i; } } return ret; } /** * Generate a range * beginning at from and ending at to * incrementing by 1 * @param from the start * @param to the end * @return the int array starting at from and ending at to */ template <typename T> INLINEDEF _CUDA_HD T* range(int from, int to) { return range<T>(from, to, 1); } /** * Keep the given indexes in the data * @param data * @param index * @param indexLength * @param dataLength * @return */ INLINEDEF _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength) { traceNew(23); Nd4jLong *ret = new Nd4jLong[indexLength]; int count = 0; for (int i = 0; i < dataLength; i++) { int contains = 0; for (int j = 0; j < indexLength; j++) { if (i == index[j]) { contains = 1; break; } } if (contains) ret[count++] = data[i]; } return ret; } /** * Generate a reverse * copy of the data */ template <typename T> INLINEDEF _CUDA_HD T* reverseCopy(T *data, Nd4jLong length) { if (length < 1) return nullptr; traceNew(24); T *copy = new T[length]; for (Nd4jLong i = 0; i <= length / 2; i++) { T temp = data[i]; copy[i] = data[length - i - 1]; copy[length - i - 1] = temp; } 
return copy; } template <typename T> INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length) { if (length < 1) return; for (Nd4jLong i = 0; i <= length / 2; i++) { T temp = from[i]; to[i] = from[length - i - 1]; to[length - i - 1] = temp; } } template <typename T> INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length) { if (length < 1) return; for (Nd4jLong i = 0; i <= length / 2; i++) { T temp = from[indexes[i]]; to[i] = from[indexes[length - i - 1]]; to[length - i - 1] = temp; } } /** * * @param arr1 * @param arr1Length * @param arr2 * @param arr2Length * @return */ template <typename T> INLINEDEF _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length) { traceNew(25); T *ret = new T[arr1Length + arr2Length]; std::memcpy(ret, arr1, arr1Length * sizeof(T)); std::memcpy(ret + arr1Length, arr2, arr2Length * sizeof(T)); return ret; } /** * * @param numArrays * @param numTotalElements * @param arr * @param lengths * @return */ template <typename T> INLINEDEF _CUDA_HD T *concat(Nd4jLong numArrays, Nd4jLong numTotalElements, T **arr, Nd4jLong *lengths) { T* ret = new T[numTotalElements]; Nd4jLong count = 0; for (Nd4jLong i = 0; i < numArrays; i++) { for (Nd4jLong j = 0; j < lengths[i]; j++) { ret[count++] = arr[i][j]; } } return ret; } /** * Get the length per slice of the * given shape and the dimension * @param rank the rank of the shape * @param shape the shape of to get * the length per slice for * @param dimension the dimension to * get the length per slice for * @param dimensionLength the length of the dimension array * @return the length per slice of the given shape * along the given dimension */ INLINEDEF _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int* dimension, int dimensionLength) { if(shape::isVector(shape,rank)) { //return total length for row vectors if(dimensionLength == 1 && shape[0] == 1) { return shape::prod(shape,rank); } } else if(rank == dimensionLength) return 
shape::prod(shape,rank); int absSelta = nd4j::math::nd4j_abs<int>(rank - dimensionLength); traceNew(27); auto ret2 = shape::removeIndex<Nd4jLong>(shape, dimension, rank, dimensionLength); auto ret = prodLong(ret2, absSelta); delete[] ret2; return ret; } /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank, int index, Nd4jLong *shape, Nd4jLong *tensorShape, int tensorShapeLength, int* dimension, int dimensionLength) { auto tensorLength = prodLong(tensorShape, tensorShapeLength); auto lengthPerSlice2 = lengthPerSlice(rank, shape, dimension, dimensionLength); if (lengthPerSlice2 <= 0) { return 0; } Nd4jLong offset = index * tensorLength / lengthPerSlice2; return offset; } /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2) { Nd4jLong offset = index * tensorLength / lengthPerSlice2; return offset; } #ifdef __CUDACC__ /** * Computes the offset for accessing * a global element given the shape information * and the offset to be read. 
*/ INLINEDEF _CUDA_D int tadOffset(Nd4jLong *xInfo, int offset) { return offset + threadIdx.x * elementWiseStride(xInfo); } #endif /** * Computes the number * of tensors along * a given dimension */ INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(volatile int rank, volatile int length, volatile Nd4jLong *shape, int *dimension, int dimensionLength) { Nd4jLong *tensorShape = shape::keep(shape, dimension, dimensionLength, rank); Nd4jLong ret = length / shape::prodLong(tensorShape, dimensionLength); delete[] tensorShape; return ret; } /** * Computes the number * of tensors along * a given dimension */ INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) { Nd4jLong *keepShape = shape::shapeOf(shapeInfo); Nd4jLong *tensorShape = shape::keep(keepShape, dimension, dimensionLength, rank(shapeInfo)); Nd4jLong ret = shape::length(shapeInfo) / shape::prodLong(tensorShape, dimensionLength); delete[] tensorShape; return ret; } /** * Get an offset for retrieval * from a data buffer * based on the given * shape stride and given indices * @param baseOffset the offset to start from * @param shape the shape of the array * @param stride the stride of the array * @param indices the indices to iterate over * @return the double at the specified index */ INLINEDEF _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, const Nd4jLong *shape, const Nd4jLong *stride, const Nd4jLong *indices, int rank) { Nd4jLong offset = baseOffset; for(int i = 0; i < rank; i++) { if(shape[i] != 1) offset += indices[i] * stride[i]; } return offset; } /** * Returns the tensor along dimension * for the given block index * @param blockSize * @param blockIdx * @param i * @return */ INLINEDEF _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i) { return blockIdx + i * blockSize; } /** * Computes the number of tads per block * */ INLINEDEF _CUDA_HD int tadsPerBlock(int blockSize, int tads) { return nd4j::math::nd4j_ceil<double, int>(tads / (double) 
blockSize); } /** * Returns a shape buffer * for the shape information metadata. */ INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info) { traceNew(29); auto ret = new Nd4jLong[shapeInfoLength(info->rank)]; int count = 1; int rank = info->rank; ret[0] = info->rank; for (int i = 0; i < rank; i++) { ret[count++] = info->shape[i]; } for (int i = 0; i < rank; i++) { ret[count++] = info->stride[i]; } ret[count++] = info->offset; ret[count++] = info->elementWiseStride; ret[count] = info->order; return ret; } INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret) { int count = 1; int rank = info->rank; ret[0] = info->rank; if (ret[0] == 0) { ret[1] = 0; ret[2] = 1; ret[3] = 99; return ret; } for (int i = 0; i < rank; i++) { ret[count++] = info->shape[i]; } for (int i = 0; i < rank; i++) { ret[count++] = info->stride[i]; } ret[count++] = info->offset; ret[count++] = info->elementWiseStride; ret[count++] = info->order; return ret; } INLINEDEF _CUDA_HD void printIntArray(const Nd4jLong *arr, const int length) { for(int i = 0; i < length; i++) { printf(" %lld ", (long long) arr[i]); } printf("\n"); } INLINEDEF _CUDA_HD void printIntArray(const int *arr, const int length) { for(int i = 0; i < length; i++) { printf(" %i ", arr[i]); } printf("\n"); } INLINEDEF _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo) { int rank = shape::rank(shapeInfo); Nd4jLong *shape = shape::shapeOf(shapeInfo); printf("Rank %d\n",rank); printf("Shape:\n"); for(int i = 0; i < rank; i++) { printf(" %lld ",(long long) shape[i]); } printf("\n"); Nd4jLong *stride = shape::stride(shapeInfo); printf("Stride:\n"); for(int i = 0; i < rank; i++) { printf(" %lld ", (long long) stride[i]); } printf("\n"); printf("Order %c\n",shape::order(shapeInfo)); } INLINEDEF _CUDA_HD void printShapeInfoLinear(const Nd4jLong *shapeInfo) { int rank = shape::rank(shapeInfo); int lim = shape::shapeInfoLength(rank); printf("ShapeInfo: ["); for (int i = 0; i < lim; i++) { printf("%lld", 
(long long) shapeInfo[i]); if (i < lim - 1) { printf(", "); } } printf("]\n"); #ifndef __CUDA_ARCH__ fflush(stdout); #endif } INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, const Nd4jLong *shape, const Nd4jLong *strides) { printf("%s : [", msg); for (int i = 0; i < rank; i++) { printf("%lld, ", (long long) shape[i]); } for (int i = 0; i < rank; i++) { printf("%lld", (long long) strides[i]); if (i < rank - 1) printf(", "); } printf("]\n"); #ifndef __CUDA_ARCH__ fflush(stdout); #endif } INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, const Nd4jLong *shapeInfo) { int rank = shape::rank(shapeInfo); int lim = shape::shapeInfoLength(rank); printf("%s : [", msg); for (int i = 0; i < lim; i++) { printf("%lld", (long long) shapeInfo[i]); if (i < lim - 1) { printf(", "); } } printf("]\n"); #ifndef __CUDACC__ fflush(stdout); #endif } template <typename T> INLINEDEF _CUDA_HD void printArray(void *varr,int length, const char * message) { auto arr = reinterpret_cast<T*>(varr); if (message != nullptr) printf("%s: [", message); else printf("Array: ["); for (int i = 0; i < length; i ++) { printf("%f", (float) arr[i]); if (i + 1 < length) printf(", "); } printf("]\n"); #ifndef __CUDACC__ fflush(stdout); #endif } INLINEDEF _CUDA_HD void printArray(float *arr,int length) { printf("Array: ["); for (int i = 0; i < length; i ++) { printf("%f", arr[i]); if (i + 1 < length) printf(", "); } printf("]\n"); } /** * Given an linear index, element wise stride * and the length of each tad * map a linear index to a tad * @param i the index to map * @param the element wise stride for the tads * @param numElementsPerTad the number of elements * per tad */ INLINEDEF _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad) { return i / (numElementsPerTad * elementWiseStride); } /** * Map a tad to a * reduction index. 
* @param tadIndexForOriginal the original tad index for the * split up problem (eg: split is dimension 3 mapping to a 2,3 problem) * @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3) * @param tadsForOriginal the number of tads for the smaller problem (eg: 3) */ INLINEDEF _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced, int tadsForOriginal) { if (tadIndexForOriginal == 0) return 0; return tadIndexForOriginal / (tadsForOriginal / tadsForReduced); } INLINEDEF _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer) { int rank = shape::rank(shapeBuffer); Nd4jLong *shape = shape::shapeOf(shapeBuffer); Nd4jLong *strides = shape::stride(shapeBuffer); // swap shape for (int e = 0; e < rank / 2; e++) { int idx1 = rank - e - 1; int idx2 = e; int tmp = shape[idx2]; shape[idx2] = shape[idx1]; shape[idx1] = tmp; } // swap strides for (int e = 0; e < rank / 2; e++) { int idx1 = rank - e - 1; int idx2 = e; int tmp = strides[idx2]; strides[idx2] = strides[idx1]; strides[idx1] = tmp; } if (shape::order(shapeBuffer) == 'c') shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 102; else shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 99; } /** * Tad index for linear * @param linearIndex * @param tadLength * @return */ INLINEDEF _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength) { return linearIndex % tadLength; } /** * Computes the number of tads * per reduce index for the * reduction tad. 
*/ INLINEDEF _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal) { return tadsForOriginal / tadsForReduce; } /** * Maps a linear index to a reduction index * @param i the linear index to map * @param elementWiseStride the element wise stride * for the multiple problem * @param tadNum the number of tads for the shrunken problem * @param originalTadNum the tad number for the reduced version of the problem */ INLINEDEF _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad, int tadNum, int originalTadNum) { int tad = tadIndex(i, elementWiseStride, numElementsPerTad); return reductionIndexForTad(tad, tadNum, originalTadNum); } INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo() { traceNew(30); auto shape = new Nd4jLong[1]; shape[0] = 1; auto stride = new Nd4jLong[1]; stride[0] = 1; auto shapeInformation2 = new ShapeInformation(); shapeInformation2->rank = 1; shapeInformation2->offset = 0; shapeInformation2->stride = stride; shapeInformation2->shape = shape; shapeInformation2->elementWiseStride = 1; shapeInformation2->order = 99; Nd4jLong *ret = shape::toShapeBuffer(shapeInformation2); delete shapeInformation2; delete[] shape; delete[] stride; return ret; } INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret) { ret[0] = 2; ret[1] = 1; ret[2] = 1; ret[3] = 1; ret[4] = 1; ret[5] = 0; ret[6] = 1; ret[7] = 99; return ret; } /** * Returns the prod of the data * up to the given length */ INLINEDEF _CUDA_HD int prod(Nd4jLong *data, int length) { int prod = 1; for (int i = 0; i < length; i++) { prod *= data[i]; } return prod; } /** * Returns the prod of the data * up to the given length */ INLINEDEF _CUDA_HD Nd4jLong prodLong(const Nd4jLong *data, int length) { Nd4jLong prod = 1; for (int i = 0; i < length; i++) { prod *= data[i]; } return prod; } INLINEDEF _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data, Nd4jLong *dimension,int dimensionLength) { Nd4jLong *stride = shape::stride(data); //corner case: return 
the final item when its greater than the max, since its guaranteed to be left over //note here that strides are interpreted in reverse for tad //start from the front rather than the back int rank = shape::rank(data); if(shape::order(data) == 'f') { int dimIdx = dimensionLength - 1; for(int i = rank - 1; i >= 0; i--) { /** * Needs to find an algorithm such that: * looping backwards will find the highest dimension left * that isn't included in the dimension index list. * * This can also be thought of as the last item of the first index * of the difference between the full list of indices and * the dimension indices. * * We should avoid excessive object creation by only looping backwards. */ if(dimension[dimIdx--] != i) { int ret = stride[i]; return ret; } } } else { int dimIdx = dimensionLength - 1; for(int i = rank - 1; i >= 0; i--) { /** * Needs to find an algorithm such that: * looping backwards will find the highest dimension left * that isn't included in the dimension index list. * * This can also be thought of as the last item of the first index * of the difference between the full list of indices and * the dimension indices. * * We should avoid excessive object creation by only looping backwards. 
*/ if(dimension[dimIdx--] != i) { int ret = stride[i]; return ret; } } } int ret = stride[0]; return ret; } #ifdef __CUDACC__ __device__ INLINEDEF void sweepShapeInfoBuffer(Nd4jLong *shapeInfoBuffer, Nd4jLong *targetBuffer) { // we read first element, to find out length of our shapeInfoBuffer int rank = shapeInfoBuffer[0]; int len = shape::shapeInfoLength(rank); for (int i = threadIdx.x; i < len; i += blockDim.x) targetBuffer[i] = shapeInfoBuffer[i]; } #endif INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr) { return shape::shapeBufferOfNpy(arr.shape.size(),(unsigned int*) arr.shape.data(),arr.fortranOrder); } // INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer) { // unsigned Nd4jLong *shape; // unsigned int ndims, wordSize; // bool fortranOrder; // cnpy::parseNpyHeaderStr(std::string(buffer),wordSize,shape,ndims,fortranOrder); // Nd4jLong * ret = shape::shapeBufferOfNpy(ndims,shape,fortranOrder); // delete[] shape; // return ret; // } INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(int rank, unsigned int* shape,bool fortranOrder) { if(fortranOrder) { Nd4jLong *shapeBufferRet = shape::shapeBufferFortran(rank, nd4j::FLOAT32,(Nd4jLong *) shape); return shapeBufferRet; } else { Nd4jLong *newShape = new Nd4jLong[rank]; for(int i = 0; i < rank; i++) { newShape[i] = shape[i]; } Nd4jLong *shapeBufferRet = shape::shapeBuffer(rank, nd4j::FLOAT32, newShape); delete[] newShape; return shapeBufferRet; } } INLINEDEF _CUDA_HD bool strideDescendingCAscendingF(const Nd4jLong *shapeBuffer) { int rank = shape::rank(shapeBuffer); Nd4jLong *strides = shape::stride(const_cast<Nd4jLong*>(shapeBuffer)); char order = shape::order(shapeBuffer); if (shape::isRowVector(shapeBuffer) && strides[0] == 1 && strides[1] == 1) return true; if (order == 'c') { for (int i = 1; i < rank; i++) if (strides[i-1] <= strides[i]) return false; return true; } else if (order == 'f') { for (int i = 1; i < rank; i++) if (strides[i-1] >= strides[i]) return false; return true; } else 
{ printf("Unknown order for array!\n"); return false; } } INLINEDEF _CUDA_HD bool isContiguous(const Nd4jLong* shapeInfo) { return (order(shapeInfo) == 'c') && (elementWiseStride(shapeInfo) > 0); } ////////////////////////////////////////////////////////////////////////// // copy-past from java hasDefaultStridesForShape function INLINEDEF _CUDA_HD bool areStridesDefault(const Nd4jLong* shapeInfo) { const int rank = shape::rank(shapeInfo); if(rank == 0) return true; if(!strideDescendingCAscendingF(shapeInfo)) return false; Nd4jLong defaultShapeInfo[MAX_SHAPEINFOLENGTH]; memcpy(defaultShapeInfo, shapeInfo, shape::shapeInfoByteLength(shapeInfo)); shape::updateStrides(defaultShapeInfo, shape::order(shapeInfo)); bool result = true; for(int i = rank+1; i <= 2*rank; ++i) if(defaultShapeInfo[i] != shapeInfo[i]) { result = false; break; } return result; } // INLINEDEF _CUDA_H bool reshapeC(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder, Nd4jLong* target) { // int oldnd; // Nd4jLong* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape)); // Nd4jLong* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape)); // int np, op, last_stride; // int oi, oj, ok, ni, nj, nk; // Nd4jLong* newStrides = new Nd4jLong[newRank]; // oldnd = 0; // /* // * Remove axes with dimension 1 from the old array. They have no effect // * but would need special cases since their strides do not matter. 
// */ // for (oi = 0; oi < oldRank; oi++) { // if (shape::shapeOf(oldShape)[oi] != 1) { // olddims[oldnd] = shape::shapeOf(oldShape)[oi]; // oldstrides[oldnd] = shape::stride(oldShape)[oi]; // oldnd++; // } // } // np = 1; // for (ni = 0; ni < newRank; ni++) { // np *= newShapeOf[ni]; // } // op = 1; // for (oi = 0; oi < oldnd; oi++) { // op *= olddims[oi]; // } // if (np != op) { // /* different total sizes; no hope */ // delete[] olddims; // delete[] oldstrides; // delete[] newStrides; // return false; // } // if (np == 0) { // /* the current code does not handle 0-sized arrays, so give up */ // delete[] olddims; // delete[] oldstrides; // delete[] newStrides; // return false; // } // /* oi to oj and ni to nj give the axis ranges currently worked with */ // oi = 0; // oj = 1; // ni = 0; // nj = 1; // while (ni < newRank && oi < oldnd) { // np = newShapeOf[ni]; // op = olddims[oi]; // while (np != op) { // if (np < op) { // /* Misses trailing 1s, these are handled later */ // np *= newShapeOf[nj++]; // } else { // op *= olddims[oj++]; // } // } // /* Check whether the original axes can be combined */ // for (ok = oi; ok < oj - 1; ok++) { // if (isFOrder) { // if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) { // /* not contiguous enough */ // delete[] olddims; // delete[] oldstrides; // delete[] newStrides; // return false; // } // } else { // /* C order */ // if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) { // /* not contiguous enough */ // delete[] olddims; // delete[] oldstrides; // delete[] newStrides; // return false; // } // } // } // /* Calculate new strides for all axes currently worked with */ // if (isFOrder) { // newStrides[ni] = oldstrides[oi]; // for (nk = ni + 1; nk < nj; nk++) { // newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1]; // } // } else { // /* C order */ // newStrides[nj - 1] = oldstrides[oj - 1]; // for (nk = nj - 1; nk > ni; nk--) { // newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk]; // } // } // ni = 
nj++; // oi = oj++; // } // if (ni >= 1) { // last_stride = newStrides[ni - 1]; // } else { // last_stride = shape::elementWiseStride(oldShape); // } // if (isFOrder && ni >= 1) { // last_stride *= newShapeOf[ni - 1]; // } // for (nk = ni; nk < newRank; nk++) { // newStrides[nk] = last_stride; // } // target[0] = newRank; // int cnt = 1; // for (int e = 0; e < newRank; e++) // target[cnt++] = newShapeOf[e]; // for (int e = 0; e < newRank; e++) // target[cnt++] = newStrides[e]; // target[shape::shapeInfoLength(newRank) - 3] = 0; // target[shape::shapeInfoLength(newRank) - 2] = 0; // target[shape::shapeInfoLength(newRank) - 1] = isFOrder ? 102 : 99; // nd4j::ArrayOptions::setDataType(target, nd4j::ArrayOptions::dataType(oldShape)); // delete[] olddims; // delete[] oldstrides; // delete[] newStrides; // return true; // } // INLINEDEF _CUDA_H bool reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo, const int newRank, const Nd4jLong* newShape, const bool isFOrder, Nd4jLong* newShapeInfo) { // // PLEASE NOTE !: reshaping not-permuted (ews=1) array in f order (except insertion/elimination of unities) will definitely cause allocation of new buffer for array elements // // also this function takes into account identical shapes automatically, namely in that case oldShapeInfo is completely copied to newShapeInfo // const int newOrder = isFOrder ? 
102 : 99; // const int oldOrder = oldShapeInfo[2 * oldRank + 3]; // newShapeInfo[0] = newRank; // memcpy(newShapeInfo + 1, newShape, newRank * sizeof(Nd4jLong)); // Nd4jLong* newStrides = shape::stride(newShapeInfo); // const Nd4jLong* oldShape = shape::shapeOf(const_cast<Nd4jLong*>(oldShapeInfo)); // const Nd4jLong* oldStrides = shape::stride(const_cast<Nd4jLong*>(oldShapeInfo)); // int oldStart(0), oldStop(1), newStart(0), newStop(1), newDim, oldDim; // while (newStart < newRank && oldStart < oldRank) { // newDim = newShape[newStart]; // oldDim = oldShape[oldStart]; // while (newDim != oldDim) // if (newDim < oldDim) newDim *= newShape[newStop++]; // else oldDim *= oldShape[oldStop++]; // // ------ Check whether the original axes can be combined ------ // // for (int i = oldStart; i < oldStop - 1; i++) { // if(oldShape[i] == 1) { // ignore strides like {...,1,1,...} // if(oldOrder == 102) ++oldStart; // continue; // } // if(oldOrder == 102 && oldStrides[i + 1] != oldShape[i] * oldStrides[i]) // return false; // not contiguous enough // if(oldOrder == 99 && oldStrides[i] != oldShape[i + 1] * oldStrides[i + 1]) // return false; // not contiguous enough // } // // ------ Calculate new strides for all axes currently worked with ------ // // if(isFOrder) { // newStrides[newStart] = oldStrides[oldStart]; // for (int i = newStart + 1; i < newStop; ++i) // newStrides[i] = newStrides[i - 1] * newShape[i - 1]; // } // else { // newStrides[newStop - 1] = oldStrides[oldStop - 1]; // for (int i = newStop - 1; i > newStart; --i) // newStrides[i - 1] = newStrides[i] * newShape[i]; // } // newStart = newStop++; // oldStart = oldStop++; // } // newShapeInfo[2 * newRank + 3] = shape::order(oldShapeInfo); // order // newShapeInfo[2 * newRank + 2] = shape::elementWiseStride(oldShapeInfo); // ews // newShapeInfo[2 * newRank + 1] = shape::type(oldShapeInfo); // type // return true; // } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_H bool 
reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo, const int newRank, const Nd4jLong* newShape, Nd4jLong* newShapeInfo) { // PLEASE NOTE !: reshaping not-permuted (ews=1) array in f order (except insertion/elimination of unities) will definitely cause allocation of new buffer for array elements // also this function takes into account identical shapes automatically, namely in that case oldShapeInfo is completely copied to newShapeInfo newShapeInfo[0] = newRank; memcpy(newShapeInfo + 1, newShape, newRank * sizeof(Nd4jLong)); Nd4jLong* newStrides = shape::stride(newShapeInfo); const Nd4jLong* oldShape = shape::shapeOf(const_cast<Nd4jLong*>(oldShapeInfo)); const Nd4jLong* oldStrides = shape::stride(const_cast<Nd4jLong*>(oldShapeInfo)); int oldStart(0), oldStop(1), newStart(0), newStop(1), newDim, oldDim; while (newStart < newRank && oldStart < oldRank) { newDim = newShape[newStart]; oldDim = oldShape[oldStart]; while (newDim != oldDim && newDim > 0 && oldDim > 0) if (newDim < oldDim) newDim *= newShape[newStop++]; else oldDim *= oldShape[oldStop++]; // ------ Check whether the original axes can be combined ------ // for (int step = 1, i = oldStart; i < oldStop - 1; ++i) { if(oldShape[i] == 1) // skip unity-dimension and its stride continue; while((i + step) < oldRank && oldShape[i + step] == 1) ++step; // skip following unity-dimensions and its strides if such are present if((i + step) < oldRank && oldStrides[i] != oldShape[i + step] * oldStrides[i + step]) return false; // not contiguous enough } newStrides[newStop - 1] = oldStrides[oldStop - 1]; for (int i = newStop - 1; i > newStart; --i) newStrides[i - 1] = newStrides[i] * newShape[i]; newStart = newStop++; oldStart = oldStop++; } newShapeInfo[2 * newRank + 3] = shape::order(oldShapeInfo); // order newShapeInfo[2 * newRank + 2] = shape::elementWiseStride(oldShapeInfo); // ews newShapeInfo[2 * newRank + 1] = shape::type(oldShapeInfo); // type return true; } INLINEDEF _CUDA_H bool canReshape(const int 
oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder) { int oldnd; Nd4jLong* oldDims = shape::copyOf(oldRank, shape::shapeOf(oldShape)); Nd4jLong* oldStrides = shape::copyOf(oldRank, shape::stride(oldShape)); int np, op, last_stride; int oldStart, oldStop, ok, newStart, newStop, nk; auto newStrides = new Nd4jLong[newRank]; oldnd = 0; /* * Remove axes with dimension 1 from the old array. They have no effect * but would need special cases since their strides do not matter. */ for (oldStart = 0; oldStart < oldRank; oldStart++) { if (shape::shapeOf(oldShape)[oldStart] != 1) { oldDims[oldnd] = shape::shapeOf(oldShape)[oldStart]; oldStrides[oldnd] = shape::stride(oldShape)[oldStart]; oldnd++; } } np = 1; for (newStart = 0; newStart < newRank; newStart++) { np *= newShapeOf[newStart]; } op = 1; for (oldStart = 0; oldStart < oldnd; oldStart++) { op *= oldDims[oldStart]; } if (np != op) { /* different total sizes; no hope */ delete[] oldDims; delete[] oldStrides; delete[] newStrides; return false; } if (np == 0) { /* the current code does not handle 0-sized arrays, so give up */ delete[] oldDims; delete[] oldStrides; delete[] newStrides; return false; } /* oldStart to oldStop and newStart to newStop give the axis ranges currently worked with */ oldStart = 0; oldStop = 1; newStart = 0; newStop = 1; while (newStart < newRank && oldStart < oldnd) { np = newShapeOf[newStart]; op = oldDims[oldStart]; while (np != op) { if (np < op) { /* Misses trailing 1s, these are handled later */ np *= newShapeOf[newStop++]; } else { op *= oldDims[oldStop++]; } } /* Check whether the original axes can be combined */ for (ok = oldStart; ok < oldStop - 1; ok++) { if (isFOrder) { if (oldStrides[ok + 1] != oldDims[ok] * oldStrides[ok]) { /* not contiguous enough */ delete[] oldDims; delete[] oldStrides; delete[] newStrides; return false; } } else { /* C order */ if (oldStrides[ok] != oldDims[ok + 1] * oldStrides[ok + 1]) { /* not contiguous enough */ delete[] 
oldDims; delete[] oldStrides; delete[] newStrides; return false; } } } /* Calculate new strides for all axes currently worked with */ if (isFOrder) { newStrides[newStart] = oldStrides[oldStart]; for (nk = newStart + 1; nk < newStop; nk++) { newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1]; } } else { /* C order */ newStrides[newStop - 1] = oldStrides[oldStop - 1]; for (nk = newStop - 1; nk > newStart; nk--) { newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk]; } } newStart = newStop++; oldStart = oldStop++; } delete[] oldDims; delete[] oldStrides; delete[] newStrides; return true; } // this function checks the consistence of dimensions with array rank (negative dimensions, too large dimensions, too big number of dimensions) // also it sorts input array of dimensions, this operation is also necessary for creating TAD object INLINEDEF _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions) { int dimSize = dimensions.size(); if(dimSize == 0) throw std::runtime_error("shape::checkDimensions method: array of dimensions is empty!"); // check presence of negative dimensions and if they are present transform them to positive ones -dim -> rank - |dim| for(auto& dim : dimensions) if(dim < 0) dim += rank; // sort input array of dimensions, this operation is also necessary for creating TAD object in external methods if (dimSize > 1) { std::sort(dimensions.begin(), dimensions.end()); // remove duplicates if they are present dimensions.erase(std::unique(dimensions.begin(), dimensions.end()), dimensions.end()); } // check whether number of dimensions is to big (>rank) dimSize = dimensions.size(); if(dimSize > rank) throw std::runtime_error("shape::checkDimensions method: number of input dimensions is too big ( > rank of array)!"); // check if min dimension is still negative and whether max dimension is bigger then rank-1 if(dimensions[0] < 0 || dimensions.back() > (rank-1)) throw std::runtime_error("shape::checkDimensions method: the negative 
dimension is still present in input array after transform or the too big dimension is present ( > rank of array) !"); } // max array is outer for min array, min array is sub-array of max array // function calculates the coordinates of min array (and saves them into minIdxs) given coordinates of max array (already stored in maxIdxs) INLINEDEF _CUDA_HD void maxIndToMinInd(Nd4jLong* maxIdxs, Nd4jLong* minIdxs, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude, int dimsLen) { const auto maxRank = shape::rank(maxShapeInfo); const auto minRank = shape::rank(minShapeInfo); // if(minRank >= maxRank) // throw std::runtime_error("shape::maxIndToMinInd method: rank of min array should be smaller then rank of max array!"); if(dimsLen == -1) dimsLen = maxRank - minRank; // if size is not given (= -1) then it is equal to ranks difference if(maxRank == minRank) { if(dimsToExclude == nullptr) { // --> means dimsToExclude == {0,1,2,...,dimsLen-1} for (int i = 0; i < maxRank; ++i) { if(i < dimsLen) minIdxs[i] = maxIdxs[i]; else { if(maxIdxs[i] > minShapeInfo[i + 1]) minIdxs[i] = maxIdxs[i] % minShapeInfo[i + 1]; else if(maxIdxs[i] == minShapeInfo[i + 1]) minIdxs[i] = 0; else minIdxs[i] = maxIdxs[i]; } } } else { for (int i = 0, dim = 0; i < maxRank; ++i) { if(dim < dimsLen && dimsToExclude[dim] == i) { minIdxs[i] = maxIdxs[i]; ++dim; continue; } if(maxIdxs[i] > minShapeInfo[i + 1]) minIdxs[i] = maxIdxs[i] % minShapeInfo[i + 1]; else if(maxIdxs[i] == minShapeInfo[i + 1]) minIdxs[i] = 0; else minIdxs[i] = maxIdxs[i]; } } } else { if(dimsToExclude == nullptr) { // --> means dimsToExclude == {0,1,2,...,dimsLen-1} for (int i = 0; i < minRank; ++i) { if(maxIdxs[i + dimsLen] > minShapeInfo[i + 1]) minIdxs[i] = maxIdxs[i + dimsLen] % minShapeInfo[i + 1]; else if(maxIdxs[i + dimsLen] == minShapeInfo[i + 1]) minIdxs[i] = 0; else minIdxs[i] = maxIdxs[i + dimsLen]; } } else { for (int minI = 0, maxI = 0, dim = 0; maxI < maxRank; ++maxI) { if(dim < dimsLen && 
dimsToExclude[dim] == maxI) { ++dim; continue; } if(maxIdxs[maxI] == minShapeInfo[minI + 1]) minIdxs[minI] = 0; else if(maxIdxs[maxI] > minShapeInfo[minI + 1]) minIdxs[minI] = maxIdxs[maxI] % minShapeInfo[minI + 1]; else minIdxs[minI] = maxIdxs[maxI]; ++minI; } } } } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD Nd4jLong subArrayIndex(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude, const int dimsLen) { Nd4jLong maxIdxs[MAX_RANK]; shape::index2coords(shape::rank(maxShapeInfo), const_cast<Nd4jLong *>(maxShapeInfo)+1, const_cast<Nd4jLong&>(maxIdx), maxIdxs, shape::order(maxShapeInfo)); Nd4jLong minIdxs[MAX_RANK]; maxIndToMinInd(maxIdxs, minIdxs, maxShapeInfo, minShapeInfo, dimsToExclude, dimsLen); return coords2index(shape::rank(minShapeInfo), minShapeInfo + 1, minIdxs); } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD Nd4jLong subArrayOffset(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude, const int dimsLen) { Nd4jLong maxIdxs[MAX_RANK]; shape::index2coords(shape::rank(maxShapeInfo), const_cast<Nd4jLong *>(maxShapeInfo)+1, const_cast<Nd4jLong&>(maxIdx), maxIdxs, shape::order(maxShapeInfo)); Nd4jLong minIdxs[MAX_RANK]; maxIndToMinInd(maxIdxs, minIdxs, maxShapeInfo, minShapeInfo, dimsToExclude, dimsLen); return getOffset(0, minShapeInfo + 1, minShapeInfo + shape::rank(minShapeInfo) + 1, minIdxs, shape::rank(minShapeInfo)); } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD int outerArrayOffsets(Nd4jLong* maxOffsets, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude) { const auto rankMin = shape::rank(minShapeInfo); const auto rankMax = shape::rank(maxShapeInfo); // if(rankMin >= rankMax) // throw std::runtime_error("shape::subArrayIndex method: rank of min array 
should be smaller then rank of max array!"); // if(rankMax > MAX_RANK/2) // throw std::runtime_error("shape::subArrayIndex method: rank of max array should be <= MAX_RANK/2 !"); const auto diff = rankMax - rankMin; // the size of dimsToExclude is equal to diff Nd4jLong buffer[MAX_RANK]; Nd4jLong* indices = buffer; Nd4jLong* increment = buffer + MAX_RANK/2; int N, minI, maxI; // calculate min per-dim-indices which corresponds to absolute minIdx index shape::index2coords(rankMin, minShapeInfo + 1, minIdx, indices, order(minShapeInfo)); // transform storage indices to contain per-dim max indices, purpose - memory saving // fill increment array as well if(dimsToExclude == nullptr) { // means dimsToExclude == {0,1,2,...,diff-1} for(minI = rankMin - 1, maxI = rankMax-1; maxI >= diff; --maxI, --minI) { increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1]; indices[maxI] = indices[minI]; } for(maxI = 0; maxI < diff; ++maxI) { increment[maxI] = 1; indices[maxI] = 0; } } else { for(N = diff-1, minI = rankMin - 1, maxI = rankMax - 1; maxI >= 0; --maxI) { if(N >= 0 && dimsToExclude[N] == maxI) { increment[maxI] = 1; indices[maxI] = 0; --N; } else { increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 
0 : minShapeInfo[minI+1]; indices[maxI] = indices[minI--]; } } } maxI = rankMax-1; N = 0; int step; maxOffsets[N++] = shape::getOffset(0, maxShapeInfo + 1, maxShapeInfo + rankMax + 1, indices, rankMax); // nested loops - producing of absolute indices for max array while(maxI >= 0) { if(increment[maxI] != 0) { indices[maxI] += increment[maxI]; if(indices[maxI] >= maxShapeInfo[maxI+1]) { indices[maxI] %= increment[maxI]; // restore initial value of indices[maxI] step = -1; } else { maxOffsets[N++] = shape::getOffset(0, maxShapeInfo + 1, maxShapeInfo + rankMax + 1, indices, rankMax); step = rankMax - 1 - maxI; } } else if(maxI == rankMax - 1) step = -1; maxI += step; } return N; } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD int outerArrayIndexes(Nd4jLong* maxIdxs, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude) { const auto rankMin = shape::rank(minShapeInfo); const auto rankMax = shape::rank(maxShapeInfo); // if(rankMin >= rankMax) // throw std::runtime_error("shape::subArrayIndex method: rank of min array should be smaller then rank of max array!"); // if(rankMax > MAX_RANK/2) // throw std::runtime_error("shape::subArrayIndex method: rank of max array should be <= MAX_RANK/2 !"); const auto diff = rankMax - rankMin; // the size of dimsToExclude is equal to diff Nd4jLong buffer[MAX_RANK]; Nd4jLong* indices = buffer; Nd4jLong* increment = buffer + MAX_RANK/2; int N, minI, maxI; // calculate min per-dim-indices which corresponds to absolute minIdx index shape::index2coords(rankMin, minShapeInfo + 1, minIdx, indices, order(minShapeInfo)); // transform storage indices to contain per-dim max indices, purpose - memory saving // fill increment array as well if(dimsToExclude == nullptr) { // means dimsToExclude == {0,1,2,...,diff-1} for(minI = rankMin - 1, maxI = rankMax-1; maxI >= diff; --maxI, --minI) { increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) 
? 0 : minShapeInfo[minI+1]; indices[maxI] = indices[minI]; } for(maxI = 0; maxI < diff; ++maxI) { increment[maxI] = 1; indices[maxI] = 0; } } else { for(N = diff-1, minI = rankMin - 1, maxI = rankMax - 1; maxI >= 0; --maxI) { if(N >= 0 && dimsToExclude[N] == maxI) { increment[maxI] = 1; indices[maxI] = 0; --N; } else { increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1]; indices[maxI] = indices[minI--]; } } } maxI = rankMax-1; N = 0; int step; maxIdxs[N++] = coords2index(rankMax, maxShapeInfo + 1, indices); // nested loops - producing of absolute indices for max array while(maxI >= 0) { if(increment[maxI] != 0) { indices[maxI] += increment[maxI]; if(indices[maxI] >= maxShapeInfo[maxI+1]) { indices[maxI] %= increment[maxI]; // restore initial value of indices[maxI] step = -1; } else { maxIdxs[N++] = coords2index(rankMax, maxShapeInfo + 1, indices); step = rankMax - 1 - maxI; } } else if(maxI == rankMax - 1) step = -1; maxI += step; } return N; } INLINEDEF _CUDA_HD void shapeOldScalar(nd4j::DataType dataType, Nd4jLong* const buffer, const char order) { buffer[0] = 2; buffer[1] = 1; buffer[2] = 1; buffer[3] = 1; buffer[4] = 1; buffer[6] = 1; buffer[7] = (int)order; nd4j::ArrayOptions::setDataType(buffer, dataType); } template <typename T1, typename T2> INLINEDEF _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length) { for (Nd4jLong e = 0; e < length; e++) to[e] = (T2) from[e]; }; ////////////////////////////////////////////////////////////////////// INLINEDEF void calcOffsets(const Nd4jLong* shapeInfo, Nd4jLong* offsets, const char order) { // firstly consider simple case when ews > 0 const Nd4jLong ews = shape::elementWiseStride(shapeInfo); if(ews > 0) { // set offset for first sub-array, it is equal to zero always offsets[0] = 0; Nd4jLong e = 0; if(order != shape::order(shapeInfo)) for(int i = 1; i <= shape::rank(shapeInfo); ++i) if(shapeInfo[i] != 1) ++e; //check whether input is CommonVector if(order == 
shape::order(shapeInfo) || e == 1) { // e==1 means common vector e = 1; Nd4jLong len = shape::length(shapeInfo); while(e < len) offsets[e++] = offsets[e - 1] + ews; return; } } shape::calcOffsets(shape::rank(shapeInfo), shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo)), shape::stride(const_cast<Nd4jLong*>(shapeInfo)), offsets, order); } ////////////////////////////////////////////////////////////////////// INLINEDEF void calcOffsets(const int rank, const Nd4jLong* shape, const Nd4jLong* strides, Nd4jLong* offsets, const char order) { // if(false) { // tests showed that this code did calculation notably slower even for big N // Nd4jLong indexes[MAX_RANK]; // PRAGMA_OMP_PARALLEL_FOR_ARGS(private(indexes)) // for (Nd4jLong i = 0; i < N; ++i) { // shape::index2coords(rank, shape, i, indexes); // subArrOffsets[i] = 0; // for (int j = 0; j < rank; ++j) // if(shape[j] != 1) // subArrOffsets[i] += indexes[j] * strides[j]; // } // return; // } // set offset for first sub-array, it is equal to zero always offsets[0] = 0; Nd4jLong * idx = new Nd4jLong[rank]; Nd4jLong* offsetPerDim = new Nd4jLong[rank]; memset(idx, 0, sizeof(Nd4jLong) * rank); PRAGMA_OMP_SIMD for (int k = 0; k < rank; ++k) offsetPerDim[k] = (shape[k] - 1) * strides[k]; Nd4jLong init = 0, i = 1; // nested loops - calculation of sub-array offsets if(order == 'c') { Nd4jLong rankMinusOne = rank - 1, j = rankMinusOne; while(j >= 0) { if(shape[j] == 1) { --j; continue; } // ignore dimensions equal to unity if(j == rankMinusOne) { // last dimension for(int l = 1; l < shape[j]; ++l) offsets[i++] = offsets[i - 1] + strides[j]; --j; } else if(idx[j] < shape[j] - 1) { init += strides[j]; offsets[i++] = init; ++idx[j]; j = rankMinusOne; } else { init -= offsetPerDim[j]; idx[j--] = 0; } } } else { Nd4jLong j = 0; while(j < rank) { if(shape[j] == 1) { ++j; continue; } // ignore dimensions equal to unity if(j == 0) { // last dimension for(int l = 1; l < shape[j]; ++l) offsets[i++] = offsets[i - 1] + strides[j]; ++j; } else 
if(idx[j] < shape[j] - 1) { init += strides[j]; offsets[i++] = init; ++idx[j]; j = 0; } else { init -= offsetPerDim[j]; idx[j++] = 0; } } } delete []idx; delete []offsetPerDim; } ////////////////////////////////////////////////////////////////////// INLINEDEF void _CUDA_HD setEws(Nd4jLong* shapeInfo, Nd4jLong len) { const int rank = shape::rank(shapeInfo); const Nd4jLong* shape = shape::shapeOf(shapeInfo); const Nd4jLong* strides = shape::stride(shapeInfo); const char order = shape::order(shapeInfo); Nd4jLong* ews = shape::ews(shapeInfo); if(len == -1) // calculate array length if it is not given len = shape::length(shapeInfo); if(len <= 1) { // empty, scalar or unity-vector case *ews = 1; return; } int nonUnityDim(0); if(shape::isCommonVector(shapeInfo, nonUnityDim)) { *ews = strides[nonUnityDim]; return; } // check last(c)/first(f) dimension, it should be equal to 1 if((order == 'c' && shape[rank - 1] != 1 && strides[rank - 1] != 1) || (order == 'f' && shape[0] != 1 && strides[0] != 1)) { *ews = 0; return; } Nd4jLong correctStride = 1; if(order == 'c') { for (int i = rank - 2; i >= 0 ; i--) { correctStride *= shape[i + 1]; if(shape[i] == 1) continue; if(correctStride != strides[i]) { *ews = 0; return; } } } else { for (int i = 1; i < rank; ++i) { correctStride *= shape[i - 1]; if(shape[i] == 1) continue; if(correctStride != strides[i]) { *ews = 0; return; } } } *ews = 1; } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD void setOrderAndEws(Nd4jLong* shapeInfo, Nd4jLong len) { const int rank = shape::rank(shapeInfo); const Nd4jLong* shape = shape::shapeOf(shapeInfo); const Nd4jLong* strides = shape::stride(shapeInfo); const char order = shape::order(shapeInfo); Nd4jLong* ews = shape::ews(shapeInfo); if(len == -1) // calculate array length if it is not given len = shape::length(shapeInfo); if(len <= 1) { // empty, scalar or unity-vector case *ews = 1; return; } int nonUnityDim(0); if(shape::isCommonVector(shapeInfo, 
nonUnityDim)) { // in this case we don't change order *ews = strides[nonUnityDim]; return; } // check if strides are contiguous in respect to c-order // firstly check last stride, it should be equal to 1 if (strides[rank - 1] == 1 || shape[rank - 1] == 1) { // last dimension is ok, go on through the rest dimensions in reverse order Nd4jLong correctStride = 1; bool cContiguous = true; for (int i = rank - 2; i >= 0 ; i--) { correctStride *= shape[i + 1]; if(shape[i] == 1) continue; if(correctStride != strides[i]) { cContiguous = false; break; } } if(cContiguous) { *ews = 1; shapeInfo[shape::shapeInfoLength(rank) - 1] = 99; return; } } // now check if strides are contiguous in respect to f-order // firstly check first stride, it should be equal to 1 if(strides[0] == 1 || shape[0] == 1) { // first dimension is ok, go on through the rest dimensions Nd4jLong correctStride = 1; bool fContiguous = true; for (int i = 1; i < rank; ++i) { correctStride *= shape[i - 1]; if(shape[i] == 1) continue; if(correctStride != strides[i]) { fContiguous = false; break; } } if(fContiguous) { *ews = 1; shapeInfo[shape::shapeInfoLength(rank) - 1] = 102; return; } } *ews = 0; // if both cContiguous and fContiguous are false then order is preserved } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD void calcSubArrShapeAndOffsets(const Nd4jLong* wholeShapeInfo, const Nd4jLong numOfSubArrs, const int dimsSize, const int* dimsToExclude, Nd4jLong* subArrShapeInfo, Nd4jLong* subArrOffsets, bool keepUnitiesInShape) { const int rank = shape::rank(wholeShapeInfo); if(dimsSize == rank || dimsSize == 0) { // means there is one sub-array and it coincides with whole array, return copy of wholeShapeInfo and one zero offset in this case memcpy(subArrShapeInfo, wholeShapeInfo, shape::shapeInfoLength(rank) * sizeof(Nd4jLong)); *subArrOffsets = 0; return; } Nd4jLong *outShapeInfo = new Nd4jLong[shape::shapeInfoLength(wholeShapeInfo)]; memcpy(outShapeInfo, 
wholeShapeInfo, shape::shapeInfoByteLength(wholeShapeInfo)); Nd4jLong* shape = new Nd4jLong[dimsSize]; Nd4jLong* strides = new Nd4jLong[dimsSize]; const int subArrRank = keepUnitiesInShape ? rank : rank - dimsSize; Nd4jLong* shapeNoUnities = nullptr; if(!keepUnitiesInShape) shapeNoUnities = new Nd4jLong[subArrRank]; Nd4jLong subArrLen = 1; for(int k = subArrRank - 1, j = dimsSize - 1, i = rank - 1; i >= 0; --i) { if(j >= 0 && i == dimsToExclude[j]) { strides[j] = shape::stride(outShapeInfo)[i]; shape[j--] = shape::shapeOf(outShapeInfo)[i]; shape::shapeOf(outShapeInfo)[i] = 1; } else { subArrLen *= shape::shapeOf(outShapeInfo)[i]; if(!keepUnitiesInShape) shapeNoUnities[k--] = shape::shapeOf(outShapeInfo)[i]; } } // evaluate ews shape::setEws(outShapeInfo, subArrLen); // calculation of sub-array offsets (subArrOffsets) shape::calcOffsets(dimsSize, shape, strides, subArrOffsets); // remove unities from outShapeInfo if required if(!keepUnitiesInShape) { shape::reshapeC(rank, outShapeInfo, subArrRank, shapeNoUnities, subArrShapeInfo); delete []shapeNoUnities; } else memcpy(subArrShapeInfo, outShapeInfo, shape::shapeInfoLength(subArrRank) * sizeof(Nd4jLong)); delete []strides; delete []shape; delete []outShapeInfo; } ////////////////////////////////////////////////////////////////////// INLINEDEF void _CUDA_HD index2coords(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong *coords, const char order) { Nd4jLong arrLen = shape::prodLong(shape, rank); shape::index2coords(rank, shape, index, arrLen, coords, order); } INLINEDEF void _CUDA_HD index2coords(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong arrLen, Nd4jLong *coords, const char order) { if(order == 'c') { for(int i = 0; i < rank; i++) { arrLen /= shape[i]; if(arrLen > 0 && shape[i] > 1) { coords[i] = index / arrLen; index %= arrLen; } else coords[i] = 0; } } else { for(int i = rank - 1; i >= 0; i--) { arrLen /= shape[i]; if(arrLen > 0 && shape[i] > 1) { coords[i] = index / arrLen; index 
%= arrLen; } else coords[i] = 0; } } } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD void calcOffsets(const Nd4jLong *xShapeInfo, Nd4jLong*& xOffsets, const Nd4jLong *yShapeInfo, Nd4jLong*& yOffsets, const Nd4jLong* zShapeInfo, Nd4jLong*& zOffsets, const char order) { // we assume all array have same length const Nd4jLong len = shape::length(xShapeInfo); const Nd4jLong xEws = shape::elementWiseStride(xShapeInfo); const Nd4jLong yEws = shape::elementWiseStride(yShapeInfo); const Nd4jLong zEws = shape::elementWiseStride(zShapeInfo); const char xOrder = shape::order(xShapeInfo); const char yOrder = shape::order(yShapeInfo); const char zOrder = shape::order(zShapeInfo); const bool shapesSame = shape::shapeEquals(xShapeInfo, yShapeInfo, zShapeInfo); if (xEws == 1 && yEws == 1 && zEws == 1 && xOrder == yOrder && xOrder == zOrder && (xOrder == 'c' || shapesSame)) { xOffsets = yOffsets = zOffsets = nullptr; } else if(xEws == 1 && yEws == 1 && xOrder == yOrder && (xOrder == 'c' || shape::shapeEquals(xShapeInfo, yShapeInfo))) { xOffsets = yOffsets = nullptr; zOffsets = new Nd4jLong[len]; shape::calcOffsets(zShapeInfo, zOffsets, xOrder); } else if(xEws == 1 && zEws == 1 && xOrder == zOrder && (xOrder == 'c' || shape::shapeEquals(xShapeInfo, zShapeInfo))) { xOffsets = zOffsets = nullptr; yOffsets = new Nd4jLong[len]; shape::calcOffsets(yShapeInfo, yOffsets, xOrder); } else if(yEws == 1 && zEws == 1 && yOrder == zOrder && (yOrder == 'c' || shape::shapeEquals(yShapeInfo, zShapeInfo))) { yOffsets = zOffsets = nullptr; xOffsets = new Nd4jLong[len]; shape::calcOffsets(xShapeInfo, xOffsets, yOrder); } else if(xEws == 1) { xOffsets = nullptr; #pragma omp parallel sections { #pragma omp section { yOffsets = new Nd4jLong[len]; shape::calcOffsets(yShapeInfo, yOffsets, xOrder); } #pragma omp section { zOffsets = new Nd4jLong[len]; shape::calcOffsets(zShapeInfo, zOffsets, xOrder); } } } else if(yEws == 1) { yOffsets = nullptr; #pragma omp 
parallel sections { #pragma omp section { xOffsets = new Nd4jLong[len]; shape::calcOffsets(xShapeInfo, xOffsets, yOrder); } #pragma omp section { zOffsets = new Nd4jLong[len]; shape::calcOffsets(zShapeInfo, zOffsets, yOrder); } } } else if(zEws == 1) { zOffsets = nullptr; #pragma omp parallel sections { #pragma omp section { xOffsets = new Nd4jLong[len]; shape::calcOffsets(xShapeInfo, xOffsets, zOrder); } #pragma omp section { yOffsets = new Nd4jLong[len]; shape::calcOffsets(yShapeInfo, yOffsets, zOrder); } } } else if(shape::haveSameShapeAndStrides(xShapeInfo, yShapeInfo, zShapeInfo)) { xOffsets = new Nd4jLong[len]; shape::calcOffsets(xShapeInfo, xOffsets); yOffsets = zOffsets = xOffsets; } else if(shape::haveSameShapeAndStrides(xShapeInfo, yShapeInfo)) { #pragma omp parallel sections { #pragma omp section { xOffsets = new Nd4jLong[len]; shape::calcOffsets(xShapeInfo, xOffsets); } #pragma omp section { zOffsets = new Nd4jLong[len]; shape::calcOffsets(zShapeInfo, zOffsets); } } yOffsets = xOffsets; } else if(shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo)) { #pragma omp parallel sections { #pragma omp section { xOffsets = new Nd4jLong[len]; shape::calcOffsets(xShapeInfo, xOffsets); } #pragma omp section { yOffsets = new Nd4jLong[len]; shape::calcOffsets(yShapeInfo, yOffsets); } } zOffsets = xOffsets; } else { #pragma omp parallel sections { #pragma omp section { xOffsets = new Nd4jLong[len]; shape::calcOffsets(xShapeInfo, xOffsets); } #pragma omp section { yOffsets = new Nd4jLong[len]; shape::calcOffsets(yShapeInfo, yOffsets); } #pragma omp section { zOffsets = new Nd4jLong[len]; shape::calcOffsets(zShapeInfo, zOffsets); } } } } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD void calcOffsets(const Nd4jLong *xShapeInfo, Nd4jLong*& xOffsets, const Nd4jLong *yShapeInfo, Nd4jLong*& yOffsets, const char order) { // we assume all array have same length const Nd4jLong len = shape::length(xShapeInfo); const Nd4jLong xEws = 
shape::elementWiseStride(xShapeInfo); const Nd4jLong yEws = shape::elementWiseStride(yShapeInfo); const char xOrder = shape::order(xShapeInfo); const char yOrder = shape::order(yShapeInfo); const bool shapesSame = shape::shapeEquals(xShapeInfo, yShapeInfo); if (xEws == 1 && yEws == 1 && xOrder == yOrder && (xOrder == 'c' || shapesSame)) { xOffsets = yOffsets = nullptr; } else if(xEws == 1) { xOffsets = nullptr; yOffsets = new Nd4jLong[len]; shape::calcOffsets(yShapeInfo, yOffsets, xOrder); } else if(yEws == 1) { yOffsets = nullptr; xOffsets = new Nd4jLong[len]; shape::calcOffsets(xShapeInfo, xOffsets, yOrder); } else if(shape::haveSameShapeAndStrides(xShapeInfo, yShapeInfo)) { xOffsets = new Nd4jLong[len]; shape::calcOffsets(xShapeInfo, xOffsets); yOffsets = xOffsets; } else { #pragma omp parallel sections { #pragma omp section { xOffsets = new Nd4jLong[len]; shape::calcOffsets(xShapeInfo, xOffsets); } #pragma omp section { yOffsets = new Nd4jLong[len]; shape::calcOffsets(yShapeInfo, yOffsets); } } } } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD void insertDimension(const int rank, Nd4jLong *shape, const Nd4jLong axis, const Nd4jLong dimension) { for (int i = rank; i > axis; --i) shape[i] = shape[i - 1]; shape[axis] = dimension; } ////////////////////////////////////////////////////////////////////// INLINEDEF _CUDA_HD void eraseDimension(const int rank, Nd4jLong *shape, const Nd4jLong axis) { for (int i = axis; i < rank - 1; ++i) shape[i] = shape[i + 1]; } } #endif /* SHAPE_H_ */
/* ==== begin file: ompBFS.c ==== */
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <unistd.h> #include <string.h> #include <time.h> #include <sys/types.h> #include <sys/stat.h> #include <omp.h> #define min(x,y) ((x) < (y) ? (x) : (y)) #define max(x,y) ((x) > (y) ? (x) : (y)) //graph500 #define Efactor 16 #define A 0.57 #define B 0.19 #define C 0.19 #define WORD 6 //64 bits, 2^6 #define MASK 0x0000003F //6 1's #define MILLION 1000000L uint32_t verbose = 0; uint32_t numT; //number of threads uint32_t *prefixSum; //offsets for threads to write in sparse uint64_t seed; //first seed for pseudorandom number generators #define GAP 16807 //7^5 between each subsequent seed struct drand48_data *rndBuf; //one buffer per thread //graph data structure uint32_t scale; //graph500 uint32_t n, m, bitN; //num vertices, num edges, bitN = n >> WORD uint32_t *csr, *idxCSR, *tmpCSR; //compressed sparse row uint32_t *edgeU, *edgeV, *deg, **neighbor, *idx, *newID; uint64_t *hubs; //adjacency to top 64 hub vertices //BFS data structure enum direction { TopDown1, BottomUp, BU2TD, TopDown2 }; uint32_t layerSize, layerDeg; uint32_t *sparse, sparseSize, *newSparse, newSparseSize; uint32_t **mySparse, *mySparseSize; uint64_t *dense, *newDense; int *parent; //-1 if unvisited uint32_t root; //current root of BFS uint32_t roots[64]; //64 randomly selected roots for BFS //statistics for 64 runs of BFS float cm[64], runTime[64], teps[64], statistics[7]; uint64_t oneBit[64]; //oneBit[0] 0001 //oneBit[1] 0010 //oneBit[2] 0100 //oneBit[3] 1000 /* https://graph500.org/?page_id=12#sec-3 */ void generate(void) { uint32_t i, j, shift, bit, tid; double ab, aNorm, cNorm, rnd; ab = A + B; aNorm = A / ab; cNorm = C / (1 - ab); for (i = 0; i < m; i++) edgeU[i] = edgeV[i] = 0; //zero-based indexing #pragma omp parallel for private(i,shift,tid,bit,rnd) for (j = 0; j < m; j++) { tid = omp_get_thread_num(); for (i = 0; i < scale; i++) { shift = (i == 0) ? 
1 : (1 << i); drand48_r(&rndBuf[tid], &rnd); bit = rnd > ab; edgeU[j] += bit * shift; drand48_r(&rndBuf[tid], &rnd); bit = rnd > (bit ? cNorm : aNorm); edgeV[j] += bit * shift; } } } void init(void) { uint32_t i; n = 1 << scale; m = n * Efactor; bitN = n >> WORD; rndBuf = NULL; parent = NULL; neighbor = NULL; mySparse = NULL; dense = newDense = NULL; sparse = newSparse = mySparseSize = NULL; deg = edgeU = edgeV = idx = newID = tmpCSR = csr = idxCSR = NULL; posix_memalign((void **)&parent, 64, sizeof(int) * n); posix_memalign((void **)&csr, 64, sizeof(uint32_t) * m * 2); posix_memalign((void **)&idxCSR, 64, sizeof(uint32_t) * (n + 1)); posix_memalign((void **)&dense, 64, n >> 3); // n/8 bytes posix_memalign((void **)&newDense, 64, n >> 3); // n/8 bytes posix_memalign((void **)&sparse, 64, sizeof(uint32_t) * n); posix_memalign((void **)&newSparse, 64, sizeof(uint32_t) * n); posix_memalign((void **)&mySparse, 64, sizeof(uint32_t*) * numT); posix_memalign((void **)&mySparseSize, 64, sizeof(uint32_t) * numT); posix_memalign((void **)&prefixSum, 64, sizeof(uint32_t) * (numT + 1)); if (parent == NULL || csr == NULL || idxCSR == NULL || dense == NULL || newDense == NULL || sparse == NULL || newSparse == NULL || mySparse == NULL || mySparseSize == NULL || prefixSum == NULL) { fprintf(stderr, "out of memory\n"); exit(0); } for (i = 0; i < numT; i++) { posix_memalign((void **) &mySparse[i], 64, sizeof(uint32_t) * (n >> 4)); if (mySparse[i] == NULL) { fprintf(stderr, "out of memory\n"); exit(0); } } posix_memalign((void **)&rndBuf, 64, sizeof(struct drand48_data) * numT); posix_memalign((void **)&tmpCSR, 64, sizeof(uint32_t) * m * 2); posix_memalign((void **)&deg, 64, sizeof(uint32_t) * n); posix_memalign((void **)&idx, 64, sizeof(uint32_t) * n); posix_memalign((void **)&newID, 64, sizeof(uint32_t) * n); posix_memalign((void **)&neighbor, 64, sizeof(uint32_t*) * n); posix_memalign((void **)&edgeU, 64, sizeof(uint32_t) * m); posix_memalign((void **)&edgeV, 64, sizeof(uint32_t) 
* m); if (rndBuf == NULL || tmpCSR == NULL || deg == NULL || idx == NULL || newID == NULL || neighbor == NULL || edgeU == NULL || edgeV == NULL) { fprintf(stderr, "out of memory\n"); exit(0); } srand48_r(seed, &rndBuf[0]); for (i = 1; i < numT; i++) srand48_r(seed + i * GAP, &rndBuf[i]); oneBit[0] = 1; for (i = 1; i < 64; i++) oneBit[i] = oneBit[i-1] << 1; } //ascending int cmpID(const void *a, const void *b) { return *(uint32_t*) a - *(uint32_t*) b; } //descending int cmpDeg(const void *a, const void *b) { return deg[*(uint32_t*) b] - deg[*(uint32_t*) a]; } //descending degree, break tie by having a higher degree neighbor int degBrkTie(const void *a, const void *b) { uint32_t u, v; u = *(uint32_t*) a; v = *(uint32_t*) b; if (deg[u] > deg[v]) return -1; if (deg[u] < deg[v]) return 1; if (hubs[u] > hubs[v]) return -1; if (hubs[u] < hubs[v]) return 1; return 0; } //ascending int cmpNewID(const void *a, const void *b) { return newID[*(uint32_t*) a] - newID[*(uint32_t*) b]; } void preprocessing(void) { uint32_t i, j, k, u, v, chunk, start, end; double rnd; chunk = m / numT; #pragma omp parallel for private(j,u,v,start,end) for (i = 0; i < numT; i++) { start = i * chunk; end = (i == numT - 1) ? m : (start + chunk); for (j = 0; j < m; j++) { u = edgeU[j], v = edgeV[j]; if (start <= u && u < end) deg[u]++; if (start <= v && v < end) deg[v]++; } } idxCSR[0] = 0; for (i = 0; i < n; i++) { neighbor[i] = &tmpCSR[ idxCSR[i] ]; idxCSR[i + 1] = idxCSR[i] + deg[i]; deg[i] = 0; idx[i] = i; } #pragma omp parallel for private(j,u,v,start,end) for (i = 0; i < numT; i++) { start = i * chunk; end = (i == numT - 1) ? 
m : (start + chunk); for (j = 0; j < m; j++) { u = edgeU[j], v = edgeV[j]; //symmetry if (start <= u && u < end) neighbor[u] [ deg[u]++ ] = v; if (start <= v && v < end) neighbor[v] [ deg[v]++ ] = u; } } free(edgeV); free(edgeU); //remove self-loops and parallel edges #pragma omp parallel for private(j,k) schedule(guided) for (i = 0; i < n; i++) { if (deg[i] <= 1) continue; qsort((void *)neighbor[i], deg[i], sizeof(uint32_t), cmpID); for (j = k = 0; j < deg[i]; j++) { if (neighbor[i] [j] == i) //self-loop continue; if (k > 0 && neighbor[i] [j] == neighbor[i] [k - 1]) //parallel edges continue; neighbor[i] [k++] = neighbor[i] [j]; } deg[i] = k; } posix_memalign((void **)&hubs, 64, sizeof(uint64_t) * n); for (i = 0; i < n; i++) hubs[i] = 0; //sort by nonincreasing degrees qsort((void *)idx, n, sizeof(uint32_t), cmpDeg); for (i = 0; i < 64; i++) { for (j = 0; j < deg[ idx[i] ]; j++) { u = neighbor[ idx[i] ] [j]; hubs[u] = hubs[u] | oneBit[64 - i - 1]; } } //sort by nonincreasing degrees, break tie qsort((void *)idx, n, sizeof(uint32_t), degBrkTie); #pragma omp parallel for for (i = 0; i < n; i++) newID[ idx[i] ] = i; //sort adjacency lists by newIDs #pragma omp parallel for for (i = 0; i < n; i++) if (deg[i] > 1) qsort((void *)neighbor[i], deg[i], sizeof(uint32_t), cmpNewID); idxCSR[0] = 0; for (i = 0; i < n; i++) idxCSR[i + 1] = idxCSR[i] + deg[ idx[i] ]; m = idxCSR[n]; //remove degree-0 vertices while (deg[ idx[n-1] ] == 0) n--; //round up n to the next multiple of 512 if (n & 0x000001FF) n = ((n >> 9) + 1) << 9; for (i = 0, k = 0; i < n; i++) for (j = 0; j < deg[ idx[i] ]; j++) csr[k++] = newID[ neighbor[ idx[i] ] [j] ]; for (i = 0; i < 64; i++) { //pick 64 different roots for BFS randomly while (( 1 )) { drand48_r(&rndBuf[0], &rnd); roots[i] = floor(rnd * n); if (deg[ idx[ roots[i] ] ] == 0) continue; for (j = 0; j < i; j++) if (roots[i] == roots[j]) break; if (i == j) break; } } free(neighbor); free(newID); free(idx); free(deg); free(tmpCSR); free(rndBuf); } void 
topDown(void) { uint32_t tid, i, j, u, v, outDeg = 0; #pragma vector aligned for (i = 0; i < numT; i++) mySparseSize[i] = 0; if (layerSize == 1) { u = sparse[0]; #pragma omp parallel for private(tid,v) reduction(+:outDeg) schedule(guided) for (i = idxCSR[u]; i < idxCSR[u + 1]; i++) { tid = omp_get_thread_num(); v = csr[i]; if (parent[v] == -1) { parent[v] = u; mySparse[tid] [ mySparseSize[tid]++ ] = v; outDeg += idxCSR[v + 1] - idxCSR[v]; } } } else { //layerSize > 1 #pragma omp parallel for private(tid,j,u,v) reduction(+:outDeg) \ schedule(guided) for (i = 0; i < layerSize; i++) { tid = omp_get_thread_num(); u = sparse[i]; for (j = idxCSR[u]; j < idxCSR[u + 1]; j++) { v = csr[j]; if (parent[v] == -1) { //race condition, multiple copies of v may be added parent[v] = u; mySparse[tid] [ mySparseSize[tid]++ ] = v; outDeg += idxCSR[v + 1] - idxCSR[v]; } } } } layerDeg = outDeg; prefixSum[0] = 0; for (i = 0; i < numT; i++) prefixSum[i + 1] = prefixSum[i] + mySparseSize[i]; #pragma omp parallel for private(tid,j) for (i = 0; i < numT; i++) { tid = omp_get_thread_num(); for (j = 0; j < mySparseSize[tid]; j++) newSparse[prefixSum[tid] + j] = mySparse[tid] [j]; } layerSize = prefixSum[numT]; } void td2bu(void) { uint32_t i, j, u, w, o, chunk, rem, start, end; #pragma vector aligned chunk = (n >> 9) / numT; rem = (n >> 9) % numT; #pragma omp parallel for for (i = 0; i < bitN; i++) dense[i] = 0; #pragma omp parallel for private(start,end,j,u,w,o) schedule(static,1) for (i = 0; i < numT; i++) { start = i * chunk; end = start + chunk; if (rem) { if (i < rem) start += i, end += i + 1; else start += rem, end += rem; } start <<= 9, end <<= 9; for (j = 0; j < layerSize; j++) { u = sparse[j]; if (u < start || u >= end) continue; w = u >> WORD; //64-bit word number o = u & MASK; //offset within word dense[w] = dense[w] | oneBit[o]; } } } void bottomUp2TopDown(void) { uint32_t tid, i, j, k, a, d, e, u, v; uint32_t *ptr32; uint16_t *ptr16; uint8_t *ptr8, mask; #pragma vector aligned 
for (i = 0; i < numT; i++) mySparseSize[i] = 0; #pragma omp parallel for private(tid,j,k,u,v,a,d,e,ptr32,ptr16,ptr8,mask) \ schedule(guided) for (i = 0; i < bitN; i++) { tid = omp_get_thread_num(); if (dense[i] == 0) continue; //dense[i] is all zeros ptr32 = (uint32_t*) &dense[i]; for (j = 0; j < 2; j++) { if (ptr32[j] == 0) continue; ptr16 = (uint16_t*) &ptr32[j]; for (k = 0; k < 2; k++) { if (ptr16[k] == 0) continue; ptr8 = (uint8_t*) &ptr16[k]; for (a = 0; a < 2; a++) { if (ptr8[a] == 0) continue; mask = ptr8[a]; d = 0; while (mask) { if (mask & 1) { u = (i << 6) + (j << 5) + (k << 4) + (a << 3) + d; //visit vertex u for (e = idxCSR[u]; e < idxCSR[u + 1]; e++) { v = csr[e]; if (parent[v] == -1) { //race condition, multiple copies of v may be added parent[v] = u; mySparse[tid] [ mySparseSize[tid]++ ] = v; } } } mask >>= 1; d++; } } } } } prefixSum[0] = 0; for (i = 0; i < numT; i++) prefixSum[i + 1] = prefixSum[i] + mySparseSize[i]; #pragma omp parallel for private(tid,j) for (i = 0; i < numT; i++) { tid = omp_get_thread_num(); for (j = 0; j < mySparseSize[tid]; j++) newSparse[prefixSum[tid] + j] = mySparse[tid] [j]; } layerSize = prefixSum[numT]; layerDeg = 0; //no longer needed } void bottomUp(void) { uint32_t i, j, u, w, o, numV = 0, outDeg = 0; uint64_t tmp; #pragma vector aligned #pragma omp parallel for for (i = 0; i < bitN; i++) newDense[i] = 0; #pragma omp parallel for private(j,u,w,o,tmp) reduction(+:numV,outDeg) \ schedule(dynamic,512) for (i = 0; i < n; i++) { if (parent[i] != -1) continue; for (j = idxCSR[i]; j < idxCSR[i + 1]; j++) { u = csr[j]; //u has an edge going into vertex i w = u >> WORD; //64-bit word number o = u & MASK; //offset within word tmp = dense[w] & oneBit[o]; //Is u in the layer? if (tmp == 0) //No. 
continue; //Add vertex i to the new layer parent[i] = u; w = i >> WORD; //64-bit word number o = i & MASK; //offset within word newDense[w] = newDense[w] | oneBit[o]; outDeg += idxCSR[i+1] - idxCSR[i]; numV++; break; } } layerSize = numV; layerDeg = outDeg; } //reset BFS data structure void reset(void) { uint32_t i; for (i = 0; i < n; i++) parent[i] = -1; parent[root] = root; sparse[0] = root; layerSize = 1; layerDeg = idxCSR[root+1] - idxCSR[root]; } void bfs(void) { enum direction dir = TopDown1, nextDir; uint32_t numIter = 0, *tmpPtr; uint64_t *ptr64; while (( 1 )) { numIter++; if (verbose) printf("iter %u: s %u dir %u\n", numIter, layerSize, dir); if (dir == TopDown1 || dir == TopDown2) topDown(); else if (dir == BottomUp) bottomUp(); else { //dir == BU2TD bottomUp2TopDown(); dir = TopDown2; } if (layerSize == 0) break; //we are done if (dir == TopDown1) { tmpPtr = sparse, sparse = newSparse, newSparse = tmpPtr; if (layerSize <= n >> 5 && layerDeg <= n >> 5) nextDir = TopDown1; else { nextDir = BottomUp; td2bu(); } } else if (dir == BottomUp) { ptr64 = dense, dense = newDense, newDense = ptr64; if (layerSize <= n >> 5 && layerDeg <= n >> 5) nextDir = BU2TD; else nextDir = BottomUp; } else { //dir == TopDown2 tmpPtr = sparse, sparse = newSparse, newSparse = tmpPtr; nextDir = TopDown2; } dir = nextDir; } } int cmpFloat(const void *a, const void *b) { if (*(float*) a < *(float*) b) return -1; if (*(float*) a > *(float*) b) return 1; return 0; } void calcStat(float data[]) { float sum, mean, std; uint32_t i; qsort((void *)data, 64, sizeof(float), cmpFloat); statistics[0] = data[0]; statistics[1] = data[15]; statistics[2] = data[31]; statistics[3] = data[47]; statistics[4] = data[63]; sum = 0.0; for (i = 0; i < 64; i++) sum += data[i]; mean = sum / 64.0; sum = 0.0; for (i = 0; i < 64; i++) sum += (data[i] - mean) * (data[i] - mean); std = sqrt(sum / 63.0); statistics[5] = mean; statistics[6] = std; } void output(float kernel1) { float sum, hMean, hSTD; uint32_t i; 
printf("SCALE: %u\n", scale); printf("NBFS: 64\n"); printf("construction_time: %20.17e\n", kernel1); calcStat(runTime); printf("bfs_min_time: %20.17e\n", statistics[0]); printf("bfs_firstquartile_time: %20.17e\n", statistics[1]); printf("bfs_median_time: %20.17e\n", statistics[2]); printf("bfs_thirdquartile_time: %20.17e\n",statistics[3]); printf("bfs_max_time: %20.17e\n", statistics[4]); printf("bfs_mean_time: %20.17e\n", statistics[5]); printf("bfs_stddev_time: %20.17e\n", statistics[6]); calcStat(cm); printf ("bfs_min_nedge: %20.17e\n", statistics[0]); printf ("bfs_firstquartile_nedge: %20.17e\n", statistics[1]); printf ("bfs_median_nedge: %20.17e\n", statistics[2]); printf ("bfs_thirdquartile_nedge: %20.17e\n", statistics[3]); printf ("bfs_max_nedge: %20.17e\n", statistics[4]); printf ("bfs_mean_nedge: %20.17e\n", statistics[5]); printf ("bfs_stddev_nedge: %20.17e\n", statistics[6]); calcStat(teps); sum = 0.0; for (i = 0; i < 64; i++) sum += 1.0 / teps[i]; hMean = 1.0 / (sum / 64.0); sum = 0.0; for (i = 0; i < 64; i++) sum += (1.0 / teps[i] - 1.0 / hMean) * (1.0 / teps[i] - 1.0 / hMean); hSTD = (sqrt(sum) / 63.0) * hMean * hMean; statistics[5] = hMean; statistics[6] = hSTD; printf ("bfs_min_TEPS: %20.17e\n", statistics[0]); printf ("bfs_firstquartile_TEPS: %20.17e\n", statistics[1]); printf ("bfs_median_TEPS: %20.17e\n", statistics[2]); printf ("bfs_thirdquartile_TEPS: %20.17e\n", statistics[3]); printf ("bfs_max_TEPS: %20.17e\n", statistics[4]); printf ("bfs_harmonic_mean_TEPS: %20.17e\n", statistics[5]); printf ("bfs_harmonic_stddev_TEPS: %20.17e\n", statistics[6]); } int main(int argc, char* argv[]) { float kernel1, sec, minimum; struct timeval start, stop; uint32_t i, j, cntM; int c; seed = 1; scale = 22; numT = omp_get_max_threads(); while ((c = getopt(argc, argv, "d:s:t:v:")) != -1) { switch (c) { case 'd': sscanf(optarg, "%lu", &seed); break; //seed srand48_r case 's': sscanf(optarg, "%u", &scale); break; //scale, n = 2^s case 't': sscanf(optarg, "%u", 
&numT); break; case 'v': sscanf(optarg, "%u", &verbose); break; default: break; } } if (scale < 10) { printf("scale %u is too small\n", scale); return 0; } if (numT < 2 || numT > omp_get_max_threads()) numT = omp_get_max_threads(); omp_set_num_threads(numT); init(); printf("scale %u, n %u, m %u, seed %lu\n", scale, n, m, seed); gettimeofday(&start, NULL); generate(); gettimeofday(&stop, NULL); sec = (stop.tv_sec - start.tv_sec) + (stop.tv_usec - start.tv_usec) / (float)MILLION; printf("time for generating the edge list: %.6f sec\n", sec); gettimeofday(&start, NULL); preprocessing(); gettimeofday(&stop, NULL); sec = (stop.tv_sec - start.tv_sec) + (stop.tv_usec - start.tv_usec) / (float)MILLION; printf("time for preprocessing: %.6f sec\n", sec); kernel1 = sec; //run BFS from 64 randomly selected roots for (i = 0; i < 64; i++) { root = roots[i]; reset(); gettimeofday(&start, NULL); bfs(); gettimeofday(&stop, NULL); sec = (stop.tv_sec - start.tv_sec) + (stop.tv_usec - start.tv_usec) / (float)MILLION; minimum = sec; cntM = 0; for (j = 0; j < n; j++) if (parent[j] != -1) { cntM += idxCSR[j + 1] - idxCSR[j]; } cm[i] = cntM / 2; //each edge is counted twice for (j = 0; j < 30; j++) { reset(); gettimeofday(&start, NULL); bfs(); gettimeofday(&stop, NULL); sec = (stop.tv_sec - start.tv_sec) + (stop.tv_usec - start.tv_usec) / (float)MILLION; minimum = min(minimum, sec); } runTime[i] = minimum; teps[i] = cm[i] / runTime[i]; if (verbose) printf("%9u cm %.0f %.6f %.2f\n", root, cm[i], runTime[i], cm[i] / (runTime[i] * 1000000000)); } printf("\n"); output(kernel1); printf("\n"); return 0; }
GB_binop__first_fc64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// This instantiation is for the FIRST operator on GxB_FC64_t (double
// complex): z = first(x,y) = x, so the B operand is never read.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):        GB (_AaddB__first_fc64)
// A.*B function (eWiseMult):      GB (_AemultB_08__first_fc64)
// A.*B function (eWiseMult):      GB (_AemultB_02__first_fc64)
// A.*B function (eWiseMult):      GB (_AemultB_04__first_fc64)
// A.*B function (eWiseMult):      GB (_AemultB_bitmap__first_fc64)
// A*D function (colscale):        GB (_AxD__first_fc64)
// D*A function (rowscale):        GB (_DxB__first_fc64)
// C+=B function (dense accum):    GB (_Cdense_accumB__first_fc64)
// C+=b function (dense accum):    GB (_Cdense_accumb__first_fc64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3):  GB (_Cdense_ewise3_noaccum__first_fc64)
// C=scalar+B                      GB ((none))
// C=scalar+B'                     GB ((none))
// C=A+scalar                      GB ((none))
// C=A'+scalar                     GB ((none))

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = aij

#define GB_ATYPE \
    GxB_FC64_t

#define GB_BTYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    GxB_FC64_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
// (expands to an empty statement: FIRST never reads its B operand)
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: z = first(x,y) = x
#define GB_BINOP(z,x,y,i,j) \
    z = x ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* flags come from GB_control.h; when set, every kernel below
// compiles to "return (GrB_NO_VALUE)")
#define GB_DISABLE \
    (GxB_NO_FIRST || GxB_NO_FC64 || GxB_NO_FIRST_FC64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__first_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__first_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body disabled by the generator for this operator; succeeds as a no-op
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__first_fc64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body disabled by the generator for this operator; succeeds as a no-op
    #if 0
    {
        // get the scalar b for C += b, of type GxB_FC64_t
        GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__first_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    bool A_is_pattern,
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__first_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    bool D_is_pattern,
    const GrB_Matrix B,
    bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__first_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__first_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__first_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__first_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__first_fc64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// (not generated for FIRST: first(x,bij) = x is a constant, handled elsewhere)
#if 0
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
    GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        // the two empty statements below are the generator's expansions of
        // GB_GETA/GB_GETB, which are unused for FIRST
        ;
        ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        GxB_FC64_t aij = GBX (Ax, p, false) ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)   \
{                           \
    ; ;                     \
    Cx [pC] = x ;           \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC64_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = aij ;                             \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
dense.c
/* Copyright (c) 2015-2016, 2021 Drew Schmidt All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <math.h> #include <stdlib.h> #include <string.h> #include "utils/safeomp.h" #include "coop.h" #include "utils/fill.h" #include "utils/inverse.h" #include "utils/mmult.h" #include "utils/scale.h" #include "utils/sumstats.h" #include "utils/xpose.h" // --------------------------------------------- // Cosine // --------------------------------------------- /** * @brief * Compute the cosine similarity matrix of a matrix. This is * all pair-wise vector cosine similarities of the columns. * * @details * The implementation is dominated by a symmetric rank-k update * via the BLAS function dsyrk(). 
 *
 * @param inv
 * Invert after computing?  (NOTE(review): the original comment documented a
 * nonexistent "trans" parameter; the actual first argument is `inv`.)
 * @param m,n
 * The number of rows/columns of the input matrix x.
 * @param x
 * The input mxn matrix.
 * @param cos
 * The output nxn matrix.
 */
int coop_cosine_mat(const bool inv, const int m, const int n, const double *const restrict x, double *restrict cos)
{
  int ncols;
  ncols = n;
  
  // cos = t(x) %*% x, then normalize columns in place
  crossprod(m, n, 1.0, x, m, cos);
  int ret = cosim_fill(ncols, cos);
  CHECKRET(ret);
  
  if (inv)
  {
    // cosine matrix is symmetric positive (semi)definite: Cholesky-based inverse
    ret = inv_sym_chol(ncols, cos);
    CHECKRET(ret);
  }
  
  // only one triangle was filled; mirror it
  symmetrize(ncols, cos);
  return COOP_OK;
}



// Same as coop_cosine_mat but over the rows of x, i.e. cosine(t(x)); the
// output is mxm.
int coop_tcosine_mat(const bool inv, const int m, const int n, const double *const restrict x, double *restrict cos)
{
  int ncols;
  ncols = m;
  
  // cos = x %*% t(x)
  tcrossprod(m, n, 1.0, x, m, cos);
  int ret = cosim_fill(ncols, cos);
  CHECKRET(ret);
  
  if (inv)
  {
    ret = inv_sym_chol(ncols, cos);
    CHECKRET(ret);
  }
  
  symmetrize(ncols, cos);
  return COOP_OK;
}



// Pairwise cosine similarities between the columns of x and the columns of y.
// cos - nx x ny
int coop_cosine_matmat(const bool inv, const int m, const int nx, const double *const restrict x, const int ny, const double *const restrict y, double *restrict cos)
{
  // cos = t(x) %*% y
  matmult(true, false, 1.0, m, nx, x, m, ny, y, cos);
  
  // squared column norms of x and y
  double *diagx = malloc(nx * sizeof(*diagx));
  double *diagy = malloc(ny * sizeof(*diagy));
  // NOTE(review): if the second malloc fails, diagx is presumably leaked —
  // depends on what CHECKMALLOC does on failure; verify against coop.h
  CHECKMALLOC(diagx);
  CHECKMALLOC(diagy);
  
  SAFE_FOR_SIMD
  for (int i=0; i<nx; i++)
    crossprod(m, 1, 1.0, x+m*i, m, diagx+i);
  
  SAFE_FOR_SIMD
  for (int i=0; i<ny; i++)
    crossprod(m, 1, 1.0, y+m*i, m, diagy+i);
  
  // normalize each entry by the product of the two column norms
  for (int j=0; j<ny; j++)
  {
    for (int i=0; i<nx; i++)
      cos[i + nx*j] /= sqrt(diagx[i] * diagy[j]);
  }
  
  free(diagx);
  free(diagy);
  
  // result is only square (and thus invertible) when nx == ny
  if (inv && nx == ny)
  {
    int ret = inv_gen_lu(nx, cos);
    CHECKRET(ret);
  }
  
  return COOP_OK;
}



// Row-wise variant of coop_cosine_matmat.
// cos - mx x my
int coop_tcosine_matmat(const bool inv, const int mx, const int n, const double *const restrict x, const int my, const double *const restrict y, double *restrict cos)
{
  // cos = x %*% t(y)
  matmult(false, true, 1.0, mx, n, x, my, n, y, cos);
  
  // squared row norms of x and y
  double *diagx = malloc(mx * sizeof(*diagx));
  double *diagy = malloc(my * sizeof(*diagy));
  CHECKMALLOC(diagx);
  CHECKMALLOC(diagy);
  
  SAFE_FOR_SIMD
  for (int i=0; i<mx; i++)
    tcrossprod(1, n, 1.0, x+i, mx, diagx+i);
  
  SAFE_FOR_SIMD
  for (int i=0; i<my; i++)
    tcrossprod(1, n, 1.0, y+i, my, diagy+i);
  
  for (int j=0; j<my; j++)
  {
    for (int i=0; i<mx; i++)
      cos[i + mx*j] /= sqrt(diagx[i] * diagy[j]);
  }
  
  free(diagx);
  free(diagy);
  
  if (inv && mx == my)
  {
    int ret = inv_gen_lu(mx, cos);
    CHECKRET(ret);
  }
  
  return COOP_OK;
}



/**
 * @brief
 * Compute the cosine similarity between two vectors.
 *
 * @details
 * The implementation uses a dgemm() to compute the dot product
 * of x and y, and then two dsyrk() calls to compute the (square of)
 * the norms of x and y.
 *
 * @param n
 * The length of the x and y vectors.
 * @param x,y
 * The input vectors.
 *
 * @return
 * The cosine similarity between the two vectors.
 */
int coop_cosine_vecvec(const int n, const double *const restrict x, const double *const restrict y, double *cos)
{
  double normx, normy;
  
  const double cp = ddot(n, x, y);
  
  // crossprod of a column vector with itself = squared 2-norm
  crossprod(n, 1, 1.0, x, n, &normx);
  crossprod(n, 1, 1.0, y, n, &normy);
  
  *cos = cp / sqrt(normx * normy);
  return COOP_OK;
}



// ---------------------------------------------
//  Pearson Correlation
// ---------------------------------------------

// Shared worker: centers x in place, forms the crossproduct, then frees x.
// NOTE: takes ownership of (and destroys) the caller's copy of x.
static inline int coop_pcor_mat_work(const bool inv, const int m, const int n, double *const restrict x, double *restrict cor)
{
  remove_colmeans(m, n, x);
  crossprod(m, n, 1.0, x, m, cor);
  free(x);
  
  int ret = cosim_fill(n, cor);
  CHECKRET(ret);
  
  if (inv)
  {
    ret = inv_sym_chol(n, cor);
    CHECKRET(ret);
  }
  
  symmetrize(n, cor);
  
  return COOP_OK;
}



/**
 * @brief
 * Compute the pearson correlation matrix.
 *
 * @details
 * The implementation is dominated by a symmetric rank-k update
 * via the BLAS function dsyrk().
 *
 * @param inv
 * Invert after computing?
 * @param m,n
 * The number of rows/columns of the input matrix x.
 * @param x
 * The input mxn matrix.
 * @param cor
 * The output correlation matrix.
*/ int coop_pcor_mat(const bool inv, const int m, const int n, const double * const restrict x, double *restrict cor) { double *x_cp = malloc(m*n*sizeof(*x)); CHECKMALLOC(x_cp); memcpy(x_cp, x, m*n*sizeof(*x)); coop_pcor_mat_work(inv, m, n, x_cp, cor); return COOP_OK; } int coop_tpcor_mat(const bool inv, const int m, const int n, const double * const restrict x, double *restrict cor) { double *x_cp = malloc(m*n*sizeof(*x)); CHECKMALLOC(x_cp); xpose(m, n, x, x_cp); coop_pcor_mat_work(inv, n, m, x_cp, cor); return COOP_OK; } /** * pcor(x, y) * * @brief * Compute the pearson correlation matrix. * * @details * The implementation is dominated by a symmetric rank-k update * via the BLAS function dsyrk(). * * @param inv * Invert after computing? * @param m,n * The number of rows/columns of the input matrix x. * @param x,y * The input matrices. * @param cor * The output correlation matrix. */ // cos - nx x ny int coop_pcor_matmat(const bool inv, const int m, const int nx, const double *const restrict x, const int ny, const double *const restrict y, double *restrict cor) { int ret = 0; double *x_cp = malloc(m*nx * sizeof(*x)); double *y_cp = malloc(m*ny * sizeof(*y)); CHECKMALLOC(x_cp); CHECKMALLOC(y_cp); memcpy(x_cp, x, m*nx*sizeof(*x)); memcpy(y_cp, y, m*ny*sizeof(*y)); scale_nostore(true, true, m, nx, x_cp); scale_nostore(true, true, m, ny, y_cp); const double alpha = 1. 
/ ((double) (m-1)); matmult(true, false, alpha, m, nx, x_cp, m, ny, y_cp, cor); free(x_cp); free(y_cp); if (inv && nx == ny) { int ret = inv_gen_lu(nx, cor); CHECKRET(ret); } return ret; } // cos - mx x my int coop_tpcor_matmat(const bool inv, const int mx, const int n, const double *const restrict x, const int my, const double *const restrict y, double *restrict cor) { int ret = 0; double *x_cp = malloc(mx*n * sizeof(*x)); double *y_cp = malloc(my*n * sizeof(*y)); CHECKMALLOC(x_cp); CHECKMALLOC(y_cp); xpose(mx, n, x, x_cp); xpose(my, n, y, y_cp); scale_nostore(true, true, n, mx, x_cp); scale_nostore(true, true, n, my, y_cp); const double alpha = 1. / ((double) (n-1)); matmult(true, false, alpha, n, mx, x_cp, n, my, y_cp, cor); free(x_cp); free(y_cp); if (inv && mx == my) { int ret = inv_gen_lu(mx, cor); CHECKRET(ret); } return ret; } /** * @brief * Compute the pearson correlation between two vectors. * * @details * The implementation uses a dgemm() to compute the dot product * of x and y, and then two dsyrk() calls to compute the (square of) * the norms of x and y. * * @param n * The length of the x and y vectors. * @param x,y * The input vectors. * * @return * The correlation between the two vectors. 
*/ int coop_pcor_vecvec(const int n, const double *const restrict x, const double *const restrict y, double *restrict cor) { double normx, normy; double *x_minusmean = malloc(n*sizeof(*x)); CHECKMALLOC(x_minusmean); double *y_minusmean = malloc(n*sizeof(*y)); CHECKMALLOC(y_minusmean); const double meanx = mean(n, x); const double meany = mean(n, y); SAFE_PARALLEL_FOR_SIMD for (int i=0; i<n; i++) { x_minusmean[i] = x[i] - meanx; y_minusmean[i] = y[i] - meany; } const double cp = ddot(n, x_minusmean, y_minusmean); crossprod(n, 1, 1.0, x_minusmean, n, &normx); crossprod(n, 1, 1.0, y_minusmean, n, &normy); free(x_minusmean); free(y_minusmean); *cor = cp / sqrt(normx * normy); return COOP_OK; } // --------------------------------------------- // Covariance // --------------------------------------------- static inline int coop_covar_mat_work(const bool inv, const int m, const int n, double *const restrict x, double *restrict cov) { const double alpha = 1. / ((double) (m-1)); remove_colmeans(m, n, x); crossprod(m, n, alpha, x, m, cov); free(x); if (inv) { int ret = inv_sym_chol(n, cov); CHECKRET(ret); } symmetrize(n, cov); return COOP_OK; } /** * @file * @brief Covariance. * * @details * Computes the variance-covariance matrix. Centering is done in-place. * * @param method * Input. The form the covariance matrix takes (pearson, kendall, * spearman). Currently only pearson works. * @param m,n * Inputs. Problem size (dims of x) * @param x * Input. The data matrix. * @param coc * Output. The covariance matrix. * * @return * The return value indicates that status of the function. Non-zero values * are errors. 
*/ int coop_covar_mat(const bool inv, const int m, const int n, const double *const restrict x, double *restrict cov) { double *x_cp = malloc(m*n*sizeof(*x)); CHECKMALLOC(x_cp); memcpy(x_cp, x, m*n*sizeof(*x)); return coop_covar_mat_work(inv, m, n, x_cp, cov); } int coop_tcovar_mat(const bool inv, const int m, const int n, const double *const restrict x, double *restrict cov) { double *x_cp = malloc(m*n*sizeof(*x)); CHECKMALLOC(x_cp); xpose(m, n, x, x_cp); return coop_covar_mat_work(inv, n, m, x_cp, cov); } int coop_covar_matmat(const bool inv, const int m, const int nx, const double *const restrict x, const int ny, const double *const restrict y, double *restrict cov) { int ret = 0; double *x_cp = malloc(m*nx * sizeof(*x)); double *y_cp = malloc(m*ny * sizeof(*y)); CHECKMALLOC(x_cp); CHECKMALLOC(y_cp); memcpy(x_cp, x, m*nx*sizeof(*x)); memcpy(y_cp, y, m*ny*sizeof(*y)); const double alpha = 1. / ((double) (m-1)); //TODO FIXME make tremove_colmeans and use the BLAS more efficiently... remove_colmeans(m, nx, x_cp); remove_colmeans(m, ny, y_cp); matmult(true, false, alpha, m, nx, x_cp, m, ny, y_cp, cov); free(x_cp); free(y_cp); if (inv && nx == ny) { int ret = inv_gen_lu(nx, cov); CHECKRET(ret); } return ret; } int coop_tcovar_matmat(const bool inv, const int mx, const int n, const double *const restrict x, const int my, const double *const restrict y, double *restrict cov) { int ret = 0; double *x_cp = malloc(mx*n * sizeof(*x)); double *y_cp = malloc(mx*n * sizeof(*y)); CHECKMALLOC(x_cp); CHECKMALLOC(y_cp); xpose(mx, n, x, x_cp); xpose(my, n, y, y_cp); const double alpha = 1. / ((double) (n-1)); //TODO FIXME make tremove_colmeans and use the BLAS more efficiently... remove_colmeans(n, mx, x_cp); remove_colmeans(n, my, y_cp); matmult(true, false, alpha, n, mx, x_cp, n, my, y_cp, cov); free(x_cp); free(y_cp); if (inv && mx == my) { int ret = inv_gen_lu(mx, cov); CHECKRET(ret); } return ret; } /** * @brief * Compute the covariance between two vectors. 
* * @details * The implementation uses a dgemm() to compute the dot product * of x and y, and then two dsyrk() calls to compute the (square of) * the norms of x and y. * * @param n * The length of the x and y vectors. * @param x,y * The input vectors. * * @return * The variance of the vectors. */ int coop_covar_vecvec(const int n, const double *const restrict x, const double *const restrict y, double *restrict cov) { const double recip_n = (double) 1. / (n-1); double sum_xy = 0., sum_x = 0., sum_y = 0.; #ifdef OMP_VER_4 #pragma omp simd reduction(+: sum_xy, sum_x, sum_y) #endif for (int i=0; i<n; i++) { const double tx = x[i]; const double ty = y[i]; sum_xy += tx*ty; sum_x += tx; sum_y += ty; } *cov = (sum_xy - (sum_x*sum_y*((double) 1./n))) * recip_n; return COOP_OK; }
atomic_utilities.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi
//                   Denis Demidov
//

#if !defined(KRATOS_ATOMIC_UTILITIES_H_INCLUDED )
#define  KRATOS_ATOMIC_UTILITIES_H_INCLUDED

// System includes

// External includes
#ifdef KRATOS_SMP_OPENMP
#include <omp.h>
#endif

// Project includes
#include "includes/define.h"

namespace Kratos
{
///@addtogroup KratosCore

/**
 * collection of utilities for atomic updates of simple types. (essentially mimics the omp atomic)
 */

/** Atomically performs target += value on a scalar.
 * @param target variable being atomically updated by doing target += value
 * @param value value being added
 */
template<class TDataType>
inline void AtomicAdd(TDataType& target, const TDataType& value )
{
    #pragma omp atomic
    target += value;
}

/** Adds value to target, one component at a time.
 * @param target vector variable being atomically updated by doing target += value
 * @param value vector value being added
 * Note that the update is not really atomic, but rather is done component by component
 */
template<class TVectorType1, class TVectorType2>
inline void AtomicAdd(TVectorType1& target, const TVectorType2& value )
{
    KRATOS_DEBUG_ERROR_IF(target.size() != value.size()) << "vector size mismatch in vector AtomicAdd- Sizes are: "
        << target.size() << " for target and " << value.size() << " for value " <<std::endl;

    for(unsigned int i=0; i<target.size(); ++i){
        AtomicAdd(target[i], value[i]);
    }
}

/** Atomically performs target -= value on a scalar.
 * (NOTE(review): this comment originally duplicated the vector overload's
 * documentation — this overload is the plain scalar atomic subtraction.)
 * @param target variable being atomically updated by doing target -= value
 * @param value value being subtracted
 */
template<class TDataType>
inline void AtomicSub(TDataType& target, const TDataType& value )
{
    #pragma omp atomic
    target -= value;
}

/** Subtracts value from target, one component at a time.
 * @param target vector variable being atomically updated by doing target -= value
 * @param value vector value being subtracted
 * Note that the update is not really atomic, but rather is done component by component
 */
template<class TVectorType1, class TVectorType2>
inline void AtomicSub(TVectorType1& target, const TVectorType2& value )
{
    KRATOS_DEBUG_ERROR_IF(target.size() != value.size()) << "vector size mismatch in vector AtomicSub- Sizes are: "
        << target.size() << " for target and " << value.size() << " for value " <<std::endl;

    for(unsigned int i=0; i<target.size(); ++i){
        AtomicSub(target[i], value[i]);
    }
}

/** Atomically writes value into target.
 * @param target variable being atomically updated by doing target = value
 * @param value value to which the target is set
 * KLUDGE: might not be supported by all compilers even though the openmp standard does support it
 */
template<class TDataType>
inline void AtomicAssign(TDataType& target, const TDataType& value)
{
    #pragma omp atomic write
    target = value;
}

} // namespace Kratos.

#endif // KRATOS_ATOMIC_UTILITIES_H_INCLUDED  defined
feac26_so8_itt.c
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "ittnotify.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include <stdio.h>
#include "omp.h"

#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

/* Generic array descriptor passed in by the caller (Devito-style generated
 * code): `data` is the raw buffer, `size` its per-dimension extents; the
 * remaining fields are not read by this kernel. */
struct dataobj
{
  void *restrict data;
  int *size;
  int *npsize;
  int *dsize;
  int *hsize;
  int *hofs;
  int *oofs;
};

/* Generated time-stepping kernel: an 8th-order-in-space, 2nd-order-in-time
 * damped wave-equation update on `usol`, with per-point source injection
 * driven by the sparse source masks.  The x/y/time loop nest is blocked and
 * the spatial indices are skewed by `time` (x - time, y - time), which is
 * characteristic of temporal/wavefront blocking — NOTE(review): confirm the
 * exact tiling scheme against the generator that produced this file.
 * Returns 0 on completion. */
int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict usol_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads)
{
  /* Re-interpret the flat descriptor buffers as variably-modified
   * multi-dimensional arrays (C99 VLA pointers) for natural indexing. */
  int (*restrict block_sizes) __attribute__ ((aligned (64))) = (int (*)) block_sizes_vec->data;
  float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
  int (*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__ ((aligned (64))) = (int (*)[nnz_sp_source_mask_vec->size[1]]) nnz_sp_source_mask_vec->data;
  float (*restrict save_src)[save_src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[save_src_vec->size[1]]) save_src_vec->data;
  int (*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__ ((aligned (64))) = (int (*)[source_id_vec->size[1]][source_id_vec->size[2]]) source_id_vec->data;
  float (*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[source_mask_vec->size[1]][source_mask_vec->size[2]]) source_mask_vec->data;
  int (*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__ ((aligned (64))) = (int (*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]]) sp_source_mask_vec->data;
  float (*restrict usol)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]]) usol_vec->data;
  float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;

  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);

  /* start VTune/ITT collection for the timed region */
  __itt_resume();

  /* tile and block extents supplied by the caller */
  int xb_size = block_sizes[0];
  int y0_blk0_size = block_sizes[3];
  int x0_blk0_size = block_sizes[2];
  int yb_size = block_sizes[1];

  printf(" Tiles: %d, %d ::: Blocks %d, %d \n", x0_blk0_size, y0_blk0_size, xb_size, yb_size);

  int sf = 4;  // half the space order
  int t_blk_size = 2 * sf * (time_M - time_m);

  for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size)  // for each t block
  {
    for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1)
    {
      for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1)
      {
        /* t0/t1/t2 cycle through the 3 time buffers of usol
         * (next / current / previous). */
        for (int time = t_blk, t0 = (time + 1) % (3), t1 = (time) % (3), t2 = (time + 2) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3), t1 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3))
        {
          /* NOTE(review): tw is computed but never used — leftover from the
           * code generator (save_src recomputes the same expression inline). */
          int tw = ((time / sf) % (time_M - time_m + 1));

          /* Begin section0 */
          #pragma omp parallel num_threads(nthreads)
          {
            #pragma omp for collapse(2) schedule(dynamic, 1)
            for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
            {
              for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
              {
                for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++)
                {
                  for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++)
                  {
                    /* stencil update: +8 offsets are the halo for the
                     * 8th-order spatial stencil; x - time / y - time undo
                     * the temporal skewing */
                    #pragma omp simd aligned(damp, usol, vp : 32)
                    for (int z = z_m; z <= z_M; z += 1)
                    {
                      float r14 = -2.84722222F * usol[t1][x - time + 8][y - time + 8][z + 8];
                      float r13 = 1.0 / dt;
                      float r12 = 1.0 / (dt * dt);
                      float r11 = 1.0 / (vp[x - time + 8][y - time + 8][z + 8] * vp[x - time + 8][y - time + 8][z + 8]);
                      usol[t0][x - time + 8][y - time + 8][z + 8] =
                        (r11 * (-r12 * (-2.0F * usol[t1][x - time + 8][y - time + 8][z + 8] + usol[t2][x - time + 8][y - time + 8][z + 8]))
                         + r13 * (damp[x - time + 1][y - time + 1][z + 1] * usol[t1][x - time + 8][y - time + 8][z + 8])
                         /* d2/dz2, 8th-order central differences */
                         + (r14 - 1.78571429e-3F * (usol[t1][x - time + 8][y - time + 8][z + 4] + usol[t1][x - time + 8][y - time + 8][z + 12])
                                + 2.53968254e-2F * (usol[t1][x - time + 8][y - time + 8][z + 5] + usol[t1][x - time + 8][y - time + 8][z + 11])
                                - 2.0e-1F * (usol[t1][x - time + 8][y - time + 8][z + 6] + usol[t1][x - time + 8][y - time + 8][z + 10])
                                + 1.6F * (usol[t1][x - time + 8][y - time + 8][z + 7] + usol[t1][x - time + 8][y - time + 8][z + 9])) / ((h_z * h_z))
                         /* d2/dy2 */
                         + (r14 - 1.78571429e-3F * (usol[t1][x - time + 8][y - time + 4][z + 8] + usol[t1][x - time + 8][y - time + 12][z + 8])
                                + 2.53968254e-2F * (usol[t1][x - time + 8][y - time + 5][z + 8] + usol[t1][x - time + 8][y - time + 11][z + 8])
                                - 2.0e-1F * (usol[t1][x - time + 8][y - time + 6][z + 8] + usol[t1][x - time + 8][y - time + 10][z + 8])
                                + 1.6F * (usol[t1][x - time + 8][y - time + 7][z + 8] + usol[t1][x - time + 8][y - time + 9][z + 8])) / ((h_y * h_y))
                         /* d2/dx2 */
                         + (r14 - 1.78571429e-3F * (usol[t1][x - time + 4][y - time + 8][z + 8] + usol[t1][x - time + 12][y - time + 8][z + 8])
                                + 2.53968254e-2F * (usol[t1][x - time + 5][y - time + 8][z + 8] + usol[t1][x - time + 11][y - time + 8][z + 8])
                                - 2.0e-1F * (usol[t1][x - time + 6][y - time + 8][z + 8] + usol[t1][x - time + 10][y - time + 8][z + 8])
                                + 1.6F * (usol[t1][x - time + 7][y - time + 8][z + 8] + usol[t1][x - time + 9][y - time + 8][z + 8])) / ((h_x * h_x)))
                        / (r11 * r12 + r13 * damp[x - time + 1][y - time + 1][z + 1]);
                    }
                    /* sparse source injection: only the nonzero mask entries
                     * for this (x,y) column are visited */
                    #pragma omp simd aligned(damp, usol, vp : 32)
                    for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
                    {
                      int zind = sp_source_mask[x - time][y - time][sp_zi];
                      float r0 = save_src[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
                      usol[t0][x - time + 8][y - time + 8][zind + 8] += r0;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  /* End section0 */

  __itt_pause();

  return 0;
}
conv_dw_hcl_x86.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include "convolution_param.h"
#include "x86/conv_dw_kernel_x86.h"
#include <math.h>

/* Copy an in_h x in_w int8 image into an out_h x out_w buffer, placing it
 * `top` rows down and `left` columns right, and filling the border with the
 * pad value v. */
static void pad_int8(int8_t* input, int8_t* output, int in_h, int in_w, int out_h, int out_w, int top, int left,
                     int8_t v)
{
    int8_t* ptr = input;
    int8_t* outptr = output;

    int y = 0;
    // fill top
    for (; y < top; y++)
    {
        int x = 0;
        for (; x < out_w; x++)
        {
            outptr[x] = v;
        }
        outptr += out_w;
    }
    // fill center
    for (; y < (top + in_h); y++)
    {
        int x = 0;
        for (; x < left; x++)
        {
            outptr[x] = v;
        }
        // scalar copy for narrow rows, memcpy otherwise
        if (in_w < 12)
        {
            for (; x < (left + in_w); x++)
            {
                outptr[x] = ptr[x - left];
            }
        }
        else
        {
            memcpy(outptr + left, ptr, in_w * sizeof(int8_t));
            x += in_w;
        }
        for (; x < out_w; x++)
        {
            outptr[x] = v;
        }
        ptr += in_w;
        outptr += out_w;
    }
    // fill bottom
    for (; y < out_h; y++)
    {
        int x = 0;
        for (; x < out_w; x++)
        {
            outptr[x] = v;
        }
        outptr += out_w;
    }
}

/* Depthwise 3x3 stride-1 int8 convolution.
 * Pipeline: pad input -> int32 accumulation per channel -> add bias and
 * dequantize to fp32 -> optional activation (param->activation == 0: ReLU,
 * > 0: ReLU6, as written) -> requantize to int8 with round + clamp to
 * [-127, 127].  Returns 0. */
static int convdw3x3s1_int8_sse(struct ir_tensor* input_tensor, struct ir_tensor* weight_tensor,
                                struct ir_tensor* bias_tensor, struct ir_tensor* output_tensor,
                                struct conv_param* param, int num_thread)
{
    int inch = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int in_hw = inh * inw;  // NOTE(review): unused

    int outch = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_size = output_tensor->elem_num;

    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;

    /* int32 accumulator and fp32 staging buffer for the whole output */
    int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
    memset(output_int32, 0, out_size * sizeof(int32_t));
    float* output_fp32 = (float*)sys_malloc(out_size * sizeof(float));

    int8_t* output_int8 = output_tensor->data;
    int8_t* input_int8 = input_tensor->data;
    int32_t* bias_int32 = NULL;
    if (bias_tensor)
        bias_int32 = bias_tensor->data;

    /* get scale value of quantizaiton */
    float input_scale = input_tensor->scale;
    float* kernel_scales = weight_tensor->scale_list;  // one scale per channel
    float output_scale = output_tensor->scale;

    const signed char* kernel = weight_tensor->data;

    /* pading */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;

    int8_t* input_tmp = NULL;
    if (inh_tmp == inh && inw_tmp == inw)
        input_tmp = input_int8;  // no padding needed; use input in place
    else
    {
        input_tmp = ( int8_t* )sys_malloc(inh_tmp * inw_tmp * inch * sizeof(int8_t));
#pragma omp parallel for num_threads(num_thread)
        for (int g = 0; g < inch; g++)
        {
            int8_t* pad_in = input_int8 + g * inh * inw;
            int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
            pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
        }
    }

    /* int32 accumulation: one 3x3 kernel per channel (depthwise) */
#pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < outch; p++)
    {
        int32_t* out0 = output_int32 + p * out_hw;
        int8_t* kernel0 = (int8_t* )kernel + p * 9;

        int* outptr0 = out0;

        int8_t* img0 = input_tmp + p * inw_tmp * inh_tmp;
        int8_t* r0 = img0;                 // top row of the 3x3 window
        int8_t* r1 = img0 + inw_tmp;       // middle row
        int8_t* r2 = img0 + inw_tmp * 2;   // bottom row

        for (int i = 0; i < outh; i++)
        {
            int remain = outw;

            for (; remain > 0; remain--)
            {
                int sum0 = 0;

                sum0 += ( int )r0[0] * kernel0[0];
                sum0 += ( int )r0[1] * kernel0[1];
                sum0 += ( int )r0[2] * kernel0[2];
                sum0 += ( int )r1[0] * kernel0[3];
                sum0 += ( int )r1[1] * kernel0[4];
                sum0 += ( int )r1[2] * kernel0[5];
                sum0 += ( int )r2[0] * kernel0[6];
                sum0 += ( int )r2[1] * kernel0[7];
                sum0 += ( int )r2[2] * kernel0[8];

                *outptr0 += sum0;

                r0++;
                r1++;
                r2++;
                outptr0++;
            }

            // skip the padded border columns to start the next output row
            r0 += 2;
            r1 += 2;
            r2 += 2;
        }

        kernel0 += 9;  // NOTE(review): dead store; kernel0 is re-derived each iteration
    }

    /* process bias and dequant output from int32 to fp32 */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outch; i++)
    {
        for (int j = 0; j < outh * outw; j++)
        {
            int output_off = i * (outh * outw) + j;
            if (bias_tensor)
                output_fp32[output_off] = (float )(output_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i];
            else
                output_fp32[output_off] = (float )output_int32[output_off] * input_scale * kernel_scales[i];
        }
    }

    /* process activation relu */
    if (param->activation == 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outch; i++)
        {
            for (int j = 0; j < outh * outw; j++)
            {
                int output_off = i * (outh * outw) + j;

                if (output_fp32[output_off] < 0)
                    output_fp32[output_off] = 0;
            }
        }
    }

    /* process activation relu6 */
    if (param->activation > 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outch; i++)
        {
            for (int j = 0; j < outh * outw; j++)
            {
                int output_off = i * (outh * outw) + j;

                if (output_fp32[output_off] < 0)
                    output_fp32[output_off] = 0;
                if (output_fp32[output_off] > 6)
                    output_fp32[output_off] = 6;
            }
        }
    }

    /* quant from fp32 to int8: round-to-nearest, clamp to [-127, 127] */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outch; i++)
    {
        for (int j = 0; j < outh * outw; j++)
        {
            int output_off = i * (outh * outw) + j;

            int32_t data_i32 = ( int32_t )(round(output_fp32[output_off] / output_scale));
            if (data_i32 > 127)
                data_i32 = 127;
            else if (data_i32 < -127)
                data_i32 = -127;
            output_int8[output_off] = (int8_t)data_i32;
        }
    }

    sys_free(output_int32);
    sys_free(output_fp32);
    if (!(inh_tmp == inh && inw_tmp == inw))
        sys_free(input_tmp);
    return 0;
}

/* Depthwise 3x3 stride-2 int8 convolution — definition continues past this
 * chunk. */
static int convdw3x3s2_int8_sse(struct ir_tensor* input_tensor,
struct ir_tensor* weight_tensor, struct ir_tensor* bias_tensor, struct ir_tensor* output_tensor, struct conv_param* param, int num_thread) { int inch = input_tensor->dims[1]; int inh = input_tensor->dims[2]; int inw = input_tensor->dims[3]; int in_hw = inh * inw; int outch = output_tensor->dims[1]; int outh = output_tensor->dims[2]; int outw = output_tensor->dims[3]; int out_hw = outh * outw; int out_size = output_tensor->elem_num; int pad_w = param->pad_w0; int pad_h = param->pad_h0; int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t)); memset(output_int32, 0, out_size * sizeof(int32_t)); float* output_fp32 = (float*)sys_malloc(out_size * sizeof(float)); int8_t* output_int8 = output_tensor->data; int8_t* input_int8 = input_tensor->data; int32_t* bias_int32 = NULL; if(bias_tensor) bias_int32 = bias_tensor->data; /* get scale value of quantizaiton */ float input_scale = input_tensor->scale; float* kernel_scales = weight_tensor->scale_list; float output_scale = output_tensor->scale; const signed char* kernel = weight_tensor->data; /* pading */ int inh_tmp = inh + pad_h + pad_h; int inw_tmp = inw + pad_w + pad_w; int8_t* input_tmp = NULL; if (inh_tmp == inh && inw_tmp == inw) input_tmp = input_int8; else { input_tmp = ( int8_t* )sys_malloc(inh_tmp * inw_tmp * inch * sizeof(int8_t)); #pragma omp parallel for num_threads(num_thread) for (int g = 0; g < inch; g++) { int8_t* pad_in = input_int8 + g * inh * inw; int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp; pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0); } } int tailstep = inw_tmp - 2 * outw + inw_tmp; #pragma omp parallel for num_threads(num_thread) for (int p = 0; p < outch; p++) { int32_t* out0 = output_int32 + p * out_hw; int8_t* kernel0 = (int8_t* )kernel + p * 9; int* outptr0 = out0; int8_t* img0 = input_tmp + p * inw_tmp * inh_tmp; int8_t* r0 = img0; int8_t* r1 = img0 + inw_tmp; int8_t* r2 = img0 + inw_tmp * 2; for (int i = 0; i < outh; i++) { int remain = outw; for 
(; remain > 0; remain--) { int sum0 = 0; sum0 += ( int )r0[0] * kernel0[0]; sum0 += ( int )r0[1] * kernel0[1]; sum0 += ( int )r0[2] * kernel0[2]; sum0 += ( int )r1[0] * kernel0[3]; sum0 += ( int )r1[1] * kernel0[4]; sum0 += ( int )r1[2] * kernel0[5]; sum0 += ( int )r2[0] * kernel0[6]; sum0 += ( int )r2[1] * kernel0[7]; sum0 += ( int )r2[2] * kernel0[8]; *outptr0 += sum0; r0 += 2; r1 += 2; r2 += 2; outptr0++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } kernel0 += 9; } /* process bias and dequant output from int32 to fp32 */ #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outch; i++) { for (int j = 0; j < outh * outw; j++) { int output_off = i * (outh * outw) + j; if (bias_tensor) output_fp32[output_off] = (float )(output_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i]; else output_fp32[output_off] = (float )output_int32[output_off] * input_scale * kernel_scales[i]; } } /* process activation relu */ if (param->activation == 0) { #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outch; i++) { for (int j = 0; j < outh * outw; j++) { int output_off = i * (outh * outw) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; } } } /* process activation relu6 */ if (param->activation > 0) { #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outch; i++) { for (int j = 0; j < outh * outw; j++) { int output_off = i * (outh * outw) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; if (output_fp32[output_off] > 6) output_fp32[output_off] = 6; } } } /* quant from fp32 to int8 */ #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outch; i++) { for (int j = 0; j < outh * outw; j++) { int output_off = i * (outh * outw) + j; int32_t data_i32 = ( int32_t )(round(output_fp32[output_off] / output_scale)); if (data_i32 > 127) data_i32 = 127; else if (data_i32 < -127) data_i32 = -127; output_int8[output_off] = (int8_t)data_i32; } } sys_free(output_int32); 
sys_free(output_fp32); if (!(inh_tmp == inh && inw_tmp == inw)) sys_free(input_tmp); return 0; } static int conv_dw_run_int8(struct ir_tensor* input_tensor, struct ir_tensor* weight_tensor, struct ir_tensor* bias_tensor, struct ir_tensor* output_tensor, struct conv_param* param, int num_thread) { int ret = -1; switch(param->stride_h) { case 1: ret = convdw3x3s1_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor, param, num_thread); break; case 2: ret = convdw3x3s2_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor, param, num_thread); break; default: TLOG_ERR("Direct Convolution Int8 not support the stride %d\n", param->stride_h); set_tengine_errno(EFAULT); } return ret; } static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ir_node* ir_node = exec_node->ir_node; struct ir_graph* ir_graph = ir_node->graph; struct ir_tensor* input_tensor; struct ir_tensor* weight_tensor; struct ir_tensor* bias_tensor = NULL; struct ir_tensor* output_tensor = NULL; int num_thread = exec_graph->num_thread; int cpu_affinity = exec_graph->cpu_affinity; /* set the input data and shape again, in case of reshape or dynamic shape */ input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); weight_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]); if (ir_node->input_num > 2) bias_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[2]); output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); struct conv_param* conv_param = ( struct conv_param* )ir_node->op.param_mem; struct conv_priv_info* conv_priv_info = ( struct conv_priv_info* )exec_node->ops_priv; int ret = -1; if (exec_graph->mode == TENGINE_MODE_FP32) ret = conv_dw_run(input_tensor, weight_tensor, bias_tensor, output_tensor, conv_priv_info, conv_param, num_thread, cpu_affinity); else if (exec_graph->mode == TENGINE_MODE_INT8) ret = conv_dw_run_int8(input_tensor, weight_tensor, bias_tensor, 
output_tensor, conv_param, num_thread); else { TLOG_ERR("hcl conv run failed\n"); set_tengine_errno(EFAULT); return -1; } return ret; } static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node) { struct conv_param* param = ( struct conv_param* )exec_node->op.param_mem; struct ir_node* ir_node = exec_node; struct ir_graph* ir_graph = ir_node->graph; struct ir_tensor* input_tensor; struct ir_tensor* output_tensor; int group = param->group; int kernel_h = param->kernel_h; int kernel_w = param->kernel_w; int stride_h = param->stride_h; int stride_w = param->stride_w; int dilation_h = param->dilation_h; int dilation_w = param->dilation_w; int pad_h0 = param->pad_h0; int pad_w0 = param->pad_w0; int pad_h1 = param->pad_h1; int pad_w1 = param->pad_w1; input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); int in_c = input_tensor->dims[1] / group; int out_c = output_tensor->dims[1] / group; /* todo support uint8 */ if (!(input_tensor->data_type == TENGINE_DT_FP32 || input_tensor->data_type == TENGINE_DT_INT8)) return 0; if (kernel_h != kernel_w || input_tensor->dims[0] > 1) return 0; if (param->group > 1 && in_c == 1 && out_c == 1 && pad_h0 == pad_h1 && pad_w0 == pad_w1 && dilation_h == 1 && dilation_w == 1 && kernel_h == 3 && kernel_w == 3 && ((stride_h == 1 && stride_w == 1) || (stride_h == 2 && stride_w == 2))) return OPS_SCORE_BEST; else return 0; } static struct node_ops hcl_node_ops = {.prerun = NULL, .run = run, .reshape = NULL, .postrun = NULL, .init_node = init_node, .release_node = release_node, .score = score}; static int reg_conv_dw_ops(void* arg) { return 
register_builtin_node_ops(OP_CONV, &hcl_node_ops); } static int unreg_conv_dw_ops(void* arg) { unregister_builtin_node_ops(OP_CONV, &hcl_node_ops); return 0; } AUTO_REGISTER_OPS(reg_conv_dw_ops); AUTO_UNREGISTER_OPS(unreg_conv_dw_ops);
doaccross-1.c
#include <omp.h> #include "omprace.h" #include <stdio.h> float foo(int i){ return (float)i*2; } float bar(float a, float b){ return a*b; } float baz(float b){ return 2*b + 2; } void work( int N, float *A, float *B, float *C ) { int i; #pragma omp parallel num_threads(2) { #pragma omp for ordered(1) for (i=1; i<N; i++) { A[i] = foo(i); #pragma omp ordered depend(sink: i-1) B[i] = bar(A[i], B[i-1]); #pragma omp ordered depend(source) //printf("b[%d] = %f\n", i, ) C[i] = baz(B[i]); } } } #define NUM 3 int main(){ omprace_init(); float a[] = {1.0,3.0,5.0}; float b[] = {2.0,2.0,2.0}; float c[] = {4.0,5.0,6.0}; work(NUM,a,b,c); printf("c[1] = %f\n", c[1]); omprace_fini(); return 0; }
DRB016-outputdep-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* The loop in this example cannot be parallelized.
 * This pattern has two pairs of dependencies:
 *   1. loop-carried output dependence:  x = ..;
 *   2. loop-carried true dependence due to:  .. = x;  x = ..;
 * Data race pairs: we allow two pairs to preserve the original code pattern.
 *   1. x@73:12 vs. x@74:5
 *   2. x@74:5 vs. x@74:5
 * (line:column references are to the original DataRaceBench source layout)
 */
#include <stdio.h>

int a[100];

int main()
{
    int len=100;
    int i,x=10;

    /* INTENTIONAL RACE: x is shared across threads.
     * NOTE(review): listing the loop iteration variable i in firstprivate is
     * non-conforming OpenMP (the loop variable is predetermined private and
     * may only appear in private/lastprivate) — confirm this clause set is
     * deliberate for the benchmark. */
#pragma omp parallel for firstprivate(i ) lastprivate(i )
    for (i=0;i<len;i++)
    {
        a[i] = x;   /* true dependence: reads x written by other iterations */
        x=i;        /* output dependence: every iteration writes shared x */
    }
    printf("x=%d",x);
    return 0;
}
pst_fmt_plug.c
/* PST cracker patch for JtR. Hacked together during July of 2012 by
 * Dhiru Kholia <dhiru.kholia at gmail.com>
 *
 * Optimizations and shift to pkzip CRC32 code done by JimF
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * Uses code from crc32_fmt_plug.c written by JimF
 *
 * Hashes look like "$pst$XXXXXXXX" where XXXXXXXX is the 8-hex-digit CRC-32
 * of the password (computed with the pkzip CRC table).
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_pst;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pst);
#else

#include <string.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "pkzip.h" // includes the 'inline' crc table.

#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 8
static int omp_t = 1;
#endif
#include "memdbg.h"

#define FORMAT_LABEL "PST"
#define FORMAT_NAME "custom CRC-32"
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 8
#define BINARY_SIZE 4
#define SALT_SIZE 0
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 256

/* Known-good ciphertext/plaintext pairs used by the self-test. */
static struct fmt_tests tests[] = {
	{"$pst$a9290513", "openwall"}, /* "jfuck jw" works too ;) */
	{"$pst$50e099bc", "password"},
	{"$pst$00000000", ""},
	{"$pst$e3da3318", "xxx"},
	{"$pst$a655dd18", "XYz123"},
	{NULL}
};

/* Per-candidate plaintext buffers and their computed CRC-32 values; both are
 * allocated in init() once the final keys-per-crypt count is known. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out);

/* Scale the keys-per-crypt limits for OpenMP, then allocate the key and
 * result arrays. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

/* Return nonzero iff q consists only of hex digits up to its NUL. */
static int ishex(char *q)
{
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q;
}

/* Accept "$pst$" followed by exactly 8 hex digits. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;

	if (strncmp(ciphertext, "$pst$", 5))
		return 0;
	p = ciphertext + 5;
	if (strlen(p) != BINARY_SIZE * 2)
		return 0;
	if (!ishex(p))
		return 0;
	return 1;
}

static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH+1);
}

/* Any-match check over the whole batch of computed CRCs. */
static int cmp_all(void *binary, int count)
{
	ARCH_WORD_32 crc=*((ARCH_WORD_32*)binary), i;
	for (i = 0; i < count; ++i)
		if (crc == crypt_out[i]) return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return *((ARCH_WORD_32*)binary) == crypt_out[index];
}

/* The CRC comparison in cmp_one is already exact — nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Compute the pkzip CRC-32 of every queued candidate (OpenMP-parallel). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int i;

#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
	for (i = 0; i < count; ++i) {
		ARCH_WORD_32 crc = 0;
		unsigned char *p = (unsigned char*)saved_key[i];
		while (*p)
			crc = pkzip_crc32(crc, *p++);
		crypt_out[i] = crc;
	}
	return count;
}

/* Parse the 8 hex digits after "$pst$" into a lazily-allocated static
 * ARCH_WORD_32 (NOTE: shared static buffer — overwritten on each call). */
static void *get_binary(char *ciphertext)
{
	static ARCH_WORD_32 *out;

	if (!out)
		out = mem_alloc_tiny(sizeof(ARCH_WORD_32), MEM_ALIGN_WORD);
	sscanf(&ciphertext[5], "%x", out);
	return out;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Hash-table bucketing functions: successively wider masks of the CRC. */
static int get_hash_0(int index) { return crypt_out[index] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index] & 0x7ffffff; }

/* Format descriptor wired into JtR's plugin registry. Field order follows
 * struct fmt_main/fmt_params/fmt_methods from formats.h; the
 * FMT_MAIN_VERSION conditionals keep it buildable against older cores. */
struct fmt_main fmt_pst = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_NOT_EXACT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		fmt_default_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
DRB028-privatemissing-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* tmp should be annotated as private to avoid race condition.
 * Data race pair: tmp@65:5 vs. tmp@66:12
 * (line:column references are to the original DataRaceBench source layout)
 */
#include <stdlib.h>
#include <stdio.h>

int main(int argc, char* argv[])
{
    int i;
    int tmp;          /* shared scratch variable — the intended data race */
    int len=100;
    int a[100];

    /* sequential initialization: a[i] = i */
    for (i=0;i<len;i++)
        a[i]=i;

    /* INTENTIONAL RACE: tmp is shared, so concurrent iterations can write it
     * between another iteration's write and read (missing private(tmp)). */
#pragma omp parallel for schedule(dynamic)
    for (i=0;i<len;i++)
    {
        tmp =a[i]+i;
        a[i] = tmp;
    }
    printf("a[50]=%d\n", a[50]);
    return 0;
}
matrix.c
/*
    This file is part of ParTI!.

    ParTI! is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as
    published by the Free Software Foundation, either version 3 of
    the License, or (at your option) any later version.

    ParTI! is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with ParTI!.
    If not, see <http://www.gnu.org/licenses/>.
*/

#include <ParTI.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include "../error/error.h"

/**
 * Initialize a new dense matrix
 *
 * @param mtx a valid pointer to an uninitialized sptMatrix variable
 * @param nrows the number of rows
 * @param ncols the number of columns
 *
 * The memory layout of this dense matrix is a flat 2D array, with `ncols`
 * rounded up to multiples of 8 (the rounded width is stored in mtx->stride).
 */
int sptNewMatrix(sptMatrix *mtx, sptIndex const nrows, sptIndex const ncols) {
    mtx->nrows = nrows;
    mtx->ncols = ncols;
    /* capacity of at least one row so a later append always has storage */
    mtx->cap = nrows != 0 ? nrows : 1;
    /* row stride rounded up to the next multiple of 8 elements */
    mtx->stride = ((ncols-1)/8+1)*8;
#ifdef _ISOC11_SOURCE
    mtx->values = aligned_alloc(8 * sizeof (sptValue), mtx->cap * mtx->stride * sizeof (sptValue));
#elif _POSIX_C_SOURCE >= 200112L
    {
        int result = posix_memalign((void **) &mtx->values, 8 * sizeof (sptValue), mtx->cap * mtx->stride * sizeof (sptValue));
        if(result != 0) {
            mtx->values = NULL;   /* normalize failure to NULL for the check below */
        }
    }
#else
    mtx->values = malloc(mtx->cap * mtx->stride * sizeof (sptValue));
#endif
    spt_CheckOSError(!mtx->values, "Mtx New");
    return 0;
}

/**
 * Build a matrix with random number
 *
 * @param mtx a pointer to an uninitialized matrix
 * @param nrows fill the specified number of rows
 * @param ncols fill the specified number of columns
 *
 * NOTE(review): despite the name and the doc above, the current code fills
 * the deterministic value i + j + 1 — the sptRandomValue() call is commented
 * out (srand() is still seeded but unused).
 */
int sptRandomizeMatrix(sptMatrix *mtx, sptIndex const nrows, sptIndex const ncols) {
    srand(time(NULL));
    for(sptIndex i=0; i<nrows; ++i)
        for(sptIndex j=0; j<ncols; ++j) {
            mtx->values[i * mtx->stride + j] = i + j + 1; //sptRandomValue();
        }
    return 0;
}

/**
 * Fill an identity dense matrix
 *
 * @param mtx a pointer to an initialized square matrix (nrows == ncols is
 *            asserted); zeroes the logical region then sets the diagonal to 1
 */
int sptIdentityMatrix(sptMatrix *mtx) {
    sptIndex const nrows = mtx->nrows;
    sptIndex const ncols = mtx->ncols;
    assert(nrows == ncols);
    for(sptIndex i=0; i<nrows; ++i)
        for(sptIndex j=0; j<ncols; ++j)
            mtx->values[i * mtx->stride + j] = 0;
    for(sptIndex i=0; i<nrows; ++i)
        mtx->values[i * mtx->stride + i] = 1;
    return 0;
}

/**
 * Fill an existed dense matrix with a specified constant
 *
 * @param mtx a pointer to a valid matrix
 * @param val a given value constant
 */
int sptConstantMatrix(sptMatrix *mtx, sptValue const val) {
    for(sptIndex i=0; i<mtx->nrows; ++i)
        for(sptIndex j=0; j<mtx->ncols; ++j)
            mtx->values[i * mtx->stride + j] = val;
    return 0;
}

/**
 * Shuffle matrix row indices.
 *
 * @param[in,out] mtx matrix to be shuffled (its values buffer is replaced)
 * @param[in] mode_map_inds the renumbering mapping: row i of the result is
 *            taken from row mode_map_inds[i] of the input
 */
void sptMatrixInverseShuffleIndices(sptMatrix *mtx, sptIndex * mode_map_inds) {
    /* Renumber matrix rows into a freshly allocated buffer, then swap it in. */
    sptIndex new_i;
    sptValue * tmp_values = malloc(mtx->cap * mtx->stride * sizeof (sptValue));
    for(sptIndex i=0; i<mtx->nrows; ++i) {
        new_i = mode_map_inds[i];
        for(sptIndex j=0; j<mtx->ncols; ++j) {
            tmp_values[i * mtx->stride + j] = mtx->values[new_i * mtx->stride + j];
        }
    }
    free(mtx->values);
    mtx->values = tmp_values;
}

/**
 * Copy a dense matrix to an uninitialized dense matrix
 *
 * @param dest a pointer to an uninitialized dense matrix
 * @param src a pointer to an existing valid dense matrix
 *
 * The contents of `src` will be copied to `dest`.
 */
int sptCopyMatrix(sptMatrix *dest, const sptMatrix *src) {
    int result = sptNewMatrix(dest, src->nrows, src->ncols);
    spt_CheckError(result, "Mtx Copy", NULL);
    assert(dest->stride == src->stride);
    memcpy(dest->values, src->values, dest->nrows * dest->stride * sizeof (sptValue));
    return 0;
}

/**
 * Add a row to the end of dense matrix
 *
 * @param mtx a pointer to a valid matrix
 * @param values an array of data to be added (ncols elements), or NULL to
 *        grow by one uninitialized row
 */
int sptAppendMatrix(sptMatrix *mtx, const sptValue values[]) {
    if(mtx->cap <= mtx->nrows) {
        /* grow by 1.5x normally; by exactly one row under MEMCHECK_MODE */
#ifndef MEMCHECK_MODE
        sptIndex newcap = mtx->cap + mtx->cap/2;
#else
        sptIndex newcap = mtx->nrows+1;
#endif
        sptValue *newdata;
#ifdef _ISOC11_SOURCE
        newdata = aligned_alloc(8 * sizeof (sptValue), newcap * mtx->stride * sizeof (sptValue));
#elif _POSIX_C_SOURCE >= 200112L
        {
            int result = posix_memalign((void **) &newdata, 8 * sizeof (sptValue), newcap * mtx->stride * sizeof (sptValue));
            if(result != 0) {
                newdata = NULL;
            }
        }
#else
        newdata = malloc(newcap * mtx->stride * sizeof (sptValue));
#endif
        spt_CheckOSError(!newdata, "Mtx Append");
        memcpy(newdata, mtx->values, mtx->nrows * mtx->stride * sizeof (sptValue));
        free(mtx->values);
        mtx->cap = newcap;
        mtx->values = newdata;
    }
    if(values != NULL) {
        memcpy(&mtx->values[mtx->nrows * mtx->stride], values, mtx->ncols * sizeof (sptValue));
    }
    ++ mtx->nrows;
    return 0;
}

/**
 * Modify the number of rows in a dense matrix
 *
 * @param mtx a pointer to a valid matrix
 * @param new_nrows the new number of rows `mtx` will have
 *
 * NOTE(review): always reallocates and copies mtx->nrows rows — when
 * shrinking, the memcpy still copies the old (larger) row count; confirm
 * callers only grow.
 */
int sptResizeMatrix(sptMatrix *mtx, sptIndex const new_nrows) {
    sptValue *newdata;
#ifdef _ISOC11_SOURCE
    newdata = aligned_alloc(8 * sizeof (sptValue), new_nrows * mtx->stride * sizeof (sptValue));
#elif _POSIX_C_SOURCE >= 200112L
    {
        int result = posix_memalign((void **) &newdata, 8 * sizeof (sptValue), new_nrows * mtx->stride * sizeof (sptValue));
        if(result != 0) {
            newdata = NULL;
        }
    }
#else
    newdata = malloc(new_nrows * mtx->stride * sizeof (sptValue));
#endif
    spt_CheckOSError(!newdata, "Mtx Resize");
    memcpy(newdata, mtx->values, mtx->nrows * mtx->stride * sizeof (sptValue));
    free(mtx->values);
    mtx->nrows = new_nrows;
    mtx->cap = new_nrows;
    mtx->values = newdata;
    return 0;
}

/**
 * Release the memory buffer a dense matrix is holding
 *
 * @param mtx a pointer to a valid matrix
 *
 * By using `sptFreeMatrix`, a valid matrix would become uninitialized and
 * should not be used anymore prior to another initialization
 */
void sptFreeMatrix(sptMatrix *mtx) {
    free(mtx->values);
    mtx->nrows = 0;
    mtx->ncols = 0;
    mtx->cap = 0;
    mtx->stride = 0;
}

/**** sptMatrix Operations ****/

/* Element-wise product C = A .* B; all three matrices must have identical
 * shape and stride (asserted). */
int sptMatrixDotMul(sptMatrix const * A, sptMatrix const * B, sptMatrix const * C)
{
    sptIndex nrows = A->nrows;
    sptIndex ncols = A->ncols;
    sptIndex stride = A->stride;
    assert(nrows == B->nrows && nrows == C->nrows);
    assert(ncols == B->ncols && ncols == C->ncols);
    assert(stride == B->stride && stride == C->stride);

    for(sptIndex i=0; i < nrows; ++i) {
        for(sptIndex j=0; j < ncols; ++j) {
            C->values[i*stride+j] = A->values[i*stride+j] * B->values[i*stride+j];
        }
    }
    return 0;
}

/* Element-wise product of mats[(mode+1)%nmodes .. (mode+nmodes-1)%nmodes]
 * (i.e. every mode except `mode`) accumulated into mats[nmodes], which is
 * first initialized to all ones. Row-major traversal. */
int sptMatrixDotMulSeq(sptIndex const mode, sptIndex const nmodes, sptMatrix ** mats)
{
    sptIndex const nrows = mats[0]->nrows;
    sptIndex const ncols = mats[0]->ncols;
    sptIndex const stride = mats[0]->stride;
    for(sptIndex m=1; m<nmodes+1; ++m) {
        assert(mats[m]->ncols == ncols);
        assert(mats[m]->nrows == nrows);
        assert(mats[m]->stride == stride);
    }

    sptValue * ovals = mats[nmodes]->values;
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for
#endif
    for(sptIndex i=0; i < nrows; ++i) {
        for(sptIndex j=0; j < ncols; ++j) {
            ovals[i * stride + j] = 1;
        }
    }

    for(sptIndex m=1; m < nmodes; ++m) {
        sptIndex const pm = (mode + m) % nmodes;
        sptValue const * vals = mats[pm]->values;
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for
#endif
        for(sptIndex i=0; i < nrows; ++i) {
            for(sptIndex j=0; j < ncols; ++j) {
                ovals[i * stride + j] *= vals[i * stride + j];
            }
        }
    }
    return 0;
}

/* Same reduction as sptMatrixDotMulSeq but indexed as [j * stride + i]
 * (column-major-style addressing over the same buffers). */
int sptMatrixDotMulSeqCol(sptIndex const mode, sptIndex const nmodes, sptMatrix ** mats)
{
    sptIndex const nrows = mats[0]->nrows;
    sptIndex const ncols = mats[0]->ncols;
    sptIndex const stride = mats[0]->stride;
    // printf("stride: %lu\n", stride);
    for(sptIndex m=1; m<nmodes+1; ++m) {
        assert(mats[m]->ncols == ncols);
        assert(mats[m]->nrows == nrows);
        assert(mats[m]->stride == stride);
    }

    sptValue * ovals = mats[nmodes]->values;
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for
#endif
    for(sptIndex j=0; j < ncols; ++j) {
        for(sptIndex i=0; i < nrows; ++i) {
            ovals[j * stride + i] = 1;
        }
    }

    for(sptIndex m=1; m < nmodes; ++m) {
        sptIndex const pm = (mode + m) % nmodes;
        sptValue const * vals = mats[pm]->values;
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for
#endif
        for(sptIndex j=0; j < ncols; ++j) {
            for(sptIndex i=0; i < nrows; ++i) {
                ovals[j * stride + i] *= vals[j * stride + i];
            }
        }
    }
    return 0;
}

/* mats (aTa) only stores upper triangle elements.
 */
int sptMatrixDotMulSeqTriangle(sptIndex const mode, sptIndex const nmodes, sptMatrix ** mats)
{
    sptIndex const nrows = mats[0]->nrows;
    sptIndex const ncols = mats[0]->ncols;
    sptIndex const stride = mats[0]->stride;
    /* NOTE(review): unlike the other DotMulSeq variants, stride equality is
     * not asserted here. */
    for(sptIndex m=1; m<nmodes+1; ++m) {
        assert(mats[m]->ncols == ncols);
        assert(mats[m]->nrows == nrows);
    }

    sptValue * ovals = mats[nmodes]->values;
    /* Initialize output to ones. NOTE(review): the init loop writes
     * ovals[j*stride+i] while the product loop uses ovals[i*stride+j];
     * this only covers the same region when the matrix is square (as it is
     * for aTa Gram matrices — confirm callers never pass non-square). */
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for schedule(static)
#endif
    for(sptIndex i=0; i < nrows; ++i) {
        for(sptIndex j=0; j < ncols; ++j) {
            ovals[j * stride + i] = 1.0;
        }
    }

    /* Multiply only the upper triangle (j >= i) of each mode's matrix in. */
    for(sptIndex m=1; m < nmodes; ++m) {
        sptIndex const pm = (mode + m) % nmodes;
        sptValue const * vals = mats[pm]->values;
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for schedule(static)
#endif
        for(sptIndex i=0; i < nrows; ++i) {
            for(sptIndex j=i; j < ncols; ++j) {
                ovals[i * stride + j] *= vals[i * stride + j];
            }
        }
    }

    /* Copy upper triangle to lower part */
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for schedule(static)
#endif
    for(sptIndex i=0; i < nrows; ++i) {
        for(sptIndex j=0; j < i; ++j) {
            ovals[i * stride + j] = ovals[j * stride + i];
        }
    }
    return 0;
}

// Row-major
/* Normalize each column of A by its 2-norm; the norms are returned in
 * lambda[0..ncols-1]. The OpenMP path accumulates into per-thread slices of
 * buffer_lambda (allocated by the master thread; the implicit barrier at the
 * end of the first parallel region publishes it) and then reduces. */
int sptMatrix2Norm(sptMatrix * const A, sptValue * const lambda)
{
    sptIndex const nrows = A->nrows;
    sptIndex const ncols = A->ncols;
    sptIndex const stride = A->stride;
    sptValue * const vals = A->values;
    sptValue * buffer_lambda;

#ifdef PARTI_USE_OPENMP
#pragma omp parallel for
#endif
    for(sptIndex j=0; j < ncols; ++j) {
        lambda[j] = 0.0;
    }

#ifdef PARTI_USE_OPENMP
#pragma omp parallel
    {
        int const nthreads = omp_get_num_threads();
#pragma omp master
        {
            buffer_lambda = (sptValue *)malloc(nthreads * ncols * sizeof(sptValue));
            for(sptNnzIndex j=0; j < nthreads * ncols; ++j)
                buffer_lambda[j] = 0.0;
        }
    }
#pragma omp parallel
    {
        int const tid = omp_get_thread_num();
        int const nthreads = omp_get_num_threads();
        sptValue * loc_lambda = buffer_lambda + tid * ncols;

        /* per-thread partial sums of squares over the rows this thread owns */
#pragma omp for
        for(sptIndex i=0; i < nrows; ++i) {
            for(sptIndex j=0; j < ncols; ++j) {
                loc_lambda[j] += vals[i*stride + j] * vals[i*stride + j];
            }
        }

        /* reduce the per-thread buffers into lambda */
#pragma omp for
        for(sptIndex j=0; j < ncols; ++j) {
            for(int i=0; i < nthreads; ++i) {
                lambda[j] += buffer_lambda[i*ncols + j];
            }
        }
    } /* end parallel pragma */
#else
    for(sptIndex i=0; i < nrows; ++i) {
        for(sptIndex j=0; j < ncols; ++j) {
            lambda[j] += vals[i*stride + j] * vals[i*stride + j];
        }
    }
#endif

#ifdef PARTI_USE_OPENMP
#pragma omp parallel for
#endif
    for(sptIndex j=0; j < ncols; ++j) {
        lambda[j] = sqrt(lambda[j]);
    }

    /* scale each column by 1/norm (NOTE(review): no guard against a
     * zero-norm column — division by zero is possible) */
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for
#endif
    for(sptIndex i=0; i < nrows; ++i) {
        for(sptIndex j=0; j < ncols; ++j) {
            vals[i*stride + j] /= lambda[j];
        }
    }

#ifdef PARTI_USE_OPENMP
    free(buffer_lambda);
#endif
    return 0;
}

// Row-major
/* Normalize each column of A by its max element (clamped below at 1); the
 * scale factors are returned in lambda[0..ncols-1]. Same per-thread buffer
 * scheme as sptMatrix2Norm, with max in place of sum-of-squares. */
int sptMatrixMaxNorm(sptMatrix * const A, sptValue * const lambda)
{
    sptIndex const nrows = A->nrows;
    sptIndex const ncols = A->ncols;
    sptIndex const stride = A->stride;
    sptValue * const vals = A->values;
    sptValue * buffer_lambda;

#ifdef PARTI_USE_OPENMP
#pragma omp parallel for
#endif
    for(sptIndex j=0; j < ncols; ++j) {
        lambda[j] = 0.0;
    }

#ifdef PARTI_USE_OPENMP
#pragma omp parallel
    {
        int const nthreads = omp_get_num_threads();
#pragma omp master
        {
            buffer_lambda = (sptValue *)malloc(nthreads * ncols * sizeof(sptValue));
            for(sptNnzIndex j=0; j < nthreads * ncols; ++j)
                buffer_lambda[j] = 0.0;
        }
    }
#pragma omp parallel
    {
        int const tid = omp_get_thread_num();
        int const nthreads = omp_get_num_threads();
        sptValue * loc_lambda = buffer_lambda + tid * ncols;

#pragma omp for
        for(sptIndex i=0; i < nrows; ++i) {
            for(sptIndex j=0; j < ncols; ++j) {
                if(vals[i*stride + j] > loc_lambda[j])
                    loc_lambda[j] = vals[i*stride + j];
            }
        }

#pragma omp for
        for(sptIndex j=0; j < ncols; ++j) {
            for(int i=0; i < nthreads; ++i) {
                if(buffer_lambda[i*ncols + j] > lambda[j])
                    lambda[j] = buffer_lambda[i*ncols + j];
            }
        }
    } /* end parallel pragma */
#else
    for(sptIndex i=0; i < nrows; ++i) {
        for(sptIndex j=0; j < ncols; ++j) {
            if(vals[i*stride + j] > lambda[j])
                lambda[j] = vals[i*stride + j];
        }
    }
#endif

    /* never scale up: columns whose max is below 1 are left unscaled */
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for
#endif
    for(sptIndex j=0; j < ncols; ++j) {
        if(lambda[j] < 1)
            lambda[j] = 1;
    }

#ifdef PARTI_USE_OPENMP
#pragma omp parallel for
#endif
    for(sptIndex i=0; i < nrows; ++i) {
        for(sptIndex j=0; j < ncols; ++j) {
            vals[i*stride + j] /= lambda[j];
        }
    }

#ifdef PARTI_USE_OPENMP
    free(buffer_lambda);
#endif
    return 0;
}

/* 2-normalize every factor matrix and fold each matrix's column norms into
 * lambda (element-wise product across modes). lambda must be pre-initialized
 * by the caller — this function only multiplies into it. */
void GetFinalLambda(
  sptIndex const rank,
  sptIndex const nmodes,
  sptMatrix ** mats,
  sptValue * const lambda)
{
    sptValue * tmp_lambda = (sptValue *) malloc(rank * sizeof(*tmp_lambda));

    for(sptIndex m=0; m < nmodes; ++m) {
        sptMatrix2Norm(mats[m], tmp_lambda);
        for(sptIndex r=0; r < rank; ++r) {
            lambda[r] *= tmp_lambda[r];
        }
    }
    free(tmp_lambda);
}
conv_dw_hcl_x86.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "convolution_param.h"
#include "conv_dw_kernel_x86.h"

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>
#include <string.h>

/* Copy one in_h x in_w int8 plane into the center of an out_h x out_w plane,
 * filling a border of `top` rows above (and the symmetric remainder below)
 * and `left` columns to the left (and the remainder to the right) with the
 * constant value v. */
static void pad_int8(int8_t* input, int8_t* output, int in_h, int in_w, int out_h, int out_w,
                     int top, int left, int8_t v)
{
    int8_t* ptr = input;
    int8_t* outptr = output;
    int y = 0;

    /* fill top border rows */
    for (; y < top; y++)
    {
        memset(outptr, v, out_w);
        outptr += out_w;
    }

    /* center rows: left border, payload, right border */
    for (; y < (top + in_h); y++)
    {
        memset(outptr, v, left);
        memcpy(outptr + left, ptr, in_w * sizeof(int8_t));
        memset(outptr + left + in_w, v, out_w - left - in_w);
        ptr += in_w;
        outptr += out_w;
    }

    /* fill bottom border rows */
    for (; y < out_h; y++)
    {
        memset(outptr, v, out_w);
        outptr += out_w;
    }
}

/* Shared tail of the s1/s2 depthwise kernels.
 * For each output channel: add the per-channel bias (if any) to the int32
 * accumulators, dequantize to fp32 with input_scale * kernel_scales[i],
 * apply the optional activation (activation == 0 -> ReLU,
 * activation > 0 -> ReLU6, negative -> none), then requantize to int8 with
 * symmetric saturation to [-127, 127].
 * Unlike the original code this works value-by-value, so no intermediate
 * fp32 buffer is needed. */
static void conv_dw_int8_requant(const int32_t* output_int32, int8_t* output_int8,
                                 const int32_t* bias_int32, int outch, int out_hw,
                                 float input_scale, const float* kernel_scales,
                                 float output_scale, int activation, int num_thread)
{
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outch; i++)
    {
        for (int j = 0; j < out_hw; j++)
        {
            int output_off = i * out_hw + j;

            /* bias + dequantize */
            int32_t acc = output_int32[output_off];
            if (bias_int32)
                acc += bias_int32[i];
            float val = (float)acc * input_scale * kernel_scales[i];

            /* activation */
            if (activation == 0 && val < 0)
                val = 0;
            if (activation > 0)
            {
                if (val < 0)
                    val = 0;
                if (val > 6)
                    val = 6;
            }

            /* requantize with symmetric clamp */
            int32_t data_i32 = (int32_t)(round(val / output_scale));
            if (data_i32 > 127)
                data_i32 = 127;
            else if (data_i32 < -127)
                data_i32 = -127;
            output_int8[output_off] = (int8_t)data_i32;
        }
    }
}

/* Depthwise 3x3 stride-1 int8 convolution (one input channel per output
 * channel). Accumulates in int32, then dequantizes/activates/requantizes
 * via conv_dw_int8_requant(). Returns 0 on success. */
static int convdw3x3s1_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor,
                                struct tensor* bias_tensor, struct tensor* output_tensor,
                                struct conv_param* param, int num_thread)
{
    int inch = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];

    int outch = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_size = output_tensor->elem_num;

    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;

    int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
    memset(output_int32, 0, out_size * sizeof(int32_t));
    int8_t* output_int8 = (int8_t*)output_tensor->data;
    int8_t* input_int8 = (int8_t*)input_tensor->data;
    int32_t* bias_int32 = NULL;
    if (bias_tensor)
        bias_int32 = (int32_t*)bias_tensor->data;

    /* quantization scales */
    float input_scale = input_tensor->scale;
    float* kernel_scales = weight_tensor->scale_list;
    float output_scale = output_tensor->scale;

    const signed char* kernel = (const signed char*)weight_tensor->data;

    /* zero-pad the input planes when padding is requested */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;

    int8_t* input_tmp = NULL;
    if (inh_tmp == inh && inw_tmp == inw)
        input_tmp = input_int8;
    else
    {
        input_tmp = (int8_t*)sys_malloc((size_t)inh_tmp * inw_tmp * inch * sizeof(int8_t));
#pragma omp parallel for num_threads(num_thread)
        for (int g = 0; g < inch; g++)
        {
            int8_t* pad_in = input_int8 + g * inh * inw;
            int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
            pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
        }
    }

#pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < outch; p++)
    {
        int32_t* outptr0 = output_int32 + p * out_hw;
        const int8_t* kernel0 = (const int8_t*)kernel + p * 9; /* 3x3 = 9 taps per channel */

        int8_t* img0 = input_tmp + p * inw_tmp * inh_tmp;
        int8_t* r0 = img0;               /* row i     */
        int8_t* r1 = img0 + inw_tmp;     /* row i + 1 */
        int8_t* r2 = img0 + inw_tmp * 2; /* row i + 2 */

        for (int i = 0; i < outh; i++)
        {
            for (int remain = outw; remain > 0; remain--)
            {
                int sum0 = 0;
                sum0 += (int)r0[0] * kernel0[0];
                sum0 += (int)r0[1] * kernel0[1];
                sum0 += (int)r0[2] * kernel0[2];
                sum0 += (int)r1[0] * kernel0[3];
                sum0 += (int)r1[1] * kernel0[4];
                sum0 += (int)r1[2] * kernel0[5];
                sum0 += (int)r2[0] * kernel0[6];
                sum0 += (int)r2[1] * kernel0[7];
                sum0 += (int)r2[2] * kernel0[8];

                *outptr0 += sum0;

                r0++;
                r1++;
                r2++;
                outptr0++;
            }

            /* outw == inw_tmp - 2 for a 3x3 s1 kernel, so skip the last two
             * columns to reach the start of the next row */
            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
        /* NOTE: the original trailing "kernel0 += 9;" here was dead code —
         * kernel0 is recomputed from p on every iteration — and was removed. */
    }

    conv_dw_int8_requant(output_int32, output_int8, bias_int32, outch, out_hw,
                         input_scale, kernel_scales, output_scale, param->activation, num_thread);

    sys_free(output_int32);
    if (input_tmp != input_int8)
        sys_free(input_tmp);

    return 0;
}

/* Depthwise 3x3 stride-2 int8 convolution; see convdw3x3s1_int8_sse for the
 * overall structure. Returns 0 on success. */
static int convdw3x3s2_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor,
                                struct tensor* bias_tensor, struct tensor* output_tensor,
                                struct conv_param* param, int num_thread)
{
    int inch = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];

    int outch = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_size = output_tensor->elem_num;

    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;

    int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
    memset(output_int32, 0, out_size * sizeof(int32_t));
    int8_t* output_int8 = (int8_t*)output_tensor->data;
    int8_t* input_int8 = (int8_t*)input_tensor->data;
    int32_t* bias_int32 = NULL;
    if (bias_tensor)
        bias_int32 = (int32_t*)bias_tensor->data;

    /* quantization scales */
    float input_scale = input_tensor->scale;
    float* kernel_scales = weight_tensor->scale_list;
    float output_scale = output_tensor->scale;

    const signed char* kernel = (const signed char*)weight_tensor->data;

    /* zero-pad the input planes when padding is requested */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;

    int8_t* input_tmp = NULL;
    if (inh_tmp == inh && inw_tmp == inw)
        input_tmp = input_int8;
    else
    {
        input_tmp = (int8_t*)sys_malloc((size_t)inh_tmp * inw_tmp * inch * sizeof(int8_t));
#pragma omp parallel for num_threads(num_thread)
        for (int g = 0; g < inch; g++)
        {
            int8_t* pad_in = input_int8 + g * inh * inw;
            int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
            pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
        }
    }

    /* after consuming 2*outw columns of a row, jump over the remainder of the
     * current row plus one whole row (vertical stride 2) */
    int tailstep = inw_tmp - 2 * outw + inw_tmp;

#pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < outch; p++)
    {
        int32_t* outptr0 = output_int32 + p * out_hw;
        const int8_t* kernel0 = (const int8_t*)kernel + p * 9; /* 3x3 = 9 taps per channel */

        int8_t* img0 = input_tmp + p * inw_tmp * inh_tmp;
        int8_t* r0 = img0;               /* row 2i     */
        int8_t* r1 = img0 + inw_tmp;     /* row 2i + 1 */
        int8_t* r2 = img0 + inw_tmp * 2; /* row 2i + 2 */

        for (int i = 0; i < outh; i++)
        {
            for (int remain = outw; remain > 0; remain--)
            {
                int sum0 = 0;
                sum0 += (int)r0[0] * kernel0[0];
                sum0 += (int)r0[1] * kernel0[1];
                sum0 += (int)r0[2] * kernel0[2];
                sum0 += (int)r1[0] * kernel0[3];
                sum0 += (int)r1[1] * kernel0[4];
                sum0 += (int)r1[2] * kernel0[5];
                sum0 += (int)r2[0] * kernel0[6];
                sum0 += (int)r2[1] * kernel0[7];
                sum0 += (int)r2[2] * kernel0[8];

                *outptr0 += sum0;

                r0 += 2; /* horizontal stride 2 */
                r1 += 2;
                r2 += 2;
                outptr0++;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
        /* NOTE: the original trailing "kernel0 += 9;" here was dead code —
         * kernel0 is recomputed from p on every iteration — and was removed. */
    }

    conv_dw_int8_requant(output_int32, output_int8, bias_int32, outch, out_hw,
                         input_scale, kernel_scales, output_scale, param->activation, num_thread);

    sys_free(output_int32);
    if (input_tmp != input_int8)
        sys_free(input_tmp);

    return 0;
}

/* Dispatch the int8 depthwise run to the stride-specific kernel.
 * Returns 0 on success, -1 on an unsupported stride. */
static int conv_dw_run_int8(struct tensor* input_tensor, struct tensor* weight_tensor,
                            struct tensor* bias_tensor, struct tensor* output_tensor,
                            struct conv_param* param, int num_thread)
{
    int ret = -1;

    switch (param->stride_h)
    {
    case 1:
        ret = convdw3x3s1_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor, param, num_thread);
        break;
    case 2:
        ret = convdw3x3s2_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor, param, num_thread);
        break;
    default:
        TLOG_ERR("Direct Convolution Int8 not support the stride %d\n", param->stride_h);
    }

    return ret;
}

/* Node-ops entry point: fetch the node's tensors and run the fp32 or int8
 * depthwise convolution depending on the graph's execution mode. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* weight_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    struct tensor* bias_tensor = NULL;
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    int num_thread = exec_graph->num_thread;
    int cpu_affinity = exec_graph->cpu_affinity;

    /* set the input data and shape again, in case of reshape or dynamic shape */
    if (ir_node->input_num > 2)
        bias_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[2]);

    struct conv_param* conv_param = (struct conv_param*)ir_node->op.param_mem;
    struct conv_priv_info* conv_priv_info = (struct conv_priv_info*)exec_node->ops_priv;

    int ret = -1;
    if (exec_graph->mode == TENGINE_MODE_FP32)
        ret = conv_dw_run(input_tensor, weight_tensor, bias_tensor, output_tensor, conv_priv_info, conv_param, num_thread, cpu_affinity);
    else if (exec_graph->mode == TENGINE_MODE_INT8)
        ret = conv_dw_run_int8(input_tensor, weight_tensor, bias_tensor, output_tensor, conv_param, num_thread);
    else
    {
        TLOG_ERR("hcl conv run failed\n");
        return -1;
    }

    return ret;
}

/* No per-node state to set up. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* No per-node state to tear down. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Score this implementation for a conv node: claim OPS_SCORE_BEST only for a
 * true depthwise 3x3 (group > 1, one in/out channel per group), symmetric
 * padding, no dilation, stride 1 or 2, batch 1, fp32 or int8 data. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    struct conv_param* param = (struct conv_param*)exec_node->op.param_mem;
    struct node* ir_node = exec_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int group = param->group;
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int pad_h0 = param->pad_h0;
    int pad_w0 = param->pad_w0;
    int pad_h1 = param->pad_h1;
    int pad_w1 = param->pad_w1;

    int in_c = input_tensor->dims[1] / group;
    int out_c = output_tensor->dims[1] / group;

    /* todo support uint8 */
    if (!(input_tensor->data_type == TENGINE_DT_FP32 || input_tensor->data_type == TENGINE_DT_INT8))
        return 0;

    if (kernel_h != kernel_w || input_tensor->dims[0] > 1)
        return 0;

    if (param->group > 1 && in_c == 1 && out_c == 1 && pad_h0 == pad_h1 && pad_w0 == pad_w1
        && dilation_h == 1 && dilation_w == 1 && kernel_h == 3 && kernel_w == 3
        && ((stride_h == 1 && stride_w == 1) || (stride_h == 2 && stride_w == 2)))
        return OPS_SCORE_BEST;
    else
        return 0;
}

static struct node_ops hcl_node_ops = {.prerun = NULL,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

int register_conv_dw_hcl_x86_op()
{
    return register_builtin_node_ops(OP_CONV, &hcl_node_ops);
}

int unregister_conv_dw_hcl_x86_op()
{
    unregister_builtin_node_ops(OP_CONV, &hcl_node_ops);
    return 0;
}
query.c
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>
#include <netdb.h>
#include <arpa/inet.h>
#include <strings.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <time.h>
#include <stdarg.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <inttypes.h>
#include <netinet/tcp.h>

/* Server to query; servername may be overridden at runtime via the
 * SERVER_NAME environment variable (see main). */
const char *servername = "localhost";
const uint16_t serverport = 5006;

#define ERR "ERROR: "
#define WARN "WARNING: "
#define DBG "DEBUG: "
#define INFO "INFO: "

/* printf-like logger that prefixes each message with a timestamp.
 * The critical section serializes output across OpenMP threads so
 * concurrent messages do not interleave.
 * Returns the number of characters printed. */
int message(const char *format, ...)
{
	va_list ap;
	time_t now = time(NULL);
	char buf[26]; /* ctime_r requires a buffer of at least 26 bytes */
	int n;

	ctime_r(&now, buf);
	buf[strlen(buf) - 1] = 0; /* strip the trailing '\n' ctime_r appends */

#pragma omp critical
	{
		n = printf("[%s] ", buf);
		va_start(ap, format);
		n += vprintf(format, ap);
		va_end(ap);
		fflush(stdout);
	}

	return n;
}

/* Fill *name with the IPv4 address of hostname:port.
 * Returns 0 on success, -1 if the hostname cannot be resolved.
 * NOTE(review): gethostbyname is obsolescent; getaddrinfo would be the
 * modern replacement, but the wire behavior here is unchanged. */
int init_sockaddr(struct sockaddr_in *name, const char *hostname, uint16_t port)
{
	struct hostent *hostinfo;

	assert(name != NULL);

	bzero(name, sizeof(struct sockaddr_in));

	name->sin_family = AF_INET;
	name->sin_port = htons(port);

	hostinfo = gethostbyname(hostname);
	if (hostinfo == NULL) {
		return -1;
	}

	name->sin_addr = *(struct in_addr *)hostinfo->h_addr_list[0];

	return 0;
}

/* Write exactly count bytes, retrying on short writes.
 * Returns count on success, -1 on error (errno set by write). */
ssize_t write_(int fd, const char *buf, size_t count)
{
	ssize_t written = 0;

	while ((size_t)written < count) {
		ssize_t t = write(fd, buf + written, count - written);

		if (t < 0) {
			/* errno is set appropriately */
			return -1;
		}
		if (t == 0) {
			/* nothing was written but the loop guarantees more is
			 * needed: treat as failure */
			return -1;
		}
		written += t;
	}

	return written;
}

/* Read exactly count bytes, retrying on short reads.
 * Returns count on success, -1 on error or premature end of file.
 * (I know "readen" isn't a word in English, but "read" was taken.) */
ssize_t read_(int fd, char *buf, size_t count)
{
	ssize_t readen = 0;

	while ((size_t)readen < count) {
		ssize_t t = read(fd, buf + readen, count - readen);

		if (t < 0) {
			/* errno is set appropriately */
			return -1;
		}
		if (t == 0) {
			/* end of file before count bytes arrived */
			return -1;
		}
		readen += t;
	}

	return readen;
}

/* Read a big-endian 64-bit integer sent as two network-order 32-bit halves
 * (high word first). Returns 0 on success, -1 on read failure. */
int read_uint64(int fd, uint64_t *nptr)
{
	uint32_t nh, nl;
	uint64_t n;

	if (read_(fd, (void *)&nh, 4) < 0) {
		return -1;
	}
	if (read_(fd, (void *)&nl, 4) < 0) {
		return -1;
	}

	nh = ntohl(nh);
	nl = ntohl(nl);

	n = ((uint64_t)nh << 32) + nl;

	assert(nptr != NULL);
	*nptr = n;

	return 0;
}

/* Send the "LOI" command (4 bytes including the terminating NUL). */
int query_lowest_incomplete(int fd)
{
	if (write_(fd, "LOI", 4) < 0) {
		return -1;
	}

	return 0;
}

/* Send the "HIR" command (4 bytes including the terminating NUL). */
int query_highest_requested(int fd)
{
	if (write_(fd, "HIR", 4) < 0) {
		return -1;
	}

	return 0;
}

/* Open a TCP connection to servername:serverport.
 * Returns the connected socket fd, or -1 on failure (errno set). */
int open_socket_to_server()
{
	int fd;
	struct sockaddr_in server_addr;

	fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0) {
		/* errno is set appropriately */
		return -1;
	}

	if (init_sockaddr(&server_addr, servername, serverport) < 0) {
		close(fd);
		return -1;
	}

	if (connect(fd, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) {
		/* errno is set appropriately */
		close(fd);
		return -1;
	}

	return fd;
}

/* Connect, issue the LOI query, and read back (n, task_size).
 * Returns 0 on success, -1 on any failure; the socket is always closed. */
int open_socket_and_query_lowest_incomplete(uint64_t *n, uint64_t *task_size)
{
	int fd;

	fd = open_socket_to_server();
	if (fd < 0) {
		return -1;
	}

	if (query_lowest_incomplete(fd) < 0) {
		close(fd);
		return -1;
	}

	if (read_uint64(fd, n) < 0) {
		close(fd);
		return -1;
	}
	if (read_uint64(fd, task_size) < 0) {
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}

/* Connect, issue the HIR query, and read back (n, task_size).
 * Returns 0 on success, -1 on any failure; the socket is always closed. */
int open_socket_and_query_highest_requested(uint64_t *n, uint64_t *task_size)
{
	int fd;

	fd = open_socket_to_server();
	if (fd < 0) {
		return -1;
	}

	if (query_highest_requested(fd) < 0) {
		close(fd);
		return -1;
	}

	if (read_uint64(fd, n) < 0) {
		close(fd);
		return -1;
	}
	if (read_uint64(fd, task_size) < 0) {
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}

/* Usage: query [-l|-h]
 *   -l  print the lowest incomplete assignment and its task size
 *   -h  print the highest requested assignment and its task size */
int main(int argc, char *argv[])
{
	int opt;
	int query = 0;
	/* Declared here rather than inside the switch body below: declarations
	 * placed after a switch's opening brace are bypassed by the case jumps,
	 * which is legal for uninitialized objects but fragile and warned about
	 * by compilers. */
	uint64_t n;
	uint64_t task_size;

	if (getenv("SERVER_NAME")) {
		servername = getenv("SERVER_NAME");
	}

	message(INFO "server to be used: %s\n", servername);

	while ((opt = getopt(argc, argv, "lh")) != -1) {
		switch (opt) {
			case 'l':
			case 'h':
				query = opt;
				break;
			default:
				message(ERR "Usage: %s [-l|-h]\n", argv[0]);
				return EXIT_FAILURE;
		}
	}

	switch (query) {
		case 'l':
			if (open_socket_and_query_lowest_incomplete(&n, &task_size) < 0) {
				message(ERR "open_socket_and_query_lowest_incomplete failed\n");
			} else {
				printf("%" PRIu64 " %" PRIu64 "\n", n, task_size);
			}
			break;
		case 'h':
			if (open_socket_and_query_highest_requested(&n, &task_size) < 0) {
				message(ERR "open_socket_and_query_highest_requested failed\n");
			} else {
				printf("%" PRIu64 " %" PRIu64 "\n", n, task_size);
			}
			break;
	}

	return EXIT_SUCCESS;
}
DRB049-fprintf-orig-no.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. 
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
#include <stdio.h>
#include <stdlib.h>
/* Example use of fprintf: fill an array in parallel, write it to a temporary
 * file sequentially, then remove the file. */
int main(int argc, char * argv[])
{
	int i;
	int ret;
	FILE * pfile;
	int len = 1000;
	int A[1000];
	int _ret_val_0;
	/* Each iteration writes a distinct A[i]; the loop is safely parallel. */
	#pragma cetus private(i)
	#pragma loop name main#0
	#pragma cetus parallel
	#pragma omp parallel for private(i)
	for (i=0; i<len; i ++ )
	{
		A[i]=i;
	}
	pfile=fopen("mytempfile.txt", "a+");
	if (pfile==((void * )0))
	{
		fprintf(stderr, "Error in fopen()\n");
		/* BUGFIX: the original fell through and passed the NULL stream to
		 * fprintf below, which is undefined behavior; bail out instead. */
		return 1;
	}
	#pragma cetus private(i)
	#pragma loop name main#1
	for (i=0; i<len; ++ i)
	{
		fprintf(pfile, "%d\n", A[i]);
	}
	fclose(pfile);
	ret=remove("mytempfile.txt");
	if (ret!=0)
	{
		fprintf(stderr, "Error: unable to delete mytempfile.txt\n");
	}
	_ret_val_0=0;
	return _ret_val_0;
}
GB_unop__minv_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__minv_fp32_fp32)
// op(A') function:  GB (_unop_tran__minv_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = (1.0F)/aij
// (MINV is the multiplicative inverse: the expression below is a plain
// float division, so aij == 0 presumably yields an IEEE-754 infinity.)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = (1.0F)/x ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = (1.0F)/z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__minv_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = (1.0F)/z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // (entries with Ab [p] == 0 are skipped, leaving Cx [p] untouched)
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = (1.0F)/z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__minv_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template expands using the GB_* macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ncwa.c
/* $Header$ */ /* ncwa -- netCDF weighted averager */ /* Purpose: Compute averages of specified hyperslabs of specfied variables in a single input netCDF file and output them to a single file. */ /* Copyright (C) 1995--present Charlie Zender This file is part of NCO, the netCDF Operators. NCO is free software. You may redistribute and/or modify NCO under the terms of the 3-Clause BSD License. You are permitted to link NCO with the HDF, netCDF, OPeNDAP, and UDUnits libraries and to distribute the resulting executables under the terms of the BSD, but in addition obeying the extra stipulations of the HDF, netCDF, OPeNDAP, and UDUnits licenses. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 3-Clause BSD License for more details. The original author of this software, Charlie Zender, seeks to improve it with your suggestions, contributions, bug-reports, and patches. Please contact the NCO project at http://nco.sf.net or write to Charlie Zender Department of Earth System Science University of California, Irvine Irvine, CA 92697-3100 */ /* fxm: 19981202 deactivated -n and -W switches and code left in place to rethink normalization switches */ /* Usage: ncwa -O -a lon ~/nco/data/in.nc ~/foo.nc ncwa -O -R -p /ZENDER/tmp -l ~/nco/data in.nc ~/foo.nc ncwa -O -C -a lat,lon,time -w gw -v PS -p /fs/cgd/csm/input/atm SEP1.T42.0596.nc ~/foo.nc;ncks -H foo.nc scp ~/nco/src/nco/ncwa.c esmf.ess.uci.edu:nco/src/nco */ #ifdef HAVE_CONFIG_H # include <config.h> /* Autotools tokens */ #endif /* !HAVE_CONFIG_H */ /* Standard C headers */ #include <math.h> /* sin cos cos sin 3.14159 */ #include <stdio.h> /* stderr, FILE, NULL, etc. 
*/ #include <stdlib.h> /* atof, atoi, malloc, getopt */ #include <string.h> /* strcmp() */ #include <sys/stat.h> /* stat() */ #include <time.h> /* machine time */ #ifndef _MSC_VER # if !defined(HAVE_BISON_FLEX) # define HAVE_BISON_FLEX /* 21070906 pvn add this definition to automake, currently in CMake */ # endif /* HAVE_BISON_FLEX */ # include <unistd.h> /* POSIX stuff */ #endif /* _MSC_VER */ #ifndef HAVE_GETOPT_LONG # include "nco_getopt.h" #else /* HAVE_GETOPT_LONG */ # ifdef HAVE_GETOPT_H # include <getopt.h> # endif /* !HAVE_GETOPT_H */ #endif /* HAVE_GETOPT_LONG */ #ifdef I18N # include <langinfo.h> /* nl_langinfo() */ # include <libintl.h> /* Internationalization i18n */ # include <locale.h> /* Locale setlocale() */ # define _(sng) gettext (sng) # define gettext_noop(sng) (sng) # define N_(sng) gettext_noop(sng) #endif /* I18N */ /* Supply stub gettext() function in case i18n failed */ #ifndef _LIBINTL_H # define gettext(foo) foo #endif /* _LIBINTL_H */ /* 3rd party vendors */ #include <netcdf.h> /* netCDF definitions and C library */ #ifdef ENABLE_MPI # include <mpi.h> /* MPI definitions */ # include <netcdf_par.h> /* Parallel netCDF definitions */ # include "nco_mpi.h" /* MPI utilities */ #endif /* !ENABLE_MPI */ /* #define MAIN_PROGRAM_FILE MUST precede #include libnco.h */ #define MAIN_PROGRAM_FILE #ifdef HAVE_BISON_FLEX # include "ncap_utl.h" /* netCDF arithmetic processor-specific definitions (symbol table, ...) 
*/ #endif /* !HAVE_BISON_FLEX */ #include "libnco.h" /* netCDF Operator (NCO) library */ #ifdef HAVE_BISON_FLEX /* Global variables (keep consistent with global variables declared in ncap.c) */ size_t ncap_ncl_dpt_crr=0UL; /* [nbr] Depth of current #include file (incremented in ncap.l) */ size_t *ncap_ln_nbr_crr; /* [cnt] Line number (incremented in ncap.l) */ char **ncap_fl_spt_glb; /* [fl] Script file */ #endif /* !HAVE_BISON_FLEX */ int main(int argc,char **argv) { char **dmn_avg_lst_in=NULL_CEWI; /* Option a */ char **fl_lst_abb=NULL; /* Option n */ char **fl_lst_in=NULL_CEWI; char **gaa_arg=NULL; /* [sng] Global attribute arguments */ char **var_lst_in=NULL_CEWI; char **grp_lst_in=NULL_CEWI; char *aux_arg[NC_MAX_DIMS]; char *cmd_ln; char *cnk_arg[NC_MAX_DIMS]; char *cnk_map_sng=NULL_CEWI; /* [sng] Chunking map */ char *cnk_plc_sng=NULL_CEWI; /* [sng] Chunking policy */ char *fl_in=NULL; char *fl_out=NULL; /* Option o */ char *fl_out_tmp=NULL_CEWI; char *fl_pth=NULL; /* Option p */ char *fl_pth_lcl=NULL; /* Option l */ char *lmt_arg[NC_MAX_DIMS]; char *msk_nm=NULL; char *msk_cnd_sng=NULL; /* Mask string to be "parsed" and values given to msk_nm, msk_val, op_typ_rlt */ char *nco_op_typ_sng; /* Operation type */ char *opt_crr=NULL; /* [sng] String representation of current long-option name */ char *optarg_lcl=NULL; /* [sng] Local copy of system optarg */ char *ppc_arg[NC_MAX_VARS]; /* [sng] PPC arguments */ char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */ char *wgt_nm=NULL; char trv_pth[]="/"; /* [sng] Root path of traversal tree */ const char * const CVS_Id="$Id$"; const char * const CVS_Revision="$Revision$"; const char * const opt_sht_lst="34567Aa:B:bCcD:d:Fg:G:hIL:l:M:m:nNOo:p:rRT:t:v:Ww:xy:-:"; cnk_sct cnk; /* [sct] Chunking structure */ #if defined(__cplusplus) || defined(PGI_CC) ddra_info_sct ddra_info; ddra_info.flg_ddra=False; #else /* !__cplusplus */ ddra_info_sct 
ddra_info={.MRV_flg=False,.flg_ddra=False,.lmn_nbr=0LL,.lmn_nbr_avg=0LL,.lmn_nbr_wgt=0LL,.nco_op_typ=nco_op_nil,.rnk_avg=0,.rnk_var=0,.rnk_wgt=0,.tmr_flg=nco_tmr_srt,.var_idx=0,.wgt_brd_flg=False,.wrd_sz=0}; #endif /* !__cplusplus */ dmn_sct **dim=NULL_CEWI; dmn_sct **dmn_out=NULL_CEWI; dmn_sct **dmn_avg=NULL_CEWI; double msk_val=1.0; /* Option M */ extern char *optarg; extern int optind; /* Using naked stdin/stdout/stderr in parallel region generates warning Copy appropriate filehandle to variable scoped shared in parallel clause */ FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ int *in_id_arr; int aux_nbr=0; /* [nbr] Number of auxiliary coordinate hyperslabs specified */ int abb_arg_nbr=0; int cnk_map=nco_cnk_map_nil; /* [enm] Chunking map */ int cnk_nbr=0; /* [nbr] Number of chunk sizes */ int cnk_plc=nco_cnk_plc_nil; /* [enm] Chunking policy */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int dmn_avg_nbr=0; int fl_idx=int_CEWI; int fl_nbr=0; int fl_in_fmt; /* [enm] Input file format */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int fll_md_old; /* [enm] Old fill mode */ int gaa_nbr=0; /* [nbr] Number of global attributes to add */ int grp_lst_in_nbr=0; /* [nbr] Number of groups explicitly specified by user */ int idx=int_CEWI; int in_id; int lmt_nbr=0; /* Option d. 
NB: lmt_nbr gets incremented */ int log_lvl=0; /* [enm] netCDF library debugging verbosity [0..5] */ int md_open; /* [enm] Mode flag for nc_open() call */ int nbr_dmn_fl; int nbr_dmn_out=0; int nbr_dmn_xtr; int nbr_var_fix; /* nbr_var_fix gets incremented */ int nbr_var_fl; int nbr_var_prc; /* nbr_var_prc gets incremented */ int xtr_nbr=0; /* xtr_nbr won't otherwise be set for -c with no -v */ int nco_op_typ=nco_op_avg; /* Operation type */ int op_typ_rlt=0; /* Option o */ int opt; int out_id; int ppc_nbr=0; /* [nbr] Number of PPC arguments */ int rcd=NC_NOERR; /* [rcd] Return code */ int thr_idx; /* [idx] Index of current thread */ int thr_nbr=int_CEWI; /* [nbr] Thread number Option t */ int var_lst_in_nbr=0; md5_sct *md5=NULL; /* [sct] MD5 configuration */ cnv_sct *cnv; /* [sct] Convention structure */ nco_bool DO_CONFORM_MSK=False; /* Did nco_var_cnf_dmn() find truly conforming mask? */ nco_bool DO_CONFORM_WGT=False; /* Did nco_var_cnf_dmn() find truly conforming weight? */ nco_bool EXCLUDE_INPUT_LIST=False; /* Option c */ nco_bool EXTRACT_ALL_COORDINATES=False; /* Option c */ nco_bool EXTRACT_ASSOCIATED_COORDINATES=True; /* Option C */ nco_bool EXTRACT_CLL_MSR=True; /* [flg] Extract cell_measures variables */ nco_bool EXTRACT_FRM_TRM=True; /* [flg] Extract formula_terms variables */ nco_bool FL_RTR_RMT_LCN; nco_bool FL_LST_IN_FROM_STDIN=False; /* [flg] fl_lst_in comes from stdin */ nco_bool FORCE_APPEND=False; /* Option A */ nco_bool FORCE_OVERWRITE=False; /* Option O */ nco_bool FORTRAN_IDX_CNV=False; /* Option F */ nco_bool GRP_VAR_UNN=False; /* [flg] Select union of specified groups and variables */ nco_bool HISTORY_APPEND=True; /* Option h */ nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool MSA_USR_RDR=False; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ nco_bool MULTIPLY_BY_TALLY=False; /* Not currently implemented */ nco_bool MUST_CONFORM=False; /* [flg] Must nco_var_cnf_dmn() find truly conforming 
variables? */ nco_bool NORMALIZE_BY_TALLY=True; /* Not currently implemented */ nco_bool NORMALIZE_BY_WEIGHT=True; /* Not currently implemented */ nco_bool NRM_BY_DNM=True; /* Option N Normalize by denominator */ nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ nco_bool WGT_MSK_CRD_VAR=True; /* [flg] Weight and/or mask coordinate variables */ nco_bool WRT_TMP_FL=True; /* [flg] Write output to temporary file */ nco_bool flg_cll_mth=True; /* [flg] Add/modify cell_methods attributes */ nco_bool flg_mmr_cln=True; /* [flg] Clean memory prior to exit */ nco_bool flg_dmn_prc_usr_spc=False; /* [flg] Processed dimensions specified on command line */ nco_bool flg_ddra=False; /* [flg] DDRA diagnostics */ nco_bool flg_rdd=False; /* [flg] Retain degenerate dimensions */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ size_t cnk_csh_byt=NCO_CNK_CSH_BYT_DFL; /* [B] Chunk cache size */ size_t cnk_min_byt=NCO_CNK_SZ_MIN_BYT_DFL; /* [B] Minimize size of variable to chunk */ size_t cnk_sz_byt=0UL; /* [B] Chunk size in bytes */ size_t cnk_sz_scl=0UL; /* [nbr] Chunk size scalar */ size_t hdr_pad=0UL; /* [B] Pad at end of header section */ var_sct **var; var_sct **var_fix; var_sct **var_fix_out; var_sct **var_out; var_sct **var_prc; var_sct **var_prc_out; var_sct *wgt_avg=NULL; trv_tbl_sct *trv_tbl=NULL; /* [lst] Traversal table */ nco_dmn_dne_t *flg_dne=NULL; /* [lst] Flag to check if input dimension -d "does not exist" */ gpe_sct *gpe=NULL; /* [sng] Group Path Editing (GPE) structure */ #ifdef HAVE_BISON_FLEX prs_sct prs_arg; /* I/O [sct] Global information required in ncwa parser */ #endif /* !HAVE_BISON_FLEX */ #ifdef ENABLE_MPI /* Declare all MPI-specific variables here */ MPI_Comm mpi_cmm=MPI_COMM_WORLD; /* [prc] Communicator */ int prc_rnk; /* [idx] Process rank */ int prc_nbr=0; /* [nbr] Number of MPI processes */ #endif /* 
!ENABLE_MPI */ static struct option opt_lng[]={ /* Structure ordered by short option key if possible */ /* Long options with no argument, no short option counterpart */ {"cll_msr",no_argument,0,0}, /* [flg] Extract cell_measures variables */ {"cell_measures",no_argument,0,0}, /* [flg] Extract cell_measures variables */ {"no_cll_msr",no_argument,0,0}, /* [flg] Do not extract cell_measures variables */ {"no_cell_measures",no_argument,0,0}, /* [flg] Do not extract cell_measures variables */ {"frm_trm",no_argument,0,0}, /* [flg] Extract formula_terms variables */ {"formula_terms",no_argument,0,0}, /* [flg] Extract formula_terms variables */ {"no_frm_trm",no_argument,0,0}, /* [flg] Do not extract formula_terms variables */ {"no_formula_terms",no_argument,0,0}, /* [flg] Do not extract formula_terms variables */ {"cll_mth",no_argument,0,0}, /* [flg] Add/modify cell_methods attributes */ {"cell_methods",no_argument,0,0}, /* [flg] Add/modify cell_methods attributes */ {"no_cll_mth",no_argument,0,0}, /* [flg] Do not add/modify cell_methods attributes */ {"no_cell_methods",no_argument,0,0}, /* [flg] Do not add/modify cell_methods attributes */ {"clean",no_argument,0,0}, /* [flg] Clean memory prior to exit */ {"mmr_cln",no_argument,0,0}, /* [flg] Clean memory prior to exit */ {"drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"dirty",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"mmr_drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"ddra",no_argument,0,0}, /* [flg] DDRA diagnostics */ {"mdl_cmp",no_argument,0,0}, /* [flg] DDRA diagnostics */ {"dbl",no_argument,0,0}, /* [flg] Arithmetic convention: promote float to double */ {"flt",no_argument,0,0}, /* [flg] Arithmetic convention: keep single-precision */ {"rth_dbl",no_argument,0,0}, /* [flg] Arithmetic convention: promote float to double */ {"rth_flt",no_argument,0,0}, /* [flg] Arithmetic convention: keep single-precision */ {"hdf4",no_argument,0,0}, /* [flg] Treat file as HDF4 */ 
{"hdf_upk",no_argument,0,0}, /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */ {"hdf_unpack",no_argument,0,0}, /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */ {"help",no_argument,0,0}, {"hlp",no_argument,0,0}, {"hpss_try",no_argument,0,0}, /* [flg] Search HPSS for unfound files */ {"ram_all",no_argument,0,0}, /* [flg] Open (netCDF3) and create file(s) in RAM */ {"create_ram",no_argument,0,0}, /* [flg] Create file in RAM */ {"open_ram",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) in RAM */ {"diskless_all",no_argument,0,0}, /* [flg] Open (netCDF3) and create file(s) in RAM */ {"wrt_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */ {"write_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */ {"no_tmp_fl",no_argument,0,0}, /* [flg] Do not write output to temporary file */ {"version",no_argument,0,0}, {"vrs",no_argument,0,0}, /* Long options with argument, no short option counterpart */ {"bfr_sz_hnt",required_argument,0,0}, /* [B] Buffer size hint */ {"buffer_size_hint",required_argument,0,0}, /* [B] Buffer size hint */ {"cnk_byt",required_argument,0,0}, /* [B] Chunk size in bytes */ {"chunk_byte",required_argument,0,0}, /* [B] Chunk size in bytes */ {"cnk_csh",required_argument,0,0}, /* [B] Chunk cache size in bytes */ {"chunk_cache",required_argument,0,0}, /* [B] Chunk cache size in bytes */ {"cnk_dmn",required_argument,0,0}, /* [nbr] Chunk size */ {"chunk_dimension",required_argument,0,0}, /* [nbr] Chunk size */ {"cnk_map",required_argument,0,0}, /* [nbr] Chunking map */ {"chunk_map",required_argument,0,0}, /* [nbr] Chunking map */ {"cnk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */ {"chunk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */ {"cnk_plc",required_argument,0,0}, /* [nbr] Chunking policy */ {"chunk_policy",required_argument,0,0}, /* [nbr] Chunking policy */ {"cnk_scl",required_argument,0,0}, /* [nbr] Chunk size 
scalar */ {"chunk_scalar",required_argument,0,0}, /* [nbr] Chunk size scalar */ {"fl_fmt",required_argument,0,0}, {"file_format",required_argument,0,0}, {"gaa",required_argument,0,0}, /* [sng] Global attribute add */ {"glb_att_add",required_argument,0,0}, /* [sng] Global attribute add */ {"hdr_pad",required_argument,0,0}, {"header_pad",required_argument,0,0}, {"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */ {"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */ {"ppc",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */ {"precision_preserving_compression",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */ {"quantize",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */ /* Long options with short counterparts */ {"3",no_argument,0,'3'}, {"4",no_argument,0,'4'}, {"netcdf4",no_argument,0,'4'}, {"5",no_argument,0,'5'}, {"64bit_data",no_argument,0,'5'}, {"cdf5",no_argument,0,'5'}, {"pnetcdf",no_argument,0,'5'}, {"64bit_offset",no_argument,0,'6'}, {"7",no_argument,0,'7'}, {"append",no_argument,0,'A'}, {"average",required_argument,0,'a'}, {"avg",required_argument,0,'a'}, {"mask_condition",required_argument,0,'B'}, {"msk_cnd_sng",required_argument,0,'B'}, {"retain-degenerate-dimensions",no_argument,0,'b'}, /* [flg] Retain degenerate dimensions */ {"rdd",no_argument,0,'b'}, /* [flg] Retain degenerate dimensions */ {"xtr_ass_var",no_argument,0,'c'}, {"xcl_ass_var",no_argument,0,'C'}, {"no_coords",no_argument,0,'C'}, {"no_crd",no_argument,0,'C'}, {"coords",no_argument,0,'c'}, {"crd",no_argument,0,'c'}, {"debug",required_argument,0,'D'}, {"nco_dbg_lvl",required_argument,0,'D'}, {"dimension",required_argument,0,'d'}, {"dmn",required_argument,0,'d'}, {"fortran",no_argument,0,'F'}, 
{"ftn",no_argument,0,'F'}, {"history",no_argument,0,'h'}, {"hst",no_argument,0,'h'}, {"wgt_msk_crd_var",no_argument,0,'I'}, {"dfl_lvl",required_argument,0,'L'}, /* [enm] Deflate level */ {"deflate",required_argument,0,'L'}, /* [enm] Deflate level */ {"local",required_argument,0,'l'}, {"lcl",required_argument,0,'l'}, {"mask-variable",required_argument,0,'m'}, {"mask_variable",required_argument,0,'m'}, {"mask",required_argument,0,'m'}, {"msk_var",required_argument,0,'m'}, {"msk_nm",required_argument,0,'m'}, {"mask-value",required_argument,0,'M'}, {"mask_value",required_argument,0,'M'}, {"msk_val",required_argument,0,'M'}, {"nintap",required_argument,0,'n'}, {"nmr",no_argument,0,'N'}, {"numerator",no_argument,0,'N'}, {"overwrite",no_argument,0,'O'}, {"ovr",no_argument,0,'O'}, {"output",required_argument,0,'o'}, {"fl_out",required_argument,0,'o'}, {"path",required_argument,0,'p'}, {"retain",no_argument,0,'R'}, {"rtn",no_argument,0,'R'}, {"revision",no_argument,0,'r'}, {"mask_comparator",required_argument,0,'T'}, {"msk_cmp_typ",required_argument,0,'T'}, {"op_rlt",required_argument,0,'T'}, {"thr_nbr",required_argument,0,'t'}, {"threads",required_argument,0,'t'}, {"omp_num_threads",required_argument,0,'t'}, {"variable",required_argument,0,'v'}, {"normalize-by-tally",no_argument,0,'W',}, {"exclude",no_argument,0,'x'}, {"xcl",no_argument,0,'x'}, {"weight",required_argument,0,'w'}, {"wgt",required_argument,0,'w'}, {"wgt_var",required_argument,0,'w'}, {"operation",required_argument,0,'y'}, {"op_typ",required_argument,0,'y'}, {0,0,0,0} }; /* end opt_lng */ int opt_idx=0; /* Index of current long option into opt_lng array */ /* Start timer and save command line */ ddra_info.tmr_flg=nco_tmr_srt; rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); ddra_info.tmr_flg=nco_tmr_mtd; cmd_ln=nco_cmd_ln_sng(argc,argv); NORMALIZE_BY_TALLY=NORMALIZE_BY_TALLY+0; /* CEWI: Avert compiler warning that variable is set but never used */ NORMALIZE_BY_WEIGHT=NORMALIZE_BY_WEIGHT+0; /* CEWI: Avert 
compiler warning that variable is set but never used */ /* Get program name and set program enum (e.g., nco_prg_id=ncra) */ nco_prg_nm=nco_prg_prs(argv[0],&nco_prg_id); #ifdef ENABLE_MPI /* MPI Initialization */ if(False) (void)fprintf(stdout,gettext("%s: WARNING Compiled with MPI\n"),nco_prg_nm); MPI_Init(&argc,&argv); MPI_Comm_size(mpi_cmm,&prc_nbr); MPI_Comm_rank(mpi_cmm,&prc_rnk); #endif /* !ENABLE_MPI */ /* Parse command line arguments */ while(1){ /* getopt_long_only() allows one dash to prefix long options */ opt=getopt_long(argc,argv,opt_sht_lst,opt_lng,&opt_idx); /* NB: access to opt_crr is only valid when long_opt is detected */ if(opt == EOF) break; /* Parse positional arguments once getopt_long() returns EOF */ opt_crr=(char *)strdup(opt_lng[opt_idx].name); /* Process long options without short option counterparts */ if(opt == 0){ if(!strcmp(opt_crr,"bfr_sz_hnt") || !strcmp(opt_crr,"buffer_size_hint")){ bfr_sz_hnt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif bfr_sz */ if(!strcmp(opt_crr,"cnk_byt") || !strcmp(opt_crr,"chunk_byte")){ cnk_sz_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_byt */ if(!strcmp(opt_crr,"cnk_csh") || !strcmp(opt_crr,"chunk_cache")){ cnk_csh_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_csh_byt */ if(!strcmp(opt_crr,"cnk_min") || !strcmp(opt_crr,"chunk_min")){ cnk_min_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_min */ if(!strcmp(opt_crr,"cnk_dmn") || !strcmp(opt_crr,"chunk_dimension")){ /* Copy limit argument for later processing */ cnk_arg[cnk_nbr]=(char *)strdup(optarg); cnk_nbr++; } /* endif cnk_dmn */ if(!strcmp(opt_crr,"cnk_scl") || !strcmp(opt_crr,"chunk_scalar")){ 
cnk_sz_scl=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_map") || !strcmp(opt_crr,"chunk_map")){ /* Chunking map */ cnk_map_sng=(char *)strdup(optarg); cnk_map=nco_cnk_map_get(cnk_map_sng); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_plc") || !strcmp(opt_crr,"chunk_policy")){ /* Chunking policy */ cnk_plc_sng=(char *)strdup(optarg); cnk_plc=nco_cnk_plc_get(cnk_plc_sng); } /* endif cnk */ if(!strcmp(opt_crr,"cll_msr") || !strcmp(opt_crr,"cell_measures")) EXTRACT_CLL_MSR=True; /* [flg] Extract cell_measures variables */ if(!strcmp(opt_crr,"no_cll_msr") || !strcmp(opt_crr,"no_cell_measures")) EXTRACT_CLL_MSR=False; /* [flg] Do not extract cell_measures variables */ if(!strcmp(opt_crr,"frm_trm") || !strcmp(opt_crr,"formula_terms")) EXTRACT_FRM_TRM=True; /* [flg] Extract formula_terms variables */ if(!strcmp(opt_crr,"no_frm_trm") || !strcmp(opt_crr,"no_formula_terms")) EXTRACT_FRM_TRM=False; /* [flg] Do not extract formula_terms variables */ if(!strcmp(opt_crr,"cll_mth") || !strcmp(opt_crr,"cell_methods")) flg_cll_mth=True; /* [flg] Add/modify cell_methods attributes */ if(!strcmp(opt_crr,"no_cll_mth") || !strcmp(opt_crr,"no_cell_methods")) flg_cll_mth=False; /* [flg] Add/modify cell_methods attributes */ if(!strcmp(opt_crr,"mmr_cln") || !strcmp(opt_crr,"clean")) flg_mmr_cln=True; /* [flg] Clean memory prior to exit */ if(!strcmp(opt_crr,"drt") || !strcmp(opt_crr,"mmr_drt") || !strcmp(opt_crr,"dirty")) flg_mmr_cln=False; /* [flg] Clean memory prior to exit */ if(!strcmp(opt_crr,"ddra") || !strcmp(opt_crr,"mdl_cmp")) ddra_info.flg_ddra=flg_ddra=True; /* [flg] DDRA diagnostics */ if(!strcmp(opt_crr,"fl_fmt") || !strcmp(opt_crr,"file_format")) rcd=nco_create_mode_prs(optarg,&fl_out_fmt); if(!strcmp(opt_crr,"dbl") || !strcmp(opt_crr,"rth_dbl")) nco_rth_cnv=nco_rth_flt_dbl; /* [flg] Arithmetic convention: promote float to double */ if(!strcmp(opt_crr,"flt") || 
!strcmp(opt_crr,"rth_flt")) nco_rth_cnv=nco_rth_flt_flt; /* [flg] Arithmetic convention: keep single-precision */ if(!strcmp(opt_crr,"gaa") || !strcmp(opt_crr,"glb_att_add")){ gaa_arg=(char **)nco_realloc(gaa_arg,(gaa_nbr+1)*sizeof(char *)); gaa_arg[gaa_nbr++]=(char *)strdup(optarg); } /* endif gaa */ if(!strcmp(opt_crr,"hdf4")) nco_fmt_xtn=nco_fmt_xtn_hdf4; /* [enm] Treat file as HDF4 */ if(!strcmp(opt_crr,"hdf_upk") || !strcmp(opt_crr,"hdf_unpack")) nco_upk_cnv=nco_upk_HDF_MOD10; /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */ if(!strcmp(opt_crr,"hdr_pad") || !strcmp(opt_crr,"header_pad")){ hdr_pad=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif "hdr_pad" */ if(!strcmp(opt_crr,"help") || !strcmp(opt_crr,"hlp")){ (void)nco_usg_prn(); nco_exit(EXIT_SUCCESS); } /* endif "help" */ if(!strcmp(opt_crr,"hpss_try")) HPSS_TRY=True; /* [flg] Search HPSS for unfound files */ if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){ log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); nc_set_log_level(log_lvl); } /* !log_lvl */ if(!strcmp(opt_crr,"ppc") || !strcmp(opt_crr,"precision_preserving_compression") || !strcmp(opt_crr,"quantize")){ ppc_arg[ppc_nbr]=(char *)strdup(optarg); ppc_nbr++; } /* endif "ppc" */ if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"create_ram") || !strcmp(opt_crr,"diskless_all")) RAM_CREATE=True; /* [flg] Open (netCDF3) file(s) in RAM */ if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"open_ram") || !strcmp(opt_crr,"diskless_all")) RAM_OPEN=True; /* [flg] Create file in RAM */ if(!strcmp(opt_crr,"vrs") || !strcmp(opt_crr,"version")){ (void)nco_vrs_prn(CVS_Id,CVS_Revision); nco_exit(EXIT_SUCCESS); } /* endif "vrs" */ if(!strcmp(opt_crr,"wrt_tmp_fl") || !strcmp(opt_crr,"write_tmp_fl")) WRT_TMP_FL=True; if(!strcmp(opt_crr,"no_tmp_fl")) WRT_TMP_FL=False; } /* opt != 0 */ 
/* Process short options */ switch(opt){ case 0: /* Long options have already been processed, return */ break; case '3': /* Request netCDF3 output storage format */ fl_out_fmt=NC_FORMAT_CLASSIC; break; case '4': /* Request netCDF4 output storage format */ fl_out_fmt=NC_FORMAT_NETCDF4; break; case '5': /* Request netCDF3 64-bit offset+data storage (i.e., pnetCDF) format */ fl_out_fmt=NC_FORMAT_CDF5; break; case '6': /* Request netCDF3 64-bit offset output storage format */ fl_out_fmt=NC_FORMAT_64BIT_OFFSET; break; case '7': /* Request netCDF4-classic output storage format */ fl_out_fmt=NC_FORMAT_NETCDF4_CLASSIC; break; case 'A': /* Toggle FORCE_APPEND */ FORCE_APPEND=!FORCE_APPEND; break; case 'a': /* Dimensions over which to average hyperslab */ flg_dmn_prc_usr_spc=True; if(dmn_avg_lst_in){ (void)fprintf(fp_stdout,"%s: ERROR Option -a appears more than once\n",nco_prg_nm); (void)fprintf(fp_stdout,"%s: HINT Use -a dim1,dim2,... not -a dim1 -a dim2 ...\n",nco_prg_nm); (void)nco_usg_prn(); nco_exit(EXIT_FAILURE); } /* endif */ dmn_avg_lst_in=nco_lst_prs_2D(optarg,",",&dmn_avg_nbr); break; case 'B': /* Mask string to be parsed */ msk_cnd_sng=(char *)strdup(optarg); #ifndef HAVE_BISON_FLEX (void)fprintf(fp_stdout,"%s: ERROR -B and --mask_condition options unsupported because configuration could not find a parser (e.g., Bison) and lexer (e.g., Flex). HINT: Break condition into component -m -T -M switches, e.g., use -m ORO -T lt -M 1.0 instead of -B \"ORO < 1\"\n",nco_prg_nm); nco_exit(EXIT_FAILURE); #endif /* HAVE_BISON_FLEX */ break; case 'b': /* [flg] Retain degenerate dimensions */ flg_rdd=True; break; case 'C': /* Extract all coordinates associated with extracted variables? */ EXTRACT_ASSOCIATED_COORDINATES=False; break; case 'c': EXTRACT_ALL_COORDINATES=True; break; case 'D': /* Debugging level. Default is 0. 
*/ nco_dbg_lvl=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); break; case 'd': /* Copy limit argument for later processing */ lmt_arg[lmt_nbr]=(char *)strdup(optarg); lmt_nbr++; break; case 'F': /* Toggle index convention. Default is 0-based arrays (C-style). */ FORTRAN_IDX_CNV=!FORTRAN_IDX_CNV; break; case 'G': /* Apply Group Path Editing (GPE) to output group */ /* NB: GNU getopt() optional argument syntax is ugly (requires "=" sign) so avoid it http://stackoverflow.com/questions/1052746/getopt-does-not-parse-optional-arguments-to-parameters */ gpe=nco_gpe_prs_arg(optarg); fl_out_fmt=NC_FORMAT_NETCDF4; break; case 'g': /* Copy group argument for later processing */ /* Replace commas with hashes when within braces (convert back later) */ optarg_lcl=(char *)strdup(optarg); (void)nco_rx_comma2hash(optarg_lcl); grp_lst_in=nco_lst_prs_2D(optarg_lcl,",",&grp_lst_in_nbr); optarg_lcl=(char *)nco_free(optarg_lcl); break; case 'h': /* Toggle appending to history global attribute */ HISTORY_APPEND=!HISTORY_APPEND; break; case 'I': /* [flg] Weight and/or mask coordinate variables */ WGT_MSK_CRD_VAR=!WGT_MSK_CRD_VAR; break; case 'L': /* [enm] Deflate level. Default is 0. */ dfl_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); break; case 'l': /* Local path prefix for files retrieved from remote file system */ fl_pth_lcl=(char *)strdup(optarg); break; case 'm': /* Name of variable to use as mask in reducing. Default is none */ msk_nm=(char *)strdup(optarg); break; case 'M': /* Good data defined by relation to mask value. 
Default is 1.0 */ msk_val=strtod(optarg,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtod",sng_cnv_rcd); break; case 'N': NRM_BY_DNM=False; NORMALIZE_BY_TALLY=False; NORMALIZE_BY_WEIGHT=False; break; case 'n': NORMALIZE_BY_WEIGHT=False; (void)fprintf(fp_stdout,"%s: ERROR This option has been disabled while I rethink its implementation\n",nco_prg_nm); nco_exit(EXIT_FAILURE); break; case 'O': /* Toggle FORCE_OVERWRITE */ FORCE_OVERWRITE=!FORCE_OVERWRITE; break; case 'o': /* Name of output file */ fl_out=(char *)strdup(optarg); break; case 'p': /* Common file path */ fl_pth=(char *)strdup(optarg); break; case 'R': /* Toggle removal of remotely-retrieved-files. Default is True. */ RM_RMT_FL_PST_PRC=!RM_RMT_FL_PST_PRC; break; case 'r': /* Print CVS program information and copyright notice */ (void)nco_vrs_prn(CVS_Id,CVS_Revision); (void)nco_lbr_vrs_prn(); (void)nco_cpy_prn(); (void)nco_cnf_prn(); nco_exit(EXIT_SUCCESS); break; case 'T': /* Relational operator type. Default is 0, eq, equality */ op_typ_rlt=nco_op_prs_rlt(optarg); break; case 't': /* Thread number */ thr_nbr=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); break; case 'v': /* Variables to extract/exclude */ /* Replace commas with hashes when within braces (convert back later) */ optarg_lcl=(char *)strdup(optarg); (void)nco_rx_comma2hash(optarg_lcl); var_lst_in=nco_lst_prs_2D(optarg_lcl,",",&var_lst_in_nbr); optarg_lcl=(char *)nco_free(optarg_lcl); xtr_nbr=var_lst_in_nbr; break; case 'W': NORMALIZE_BY_TALLY=False; (void)fprintf(fp_stdout,"%s: ERROR This option has been disabled while I rethink its implementation\n",nco_prg_nm); nco_exit(EXIT_FAILURE); break; case 'w': /* Variable to use as weight in reducing. 
Default is none */ wgt_nm=(char *)strdup(optarg); break; case 'x': /* Exclude rather than extract variables specified with -v */ EXCLUDE_INPUT_LIST=True; break; case 'y': /* Operation type */ nco_op_typ_sng=(char *)strdup(optarg); nco_op_typ=nco_op_typ_get(nco_op_typ_sng); break; case '?': /* Question mark means unrecognized option, print proper usage then EXIT_FAILURE */ (void)fprintf(stdout,"%s: ERROR in command-line syntax/options. Missing or unrecognized option. Please reformulate command accordingly.\n",nco_prg_nm_get()); (void)nco_usg_prn(); nco_exit(EXIT_FAILURE); break; case '-': /* Long options are not allowed */ (void)fprintf(stderr,"%s: ERROR Long options are not available in this build. Use single letter options instead.\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); break; default: /* Print proper usage */ (void)fprintf(stdout,"%s ERROR in command-line syntax/options. Please reformulate command accordingly.\n",nco_prg_nm_get()); (void)nco_usg_prn(); nco_exit(EXIT_FAILURE); break; } /* end switch */ if(opt_crr) opt_crr=(char *)nco_free(opt_crr); } /* end while loop */ /* Set/report global chunk cache */ rcd+=nco_cnk_csh_ini(cnk_csh_byt); /* Initialize traversal table */ trv_tbl_init(&trv_tbl); /* Parse mask string */ #ifdef HAVE_BISON_FLEX if(msk_cnd_sng){ int cst_zero=0; /* Set arguments for scan */ prs_arg.fl_in=NULL; /* [sng] Input data file */ prs_arg.in_id=0; /* [id] Input data file ID */ prs_arg.fl_out=NULL; /* [sng] Output data file */ prs_arg.out_id=0; /* [id] Output data file ID */ prs_arg.att_lst=NULL; /* [sct] Attributes in script */ prs_arg.nbr_att=&cst_zero; /* [nbr] Number of attributes in script */ prs_arg.dmn_in=NULL; /* [dmn_in] List of all dimensions in input */ prs_arg.nbr_dmn_in=0; /* [nbr] Number of dimensions in input */ prs_arg.dmn_out=NULL; /* [sct] Pointer to output dimension list */ prs_arg.nbr_dmn_out=&cst_zero; /* [nbr] Number of dimensions in output list */ prs_arg.sym_tbl=NULL; /* [fnc] Symbol table for functions */ 
prs_arg.sym_tbl_nbr=0; /* [nbr] Number of functions in table */ prs_arg.ntl_scn=False; /* [flg] Initial scan of script */ prs_arg.var_LHS=NULL; /* [var] LHS cast variable */ prs_arg.nco_op_typ=nco_op_nil; /* [enm] Operation type */ /* Initialize line counter */ ncap_ln_nbr_crr=(size_t *)nco_realloc(ncap_ln_nbr_crr,ncap_ncl_dpt_crr+1UL); ncap_ln_nbr_crr[ncap_ncl_dpt_crr]=1UL; /* [cnt] Line number incremented in ncap.l */ if(ncap_ncwa_scn(&prs_arg,msk_cnd_sng,&msk_nm,&msk_val,&op_typ_rlt) != NCO_NOERR) nco_exit(EXIT_FAILURE); } /* endif msk_cnd_sng */ #endif /* !HAVE_BISON_FLEX */ /* Ensure we do not attempt to normalize by non-existent weight */ if(!wgt_nm) NORMALIZE_BY_WEIGHT=False; /* Process positional arguments and fill-in filenames */ fl_lst_in=nco_fl_lst_mk(argv,argc,optind,&fl_nbr,&fl_out,&FL_LST_IN_FROM_STDIN,FORCE_OVERWRITE); /* Initialize thread information */ thr_nbr=nco_openmp_ini(thr_nbr); in_id_arr=(int *)nco_malloc(thr_nbr*sizeof(int)); /* Parse filename */ fl_in=nco_fl_nm_prs(fl_in,0,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth); /* Make sure file is on local system and is readable or die trying */ fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id); (void)nco_inq_format(in_id,&fl_in_fmt); /* Construct GTT, Group Traversal Table (groups,variables,dimensions, limits) */ (void)nco_bld_trv_tbl(in_id,trv_pth,lmt_nbr,lmt_arg,aux_nbr,aux_arg,MSA_USR_RDR,FORTRAN_IDX_CNV,grp_lst_in,grp_lst_in_nbr,var_lst_in,xtr_nbr,EXTRACT_ALL_COORDINATES,GRP_VAR_UNN,False,EXCLUDE_INPUT_LIST,EXTRACT_ASSOCIATED_COORDINATES,EXTRACT_CLL_MSR,EXTRACT_FRM_TRM,nco_pck_plc_nil,&flg_dne,trv_tbl); /* Get number of variables, dimensions, and global attributes in file, file format */ (void)trv_tbl_inq((int *)NULL,(int *)NULL,(int *)NULL,&nbr_dmn_fl,(int *)NULL,(int *)NULL,(int *)NULL,(int 
*)NULL,&nbr_var_fl,trv_tbl); /* Allocate array of dimensions associated with variables to be extracted with maximum possible size */ dim=(dmn_sct **)nco_malloc(nbr_dmn_fl*sizeof(dmn_sct *)); /* Find dimensions associated with variables to be extracted */ (void)nco_dmn_lst_ass_var_trv(in_id,trv_tbl,&nbr_dmn_xtr,&dim); /* Not specifying any dimensions is interpreted as specifying all dimensions */ if(dmn_avg_nbr == 0){ dmn_avg_nbr=nbr_dmn_xtr; dmn_avg_lst_in=(char **)nco_malloc(dmn_avg_nbr*sizeof(char *)); for(idx=0;idx<dmn_avg_nbr;idx++) dmn_avg_lst_in[idx]=(char *)strdup(dim[idx]->nm); if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO No dimensions specified with -a, therefore reducing (averaging, taking minimum, etc.) over all dimensions\n",nco_prg_nm); } /* end if dmn_avg_nbr == 0 */ /* Allocate array of dimensions to average with maximum possible size */ dmn_avg=(dmn_sct **)nco_malloc(nbr_dmn_fl*sizeof(dmn_sct *)); /* Allocate array of dimensions to keep on output with maximum possible size */ dmn_out=(dmn_sct **)nco_malloc(nbr_dmn_fl*sizeof(dmn_sct *)); /* Create list of dimensions to average */ (void)nco_dmn_avg_mk(in_id,dmn_avg_lst_in,dmn_avg_nbr,flg_dmn_prc_usr_spc,flg_rdd,trv_tbl,&dmn_avg,&dmn_avg_nbr); /* Create list of dimensions to keep on output */ (void)nco_dmn_out_mk(dim,nbr_dmn_xtr,trv_tbl,&dmn_out,&nbr_dmn_out); dmn_avg=(dmn_sct **)nco_realloc(dmn_avg,dmn_avg_nbr*sizeof(dmn_sct *)); dmn_out=(dmn_sct **)nco_realloc(dmn_out,nbr_dmn_out*sizeof(dmn_sct *)); /* Transfer degenerated dimensions information into GTT */ (void)nco_dmn_dgn_tbl(dmn_out,nbr_dmn_out,trv_tbl); /* Fill-in variable structure list for all extracted variables */ var=nco_fll_var_trv(in_id,&xtr_nbr,trv_tbl); /* Duplicate to output array */ var_out=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *)); for(idx=0;idx<xtr_nbr;idx++){ var_out[idx]=nco_var_dpl(var[idx]); (void)nco_xrf_var(var[idx],var_out[idx]); (void)nco_xrf_dmn(var_out[idx]); } /* end loop over var */ /* Determine 
conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */ cnv=nco_cnv_ini(in_id); /* Divide variable lists into lists of fixed variables and variables to be processed */ (void)nco_var_lst_dvd(var,var_out,xtr_nbr,cnv,True,nco_pck_plc_nil,nco_pck_map_nil,dmn_avg,dmn_avg_nbr,&var_fix,&var_fix_out,&nbr_var_fix,&var_prc,&var_prc_out,&nbr_var_prc,trv_tbl); /* Store processed and fixed variables info into GTT */ (void)nco_var_prc_fix_trv(nbr_var_prc,var_prc,nbr_var_fix,var_fix,trv_tbl); /* We now have final list of variables to extract. Phew. */ if(nco_dbg_lvl >= nco_dbg_var && nco_dbg_lvl != nco_dbg_dev){ for(idx=0;idx<xtr_nbr;idx++) (void)fprintf(stderr,"var[%d]->nm = %s, ->id=[%d]\n",idx,var[idx]->nm,var[idx]->id); for(idx=0;idx<nbr_var_fix;idx++) (void)fprintf(stderr,"var_fix[%d]->nm = %s, ->id=[%d]\n",idx,var_fix[idx]->nm,var_fix[idx]->id); for(idx=0;idx<nbr_var_prc;idx++) (void)fprintf(stderr,"var_prc[%d]->nm = %s, ->id=[%d]\n",idx,var_prc[idx]->nm,var_prc[idx]->id); } /* end if */ /* Make output and input files consanguinous */ if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt; /* Initialize, decode, and set PPC information */ if(ppc_nbr > 0) nco_ppc_ini(in_id,&dfl_lvl,fl_out_fmt,ppc_arg,ppc_nbr,trv_tbl); /* Verify output file format supports requested actions */ (void)nco_fl_fmt_vet(fl_out_fmt,cnk_nbr,dfl_lvl); /* Open output file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,WRT_TMP_FL,&out_id); /* Initialize chunking from user-specified inputs */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) rcd+=nco_cnk_ini(in_id,fl_out,cnk_arg,cnk_nbr,cnk_map,cnk_plc,cnk_csh_byt,cnk_min_byt,cnk_sz_byt,cnk_sz_scl,&cnk); /* Define dimensions, extracted groups, variables, and attributes in output file. 
*/ (void)nco_xtr_dfn(in_id,out_id,&cnk,dfl_lvl,gpe,md5,!FORCE_APPEND,True,False,nco_pck_plc_nil,(char *)NULL,trv_tbl); /* Catenate time-stamped command line to "history" global attribute */ if(HISTORY_APPEND) (void)nco_hst_att_cat(out_id,cmd_ln); if(HISTORY_APPEND && FORCE_APPEND) (void)nco_prv_att_cat(fl_in,in_id,out_id); if(gaa_nbr > 0) (void)nco_glb_att_add(out_id,gaa_arg,gaa_nbr); if(HISTORY_APPEND) (void)nco_vrs_att_cat(out_id); if(thr_nbr > 1 && HISTORY_APPEND) (void)nco_thr_att_cat(out_id,thr_nbr); /* Add new missing values to output file while in define mode */ if(msk_nm){ for(idx=0;idx<nbr_var_prc;idx++){ char *grp_out_fll=NULL; /* [sng] Group name */ int grp_out_id; /* [ID] Group ID (output) */ int var_out_id; /* [ID] Variable ID (output) */ trv_sct *var_trv; /* [sct] Variable GTT object */ /* Obtain variable GTT object using full variable name */ var_trv=trv_tbl_var_nm_fll(var_prc[idx]->nm_fll,trv_tbl); /* Edit group name for output */ if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=(char *)strdup(var_trv->grp_nm_fll); /* Obtain output group ID */ (void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id); /* Memory management after current extracted group */ if(grp_out_fll) grp_out_fll=(char *)nco_free(grp_out_fll); /* Get variable ID */ (void)nco_inq_varid(grp_out_id,var_trv->nm,&var_out_id); /* Store the output variable ID */ var_prc_out[idx]->id=var_out_id; /* Define for var_prc_out because mss_val for var_prc will be overwritten in nco_var_mtd_refresh() */ if(!var_prc_out[idx]->has_mss_val){ var_prc_out[idx]->has_mss_val=True; var_prc_out[idx]->mss_val=nco_mss_val_mk(var_prc[idx]->type); (void)nco_put_att(grp_out_id,var_prc_out[idx]->id,nco_mss_val_sng_get(),var_prc_out[idx]->type,(long)1,var_prc_out[idx]->mss_val.vp); } /* end if */ } /* end for */ } /* end if */ /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Add cell_methods attributes (before exiting define 
mode) */ if(flg_cll_mth) rcd+=nco_cnv_cf_cll_mth_add(out_id,var_prc_out,nbr_var_prc,dmn_avg,dmn_avg_nbr,nco_op_typ,gpe,(clm_bnd_sct *)NULL,trv_tbl); /* Take output file out of define mode */ if(hdr_pad == 0UL){ (void)nco_enddef(out_id); }else{ (void)nco__enddef(out_id,hdr_pad); if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO Padding header with %lu extra bytes\n",nco_prg_nm_get(),(unsigned long)hdr_pad); } /* hdr_pad */ /* Assign zero to start and unity to stride vectors in output variables */ (void)nco_var_srd_srt_set(var_out,xtr_nbr); /* Copy variable data for non-processed variables */ (void)nco_cpy_fix_var_trv(in_id,out_id,gpe,trv_tbl); /* Close first input netCDF file */ nco_close(in_id); /* Loop over input files (not currently used, fl_nbr == 1) */ for(fl_idx=0;fl_idx<fl_nbr;fl_idx++){ /* Parse filename */ if(fl_idx != 0) fl_in=nco_fl_nm_prs(fl_in,fl_idx,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"%s: INFO Input file %d is %s",nco_prg_nm_get(),fl_idx,fl_in); /* Make sure file is on local system and is readable or die trying */ if(fl_idx != 0) fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); if(nco_dbg_lvl >= nco_dbg_fl && FL_RTR_RMT_LCN) (void)fprintf(stderr,", local file is %s",fl_in); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\n"); /* Open file once per thread to improve caching */ for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,in_id_arr+thr_idx); in_id=in_id_arr[0]; /* Perform various error-checks on input file */ if(False) (void)nco_fl_cmp_err_chk(); /* Timestamp end of metadata setup and disk layout */ rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); ddra_info.tmr_flg=nco_tmr_rgl; #ifdef _OPENMP /* OpenMP notes: firstprivate(): rcd gets incremented, so keep initial value lastprivate(): retain rcd value from last thread private(): wgt_avg does not need initialization shared(): msk, wgt and lmt_nbr are not altered within 
loop */ #pragma omp parallel for firstprivate(DO_CONFORM_MSK,DO_CONFORM_WGT,ddra_info,rcd) lastprivate(rcd) private(idx,in_id,wgt_avg) shared(MULTIPLY_BY_TALLY,MUST_CONFORM,NRM_BY_DNM,WGT_MSK_CRD_VAR,dmn_avg,dmn_avg_nbr,flg_ddra,flg_rdd,gpe,in_id_arr,msk_nm,msk_val,nbr_var_prc,nco_dbg_lvl,nco_op_typ,nco_prg_nm,op_typ_rlt,out_id,trv_tbl,var_prc,var_prc_out,wgt_nm,lmt_nbr,lmt_arg,FORTRAN_IDX_CNV,MSA_USR_RDR) #endif /* !_OPENMP */ for(idx=0;idx<nbr_var_prc;idx++){ /* Process all variables in current file */ char *grp_out_fll=NULL; /* [sng] Group name */ int grp_id; /* [ID] Group ID */ int grp_out_id; /* [ID] Group ID (output) */ int var_out_id; /* [ID] Variable ID (output) */ trv_sct *var_trv; /* [sct] Variable GTT object */ var_sct *wgt=NULL; var_sct *wgt_out=NULL; var_sct *msk=NULL; var_sct *msk_out=NULL; in_id=in_id_arr[omp_get_thread_num()]; /* Obtain variable GTT object using full variable name */ var_trv=trv_tbl_var_nm_fll(var_prc[idx]->nm_fll,trv_tbl); /* Obtain group ID */ (void)nco_inq_grp_full_ncid(in_id,var_trv->grp_nm_fll,&grp_id); if(nco_dbg_lvl >= nco_dbg_var && nco_dbg_lvl < nco_dbg_nbr) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm); if(nco_dbg_lvl >= nco_dbg_var && nco_dbg_lvl < nco_dbg_nbr) (void)fflush(fp_stderr); /* Allocate and, if necessary, initialize accumulation space for all processed variables */ var_prc_out[idx]->sz=var_prc[idx]->sz; /* fxm: verify that var_prc->tally is not needed */ if(!(var_prc_out[idx]->tally=(long *)nco_malloc_flg(var_prc_out[idx]->sz*sizeof(long)))){ (void)fprintf(fp_stdout,"%s: ERROR Unable to malloc() %ld*%ld bytes for tally buffer for variable %s in main()\n",nco_prg_nm_get(),var_prc_out[idx]->sz,(long)sizeof(long),var_prc_out[idx]->nm); nco_exit(EXIT_FAILURE); } /* end if err */ (void)nco_zero_long(var_prc_out[idx]->sz,var_prc_out[idx]->tally); if(!(var_prc_out[idx]->val.vp=(void *)nco_malloc_flg(var_prc_out[idx]->sz*nco_typ_lng(var_prc_out[idx]->type)))){ (void)fprintf(fp_stdout,"%s: ERROR Unable to malloc() 
%ld*%lu bytes for value buffer for variable %s in main()\n",nco_prg_nm_get(),var_prc_out[idx]->sz,(unsigned long)nco_typ_lng(var_prc_out[idx]->type),var_prc_out[idx]->nm); nco_exit(EXIT_FAILURE); } /* end if err */ (void)nco_var_zero(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->val); (void)nco_var_mtd_refresh(grp_id,var_prc[idx]); /* Find weighting variable that matches current variable */ if(wgt_nm) wgt=nco_var_get_wgt_trv(in_id,lmt_nbr,lmt_arg,MSA_USR_RDR,FORTRAN_IDX_CNV,wgt_nm,var_prc[idx],trv_tbl); /* Find mask variable that matches current variable */ if(msk_nm) msk=nco_var_get_wgt_trv(in_id,lmt_nbr,lmt_arg,MSA_USR_RDR,FORTRAN_IDX_CNV,msk_nm,var_prc[idx],trv_tbl); /* Retrieve variable from disk into memory */ (void)nco_msa_var_get_trv(in_id,var_prc[idx],trv_tbl); /* var_prc_out still has type = packed type for packed variables nco_typ_cnv_rth() fixes that for most operations, though not for minimization or maximization Following line is necessary only for packed variables subject to minimization or maximization */ if(var_prc[idx]->typ_dsk != var_prc[idx]->type && var_prc[idx]->typ_upk == var_prc[idx]->type) var_prc_out[idx]=nco_var_cnf_typ(var_prc[idx]->type,var_prc_out[idx]); /* Convert char, short, long, int, and float types to doubles before arithmetic */ var_prc[idx]=nco_typ_cnv_rth(var_prc[idx],nco_op_typ); var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ); /* Check mask found for this variable, using msk */ if(msk && (!var_prc[idx]->is_crd_var || WGT_MSK_CRD_VAR)){ msk_out=nco_var_cnf_dmn(var_prc[idx],msk,msk_out,MUST_CONFORM,&DO_CONFORM_MSK); /* Mask variable iff msk and var conform */ if(DO_CONFORM_MSK){ msk_out=nco_var_cnf_typ(var_prc[idx]->type,msk_out); /* mss_val for var_prc has been overwritten in nco_var_mtd_refresh() */ if(!var_prc[idx]->has_mss_val){ var_prc[idx]->has_mss_val=True; var_prc[idx]->mss_val=nco_mss_val_mk(var_prc[idx]->type); } /* end if */ /* Mask by changing variable to missing value where condition 
is false */ (void)nco_var_msk(var_prc[idx]->type,var_prc[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,msk_val,op_typ_rlt,msk_out->val,var_prc[idx]->val); } /* end if */ } /* end if */ /* Perform non-linear transformations before weighting */ if(!var_prc[idx]->is_crd_var){ switch(nco_op_typ){ case nco_op_mabs: /* Absolute value variable before weighting */ case nco_op_mebs: /* Absolute value variable before weighting */ case nco_op_mibs: /* Absolute value variable before weighting */ (void)nco_var_abs(var_prc[idx]->type,var_prc[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->val); break; case nco_op_avgsqr: /* Square variable before weighting */ case nco_op_rms: /* Square variable before weighting */ case nco_op_rmssdn: /* Square variable before weighting */ (void)nco_var_mlt(var_prc[idx]->type,var_prc[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->val,var_prc[idx]->val); break; default: /* All other operations are linear, do nothing to them yet */ break; } /* end case */ } /* var_prc[idx]->is_crd_var */ /* Check weight found for this variable, using wgt */ if(wgt && (!var_prc[idx]->is_crd_var || WGT_MSK_CRD_VAR)){ /* fxm: nco_var_cnf_dmn() has bug where it does not allocate tally array for weights that do already conform to var_prc. TODO #114. 
*/ wgt_out=nco_var_cnf_dmn(var_prc[idx],wgt,wgt_out,MUST_CONFORM,&DO_CONFORM_WGT); if(DO_CONFORM_WGT){ wgt_out=nco_var_cnf_typ(var_prc[idx]->type,wgt_out); /* Weight variable after any initial non-linear operation so, e.g., variable (not weights) is squared */ (void)nco_var_mlt(var_prc[idx]->type,var_prc[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,wgt_out->val,var_prc[idx]->val); } /* end if weights conformed */ } /* end if weight was specified and then tested for conformance */ /* Copy (masked) (weighted) values from var_prc to var_prc_out */ (void)memcpy((void *)(var_prc_out[idx]->val.vp),(void *)(var_prc[idx]->val.vp),var_prc_out[idx]->sz*nco_typ_lng(var_prc_out[idx]->type)); /* 20050516: fxm: destruction of var_prc_out in nco_var_avg() leaves dangling pointers in var_out? */ /* Reduce variable over specified dimensions (tally array is set here) NB: var_prc_out[idx] is new, so corresponding var_out[idx] is dangling nco_var_avg() will perform nco_op_typ on all variables except coordinate variables nco_var_avg() always averages coordinate variables */ var_prc_out[idx]=nco_var_avg(var_prc_out[idx],dmn_avg,dmn_avg_nbr,nco_op_typ,flg_rdd,&ddra_info); /* var_prc_out[idx]->val now holds numerator of averaging expression documented in NCO Users Guide Denominator is also tricky due to sundry normalization options These logical switches are tricky---modify them with care */ if(NRM_BY_DNM && DO_CONFORM_WGT && (!var_prc[idx]->is_crd_var || WGT_MSK_CRD_VAR)){ /* Duplicate wgt_out as wgt_avg so that wgt_out is not contaminated by any averaging operation and may be re-used on next variable. 
Free wgt_avg after each use but continue to re-use wgt_out */ wgt_avg=nco_var_dpl(wgt_out); if(var_prc[idx]->has_mss_val){ double mss_val_dbl=double_CEWI; /* Set denominator to missing value at all locations where variable is missing value If this is accomplished by setting weight to missing value wherever variable is missing value then weight must not be re-used by next variable (which may conform but have missing values in different locations) This is one good reason to copy wgt_out into disposable wgt_avg for each new variable */ /* First, make sure wgt_avg has same missing value as variable */ (void)nco_mss_val_cp(var_prc[idx],wgt_avg); /* Copy missing value into double precision variable */ switch(wgt_avg->type){ case NC_FLOAT: mss_val_dbl=wgt_avg->mss_val.fp[0]; break; case NC_DOUBLE: mss_val_dbl=wgt_avg->mss_val.dp[0]; break; case NC_INT: mss_val_dbl=wgt_avg->mss_val.ip[0]; break; case NC_SHORT: mss_val_dbl=wgt_avg->mss_val.sp[0]; break; case NC_USHORT: mss_val_dbl=wgt_avg->mss_val.usp[0]; break; case NC_UINT: mss_val_dbl=wgt_avg->mss_val.uip[0]; break; case NC_INT64: mss_val_dbl=(double)wgt_avg->mss_val.i64p[0]; break; /* CEWI for MSVC */ case NC_UINT64: mss_val_dbl=(double)wgt_avg->mss_val.ui64p[0]; break; /* CEWI for MSVC */ case NC_BYTE: mss_val_dbl=wgt_avg->mss_val.bp[0]; break; case NC_UBYTE: mss_val_dbl=wgt_avg->mss_val.ubp[0]; break; case NC_CHAR: mss_val_dbl=wgt_avg->mss_val.cp[0]; break; case NC_STRING: break; /* Do nothing */ default: nco_dfl_case_nc_type_err(); break; } /* end switch */ /* Second, mask wgt_avg where variable is missing value */ (void)nco_var_msk(wgt_avg->type,wgt_avg->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,mss_val_dbl,nco_op_ne,var_prc[idx]->val,wgt_avg->val); } /* endif weight must be checked for missing values */ /* Free current input buffer */ var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp); if(msk && DO_CONFORM_MSK){ /* Must mask weight in same fashion as variable was masked If msk and var did not conform then 
do not mask wgt Ensure wgt_avg has a missing value */ if(!wgt_avg->has_mss_val){ wgt_avg->has_mss_val=True; wgt_avg->mss_val=nco_mss_val_mk(wgt_avg->type); } /* end if */ /* Mask by changing weight to missing value where condition is false */ (void)nco_var_msk(wgt_avg->type,wgt_avg->sz,wgt_avg->has_mss_val,wgt_avg->mss_val,msk_val,op_typ_rlt,msk_out->val,wgt_avg->val); } /* endif weight must be masked */ /* fxm: temporary kludge to make sure weight has tally space wgt_avg may lack valid tally array in ncwa because wgt_avg is created, sometimes, before the tally array for var_prc_out[idx] is created. When this occurs the nco_var_dpl() call in nco_var_cnf_dmn() does not copy tally array into wgt_avg. See related note about this above. TODO #114.*/ if(wgt_avg->sz > 0){ if(!(wgt_avg->tally=(long *)nco_realloc(wgt_avg->tally,wgt_avg->sz*sizeof(long)))){ (void)fprintf(fp_stdout,"%s: ERROR Unable to realloc() %ld*%ld bytes for tally buffer for weight %s in main()\n",nco_prg_nm_get(),wgt_avg->sz,(long)sizeof(long),wgt_avg->nm); nco_exit(EXIT_FAILURE); } /* end if */ } /* wgt_avg->sz */ /* Average weight over specified dimensions (tally array is set here) */ wgt_avg=nco_var_avg(wgt_avg,dmn_avg,dmn_avg_nbr,nco_op_avg,flg_rdd,&ddra_info); if(MULTIPLY_BY_TALLY){ /* NB: Currently this is not implemented */ /* Multiply numerator (weighted sum of variable) by tally We deviously accomplish this by dividing denominator by tally */ (void)nco_var_nrm(wgt_avg->type,wgt_avg->sz,wgt_avg->has_mss_val,wgt_avg->mss_val,wgt_avg->tally,wgt_avg->val); } /* endif */ /* Divide numerator by denominator */ /* Diagnose common PEBCAK before it causes core dump */ if(var_prc_out[idx]->sz == 1L && var_prc_out[idx]->type == NC_INT && var_prc_out[idx]->val.ip[0] == 0){ (void)fprintf(fp_stdout,"%s: ERROR Weight in denominator weight = 0.0, will cause SIGFPE\n%s: HINT Sum of masked, averaged weights must be non-zero\n%s: HINT A possible workaround is to remove variable \"%s\" from output file using \"%s 
-x -v %s ...\"\n%s: Expecting core dump...now!\n",nco_prg_nm,nco_prg_nm,nco_prg_nm,var_prc_out[idx]->nm,nco_prg_nm,var_prc_out[idx]->nm,nco_prg_nm); } /* end if */ /* Rather complex conditional statement is shorter than switch() */ if( /* Normalize by weighted tally if .... */ var_prc[idx]->is_crd_var || /* ...variable is a coordinate or ...*/ ((nco_op_typ != nco_op_min) && /* ...operation is not min() and... */ (nco_op_typ != nco_op_max) && /* ...operation is not max() and... */ (nco_op_typ != nco_op_mabs) && /* ...operation is not mabs() and... */ (nco_op_typ != nco_op_mibs) && /* ...operation is not mibs() and... */ (nco_op_typ != nco_op_tabs) && /* ...operation is not tabs() and... */ (nco_op_typ != nco_op_ttl)) /* ...operation is not ttl() and... */ ){ /* Divide numerator by masked, averaged, weights */ (void)nco_var_dvd(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,wgt_avg->val,var_prc_out[idx]->val); } /* endif */ /* Free wgt_avg, but keep wgt_out, after each use */ if(wgt_avg) wgt_avg=nco_var_free(wgt_avg); /* End of branch for normalization when weights were specified */ }else if(NRM_BY_DNM){ /* Branch for normalization when no weights were specified Normalization is just due to tally */ if(var_prc[idx]->is_crd_var){ /* Always return averages (never extrema or other statistics) of coordinates Prevent coordinate variables from encountering nco_var_nrm_sdn() */ (void)nco_var_nrm(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->val); }else{ /* !var_prc[idx]->is_crd_var */ switch(nco_op_typ){ case nco_op_mebs: /* Normalize sum by tally to create mean */ case nco_op_avg: /* Normalize sum by tally to create mean */ case nco_op_sqravg: /* Normalize sum by tally to create mean */ case nco_op_avgsqr: /* Normalize sum of squares by tally to create mean square */ case nco_op_rms: /* Normalize sum of squares by tally to 
create mean square */ case nco_op_sqrt: /* Normalize sum by tally to create mean */ (void)nco_var_nrm(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->val); break; case nco_op_rmssdn: /* Normalize sum of squares by tally-1 to create mean square for sdn */ (void)nco_var_nrm_sdn(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->val); break; case nco_op_min: /* Minimum is already in buffer, do nothing */ case nco_op_max: /* Maximum is already in buffer, do nothing */ case nco_op_mabs: /* Maximum absolute value is already in buffer, do nothing */ case nco_op_mibs: /* Minimum absolute value is already in buffer, do nothing */ case nco_op_tabs: /* Total absolute value is already in buffer, do nothing */ case nco_op_ttl: /* Total is already in buffer, do nothing */ break; default: (void)fprintf(fp_stdout,"%s: ERROR Illegal nco_op_typ in non-weighted normalization\n",nco_prg_nm); nco_exit(EXIT_FAILURE); break; } /* end switch */ } /* !var_prc[idx]->is_crd_var */ }else if(!NRM_BY_DNM){ /* User turned off normalization so we are done */ ; }else{ (void)fprintf(fp_stdout,"%s: ERROR Unforeseen logical branch in main()\n",nco_prg_nm); nco_exit(EXIT_FAILURE); } /* end if */ /* Some non-linear operations require additional processing */ if(!var_prc[idx]->is_crd_var){ switch(nco_op_typ){ case nco_op_sqravg: /* Square mean to create square of the mean (for sdn) */ (void)nco_var_mlt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val,var_prc_out[idx]->val); break; case nco_op_sqrt: /* Take root of mean to create root mean */ case nco_op_rms: /* Take root of mean of sum of squares to create root mean square */ case nco_op_rmssdn: /* Take root of sdn mean of sum of squares to create root mean square for sdn */ 
(void)nco_var_sqrt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->val,var_prc_out[idx]->val); break; default: break; } /* end switch */ } /* var_prc[idx]->is_crd_var */ /* Free tally buffer */ var_prc_out[idx]->tally=(long *)nco_free(var_prc_out[idx]->tally); /* Revert any arithmetic promotion but leave unpacked (for now) */ var_prc_out[idx]=nco_var_cnf_typ(var_prc_out[idx]->typ_upk,var_prc_out[idx]); /* Edit group name for output */ if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=(char *)strdup(var_trv->grp_nm_fll); /* Obtain output group ID */ (void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id); /* Memory management after current extracted group */ if(grp_out_fll) grp_out_fll=(char *)nco_free(grp_out_fll); /* Get variable ID */ (void)nco_inq_varid(grp_out_id,var_trv->nm,&var_out_id); /* Store the output variable ID */ var_prc_out[idx]->id=var_out_id; if(var_trv->ppc != NC_MAX_INT){ if(var_trv->flg_nsd) (void)nco_ppc_bitmask(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val); else (void)nco_ppc_around(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val); } /* endif ppc */ if(nco_is_xcp(var_trv->nm)) nco_xcp_prc(var_trv->nm,var_prc_out[idx]->type,var_prc_out[idx]->sz,(char *)var_prc_out[idx]->val.vp); #ifdef _OPENMP #pragma omp critical #endif /* _OPENMP */ { /* begin OpenMP critical */ /* Copy average to output file then free averaging buffer */ if(var_prc_out[idx]->nbr_dim == 0){ (void)nco_put_var1(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type); }else{ /* end if variable is scalar */ 
(void)nco_put_vara(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type); } /* end if variable is array */ } /* end OpenMP critical */ if(flg_ddra){ /* DDRA diagnostics Usage: ncwa -O -C --mdl -a lat,lon,time -w lat ~/nco/data/in.nc ~/foo.nc ncwa -O -C --mdl -a lat,lon -w lat ${DATA}/nco_bm/stl_5km.nc ~/foo.nc ncwa -O -C --mdl -a lat,lon,time -w lat ${DATA}/nco_bm/gcm_T85.nc ~/foo.nc */ /* Assign remaining input for DDRA diagnostics */ ddra_info.lmn_nbr=var_prc[idx]->sz; /* [nbr] Variable size */ if(wgt) ddra_info.lmn_nbr_wgt=wgt->sz; /* [nbr] Weight size */ ddra_info.nco_op_typ=nco_op_typ; /* [enm] Operation type */ ddra_info.rnk_var=var_prc[idx]->nbr_dim; /* I [nbr] Variable rank (in input file) */ if(wgt) ddra_info.rnk_wgt=wgt->nbr_dim; /* [nbr] Rank of weight */ ddra_info.var_idx=idx; /* [enm] Index */ ddra_info.wrd_sz=nco_typ_lng(var_prc[idx]->type); /* [B] Bytes per element */ /* DDRA diagnostics */ rcd+=nco_ddra /* [fnc] Count operations */ (var_prc[idx]->nm, /* I [sng] Variable name */ wgt_nm, /* I [sng] Weight name */ &ddra_info); /* I [sct] DDRA information */ } /* !flg_ddra */ /* Free current output buffer */ var_prc_out[idx]->val.vp=nco_free(var_prc_out[idx]->val.vp); /* Free possible weight/mask found */ if(wgt) wgt=nco_var_free(wgt); if(wgt_out) wgt_out=nco_var_free(wgt_out); if(msk) msk=nco_var_free(msk); if(msk_out) msk_out=nco_var_free(msk_out); } /* end (OpenMP parallel for) loop over idx */ if(nco_dbg_lvl >= nco_dbg_var) (void)fprintf(stderr,"\n"); /* Close input netCDF file */ for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_arr[thr_idx]); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in); } /* end loop over fl_idx */ /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); /* Clean memory unless dirty memory allowed */ if(flg_mmr_cln){ /* ncwa-specific memory 
*/ if(dmn_avg_nbr > 0) dmn_avg=(dmn_sct **)nco_free(dmn_avg); if(msk_nm) msk_nm=(char *)nco_free(msk_nm); if(msk_cnd_sng) msk_cnd_sng=(char *)nco_free(msk_cnd_sng); if(wgt_avg) wgt_avg=nco_var_free(wgt_avg); if(wgt_nm) wgt_nm=(char *)nco_free(wgt_nm); /* NCO-generic clean-up */ /* Free individual strings/arrays */ if(cmd_ln) cmd_ln=(char *)nco_free(cmd_ln); if(cnk_map_sng) cnk_map_sng=(char *)nco_free(cnk_map_sng); if(cnk_plc_sng) cnk_plc_sng=(char *)nco_free(cnk_plc_sng); if(fl_in) fl_in=(char *)nco_free(fl_in); if(fl_out) fl_out=(char *)nco_free(fl_out); if(fl_out_tmp) fl_out_tmp=(char *)nco_free(fl_out_tmp); if(fl_pth) fl_pth=(char *)nco_free(fl_pth); if(fl_pth_lcl) fl_pth_lcl=(char *)nco_free(fl_pth_lcl); if(in_id_arr) in_id_arr=(int *)nco_free(in_id_arr); /* Free lists of strings */ if(fl_lst_in && fl_lst_abb == NULL) fl_lst_in=nco_sng_lst_free(fl_lst_in,fl_nbr); if(fl_lst_in && fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,1); if(fl_lst_abb) fl_lst_abb=nco_sng_lst_free(fl_lst_abb,abb_arg_nbr); if(gaa_nbr > 0) gaa_arg=nco_sng_lst_free(gaa_arg,gaa_nbr); if(var_lst_in_nbr > 0) var_lst_in=nco_sng_lst_free(var_lst_in,var_lst_in_nbr); /* Free limits */ for(idx=0;idx<aux_nbr;idx++) aux_arg[idx]=(char *)nco_free(aux_arg[idx]); for(idx=0;idx<lmt_nbr;idx++) lmt_arg[idx]=(char *)nco_free(lmt_arg[idx]); for(idx=0;idx<ppc_nbr;idx++) ppc_arg[idx]=(char *)nco_free(ppc_arg[idx]); /* Free chunking information */ for(idx=0;idx<cnk_nbr;idx++) cnk_arg[idx]=(char *)nco_free(cnk_arg[idx]); if(cnk_nbr > 0 && (fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC)) cnk.cnk_dmn=(cnk_dmn_sct **)nco_cnk_lst_free(cnk.cnk_dmn,cnk_nbr); /* Free dimension lists */ if(nbr_dmn_xtr > 0) dim=nco_dmn_lst_free(dim,nbr_dmn_xtr); if(nbr_dmn_out > 0) dmn_out=nco_dmn_lst_free(dmn_out,nbr_dmn_out); /* Free variable lists */ if(xtr_nbr > 0) var=nco_var_lst_free(var,xtr_nbr); /* ncwa uses nco_var_lst_free() on var_prc_out because var_out has dangling pointers */ if(nbr_var_fix > 
0) var_fix_out=nco_var_lst_free(var_fix_out,nbr_var_fix); if(nbr_var_prc > 0) var_prc_out=nco_var_lst_free(var_prc_out,nbr_var_prc); var_prc=(var_sct **)nco_free(var_prc); var_fix=(var_sct **)nco_free(var_fix); var_out=(var_sct **)nco_free(var_out); for(idx=0;idx<lmt_nbr;idx++) flg_dne[idx].dim_nm=(char *)nco_free(flg_dne[idx].dim_nm); if(flg_dne) flg_dne=(nco_dmn_dne_t *)nco_free(flg_dne); /* Free traversal table */ trv_tbl_free(trv_tbl); } /* !flg_mmr_cln */ /* End timer */ ddra_info.tmr_flg=nco_tmr_end; /* [enm] Timer flag */ rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); if(rcd != NC_NOERR) nco_err_exit(rcd,"main"); #ifdef ENABLE_MPI MPI_Finalize(); #endif /* !ENABLE_MPI */ nco_exit_gracefully(); return EXIT_SUCCESS; } /* end main() */
omp_bug1.c
/******************************************************************************
 * FILE: omp_bug1.c (corrected)
 * DESCRIPTION:
 *   Demonstrates the OpenMP worksharing-loop construct. The original version
 *   attached "#pragma omp parallel for" to a compound statement { ... },
 *   which is a compile-time error: a combined parallel worksharing-loop
 *   directive must be immediately followed by the for loop itself.
 *   The fix opens a plain parallel region and applies "#pragma omp for"
 *   (with the schedule clause) directly to the loop, so that per-thread
 *   setup such as omp_get_thread_num() can precede the shared loop.
 * AUTHOR: Blaise Barney 5/99
 * LAST REVISED: 04/06/05
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 50
#define CHUNKSIZE 5

int main (int argc, char *argv[])
{
  int i, chunk, tid;
  float a[N], b[N], c[N];

  /* Some initializations */
  for (i=0; i < N; i++)
    a[i] = b[i] = i * 1.0;
  chunk = CHUNKSIZE;

  /* i and tid must be private: each thread needs its own loop index and id. */
  #pragma omp parallel shared(a,b,c,chunk) private(i,tid)
  {
    tid = omp_get_thread_num();

    /* Worksharing loop: the for statement immediately follows the directive,
       as the OpenMP specification requires. */
    #pragma omp for schedule(static,chunk)
    for (i=0; i < N; i++)
      {
      c[i] = a[i] + b[i];
      printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]);
      }
  }   /* end of parallel region */

  return 0;  /* original fell off the end of main without returning a value */
}
zgels.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_gels
 *
 * Solves overdetermined or underdetermined linear systems
 * involving an m-by-n matrix A using a QR or LQ factorization of A. It
 * is assumed that A has full rank. The following options are provided:
 *
 * # trans = PlasmaNoTrans and m >= n: find the least squares solution of an
 *   overdetermined system, i.e., solve the least squares problem:
 *   minimize || B - A*X ||.
 *
 * # trans = PlasmaNoTrans and m < n: find the minimum norm solution of an
 *   underdetermined system A * X = B.
 *
 * Several right-hand side vectors B and solution vectors X can be handled in a
 * single call; they are stored as the columns of the m-by-nrhs right-hand side
 * matrix B and the n-by-nrhs solution matrix X.
 *
 *******************************************************************************
 *
 * @param[in] trans
 *          - PlasmaNoTrans: the linear system involves A
 *            (the only supported option for now).
 *
 * @param[in] m
 *          The number of rows of the matrix A. m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix A. n >= 0.
 *
 * @param[in] nrhs
 *          The number of right hand sides, i.e., the number of columns of the
 *          matrices B and X. nrhs >= 0.
 *
 * @param[in,out] pA
 *          On entry, pointer to the m-by-n matrix A.
 *          On exit,
 *          if m >= n, A is overwritten by details of its QR factorization as
 *                     returned by plasma_zgeqrf;
 *          if m < n,  A is overwritten by details of its LQ factorization as
 *                     returned by plasma_zgelqf.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,m).
 *
 * @param[out] T
 *          On exit, auxiliary factorization data.
 *          Matrix of T is allocated inside this function and needs to be
 *          destroyed by plasma_desc_destroy.
 *
 * @param[in,out] pB
 *          On entry, pointer to the m-by-nrhs matrix B of right-hand side
 *          vectors, stored columnwise;
 *          On exit, if return value = 0, B is overwritten by the solution
 *          vectors, stored columnwise:
 *          if m >= n, rows 1 to N of B contain the least squares solution
 *          vectors; the residual sum of squares for the solution in each
 *          column is given by the sum of squares of the modulus of elements
 *          n+1 to m in that column;
 *          if m < n, rows 1 to n of B contain the minimum norm solution
 *          vectors;
 *
 * @param[in] ldb
 *          The leading dimension of the array B. ldb >= max(1,m,n).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 *******************************************************************************
 *
 * @sa plasma_omp_zgels
 * @sa plasma_cgels
 * @sa plasma_dgels
 * @sa plasma_sgels
 * @sa plasma_zgeqrf
 * @sa plasma_zgeqrs
 *
 ******************************************************************************/
int plasma_zgels(plasma_enum_t trans,
                 int m, int n, int nrhs,
                 plasma_complex64_t *pA, int lda,
                 plasma_desc_t *T,
                 plasma_complex64_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (trans != PlasmaNoTrans) {
        plasma_error("only PlasmaNoTrans supported");
        return PlasmaErrorNotSupported;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -4;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -6;
    }
    if (ldb < imax(1, imax(m, n))) {
        plasma_error("illegal value of ldb");
        return -9;
    }

    // quick return: with an empty A or B the solution is simply a zero matrix.
    if (imin(m, imin(n, nrhs)) == 0) {
        for (int i = 0; i < imax(m, n); i++)
            for (int j = 0; j < nrhs; j++)
                pB[j*ldb+i] = 0.0;
        return PlasmaSuccess;
    }

    // Tune parameters (QR path for m >= n, LQ path for m < n).
    if (plasma->tuning) {
        if (m < n)
            plasma_tune_gelqf(plasma, PlasmaComplexDouble, m, n);
        else
            plasma_tune_geqrf(plasma, PlasmaComplexDouble, m, n);
    }

    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;
    int householder_mode = plasma->householder_mode;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    // B is max(m,n)-by-nrhs so it can hold both the right-hand side (m rows)
    // and the solution (n rows) in the underdetermined case.
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        imax(m, n), nrhs, 0, 0,
                                        imax(m, n), nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Prepare descriptor T.
    retval = plasma_descT_create(A, ib, householder_mode, T);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_descT_create() failed");
        // Release the tile matrices allocated above (previously leaked here).
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&A);
        return retval;
    }

    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = nb + ib*nb;  // geqrt/gelqt: tau + work
    retval = plasma_workspace_create(&work, lwork, PlasmaComplexDouble);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // Release everything allocated so far (previously leaked here).
        plasma_desc_destroy(T);
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&A);
        return retval;
    }

    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // Release everything allocated so far (previously leaked here).
        plasma_workspace_destroy(&work);
        plasma_desc_destroy(T);
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, sequence, &request);

        // Call the tile async function.
        plasma_omp_zgels(PlasmaNoTrans,
                         A, *T, B, work,
                         sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, sequence, &request);
        plasma_omp_zdesc2ge(B, pB, ldb, sequence, &request);
    }
    // implicit synchronization

    plasma_workspace_destroy(&work);

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_gels
 *
 * Solves overdetermined or underdetermined linear
 * system of equations using the tile QR or the tile LQ factorization.
 * May return before the computation is finished.
 * Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] trans
 *          - PlasmaNoTrans: the linear system involves A
 *            (the only supported option for now).
 *
 * @param[in,out] A
 *          Descriptor of matrix A stored in the tile layout.
 *          On exit,
 *          if m >= n, A is overwritten by details of its QR factorization
 *                     as returned by plasma_zgeqrf;
 *          if m < n,  A is overwritten by details of its LQ factorization
 *                     as returned by plasma_zgelqf.
 *
 * @param[out] T
 *          Descriptor of matrix T.
 *          Auxiliary factorization data, computed by
 *          plasma_zgeqrf or plasma_zgelqf.
 *
 * @param[in,out] B
 *          Descriptor of matrix B.
 *          On entry, right-hand side matrix B in the tile layout.
 *          On exit, solution matrix X in the tile layout.
 *
 * @param[in] work
 *          Workspace for the auxiliary arrays needed by some coreblas kernels.
 *          For QR/LQ factorizations used in GELS, it contains preallocated
 *          space for tau and work arrays. Allocated by the
 *          plasma_workspace_create function.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_zgels
 * @sa plasma_omp_cgels
 * @sa plasma_omp_dgels
 * @sa plasma_omp_sgels
 *
 ******************************************************************************/
void plasma_omp_zgels(plasma_enum_t trans,
                      plasma_desc_t A,
                      plasma_desc_t T,
                      plasma_desc_t B,
                      plasma_workspace_t work,
                      plasma_sequence_t *sequence,
                      plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    // NULL sequence/request must be rejected before any plasma_request_fail()
    // call below, since plasma_request_fail() dereferences both pointers
    // (previously these checks came last and could dereference NULL).
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }
    if (trans != PlasmaNoTrans) {
        plasma_error("only PlasmaNoTrans supported");
        plasma_request_fail(sequence, request, PlasmaErrorNotSupported);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid descriptor A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid descriptor T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid descriptor B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.m == 0 || A.n == 0 || B.n == 0) {
        // Zero matrix B.
        plasma_pzlaset(PlasmaGeneral, 0.0, 0.0, B, sequence, request);
        return;
    }

    //===============================
    // Solve using QR factorization.
    //===============================
    if (A.m >= A.n) {
        if (plasma->householder_mode == PlasmaTreeHouseholder) {
            plasma_pzgeqrf_tree(A, T, work, sequence, request);
        }
        else {
            plasma_pzgeqrf(A, T, work, sequence, request);
        }

        if (plasma->householder_mode == PlasmaTreeHouseholder) {
            plasma_pzunmqr_tree(PlasmaLeft, Plasma_ConjTrans,
                                A, T, B, work,
                                sequence, request);
        }
        else {
            plasma_pzunmqr(PlasmaLeft, Plasma_ConjTrans,
                           A, T, B, work,
                           sequence, request);
        }

        plasma_pztrsm(PlasmaLeft, PlasmaUpper,
                      PlasmaNoTrans, PlasmaNonUnit,
                      1.0, plasma_desc_view(A, 0, 0, A.n, A.n),
                           plasma_desc_view(B, 0, 0, A.n, B.n),
                      sequence, request);
    }
    //===============================
    // Solve using LQ factorization.
    //===============================
    else {
        if (plasma->householder_mode == PlasmaTreeHouseholder) {
            plasma_pzgelqf_tree(A, T, work, sequence, request);
        }
        else {
            plasma_pzgelqf(A, T, work, sequence, request);
        }

        // Zero the trailing block of the right-hand-side matrix.
        // B has less rows than X.
        plasma_pzlaset(PlasmaGeneral, 0.0, 0.0,
                       plasma_desc_view(B, A.m, 0, A.n-A.m, B.n),
                       sequence, request);

        // Solve L * Y = B.
        plasma_pztrsm(PlasmaLeft, PlasmaLower,
                      PlasmaNoTrans, PlasmaNonUnit,
                      1.0, plasma_desc_view(A, 0, 0, A.m, A.m),
                           plasma_desc_view(B, 0, 0, A.m, B.n),
                      sequence, request);

        // Find X = Q^H * Y.
        if (plasma->householder_mode == PlasmaTreeHouseholder) {
            plasma_pzunmlq_tree(PlasmaLeft, Plasma_ConjTrans,
                                A, T, B, work,
                                sequence, request);
        }
        else {
            plasma_pzunmlq(PlasmaLeft, Plasma_ConjTrans,
                           A, T, B, work,
                           sequence, request);
        }
    }
}
TaskDispatcher.h
#include "nvtt.h"

// OpenMP
// http://en.wikipedia.org/wiki/OpenMP
#if defined(HAVE_OPENMP)
#include <omp.h>
#endif

// Grand Central Dispatch (GCD/libdispatch)
// http://developer.apple.com/mac/library/documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
#if NV_OS_DARWIN && defined(HAVE_DISPATCH_H)
//#define HAVE_GCD 1
//#include <dispatch/dispatch.h>
#endif

// Parallel Patterns Library (PPL) is part of Microsoft's concurrency runtime:
// http://msdn.microsoft.com/en-us/library/dd504870.aspx
#if NV_OS_WIN32 && _MSC_VER >= 1600
//#define HAVE_PPL 1
#include <ppl.h>
#endif

// Intel Thread Building Blocks (TBB).
// http://www.threadingbuildingblocks.org/
#if defined(HAVE_TBB)
#include <tbb/parallel_for.h>
#include <tbb/blocked_range.h>
#endif

#include "nvthread/ParallelFor.h"

namespace nvtt {

    // Runs all tasks one after another on the calling thread.
    struct SequentialTaskDispatcher : public TaskDispatcher {
        virtual void dispatch(Task * task, void * context, int count) {
            for (int i = 0; i < count; i++) {
                task(context, i);
            }
        }
    };

    // Dispatches tasks through nvthread's own thread-pool based ParallelFor.
    struct ParallelTaskDispatcher : public TaskDispatcher {
        virtual void dispatch(Task * task, void * context, int count) {
            nv::ParallelFor parallelFor(task, context);
            parallelFor.run(count);   // @@ Add support for custom grain.
        }
    };

#if defined(HAVE_OPENMP)

    struct OpenMPTaskDispatcher : public TaskDispatcher {
        virtual void dispatch(Task * task, void * context, int count) {
            #pragma omp parallel for
            for (int i = 0; i < count; i++) {
                task(context, i);
            }
        }
    };

#endif

#if HAVE_GCD

    // Task dispatcher using Apple's Grand Central Dispatch.
    struct AppleTaskDispatcher : public TaskDispatcher {
        // @@ This is really lame, but I refuse to use size_t in the public API.
        struct BlockContext {
            Task * task;
            void * context;
        };

        static void block(void * context, size_t id) {
            BlockContext * ctx = (BlockContext *)context;
            ctx->task(ctx->context, int(id));
        }

        virtual void dispatch(Task * task, void * context, int count) {
            dispatch_queue_t q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
            BlockContext blockCtx = { task, context };
            dispatch_apply_f(count, q, &blockCtx, block);
        }
    };

#endif

#if defined(HAVE_PPL)

    // Adapter so PPL's parallel_for can invoke a plain Task callback.
    // Named distinctly from the TBB adapter below so that enabling both
    // backends at once does not redefine the same struct.
    struct PPLTaskFunctor {
        PPLTaskFunctor(Task * task, void * context) : task(task), context(context) {}

        void operator()(int n) const {
            task(context, n);
        }

        Task * task;
        void * context;
    };

    // Task dispatcher using Microsoft's concurrency runtime.
    struct MicrosoftTaskDispatcher : public TaskDispatcher {
        virtual void dispatch(Task * task, void * context, int count) {
            PPLTaskFunctor func(task, context);
            Concurrency::parallel_for(0, count, func);
        }
    };

#endif

#if defined(HAVE_TBB)

    // Adapter for TBB's range-based parallel_for. The blocked_range overload
    // of tbb::parallel_for calls operator()(const blocked_range&), not
    // operator()(int&) — the previous signature did not match and could not
    // have compiled against that overload.
    struct TBBRangeFunctor {
        TBBRangeFunctor(Task * task, void * context) : task(task), context(context) {}

        void operator()(const tbb::blocked_range<int> & range) const {
            for (int n = range.begin(); n != range.end(); ++n) {
                task(context, n);
            }
        }

        Task * task;
        void * context;
    };

    // Task dispatcher using Intel's Thread Building Blocks.
    struct IntelTaskDispatcher : public TaskDispatcher {
        virtual void dispatch(Task * task, void * context, int count) {
            tbb::parallel_for(tbb::blocked_range<int>(0, count, 1),
                              TBBRangeFunctor(task, context));
        }
    };

#endif

    // Pick the best available backend at compile time.
#if defined(HAVE_OPENMP)
    typedef OpenMPTaskDispatcher ConcurrentTaskDispatcher;
#elif defined(HAVE_TBB)
    typedef IntelTaskDispatcher ConcurrentTaskDispatcher;
#elif defined(HAVE_PPL)
    typedef MicrosoftTaskDispatcher ConcurrentTaskDispatcher;
#elif defined(HAVE_GCD)
    typedef AppleTaskDispatcher ConcurrentTaskDispatcher;
#else
    //typedef SequentialTaskDispatcher ConcurrentTaskDispatcher;
    typedef ParallelTaskDispatcher ConcurrentTaskDispatcher;
#endif

} // namespace nvtt
GB_selector.c
//------------------------------------------------------------------------------
// GB_selector: select entries from a matrix
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// GB_selector does the work for GB_select and the GxB_*select methods.  It
// also deletes zombies for GB_wait using the NONZOMBIE operator, and deletes
// entries outside a smaller matrix for GxB_*resize.

// TODO: GB_selector does not exploit the mask.

// If C is NULL on input, A is modified in-place.
// Otherwise, C is an uninitialized static header.

// Overall structure (each phase below is labelled in the body):
//  (1) typecast the Thunk scalar to the types the select/idxunop op needs;
//  (2) fast paths: iso-valued A with a value-only op, bitmap/full A,
//      and the single-pass column selectors (COLINDEX/COLLE/COLGT);
//  (3) general sparse/hypersparse case: phase1 counts surviving entries per
//      vector, phase2 copies them, then the result is transplanted into A
//      (in-place) or into the static header C.

#include "GB_select.h"
#include "GB_ek_slice.h"
#include "GB_sel__include.h"
#include "GB_scalar.h"
#include "GB_transpose.h"

#define GB_FREE_WORKSPACE                   \
{                                           \
    GB_FREE_WORK (&Zp, Zp_size) ;           \
    GB_WERK_POP (Work, int64_t) ;           \
    GB_WERK_POP (A_ek_slicing, int64_t) ;   \
    GB_FREE (&Cp, Cp_size) ;                \
    GB_FREE (&Ch, Ch_size) ;                \
    GB_FREE (&Ci, Ci_size) ;                \
    GB_FREE (&Cx, Cx_size) ;                \
}

#define GB_FREE_ALL                         \
{                                           \
    GB_phbix_free (C) ;                     \
    GB_FREE_WORKSPACE ;                     \
}

GrB_Info GB_selector
(
    GrB_Matrix C,               // output matrix, NULL or static header
    GB_Opcode opcode,           // selector opcode
    const GB_Operator op,       // user operator, NULL for resize/nonzombie
    const bool flipij,          // if true, flip i and j for user operator
    GrB_Matrix A,               // input matrix
    int64_t ithunk,             // (int64_t) Thunk, if Thunk is NULL
    const GrB_Scalar Thunk,     // optional input for select operator
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT_OP_OK_OR_NULL (op, "selectop/idxunop for GB_selector", GB0) ;
    ASSERT_SCALAR_OK_OR_NULL (Thunk, "Thunk for GB_selector", GB0) ;
    ASSERT (GB_IS_SELECTOP_CODE (opcode) || GB_IS_INDEXUNARYOP_CODE (opcode)) ;
    ASSERT_MATRIX_OK (A, "A input for GB_selector", GB_FLIP (GB0)) ;
    // positional selector (tril, triu, diag, offdiag, resize, rowindex, ...):
    // can't be jumbled.  nonzombie, entry-valued op, user op: jumbled OK
    ASSERT (GB_IMPLIES (GB_OPCODE_IS_POSITIONAL (opcode), !GB_JUMBLED (A))) ;
    ASSERT (C == NULL || (C != NULL && C->static_header)) ;

    //--------------------------------------------------------------------------
    // declare workspace
    //--------------------------------------------------------------------------

    bool in_place_A = (C == NULL) ; // GrB_wait and GB_resize only
    int64_t *restrict Zp = NULL ; size_t Zp_size = 0 ;
    GB_WERK_DECLARE (Work, int64_t) ;
    // Wfirst/Wlast/Cp_kfirst are carved out of the single Work allocation
    int64_t *restrict Wfirst = NULL ;
    int64_t *restrict Wlast = NULL ;
    int64_t *restrict Cp_kfirst = NULL ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;

    int64_t avlen = A->vlen ;
    int64_t avdim = A->vdim ;
    const bool A_iso = A->iso ;

    int64_t *restrict Cp = NULL ; size_t Cp_size = 0 ;
    int64_t *restrict Ch = NULL ; size_t Ch_size = 0 ;
    int64_t *restrict Ci = NULL ; size_t Ci_size = 0 ;
    GB_void *restrict Cx = NULL ; size_t Cx_size = 0 ;

    //--------------------------------------------------------------------------
    // get Thunk
    //--------------------------------------------------------------------------

    // The scalar value of Thunk has already been typecasted to an integer
    // (int64_t ithunk).  It is also now typecast to the same type as A (to
    // the scalar athunk) which is required for GxB_SelectOps, and to the
    // op->ytype (the scalar ythunk) for GrB_IndexUnaryOps.

    // If Thunk is NULL, or has no entry, it is treated as a scalar value
    // of zero.

    const size_t asize = A->type->size ;
    const GB_Type_code acode = A->type->code ;

    GrB_Type ytype = NULL, xtype = NULL ;
    GB_Type_code ycode = GB_ignore_code, xcode = GB_ignore_code ;
    size_t ysize = 1, xsize = 1 ;

    if (op != NULL)
    {
        if (op->ytype != NULL)
        {
            // get the type of the thunk input of the operator
            ytype = op->ytype ;
            ycode = ytype->code ;
            ysize = ytype->size ;
        }
        if (op->xtype != NULL)
        {
            // get the type of the A input of the operator
            xtype = op->xtype ;
            xcode = xtype->code ;
            xsize = xtype->size ;
        }
    }

    // athunk = (A->type) Thunk, for selectop thunk comparators only
    GB_void athunk [GB_VLA(asize)] ;
    memset (athunk, 0, asize) ;

    // ythunk = (op->ytype) Thunk, for idxnunop
    GB_void ythunk [GB_VLA(ysize)] ;
    memset (ythunk, 0, ysize) ;

    bool op_is_selectop = GB_IS_SELECTOP_CODE (opcode) ;
    bool op_is_idxunop = GB_IS_INDEXUNARYOP_CODE (opcode) ;
    bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;

    if (Thunk != NULL)
    {
        // Thunk is passed to GB_selector only if it is non-empty
        ASSERT (GB_nnz ((GrB_Matrix) Thunk) > 0) ;
        const GB_Type_code tcode = Thunk->type->code ;
        if (op_is_selectop && opcode != GB_USER_selop_code)
        {
            // athunk = (atype) Thunk, for built-in GxB_SelectOps only
            GB_cast_scalar (athunk, acode, Thunk->x, tcode, asize) ;
        }
        if (ytype != NULL)
        {
            // ythunk = (op->ytype) Thunk
            GB_cast_scalar (ythunk, ycode, Thunk->x, tcode, ysize) ;
        }
    }

    //--------------------------------------------------------------------------
    // handle iso case for built-in select ops that depend only on the value
    //--------------------------------------------------------------------------

    bool op_is_select_valued = opcode >= GB_NONZERO_selop_code
        && opcode <= GB_LE_THUNK_selop_code ;
    bool op_is_idxunop_valued = opcode >= GB_VALUENE_idxunop_code
        && opcode <= GB_VALUELE_idxunop_code ;

    if (A_iso && (op_is_select_valued || op_is_idxunop_valued))
    {

        // select op is NONZERO, EQ_ZERO, GT_ZERO, GE_ZERO, LT_ZERO, LE_ZERO,
        // EQ_THUNK, GT_THUNK, GE_THUNK, LT_THUNK, or LE_THUNK, or the idxunop
        // VALUE* operators.  All of these select/idxunop ops depend only on
        // the value of A(i,j).  Since A is iso, either all entries in A will
        // be copied to C and thus C can be created as a shallow copy of A, or
        // no entries from A will be copied to C and thus C is an empty matrix.
        // The select factory is not needed, except to check the iso value via
        // GB_bitmap_selector.

        ASSERT (!in_place_A) ;
        ASSERT (C != NULL && C->static_header) ;

        // construct a scalar containing the iso scalar of A
        // xscalar = (op->xtype) A->x for idxunops
        GB_void xscalar [GB_VLA(xsize)] ;
        memset (xscalar, 0, xsize) ;
        struct GB_Scalar_opaque S_header ;
        GrB_Scalar S ;
        if (op_is_select_valued)
        {
            // wrap the iso-value of A in the scalar S, with no typecasting
            S = GB_Scalar_wrap (&S_header, A->type, A->x) ;
        }
        else
        {
            // wrap the iso-value of A in the scalar S, typecasted to xtype
            // xscalar = (op->xtype) A->x
            GB_cast_scalar (xscalar, xcode, A->x, acode, asize) ;
            S = GB_Scalar_wrap (&S_header, xtype, xscalar) ;
        }
        S->iso = false ;    // but ensure S is not iso
        ASSERT_SCALAR_OK (S, "iso scalar wrap", GB0) ;

        // apply the select operator to the 1-by-1 iso scalar S; the result
        // having 0 or 1 entry decides between "empty C" and "shallow copy"
        GB_OK (GB_bitmap_selector (C, false, opcode, op, false, (GrB_Matrix) S,
            ithunk, athunk, ythunk, Context)) ;
        ASSERT_MATRIX_OK (C, "C from iso scalar test", GB0) ;
        bool C_empty = (GB_nnz (C) == 0) ;
        GB_phbix_free (C) ;

        // check if C has 0 or 1 entry
        if (C_empty)
        {
            // C is an empty matrix
            return (GB_new (&C, true,  // static header
                A->type, avlen, avdim, GB_Ap_calloc, true,
                GxB_SPARSE + GxB_HYPERSPARSE, GB_Global_hyper_switch_get ( ),
                1, Context)) ;
        }
        else
        {
            // C is a shallow copy of A with all the same entries as A
            // set C->iso = A->iso  OK
            return (GB_shallow_copy (C, true, A, Context)) ;
        }
    }

    // now if A is iso, the following operators still need to be handled:
    //      GB_TRIL_selop_code        : use GB_sel__tril_iso
    //      GB_TRIU_selop_code        : use GB_sel__triu_iso
    //      GB_DIAG_selop_code        : use GB_sel__diag_iso
    //      GB_OFFDIAG_selop_code     : use GB_sel__offdiag_iso
    //      GB_NONZOMBIE_selop_code   : use GB_sel__nonzombie_iso
    //      GB_USER_selop_code        : use GB_sel__user_iso
    //      GB_ROWINDEX_idxunop_code  : use GB_sel__rowindex_iso
    //      GB_ROWLE_idxunop_code     : use GB_sel__rowle_iso
    //      GB_ROWGT_idxunop_code     : use GB_sel__rowle_iso
    //      all other idxunop         : use GB_sel__idxunop_iso

    // column selectors are handled below:
    //      GB_COLINDEX_idxunop_code  :
    //      GB_COLLE_idxunop_code     :
    //      GB_COLGT_idxunop_code     :

    // Except for GB_USER_selop_code and idxunop, the GB_sel__*_iso methods do
    // not access the values of A and C, just the pattern.

    //--------------------------------------------------------------------------
    // handle the bitmap/as-if-full case
    //--------------------------------------------------------------------------

    bool use_bitmap_selector ;
    if (opcode == GB_NONZOMBIE_selop_code || in_place_A)
    {
        // GB_bitmap_selector does not support the nonzombie opcode, nor does
        // it support operating on A in place.  For the NONZOMBIE operator, A
        // will never be bitmap.
        use_bitmap_selector = false ;
    }
    else if (opcode == GB_DIAG_selop_code)
    {
        // GB_bitmap_selector supports the DIAG operator, but it is currently
        // not efficient (GB_bitmap_selector should return a sparse diagonal
        // matrix, not bitmap).  So use the sparse case if A is not bitmap,
        // since the sparse case below does not support the bitmap case.
        use_bitmap_selector = GB_IS_BITMAP (A) ;
    }
    else
    {
        // For bitmap, full, or as-if-full matrices (sparse/hypersparse with
        // all entries present, not jumbled, no zombies, and no pending
        // tuples), use the bitmap selector for all other operators (TRIL,
        // TRIU, OFFDIAG, NONZERO, EQ*, GT*, GE*, LT*, LE*, and user-defined
        // operators).
        use_bitmap_selector = GB_IS_BITMAP (A) || GB_as_if_full (A) ;
    }

    //--------------------------------------------------------------------------
    // determine if C is iso for a non-iso A
    //--------------------------------------------------------------------------

    bool C_iso = A_iso ||                       // C iso value is Ax [0]
        (opcode == GB_EQ_ZERO_selop_code) ||    // C iso value is zero
        (opcode == GB_EQ_THUNK_selop_code) ||   // C iso value is thunk
        (opcode == GB_NONZERO_selop_code &&
         acode == GB_BOOL_code) ;               // C iso value is true

    if (C_iso)
    {
        GB_BURBLE_MATRIX (A, "(iso select) ") ;
    }

    //==========================================================================
    // bitmap/full case
    //==========================================================================

    if (use_bitmap_selector)
    {
        GB_BURBLE_MATRIX (A, "(bitmap select) ") ;
        ASSERT (C != NULL && C->static_header) ;
        return (GB_bitmap_selector (C, C_iso, opcode, op, flipij, A, ithunk,
            athunk, ythunk, Context)) ;
    }

    //==========================================================================
    // sparse/hypersparse case
    //==========================================================================

    //--------------------------------------------------------------------------
    // determine the max number of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    //--------------------------------------------------------------------------
    // get A: sparse, hypersparse, or full
    //--------------------------------------------------------------------------

    // the case when A is bitmap is always handled above by GB_bitmap_selector
    ASSERT (!GB_IS_BITMAP (A)) ;

    int64_t *restrict Ap = A->p ; size_t Ap_size = A->p_size ;
    int64_t *restrict Ah = A->h ;
    int64_t *restrict Ai = A->i ; size_t Ai_size = A->i_size ;
    GB_void *restrict Ax = (GB_void *) A->x ; size_t Ax_size = A->x_size ;
    int64_t anvec = A->nvec ;
    bool A_jumbled = A->jumbled ;
    bool A_is_hyper = (Ah != NULL) ;

    //==========================================================================
    // column selector
    //==========================================================================

    // The column selectors can be done in a single pass.

    if (opcode == GB_COLINDEX_idxunop_code ||
        opcode == GB_COLLE_idxunop_code ||
        opcode == GB_COLGT_idxunop_code)
    {

        //----------------------------------------------------------------------
        // find column j in A
        //----------------------------------------------------------------------

        ASSERT_MATRIX_OK (A, "A for col selector", GB_FLIP (GB0)) ;
        int nth = nthreads_max ;

        ASSERT (!in_place_A) ;
        ASSERT (C != NULL && C->static_header) ;
        ASSERT (GB_JUMBLED_OK (A)) ;

        // COLINDEX encodes its target column as -ithunk
        int64_t j = (opcode == GB_COLINDEX_idxunop_code) ? (-ithunk) : ithunk ;

        int64_t k = 0 ;
        bool found ;
        if (j < 0)
        {
            // j is outside the range of columns of A
            k = 0 ;
            found = false ;
        }
        else if (j >= avdim)
        {
            // j is outside the range of columns of A
            k = anvec ;
            found = false ;
        }
        else if (A_is_hyper)
        {
            // find the column j in the hyperlist of A
            int64_t kright = anvec-1 ;
            GB_SPLIT_BINARY_SEARCH (j, Ah, k, kright, found) ;
            // if found is true the Ah [k] == j
            // if found is false, then Ah [0..k-1] < j and Ah [k..anvec-1] > j
        }
        else
        {
            // j appears as the jth column in A; found is always true
            k = j ;
            found = true ;
        }

        //----------------------------------------------------------------------
        // determine the # of entries and # of vectors in C
        //----------------------------------------------------------------------

        int64_t pstart = Ap [k] ;
        int64_t pend = found ? Ap [k+1] : pstart ;
        int64_t ajnz = pend - pstart ;
        int64_t cnz, cnvec ;
        int64_t anz = Ap [anvec] ;

        if (opcode == GB_COLINDEX_idxunop_code)
        {
            // COLINDEX: delete column j:  C = A (:, [0:j-1 j+1:end])
            cnz = anz - ajnz ;
            cnvec = (A_is_hyper && found) ? (anvec-1) : anvec ;
        }
        else if (opcode == GB_COLLE_idxunop_code)
        {
            // COLLE: C = A (:, 0:j)
            cnz = pend ;
            cnvec = (A_is_hyper) ? (found ? (k+1) : k) : anvec ;
        }
        else // (opcode == GB_COLGT_idxunop_code)
        {
            // COLGT: C = A (:, j+1:end)
            cnz = anz - pend ;
            cnvec = anvec - ((A_is_hyper) ? (found ? (k+1) : k) : 0) ;
        }

        if (cnz == anz)
        {
            // C is the same as A: return it a pure shallow copy
            return (GB_shallow_copy (C, true, A, Context)) ;
        }
        else if (cnz == 0)
        {
            // return C as empty
            return (GB_new (&C, true,   // auto (sparse or hyper), static header
                A->type, avlen, avdim, GB_Ap_calloc, true,
                GxB_HYPERSPARSE, GB_Global_hyper_switch_get ( ), 1, Context)) ;
        }

        //----------------------------------------------------------------------
        // allocate C
        //----------------------------------------------------------------------

        int sparsity = (A_is_hyper) ? GxB_HYPERSPARSE : GxB_SPARSE ;
        GB_OK (GB_new_bix (&C, true,    // sparse or hyper (from A), static header
            A->type, avlen, avdim, GB_Ap_malloc, true, sparsity, false,
            A->hyper_switch, cnvec, cnz, true, A_iso, Context)) ;
        ASSERT (info == GrB_SUCCESS) ;
        int nth2 = GB_nthreads (cnvec, chunk, nth) ;

        int64_t *restrict Cp = C->p ;
        int64_t *restrict Ch = C->h ;
        int64_t *restrict Ci = C->i ;
        GB_void *restrict Cx = (GB_void *) C->x ;
        int64_t kk ;

        //----------------------------------------------------------------------
        // construct C
        //----------------------------------------------------------------------

        if (A_iso)
        {
            // Cx [0] = Ax [0]
            memcpy (Cx, Ax, asize) ;
        }

        if (opcode == GB_COLINDEX_idxunop_code)
        {

            //------------------------------------------------------------------
            // COLINDEX: delete the column j
            //------------------------------------------------------------------

            if (A_is_hyper)
            {
                ASSERT (found) ;
                // Cp [0:k-1] = Ap [0:k-1]
                GB_memcpy (Cp, Ap, k * sizeof (int64_t), nth) ;
                // Cp [k:cnvec] = Ap [k+1:anvec] - ajnz
                #pragma omp parallel for num_threads(nth2)
                for (kk = k ; kk <= cnvec ; kk++)
                {
                    Cp [kk] = Ap [kk+1] - ajnz ;
                }
                // Ch [0:k-1] = Ah [0:k-1]
                GB_memcpy (Ch, Ah, k * sizeof (int64_t), nth) ;
                // Ch [k:cnvec-1] = Ah [k+1:anvec-1]
                GB_memcpy (Ch + k, Ah + (k+1),
                    (cnvec-k) * sizeof (int64_t), nth) ;
            }
            else
            {
                // Cp [0:k] = Ap [0:k]
                GB_memcpy (Cp, Ap, (k+1) * sizeof (int64_t), nth) ;
                // Cp [k+1:anvec] = Ap [k+1:anvec] - ajnz
                #pragma omp parallel for num_threads(nth2)
                for (kk = k+1 ; kk <= cnvec ; kk++)
                {
                    Cp [kk] = Ap [kk] - ajnz ;
                }
            }
            // Ci [0:pstart-1] = Ai [0:pstart-1]
            GB_memcpy (Ci, Ai, pstart * sizeof (int64_t), nth) ;
            // Ci [pstart:cnz-1] = Ai [pend:anz-1]
            GB_memcpy (Ci + pstart, Ai + pend,
                (cnz - pstart) * sizeof (int64_t), nth) ;
            if (!A_iso)
            {
                // Cx [0:pstart-1] = Ax [0:pstart-1]
                GB_memcpy (Cx, Ax, pstart * asize, nth) ;
                // Cx [pstart:cnz-1] = Ax [pend:anz-1]
                GB_memcpy (Cx + pstart * asize, Ax + pend * asize,
                    (cnz - pstart) * asize, nth) ;
            }

        }
        else if (opcode == GB_COLLE_idxunop_code)
        {

            //------------------------------------------------------------------
            // COLLE: C = A (:, 0:j)
            //------------------------------------------------------------------

            if (A_is_hyper)
            {
                // Cp [0:cnvec] = Ap [0:cnvec]
                GB_memcpy (Cp, Ap, (cnvec+1) * sizeof (int64_t), nth) ;
                // Ch [0:cnvec-1] = Ah [0:cnvec-1]
                GB_memcpy (Ch, Ah, (cnvec) * sizeof (int64_t), nth) ;
            }
            else
            {
                // Cp [0:k+1] = Ap [0:k+1]
                ASSERT (found) ;
                GB_memcpy (Cp, Ap, (k+2) * sizeof (int64_t), nth) ;
                // Cp [k+2:cnvec] = cnz
                #pragma omp parallel for num_threads(nth2)
                for (kk = k+2 ; kk <= cnvec ; kk++)
                {
                    Cp [kk] = cnz ;
                }
            }
            // Ci [0:cnz-1] = Ai [0:cnz-1]
            GB_memcpy (Ci, Ai, cnz * sizeof (int64_t), nth) ;
            if (!A_iso)
            {
                // Cx [0:cnz-1] = Ax [0:cnz-1]
                GB_memcpy (Cx, Ax, cnz * asize, nth) ;
            }

        }
        else // (opcode == GB_COLGT_idxunop_code)
        {

            //------------------------------------------------------------------
            // COLGT: C = A (:, j+1:end)
            //------------------------------------------------------------------

            if (A_is_hyper)
            {
                // Cp [0:cnvec] = Ap [k+found:anvec] - pend
                #pragma omp parallel for num_threads(nth2)
                for (kk = 0 ; kk <= cnvec ; kk++)
                {
                    Cp [kk] = Ap [kk + k + found] - pend ;
                }
                // Ch [0:cnvec-1] = Ah [k+found:anvec-1]
                GB_memcpy (Ch, Ah + k + found, cnvec * sizeof (int64_t), nth) ;
            }
            else
            {
                ASSERT (found) ;
                // Cp [0:k] = 0
                GB_memset (Cp, 0, (k+1) * sizeof (int64_t), nth) ;
                // Cp [k+1:cnvec] = Ap [k+1:cnvec] - pend
                #pragma omp parallel for num_threads(nth2)
                for (kk = k+1 ; kk <= cnvec ; kk++)
                {
                    Cp [kk] = Ap [kk] - pend ;
                }
            }
            // Ci [0:cnz-1] = Ai [pend:anz-1]
            GB_memcpy (Ci, Ai + pend, cnz * sizeof (int64_t), nth) ;
            if (!A_iso)
            {
                // Cx [0:cnz-1] = Ax [pend:anz-1]
                GB_memcpy (Cx, Ax + pend * asize, cnz * asize, nth) ;
            }
        }

        //----------------------------------------------------------------------
        // finalize the matrix, free workspace, and return result
        //----------------------------------------------------------------------

        C->nvec = cnvec ;
        C->magic = GB_MAGIC ;
        C->jumbled = A_jumbled ;    // C is jumbled if A is jumbled
        C->iso = C_iso ;            // OK: burble already done above
        C->nvec_nonempty = GB_nvec_nonempty (C, Context) ;
        ASSERT_MATRIX_OK (C, "C output for GB_selector (column select)", GB0) ;
        return (GrB_SUCCESS) ;
    }

    //==========================================================================
    // all other select/idxunop operators
    //==========================================================================

    // From here on, C's components (Cp/Ch/Ci/Cx) are owned locally until the
    // final transplant, so GB_FREE_ALL must free the partial C as well.
    #undef  GB_FREE_ALL
    #define GB_FREE_ALL                         \
    {                                           \
        GB_phbix_free (C) ;                     \
        GB_FREE_WORKSPACE ;                     \
    }

    //--------------------------------------------------------------------------
    // allocate the new vector pointers of C
    //--------------------------------------------------------------------------

    int64_t cnz = 0 ;
    Cp = GB_CALLOC (anvec+1, int64_t, &Cp_size) ;
    if (Cp == NULL)
    {
        // out of memory
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // slice the entries for each task
    //--------------------------------------------------------------------------

    int A_ntasks, A_nthreads ;
    // DIAG touches only one entry per vector, so its work estimate excludes
    // the total entry count
    double work = 8*anvec
        + ((opcode == GB_DIAG_selop_code) ? 0 : GB_nnz_held (A)) ;
    GB_SLICE_MATRIX_WORK (A, 8, chunk, work) ;

    //--------------------------------------------------------------------------
    // allocate workspace for each task
    --------------------------------------------------------------------------*/

    GB_WERK_PUSH (Work, 3*A_ntasks, int64_t) ;
    if (Work == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }
    Wfirst    = Work ;
    Wlast     = Work + A_ntasks ;
    Cp_kfirst = Work + A_ntasks * 2 ;

    //--------------------------------------------------------------------------
    // allocate workspace for phase1
    //--------------------------------------------------------------------------

    // phase1 counts the number of live entries in each vector of A.  The
    // result is computed in Cp, where Cp [k] is the number of live entries in
    // the kth vector of A.  Zp [k] is the location of the A(i,k) entry, for
    // positional operators.

    if (op_is_positional)
    {
        // allocate Zp
        Zp = GB_MALLOC_WORK (anvec, int64_t, &Zp_size) ;
        if (Zp == NULL)
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }
    }

    //--------------------------------------------------------------------------
    // phase1: count the live entries in each column
    //--------------------------------------------------------------------------

    // define the worker for the switch factory
    #define GB_SELECT_PHASE1
    #define GB_sel1(opname,aname) GB (_sel_phase1_ ## opname ## aname)
    #define GB_SEL_WORKER(opname,aname,atype)                           \
    {                                                                   \
        GB_sel1 (opname, aname) (Zp, Cp, Wfirst, Wlast, A,              \
            flipij, ithunk, (atype *) athunk, ythunk, op,               \
            A_ek_slicing, A_ntasks, A_nthreads) ;                       \
    }                                                                   \
    break ;

    // launch the switch factory
    const GB_Type_code typecode = (A_iso) ? GB_ignore_code : acode ;
    #include "GB_select_factory.c"

    #undef  GB_SELECT_PHASE1
    #undef  GB_SEL_WORKER

    //--------------------------------------------------------------------------
    // cumulative sum of Cp and compute Cp_kfirst
    //--------------------------------------------------------------------------

    int64_t C_nvec_nonempty ;
    GB_ek_slice_merge2 (&C_nvec_nonempty, Cp_kfirst, Cp, anvec,
        Wfirst, Wlast, A_ek_slicing, A_ntasks, A_nthreads, Context) ;

    //--------------------------------------------------------------------------
    // allocate new space for the compacted Ci and Cx
    //--------------------------------------------------------------------------

    cnz = Cp [anvec] ;
    cnz = GB_IMAX (cnz, 1) ;
    Ci = GB_MALLOC (cnz, int64_t, &Ci_size) ;
    Cx = (GB_void *) GB_XALLOC (C_iso, cnz, asize, &Cx_size) ;
    if (Ci == NULL || Cx == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // set the iso value of C
    //--------------------------------------------------------------------------

    if (C_iso)
    {
        // The pattern of C is computed by the worker below, for the DIAG,
        // OFFDIAG, TRIL, TRIU, NONZOMBIE, and USER select operators.
        GB_iso_select (Cx, opcode, athunk, Ax, acode, asize) ;
    }

    //--------------------------------------------------------------------------
    // phase2: select the entries
    //--------------------------------------------------------------------------

    // define the worker for the switch factory
    #define GB_SELECT_PHASE2
    #define GB_sel2(opname,aname) GB (_sel_phase2_ ## opname ## aname)
    #define GB_SEL_WORKER(opname,aname,atype)                           \
    {                                                                   \
        GB_sel2 (opname, aname) (Ci, (atype *) Cx, Zp, Cp, Cp_kfirst, A,\
            flipij, ithunk, (atype *) athunk, ythunk, op,               \
            A_ek_slicing, A_ntasks, A_nthreads) ;                       \
    }                                                                   \
    break ;

    // launch the switch factory
    #include "GB_select_factory.c"

    //--------------------------------------------------------------------------
    // create the result
    //--------------------------------------------------------------------------

    if (in_place_A)
    {

        //----------------------------------------------------------------------
        // transplant Cp, Ci, Cx back into A
        //----------------------------------------------------------------------

        // TODO: this is not parallel: use GB_hyper_prune
        if (A->h != NULL && C_nvec_nonempty < anvec)
        {
            // prune empty vectors from Ah and Ap
            int64_t cnvec = 0 ;
            for (int64_t k = 0 ; k < anvec ; k++)
            {
                if (Cp [k] < Cp [k+1])
                {
                    Ah [cnvec] = Ah [k] ;
                    Ap [cnvec] = Cp [k] ;
                    cnvec++ ;
                }
            }
            Ap [cnvec] = Cp [anvec] ;
            A->nvec = cnvec ;
            ASSERT (A->nvec == C_nvec_nonempty) ;
            GB_FREE (&Cp, Cp_size) ;
        }
        else
        {
            // free the old A->p and transplant in Cp as the new A->p
            GB_FREE (&Ap, Ap_size) ;
            A->p = Cp ; Cp = NULL ;
            A->p_size = Cp_size ;
            A->plen = anvec ;
        }

        ASSERT (Cp == NULL) ;

        GB_FREE (&Ai, Ai_size) ;
        GB_FREE (&Ax, Ax_size) ;
        A->i = Ci ; Ci = NULL ;
        A->i_size = Ci_size ;
        A->x = Cx ; Cx = NULL ;
        A->x_size = Cx_size ;
        A->nvec_nonempty = C_nvec_nonempty ;
        A->jumbled = A_jumbled ;        // A remains jumbled (in-place select)
        A->iso = C_iso ;                // OK: burble already done above

        // the NONZOMBIE opcode may have removed all zombies, but A->nzombies
        // is still nonzero.  It is set to zero in GB_wait.
        ASSERT_MATRIX_OK (A, "A output for GB_selector", GB_FLIP (GB0)) ;

    }
    else
    {

        //----------------------------------------------------------------------
        // create C and transplant Cp, Ch, Ci, Cx into C
        //----------------------------------------------------------------------

        int sparsity = (A_is_hyper) ? GxB_HYPERSPARSE : GxB_SPARSE ;
        ASSERT (C != NULL && C->static_header) ;
        info = GB_new (&C, true,    // sparse or hyper (from A), static header
            A->type, avlen, avdim, GB_Ap_null, true,
            sparsity, A->hyper_switch, anvec, Context) ;
        ASSERT (info == GrB_SUCCESS) ;

        if (A->h != NULL)
        {

            //------------------------------------------------------------------
            // A and C are hypersparse: copy non-empty vectors from Ah to Ch
            //------------------------------------------------------------------

            Ch = GB_MALLOC (anvec, int64_t, &Ch_size) ;
            if (Ch == NULL)
            {
                // out of memory
                GB_FREE_ALL ;
                return (GrB_OUT_OF_MEMORY) ;
            }

            // TODO: do in parallel: use GB_hyper_prune
            int64_t cnvec = 0 ;
            for (int64_t k = 0 ; k < anvec ; k++)
            {
                if (Cp [k] < Cp [k+1])
                {
                    Ch [cnvec] = Ah [k] ;
                    Cp [cnvec] = Cp [k] ;
                    cnvec++ ;
                }
            }
            Cp [cnvec] = Cp [anvec] ;
            C->nvec = cnvec ;
            ASSERT (C->nvec == C_nvec_nonempty) ;
        }

        C->p = Cp ; Cp = NULL ; C->p_size = Cp_size ;
        C->h = Ch ; Ch = NULL ; C->h_size = Ch_size ;
        C->i = Ci ; Ci = NULL ; C->i_size = Ci_size ;
        C->x = Cx ; Cx = NULL ; C->x_size = Cx_size ;
        C->plen = anvec ;
        C->magic = GB_MAGIC ;
        C->nvec_nonempty = C_nvec_nonempty ;
        C->jumbled = A_jumbled ;    // C is jumbled if A is jumbled
        C->iso = C_iso ;            // OK: burble already done above
        ASSERT_MATRIX_OK (C, "C output for GB_selector", GB0) ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
}
cnn.h
/* Copyright (c) 2016, TU Dresden Copyright (c) 2017, Heidelberg University All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the TU Dresden, Heidelberg University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL TU DRESDEN OR HEIDELBERG UNIVERSITY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #define CNN_OBJ_MAXINPUT 100.0 // reprojection errors are clamped at this magnitude #include "util.h" #include "maxloss.h" /** * @brief Checks whether the given matrix contains NaN entries. * @param m Input matrix. * @return True if m contrains NaN entries. */ inline bool containsNaNs(const cv::Mat& m) { return cv::sum(cv::Mat(m != m))[0] > 0; } /** * @brief Wrapper around the OpenCV PnP function that returns a zero pose in case PnP fails. 
See also documentation of cv::solvePnP. * @param objPts List of 3D points. * @param imgPts Corresponding 2D points. * @param camMat Calibration matrix of the camera. * @param distCoeffs Distortion coefficients. * @param rot Output parameter. Camera rotation. * @param trans Output parameter. Camera translation. * @param extrinsicGuess If true uses input rot and trans as initialization. * @param methodFlag Specifies the PnP algorithm to be used. * @return True if PnP succeeds. */ inline bool safeSolvePnP( const std::vector<cv::Point3f>& objPts, const std::vector<cv::Point2f>& imgPts, const cv::Mat& camMat, const cv::Mat& distCoeffs, cv::Mat& rot, cv::Mat& trans, bool extrinsicGuess, int methodFlag) { if(rot.type() == 0) rot = cv::Mat_<double>::zeros(1, 3); if(trans.type() == 0) trans= cv::Mat_<double>::zeros(1, 3); if(!cv::solvePnP(objPts, imgPts, camMat, distCoeffs, rot, trans, extrinsicGuess,methodFlag)) { rot = cv::Mat_<double>::zeros(1, 3); trans = cv::Mat_<double>::zeros(1, 3); return false; } return true; } /** * @brief Calculate the Shannon entropy of a discrete distribution. * @param dist Discrete distribution. Probability per entry, should sum to 1. * @return Shannon entropy. */ double entropy(const std::vector<double>& dist) { double e = 0; for(unsigned i = 0; i < dist.size(); i++) if(dist[i] > 0) e -= dist[i] * std::log2(dist[i]); return e; } /** * @brief Draws an entry of a discrete distribution according to the given probabilities. * * If randomDraw is false in the properties, this function will return the entry with the max. probability. * * @param probs Discrete distribution. Probability per entry, should sum to 1. * @return Chosen entry. 
*/ int draw(const std::vector<double>& probs) { std::map<double, int> cumProb; double probSum = 0; double maxProb = -1; double maxIdx = 0; for(unsigned idx = 0; idx < probs.size(); idx++) { if(probs[idx] < EPS) continue; probSum += probs[idx]; cumProb[probSum] = idx; if(maxProb < 0 || probs[idx] > maxProb) { maxProb = probs[idx]; maxIdx = idx; } } if(GlobalProperties::getInstance()->tP.randomDraw) return cumProb.upper_bound(drand(0, probSum))->second; else return maxIdx; } /** * @brief Calculates the expected loss of a list of poses with associated probabilities. * @param gt Ground truth pose. * @param hyps List of estimated poses. * @param probs List of probabilities associated with the estimated poses. * @param losses Output parameter. List of losses for each estimated pose. * @return Expectation of loss. */ double expectedMaxLoss( const jp::cv_trans_t& gt, const std::vector<jp::cv_trans_t>& hyps, const std::vector<double>& probs, std::vector<double>& losses) { double loss = 0; losses.resize(hyps.size()); for(unsigned i = 0; i < hyps.size(); i++) { losses[i] = maxLoss(gt, hyps.at(i)); loss += probs[i] * losses[i]; } return loss; } /** * @brief Calculates the Jacobean of the PNP function w.r.t. the object coordinate inputs. * * PNP is treated as a n x 3 -> 6 fnuction, i.e. it takes n 3D coordinates and maps them to a 6D pose. * The Jacobean is therefore 6x3n. The Jacobean is calculated using central differences. * * @param imgPts List of 2D points. * @param objPts List of corresponding 3D points. * @param eps Epsilon used in central differences approximation. * @return 6x3n Jacobean matrix of partial derivatives. */ cv::Mat_<double> dPNP( const std::vector<cv::Point2f>& imgPts, std::vector<cv::Point3f> objPts, float eps = 0.001f) { int pnpMethod = (imgPts.size() == 4) ? CV_P3P : CV_ITERATIVE; //in case of P3P the 4th point is needed to resolve ambiguities, its derivative is zero int effectiveObjPoints = (pnpMethod == CV_P3P) ? 
        3 : objPts.size();

    cv::Mat_<float> camMat = GlobalProperties::getInstance()->getCamMat();

    // one 6D pose column per coordinate dimension (3 per object point)
    cv::Mat_<double> jacobean = cv::Mat_<double>::zeros(6, objPts.size() * 3);
    bool success;

    // central differences: perturb each coordinate of each effective point by +/- eps
    for(unsigned i = 0; i < effectiveObjPoints; i++)
    for(unsigned j = 0; j < 3; j++)
    {
        if(j == 0) objPts[i].x += eps;
        else if(j == 1) objPts[i].y += eps;
        else if(j == 2) objPts[i].z += eps;

        // forward step
        jp::cv_trans_t fStep;
        success = safeSolvePnP(objPts, imgPts, camMat, cv::Mat(), fStep.first, fStep.second, false, pnpMethod);

        // if PnP fails at any probe the whole Jacobean is considered invalid (all zeros)
        if(!success)
            return cv::Mat_<double>::zeros(6, objPts.size() * 3);

        if(j == 0) objPts[i].x -= 2 * eps;
        else if(j == 1) objPts[i].y -= 2 * eps;
        else if(j == 2) objPts[i].z -= 2 * eps;

        // backward step
        jp::cv_trans_t bStep;
        success = safeSolvePnP(objPts, imgPts, camMat, cv::Mat(), bStep.first, bStep.second, false, pnpMethod);

        if(!success)
            return cv::Mat_<double>::zeros(6, objPts.size() * 3);

        // restore the original coordinate value
        if(j == 0) objPts[i].x += eps;
        else if(j == 1) objPts[i].y += eps;
        else if(j == 2) objPts[i].z += eps;

        // gradient calculation: (f(x + eps) - f(x - eps)) / (2 * eps)
        fStep.first = (fStep.first - bStep.first) / (2 * eps);
        fStep.second = (fStep.second - bStep.second) / (2 * eps);

        // rows 0-2: rotation derivative, rows 3-5: translation derivative
        fStep.first.copyTo(jacobean.col(i * 3 + j).rowRange(0, 3));
        fStep.second.copyTo(jacobean.col(i * 3 + j).rowRange(3, 6));

        // reject the whole Jacobean if any derivative came out NaN
        if(containsNaNs(jacobean.col(i * 3 + j)))
            return cv::Mat_<double>::zeros(6, objPts.size() * 3);
    }

    return jacobean;
}

/**
 * @brief Calculate the average of all matrix entries.
 * @param mat Input matrix.
 * @return Average of the absolute values of the entries.
 */
double getAvg(const cv::Mat_<double>& mat)
{
    double avg = 0;

    for(unsigned x = 0; x < mat.cols; x++)
    for(unsigned y = 0; y < mat.rows; y++)
    {
        avg += std::abs(mat(y, x));
    }

    return avg / mat.cols / mat.rows;
}

/**
 * @brief Return the maximum entry of the given matrix.
 * @param mat Input matrix.
 * @return Maximum absolute entry (-1 for an empty matrix).
*/ double getMax(const cv::Mat_<double>& mat) { double m = -1; for(unsigned x = 0; x < mat.cols; x++) for(unsigned y = 0; y < mat.rows; y++) { double val = std::abs(mat(y, x)); if(m < 0 || val > m) m = val; } return m; } /** * @brief Return the median of all entries of the given matrix. * @param mat Input matrix. * @return Median entry. */ double getMed(const cv::Mat_<double>& mat) { std::vector<double> vals; for(unsigned x = 0; x < mat.cols; x++) for(unsigned y = 0; y < mat.rows; y++) vals.push_back(std::abs(mat(y, x))); std::sort(vals.begin(), vals.end()); return vals[vals.size() / 2]; } /** * @brief Transform an RGB image to a floating point CNN input map. * * The image will be cropped to CNN input size. * In training mode, the input will be randomly shifted by small amounts, depending on the subsampling in the CNN output. * The method also creates a sampling map which maps each output of the CNN to a 2D input position in the original RGB image. * * @param img Input RGB image. * @param sampling Map that contains for each position in the CNN output the corresponding position in the RGB input (use to establish 2D-3D correspondences). * @param training True for training mode. 
 * @return Cropped image as a float map of CNN input size.
 */
cv::Mat_<cv::Vec3f> getImgMap(const jp::img_bgr_t& img, cv::Mat_<cv::Point2i>& sampling, bool training)
{
    GlobalProperties* gp = GlobalProperties::getInstance();

    // CNN input (full resolution crop) and output (subsampled) dimensions
    int cnnInputW = gp->getCNNInputDimX();
    int cnnInputH = gp->getCNNInputDimY();
    int cnnOutputW = gp->getCNNOutputDimX();
    int cnnOutputH = gp->getCNNOutputDimY();
    int cnnSubSampling = gp->dP.cnnSubSample;

    cv::Mat_<cv::Vec3f> imgMap(cnnInputH, cnnInputW);
    sampling = cv::Mat_<cv::Point2i>(cnnOutputH, cnnOutputW);

    // slack between image size and CNN input size determines the crop offset
    int offsetX = img.cols - cnnInputW;
    int offsetY = img.rows - cnnInputH;

    if(training)
    {
        // random shift (data augmentation); crop position drawn within the slack
        offsetX = irand(0, offsetX);
        offsetY = irand(0, offsetY);
    }
    else
    {
        // crop at the center
        offsetX /= 2;
        offsetY /= 2;
    }

    // crop image
    for(unsigned x = 0; x < cnnInputW; x++)
    for(unsigned y = 0; y < cnnInputH; y++)
    {
        imgMap(y, x) = img(y + offsetY, x + offsetX);
    }

    // create sampling map: each CNN output cell maps to the center of its
    // receptive stride in the original (uncropped) RGB image
    for(unsigned x = 0; x < sampling.cols; x++)
    for(unsigned y = 0; y < sampling.rows; y++)
    {
        sampling(y, x) = cv::Point2i(
            offsetX + x * cnnSubSampling + cnnSubSampling / 2,
            offsetY + y * cnnSubSampling + cnnSubSampling / 2);
    }

    return imgMap;
}

/**
 * @brief Process a RGB image with the object coordinate CNN.
 * @param colorData Input RGB image.
 * @param sampling Output parameter. Subsampling information. Each 2D location contains the pixel location in the original RGB image (needed again for backward pass).
 * @param imgMaps Output parameter. RGB image transformed to CNN input maps (needed again for backward pass).
 * @param training True if training mode (controls cropping if input image).
 * @param state Lua state for access to the object coordinate CNN.
 * @return Object coordinate estimation (sub sampled).
 */
jp::img_coord_t getCoordImg(
    const jp::img_bgr_t& colorData,
    cv::Mat_<cv::Point2i>& sampling,
    std::vector<cv::Mat_<cv::Vec3f>>& imgMaps,
    bool training,
    lua_State* state)
{
    StopWatch stopW;

    // single input map per frame; kept in imgMaps for the later backward pass
    imgMaps.resize(1);
    imgMaps[0] = getImgMap(colorData, sampling, training);

    // forward pass
    std::vector<cv::Vec3f> prediction = forward(imgMaps, sampling, state);

    // reorganize: flat prediction vector -> 2D object coordinate image
    // (predictions are laid out row-major over the subsampled grid)
    jp::img_coord_t modeImg = jp::img_coord_t::zeros(sampling.rows, sampling.cols);

    for(unsigned i = 0; i < prediction.size(); i++)
    {
        int x = i % modeImg.cols;
        int y = i / modeImg.cols;

        modeImg(y, x) = prediction[i];
    }

    std::cout << "CNN prediction took " << stopW.stop() / 1000 << "s." << std::endl;

    return modeImg;
}

/**
 * @brief Calculate an image of reprojection errors for the given object coordinate prediction and the given pose.
 * @param hyp Pose estimate.
 * @param objectCoordinates Object coordinate estimate.
 * @param sampling Subsampling of the input image.
 * @param camMat Calibration matrix of the camera.
 * @return Image of reprojection errors.
 */
cv::Mat_<float> getDiffMap(
  const jp::cv_trans_t& hyp,
  const jp::img_coord_t& objectCoordinates,
  const cv::Mat_<cv::Point2i>& sampling,
  const cv::Mat& camMat)
{
    cv::Mat_<float> diffMap(sampling.size());

    std::vector<cv::Point3f> points3D;
    std::vector<cv::Point2f> projections;
    std::vector<cv::Point2f> points2D;
    std::vector<cv::Point2f> sources2D; // grid positions, used to scatter errors back

    // collect 2D-3D correspondences
    for(unsigned x = 0; x < sampling.cols; x++)
    for(unsigned y = 0; y < sampling.rows; y++)
    {
        // get 2D location of the original RGB frame
        cv::Point2f pt2D(sampling(y, x).x, sampling(y, x).y);

        // get associated 3D object coordinate prediction
        points3D.push_back(cv::Point3f(
            objectCoordinates(y, x)(0),
            objectCoordinates(y, x)(1),
            objectCoordinates(y, x)(2)));
        points2D.push_back(pt2D);
        sources2D.push_back(cv::Point2f(x, y));
    }

    if(points3D.empty()) return diffMap;

    // project object coordinate into the image using the given pose
    cv::projectPoints(points3D, hyp.first, hyp.second, camMat, cv::Mat(), projections);

    // measure reprojection errors, clamped at CNN_OBJ_MAXINPUT
    for(unsigned p = 0; p < projections.size(); p++)
    {
        cv::Point2f curPt = points2D[p] - projections[p];
        float l = std::min(cv::norm(curPt), CNN_OBJ_MAXINPUT);
        diffMap(sources2D[p].y, sources2D[p].x) = l;
    }

    return diffMap;
}

/**
 * @brief Project a 3D point into the image and measure the reprojection error.
 * @param pt Ground truth 2D location.
 * @param obj 3D point.
 * @param hyp Pose estimate.
 * @param camMat Calibration matrix of the camera.
 * @return Reprojection error in pixels.
*/ float project(const cv::Point2f& pt, const cv::Point3f& obj, const jp::cv_trans_t hyp, const cv::Mat& camMat) { double f = camMat.at<float>(0, 0); double ppx = camMat.at<float>(0, 2); double ppy = camMat.at<float>(1, 2); //transform point cv::Mat objMat = cv::Mat(obj); objMat.convertTo(objMat, CV_64F); cv::Mat rot; cv::Rodrigues(hyp.first, rot); objMat = rot * objMat + hyp.second; // project double px = f * objMat.at<double>(0, 0) / objMat.at<double>(2, 0) + ppx; double py = f * objMat.at<double>(1, 0) / objMat.at<double>(2, 0) + ppy; //std::cout << "Projected position: " << px << ", " << py << std::endl; // return error return std::min(std::sqrt((pt.x - px) * (pt.x - px) + (pt.y - py) * (pt.y - py)), CNN_OBJ_MAXINPUT); } /** * @brief Calculates the Jacobean of the projection function w.r.t the given 3D point, ie. the function has the form 3 -> 1 * @param pt Ground truth 2D location. * @param obj 3D point. * @param hyp Pose estimate. * @param camMat Calibration matrix of the camera. * @return 1x3 Jacobean matrix of partial derivatives. 
 */
cv::Mat_<double> dProjectdObj(const cv::Point2f& pt, const cv::Point3f& obj, const jp::cv_trans_t hyp, const cv::Mat& camMat)
{
    // intrinsics: focal length and principal point
    double f = camMat.at<float>(0, 0);
    double ppx = camMat.at<float>(0, 2);
    double ppy = camMat.at<float>(1, 2);

    //transform point
    cv::Mat objMat = cv::Mat(obj);
    objMat.convertTo(objMat, CV_64F);

    cv::Mat rot;
    cv::Rodrigues(hyp.first, rot);

    objMat = rot * objMat + hyp.second;

    if(std::abs(objMat.at<double>(2, 0)) < EPS) // prevent division by zero
        return cv::Mat_<double>::zeros(1, 3);

    // project
    double px = f * objMat.at<double>(0, 0) / objMat.at<double>(2, 0) + ppx;
    double py = f * objMat.at<double>(1, 0) / objMat.at<double>(2, 0) + ppy;

    // calculate error
    double err = std::sqrt((pt.x - px) * (pt.x - px) + (pt.y - py) * (pt.y - py));

    // early out if projection error is above threshold (error is clamped there, derivative is zero)
    if(err > CNN_OBJ_MAXINPUT)
        return cv::Mat_<double>::zeros(1, 3);

    err += EPS; // avoid dividing by zero

    // derivative in x direction of obj coordinate
    double pxdx = f * rot.at<double>(0, 0) / objMat.at<double>(2, 0) - f * objMat.at<double>(0, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 0);
    double pydx = f * rot.at<double>(1, 0) / objMat.at<double>(2, 0) - f * objMat.at<double>(1, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 0);
    double dx = 0.5 / err * (2 * (pt.x - px) * -pxdx + 2 * (pt.y - py) * -pydx);

    // derivative in y direction of obj coordinate
    double pxdy = f * rot.at<double>(0, 1) / objMat.at<double>(2, 0) - f * objMat.at<double>(0, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 1);
    double pydy = f * rot.at<double>(1, 1) / objMat.at<double>(2, 0) - f * objMat.at<double>(1, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 1);
    double dy = 0.5 / err * (2 * (pt.x - px) * -pxdy + 2 * (pt.y - py) * -pydy);

    // derivative in z direction of obj coordinate
    double pxdz = f * rot.at<double>(0, 2) / objMat.at<double>(2, 0) - f * objMat.at<double>(0, 0) /
        objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 2);
    double pydz = f * rot.at<double>(1, 2) / objMat.at<double>(2, 0) - f * objMat.at<double>(1, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 2);
    double dz = 0.5 / err * (2 * (pt.x - px) * -pxdz + 2 * (pt.y - py) * -pydz);

    cv::Mat_<double> jacobean(1, 3);
    jacobean(0, 0) = dx;
    jacobean(0, 1) = dy;
    jacobean(0, 2) = dz;
    return jacobean;
}

/**
 * @brief Calculates the Jacobean of the projection function w.r.t the given 6D pose, ie. the function has the form 6 -> 1
 * @param pt Ground truth 2D location.
 * @param obj 3D point.
 * @param hyp Pose estimate.
 * @param camMat Calibration matrix of the camera.
 * @return 1x6 Jacobean matrix of partial derivatives.
 */
cv::Mat_<double> dProjectdHyp(const cv::Point2f& pt, const cv::Point3f& obj, const jp::cv_trans_t hyp, const cv::Mat& camMat)
{
    // intrinsics: focal length and principal point
    double f = camMat.at<float>(0, 0);
    double ppx = camMat.at<float>(0, 2);
    double ppy = camMat.at<float>(1, 2);

    //transform point
    cv::Mat objMat = cv::Mat(obj);
    objMat.convertTo(objMat, CV_64F);

    // dRdH: Jacobian of the rotation matrix wrt the Rodrigues vector (provided by OpenCV)
    cv::Mat rot, dRdH;
    cv::Rodrigues(hyp.first, rot, dRdH);
    dRdH = dRdH.t();

    cv::Mat eyeMat = rot * objMat + hyp.second;

    if(std::abs(eyeMat.at<double>(2, 0)) < EPS) // prevent division by zero
        return cv::Mat_<double>::zeros(1, 6);

    // project
    double px = f * eyeMat.at<double>(0, 0) / eyeMat.at<double>(2, 0) + ppx; // flip x because of reasons (to conform with OpenCV implementation)
    double py = f * eyeMat.at<double>(1, 0) / eyeMat.at<double>(2, 0) + ppy;

    // calculate error
    double err = std::sqrt((pt.x - px) * (pt.x - px) + (pt.y - py) * (pt.y - py));

    // early out if projection error is above threshold (error is clamped there, derivative is zero)
    if(err > CNN_OBJ_MAXINPUT)
        return cv::Mat_<double>::zeros(1, 6);

    err += EPS; // avoid dividing by zero

    // derivative of the error wrt to projection
    cv::Mat_<double> dNdP = cv::Mat_<double>::zeros(1, 2);
    dNdP(0, 0) = -1 / err * (pt.x - px);
    dNdP(0, 1) = -1 / err * (pt.y - py);

    // derivative of projection function wrt rotation matrix
    cv::Mat_<double> dPdR = cv::Mat_<double>::zeros(2, 9);
    dPdR.row(0).colRange(0, 3) = f * objMat.t() / eyeMat.at<double>(2, 0);
    dPdR.row(1).colRange(3, 6) = f * objMat.t() / eyeMat.at<double>(2, 0);
    dPdR.row(0).colRange(6, 9) = -f * eyeMat.at<double>(0, 0) / eyeMat.at<double>(2, 0) / eyeMat.at<double>(2, 0) * objMat.t();
    dPdR.row(1).colRange(6, 9) = -f * eyeMat.at<double>(1, 0) / eyeMat.at<double>(2, 0) / eyeMat.at<double>(2, 0) * objMat.t();

    // combined derivative of the error wrt the rodriguez vector (chain rule)
    cv::Mat_<double> dNdH = dNdP * dPdR * dRdH;

    // derivative of projection wrt the translation vector
    cv::Mat_<double> dPdT = cv::Mat_<double>::zeros(2, 3);
    dPdT(0, 0) = f / eyeMat.at<double>(2, 0);
    dPdT(1, 1) = f / eyeMat.at<double>(2, 0);
    dPdT(0, 2) = -f * eyeMat.at<double>(0, 0) / eyeMat.at<double>(2, 0) / eyeMat.at<double>(2, 0);
    dPdT(1, 2) = -f * eyeMat.at<double>(1, 0) / eyeMat.at<double>(2, 0) / eyeMat.at<double>(2, 0);

    // combined derivative of error wrt the translation vector
    cv::Mat_<double> dNdT = dNdP * dPdT;

    // assemble: columns 0-2 rotation part, columns 3-5 translation part
    cv::Mat_<double> jacobean(1, 6);
    dNdH.copyTo(jacobean.colRange(0, 3));
    dNdT.copyTo(jacobean.colRange(3, 6));
    return jacobean;
}

/**
 * @brief Applies soft max to the given list of scores.
 * @param scores List of scores.
 * @return Soft max distribution (sums to 1)
 */
std::vector<double> softMax(const std::vector<double>& scores)
{
    // shift by the maximum score for numerical stability before exponentiating
    double maxScore = 0;
    for(unsigned i = 0; i < scores.size(); i++)
        if(i == 0 || scores[i] > maxScore) maxScore = scores[i];

    std::vector<double> sf(scores.size());
    double sum = 0.0;

    for(unsigned i = 0; i < scores.size(); i++)
    {
        sf[i] = std::exp(scores[i] - maxScore);
        sum += sf[i];
    }
    for(unsigned i = 0; i < scores.size(); i++)
    {
        sf[i] /= sum;
        // std::cout << "score: " << scores[i] << ", prob: " << sf[i] << std::endl;
    }

    return sf;
}

/**
 * @brief Calculates the Jacobean matrix of the function that maps n estimated object coordinates to a score, ie. the function has the form n x 3 -> 1.
 * Returns one Jacobean matrix per hypothesis.
 * @param estObj Object coordinate estimation.
 * @param sampling Sub sampling of the RGB image.
 * @param points List of minimal sets. Each one (4 correspondences) defines one hypothesis.
 * @param stateObj Lua state for access to the score CNN.
 * @param jacobeans Output parameter. List of Jacobean matrices. One 1 x 3n matrix per pose hypothesis.
 * @param scoreOutputGradients Gradients w.r.t the score i.e. the gradients of the output of the score CNN.
 */
void dScore(
    jp::img_coord_t estObj,
    const cv::Mat_<cv::Point2i>& sampling,
    const std::vector<std::vector<cv::Point2i>>& points,
    lua_State* stateObj,
    std::vector<cv::Mat_<double>>& jacobeans,
    const std::vector<double>& scoreOutputGradients)
{
    GlobalProperties* gp = GlobalProperties::getInstance();
    cv::Mat_<float> camMat = gp->getCamMat();

    int hypCount = points.size();

    std::vector<std::vector<cv::Point2f>> imgPts(hypCount);
    std::vector<std::vector<cv::Point3f>> objPts(hypCount);
    std::vector<jp::cv_trans_t> hyps(hypCount);
    std::vector<cv::Mat_<float>> diffMaps(hypCount);

    // re-derive each hypothesis from its minimal set and compute its error image
    #pragma omp parallel for
    for(unsigned h = 0; h < hypCount; h++)
    {
        for(unsigned i = 0; i < points[h].size(); i++)
        {
            int x = points[h][i].x;
            int y = points[h][i].y;

            imgPts[h].push_back(sampling(y, x));
            objPts[h].push_back(cv::Point3f(estObj(y, x)));
        }

        // calculate hypothesis
        jp::cv_trans_t cvHyp;
        safeSolvePnP(objPts[h], imgPts[h], camMat, cv::Mat(), cvHyp.first, cvHyp.second, false, CV_P3P);
        hyps[h] = cvHyp;

        // calculate projection errors
        diffMaps[h] = getDiffMap(cvHyp, estObj, sampling, camMat);
    }

    // backprop through the score CNN: gradients of the score wrt each error map
    std::vector<cv::Mat_<double>> dDiffMaps;
    backward(diffMaps, stateObj, scoreOutputGradients, dDiffMaps);

    jacobeans.resize(hypCount);

    #pragma omp parallel for
    for(unsigned h = 0; h < hypCount; h++)
    {
        cv::Mat_<double> jacobean = cv::Mat_<double>::zeros(1, estObj.cols * estObj.rows * 3);
        jacobeans[h] = jacobean;

        // skip hypotheses with (near-)zero incoming gradient
        if(cv::norm(dDiffMaps[h]) < EPS) continue;

        // accumulate derivative of score wrt the object coordinates that are used to calculate the pose
        cv::Mat_<double> supportPointGradients = cv::Mat_<double>::zeros(1, 12);
        cv::Mat_<double> dHdO = dPNP(imgPts[h], objPts[h]); // 6x12

        for(unsigned x = 0; x < dDiffMaps[h].cols; x++)
        for(unsigned y = 0; y < dDiffMaps[h].rows; y++)
        {
            cv::Point2f pt(sampling(y, x).x, sampling(y, x).y);
            cv::Point3f obj(estObj(y, x));

            // account for the direct influence of all object coordinates in the score
            cv::Mat_<double> dPdO = dProjectdObj(pt, obj, hyps[h], camMat);
            dPdO *= dDiffMaps[h](y, x);
            dPdO.copyTo(jacobean.colRange(x * dDiffMaps[h].rows * 3 + y * 3, x * dDiffMaps[h].rows * 3 + y * 3 + 3));

            // account for the indirect influence of the object coordinates that are used to calculate the pose
            cv::Mat_<double> dPdH = dProjectdHyp(sampling(y, x), cv::Point3f(estObj(y, x)), hyps[h], camMat);
            supportPointGradients += dDiffMaps[h](y, x) * dPdH * dHdO;
        }

        // add the accumulated derivatives for the object coordinates that are used to calculate the pose
        for(unsigned i = 0; i < points[h].size(); i++)
        {
            unsigned x = points[h][i].x;
            unsigned y = points[h][i].y;

            jacobean.colRange(x * dDiffMaps[h].rows * 3 + y * 3, x * dDiffMaps[h].rows * 3 + y * 3 + 3) += supportPointGradients.colRange(i * 3, i * 3 + 3);
        }
    }
}

/**
 * @brief Calculates the Jacobean matrix of the function that maps n estimated object coordinates to a soft max score, ie. the function has the form n x 3 -> 1. Returns one Jacobean matrix per hypothesis.
 *
 * This is the Soft maxed version of dScore (see above).
 *
 * @param estObj Object coordinate estimation.
 * @param sampling Sub sampling of the RGB image.
 * @param points List of minimal sets. Each one (4 correspondences) defines one hypothesis.
 * @param losses Loss measured for the hypotheses given by the points parameter.
 * @param sfScores Soft max probabilities for the hypotheses given by the points parameter.
 * @param stateObj Lua state for access to the score CNN.
 * @return List of Jacobean matrices. One 1 x 3n matrix per pose hypothesis.
 */
std::vector<cv::Mat_<double>> dSMScore(
    jp::img_coord_t estObj,
    const cv::Mat_<cv::Point2i>& sampling,
    const std::vector<std::vector<cv::Point2i>>& points,
    const std::vector<double>& losses,
    const std::vector<double>& sfScores,
    lua_State* stateObj)
{
    // assemble the gradients wrt the scores, ie the gradients of soft max function
    // d(sum_j p_j * loss_j)/d(score_i) = p_i * loss_i - p_i * sum_j p_j * loss_j
    std::vector<double> scoreOutputGradients(points.size());

    for(unsigned i = 0; i < points.size(); i++)
    {
        scoreOutputGradients[i] = sfScores[i] * losses[i];
        for(unsigned j = 0; j < points.size(); j++)
            scoreOutputGradients[i] -= sfScores[i] * sfScores[j] * losses[j];
    }

    // calculate gradients of the score function
    std::vector<cv::Mat_<double>> jacobeans;
    dScore(estObj, sampling, points, stateObj, jacobeans, scoreOutputGradients);

    // data conversion: 1 x 3n row vector -> n x 3 matrix, one row per grid point
    for(unsigned i = 0; i < jacobeans.size(); i++)
    {
        // reorder to points row first into rows
        cv::Mat_<double> reformat(estObj.cols * estObj.rows, 3);

        for(unsigned x = 0; x < estObj.cols; x++)
        for(unsigned y = 0; y < estObj.rows; y++)
        {
            // source layout is column-major (x * rows + y), target is row-major (y * cols + x)
            cv::Mat_<double> patchGrad = jacobeans[i].colRange(
                x * estObj.rows * 3 + y * 3,
                x * estObj.rows * 3 + y * 3 + 3);
            patchGrad.copyTo(reformat.row(y * estObj.cols + x));
        }

        jacobeans[i] = reformat;
    }

    return jacobeans;
}

/**
 * @brief Processes a frame, ie. takes object coordinates, estimates poses, selects the best one and measures the error.
 *
 * This function performs the forward pass of DSAC but also calculates many intermediate results
 * for the backward pass (ie it can be made faster if one cares only about the forward pass).
 *
 * @param hypGT Ground truth pose (for evaluation only).
 * @param stateObj Lua state for access to the score CNN.
 * @param objHyps Number of hypotheses to be drawn.
 * @param camMat Calibration parameters of the camera.
 * @param inlierThreshold2D Inlier threshold in pixels.
 * @param refSteps Max. refinement steps (iterations).
 * @param expectedLoss Output parameter. Expectation of loss of the discrete hypothesis distributions.
* @param sfEntropy Output parameter. Shannon entropy of the soft max distribution of hypotheses. * @param correct Output parameter. Was the final, selected hypothesis correct? * @param refHyps Output parameter. List of refined hypotheses sampled for the given image. * @param sfScores Output parameter. Soft max distribution for the sampled hypotheses. * @param estObj Output parameter. Estimated object coordinates (subsampling of the complete image). * @param sampling Output parameter. Subsampling of the RGB image. * @param sampledPoints Output parameter. List of initial 2D pixel locations of the subsampled input RGB image. 4 pixels per hypothesis. * @param losses Output parameter. List of losses of the sampled hypotheses. * @param inlierMaps Output parameter. Maps indicating which pixels of the subsampled input image have been inliers in the last step of hypothesis refinement, one map per hypothesis. * @param tErr Output parameter. Translational (in m) error of the final, selected hypothesis. * @param rotErr Output parameter. Rotational error of the final, selected hypothesis. * @param hypIdx Output parameter. Index of the final, selected hypothesis. * @param training True if training mode. Controls whether all hypotheses are refined or just the selected one. 
 */
void processImage(
    const jp::cv_trans_t& hypGT,
    lua_State* stateObj,
    int objHyps,
    const cv::Mat& camMat,
    int inlierThreshold2D,
    int refSteps,
    double& expectedLoss,
    double& sfEntropy,
    bool& correct,
    std::vector<jp::cv_trans_t>& refHyps,
    std::vector<double>& sfScores,
    const jp::img_coord_t& estObj,
    const cv::Mat_<cv::Point2i>& sampling,
    std::vector<std::vector<cv::Point2i>>& sampledPoints,
    std::vector<double>& losses,
    std::vector<cv::Mat_<int>>& inlierMaps,
    double& tErr,
    double& rotErr,
    int& hypIdx,
    bool training = true)
{
    std::cout << BLUETEXT("Sampling " << objHyps << " hypotheses.") << std::endl;
    StopWatch stopW;

    sampledPoints.resize(objHyps); // keep track of the points each hypothesis is sampled from
    refHyps.resize(objHyps);
    std::vector<std::vector<cv::Point2f>> imgPts(objHyps);
    std::vector<std::vector<cv::Point3f>> objPts(objHyps);

    // sample hypotheses: each thread re-samples until it produces a hypothesis
    // whose 4 defining correspondences are reconstructed within the inlier threshold
    #pragma omp parallel for
    for(unsigned h = 0; h < refHyps.size(); h++)
    while(true)
    {
        std::vector<cv::Point2f> projections;
        cv::Mat_<uchar> alreadyChosen = cv::Mat_<uchar>::zeros(estObj.size());
        imgPts[h].clear();
        objPts[h].clear();
        sampledPoints[h].clear();

        // draw 4 distinct correspondences (minimal set for P3P)
        for(int j = 0; j < 4; j++)
        {
            // 2D location in the subsampled image
            int x = irand(0, estObj.cols);
            int y = irand(0, estObj.rows);

            if(alreadyChosen(y, x) > 0)
            {
                j--; // duplicate pick, redo this slot
                continue;
            }

            alreadyChosen(y, x) = 1;

            imgPts[h].push_back(sampling(y, x)); // 2D location in the original RGB image
            objPts[h].push_back(cv::Point3f(estObj(y, x))); // 3D object coordinate
            sampledPoints[h].push_back(cv::Point2i(x, y)); // 2D pixel location in the subsampled image
        }

        if(!safeSolvePnP(objPts[h], imgPts[h], camMat, cv::Mat(), refHyps[h].first, refHyps[h].second, false, CV_P3P))
        {
            continue; // PnP failed, resample this hypothesis
        }

        cv::projectPoints(objPts[h], refHyps[h].first, refHyps[h].second, camMat, cv::Mat(), projections);

        // check reconstruction, 4 sampled points should be reconstructed perfectly
        bool foundOutlier = false;
        for(unsigned j = 0; j < imgPts[h].size(); j++)
        {
            if(cv::norm(imgPts[h][j] - projections[j]) < inlierThreshold2D)
                continue;
            foundOutlier = true;
            break;
        }

        if(foundOutlier)
            continue; // resample
        else
            break; // hypothesis accepted
    }

    std::cout << "Done in " << stopW.stop() / 1000 << "s." << std::endl;
    std::cout << BLUETEXT("Calculating scores.") << std::endl;

    // compute reprojection error images
    std::vector<cv::Mat_<float>> diffMaps(objHyps);
    #pragma omp parallel for
    for(unsigned h = 0; h < refHyps.size(); h++)
        diffMaps[h] = getDiffMap(refHyps[h], estObj, sampling, camMat);

    // execute score script to get hypothesis scores
    std::vector<double> scores = forward(diffMaps, stateObj);

    std::cout << "Done in " << stopW.stop() / 1000 << "s." << std::endl;
    std::cout << BLUETEXT("Drawing final Hypothesis.") << std::endl;

    // apply soft max to scores to get a distribution
    sfScores = softMax(scores);
    sfEntropy = entropy(sfScores); // measure distribution entropy
    hypIdx = draw(sfScores); // select winning hypothesis

    std::cout << "Done in " << stopW.stop() / 1000 << "s." << std::endl;
    std::cout << BLUETEXT("Refining poses:") << std::endl;

    // collect inliers
    inlierMaps.resize(refHyps.size());

    double convergenceThresh = 0.01; // stop refinement if 6D pose vector converges

    #pragma omp parallel for
    for(unsigned h = 0; h < refHyps.size(); h++)
    {
        if(!training && hypIdx != h) continue; // in test mode only refine selected hypothesis

        cv::Mat_<float> localDiffMap = diffMaps[h];

        // refine current hypothesis: iterate inlier collection + pose re-estimation
        for(unsigned rStep = 0; rStep < refSteps; rStep++)
        {
            // collect inliers
            std::vector<cv::Point2f> localImgPts;
            std::vector<cv::Point3f> localObjPts;
            cv::Mat_<int> localInlierMap = cv::Mat_<int>::zeros(diffMaps[h].size());

            for(unsigned x = 0; x < localDiffMap.cols; x++)
            for(unsigned y = 0; y < localDiffMap.rows; y++)
            {
                if(localDiffMap(y, x) < inlierThreshold2D)
                {
                    localImgPts.push_back(sampling(y, x));
                    localObjPts.push_back(cv::Point3f(estObj(y, x)));
                    localInlierMap(y, x) = 1;
                }
            }

            // too few inliers for PnP, stop refining
            if(localImgPts.size() < 4)
                break;

            // recalculate pose, starting from the current estimate (extrinsic guess)
            jp::cv_trans_t hypUpdate;
            hypUpdate.first = refHyps[h].first.clone();
            hypUpdate.second = refHyps[h].second.clone();

            if(!safeSolvePnP(localObjPts, localImgPts, camMat, cv::Mat(), hypUpdate.first, hypUpdate.second, true, (localImgPts.size() > 4) ? CV_ITERATIVE : CV_P3P))
                break; //abort if PnP fails

            if(maxLoss(hypUpdate, refHyps[h]) < convergenceThresh)
                break; // converged

            refHyps[h] = hypUpdate;
            inlierMaps[h] = localInlierMap;

            // recalculate pose errors
            localDiffMap = getDiffMap(refHyps[h], estObj, sampling, camMat);
        }
    }

    std::cout << "Done in " << stopW.stop() / 1000 << "s." << std::endl;
    std::cout << BLUETEXT("Final Result:") << std::endl;

    // evaluated poses
    expectedLoss = expectedMaxLoss(hypGT, refHyps, sfScores, losses);
    std::cout << "Loss of winning hyp: " << maxLoss(hypGT, refHyps[hypIdx]) << ", prob: " << sfScores[hypIdx] << ", expected loss: " << expectedLoss << std::endl;

    // we measure error of inverted poses (because we estimate scene poses, not camera poses)
    jp::cv_trans_t invHypGT = getInvHyp(hypGT);
    jp::cv_trans_t invHypEst = getInvHyp(refHyps[hypIdx]);

    rotErr = calcAngularDistance(invHypGT, invHypEst);
    tErr = cv::norm(invHypEst.second - invHypGT.second);

    // standard 5 degree / 5 cm criterion for a correct pose
    correct = false;
    if(rotErr < 5 && tErr < 0.05)
    {
        std::cout << GREENTEXT("Rotation Err: " << rotErr << "deg, Translation Err: " << tErr * 100 << "cm") << std::endl << std::endl;
        correct = true;
    }
    else
        std::cout << REDTEXT("Rotation Err: " << rotErr << "deg, Translation Err: " << tErr * 100 << "cm") << std::endl << std::endl;
}
gen_fffc.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_utilities.h"
#include "_hypre_parcsr_mv.h"
#include "_hypre_lapack.h"
#include "_hypre_blas.h"

/* -----------------------------------------------------------------------------
 * generate AFF or AFC
 * ----------------------------------------------------------------------------- */

/**
 * Extracts the F-to-F submatrix (A_FF) and the F-to-C submatrix (A_FC) of a
 * ParCSR matrix A, restricted to the sparsity pattern of the strength matrix S
 * (values are taken from A, pattern from S).  Rows of both output matrices are
 * the fine (F) points of A, i.e. rows with CF_marker[i] < 0; columns of A_FC
 * are the coarse (C) points (CF_marker > 0), columns of A_FF are F points.
 * The diagonal entry of every F row is always kept in A_FF, even when S has
 * no diagonal.
 *
 * @param A            input ParCSR matrix
 * @param CF_marker    per-row C/F split: > 0 means C point, < 0 means F point
 * @param cpts_starts  global row partitioning of the C points (this process
 *                     owns [cpts_starts[0], cpts_starts[1]))
 * @param S            strength matrix defining the pattern; may be NULL, in
 *                     which case the full pattern of A is used (skip_diag
 *                     then skips A's stored diagonal in the pattern walk)
 * @param A_FC_ptr     output: F-rows x C-columns submatrix
 * @param A_FF_ptr     output: F-rows x F-columns submatrix
 * @return hypre_error_flag
 */
HYPRE_Int
hypre_ParCSRMatrixGenerateFFFC( hypre_ParCSRMatrix  *A,
                                HYPRE_Int           *CF_marker,
                                HYPRE_BigInt        *cpts_starts,
                                hypre_ParCSRMatrix  *S,
                                hypre_ParCSRMatrix **A_FC_ptr,
                                hypre_ParCSRMatrix **A_FF_ptr)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   HYPRE_MemoryLocation    memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   /* diag part of A */
   hypre_CSRMatrix *A_diag      = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex   *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i    = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j    = hypre_CSRMatrixJ(A_diag);
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd      = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex   *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i    = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j    = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int        n_fine          = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   /* diag part of S (falls back to A's pattern when S is NULL) */
   hypre_CSRMatrix *S_diag   = S ? hypre_ParCSRMatrixDiag(S) : A_diag;
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);
   /* when the pattern comes from A itself, skip its stored diagonal entry
      (it is handled explicitly below); S has no diagonal, so skip nothing */
   HYPRE_Int        skip_diag = S ? 0 : 1;
   /* off-diag part of S */
   hypre_CSRMatrix *S_offd   = S ? hypre_ParCSRMatrixOffd(S) : A_offd;
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);

   hypre_ParCSRMatrix *A_FC;
   hypre_CSRMatrix    *A_FC_diag, *A_FC_offd;
   HYPRE_Int          *A_FC_diag_i, *A_FC_diag_j, *A_FC_offd_i, *A_FC_offd_j = NULL;
   HYPRE_Complex      *A_FC_diag_data, *A_FC_offd_data = NULL;
   HYPRE_Int           num_cols_offd_A_FC;
   HYPRE_BigInt       *col_map_offd_A_FC = NULL;
   hypre_ParCSRMatrix *A_FF;
   hypre_CSRMatrix    *A_FF_diag, *A_FF_offd;
   HYPRE_Int          *A_FF_diag_i, *A_FF_diag_j, *A_FF_offd_i, *A_FF_offd_j;
   HYPRE_Complex      *A_FF_diag_data, *A_FF_offd_data;
   HYPRE_Int           num_cols_offd_A_FF;
   HYPRE_BigInt       *col_map_offd_A_FF = NULL;
   HYPRE_Int          *fine_to_coarse;            /* local row -> local C index, -1 for F rows */
   HYPRE_Int          *fine_to_fine;              /* local row -> local F index, -1 for C rows */
   HYPRE_Int          *fine_to_coarse_offd = NULL;
   HYPRE_Int          *fine_to_fine_offd = NULL;
   HYPRE_Int           i, j, jj;
   HYPRE_Int           startc, index;
   HYPRE_Int           cpt, fpt, row;
   HYPRE_Int          *CF_marker_offd = NULL, *marker_offd = NULL;
   HYPRE_Int          *int_buf_data = NULL;
   HYPRE_BigInt       *big_convert;               /* local row -> global F or C index */
   HYPRE_BigInt       *big_convert_offd = NULL;
   HYPRE_BigInt       *big_buf_data = NULL;
   HYPRE_BigInt        total_global_fpts, total_global_cpts, fpts_starts[2];
   HYPRE_Int           my_id, num_procs, num_sends;
   HYPRE_Int           d_count_FF, d_count_FC, o_count_FF, o_count_FC;
   HYPRE_Int           n_Fpts;
   HYPRE_Int          *cpt_array, *fpt_array;     /* per-thread C/F counts -> prefix sums */
   HYPRE_Int           start, stop;
   HYPRE_Int           num_threads;

   num_threads = hypre_NumThreads();

   /* MPI size and rank*/
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   fine_to_fine   = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   big_convert    = hypre_CTAlloc(HYPRE_BigInt, n_fine, HYPRE_MEMORY_HOST);
   cpt_array      = hypre_CTAlloc(HYPRE_Int, num_threads + 1, HYPRE_MEMORY_HOST);
   fpt_array      = hypre_CTAlloc(HYPRE_Int, num_threads + 1, HYPRE_MEMORY_HOST);

   /* The whole construction runs inside one parallel region; barriers separate
    * the phases, and thread 0 performs the serial steps (prefix sums, MPI
    * communication, allocations) between them. */
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel private(i,j,jj,start,stop,row,cpt,fpt,d_count_FC,d_count_FF,o_count_FC,o_count_FF)
#endif
   {
      HYPRE_Int my_thread_num = hypre_GetThreadNum();

      /* static partition of the rows across threads; last thread takes the remainder */
      start = (n_fine / num_threads) * my_thread_num;
      if (my_thread_num == num_threads - 1)
      {
         stop = n_fine;
      }
      else
      {
         stop = (n_fine / num_threads) * (my_thread_num + 1);
      }

      /* Phase 1: count C and F points per thread (shifted by one for the scan below) */
      for (i = start; i < stop; i++)
      {
         if (CF_marker[i] > 0)
         {
            cpt_array[my_thread_num + 1]++;
         }
         else
         {
            fpt_array[my_thread_num + 1]++;
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Phase 2 (thread 0): prefix-sum the per-thread counts, so
       * cpt_array[t]/fpt_array[t] is each thread's starting local index */
      if (my_thread_num == 0)
      {
         for (i = 1; i < num_threads; i++)
         {
            cpt_array[i + 1] += cpt_array[i];
            fpt_array[i + 1] += fpt_array[i];
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Phase 3: assign consecutive local C/F indices to the rows */
      cpt = cpt_array[my_thread_num];
      fpt = fpt_array[my_thread_num];
      for (i = start; i < stop; i++)
      {
         if (CF_marker[i] > 0)
         {
            fine_to_coarse[i] = cpt++;
            fine_to_fine[i] = -1;
         }
         else
         {
            fine_to_fine[i] = fpt++;
            fine_to_coarse[i] = -1;
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Phase 4 (thread 0): global F-point partitioning via exclusive scan,
       * and broadcast of the global totals from the last rank */
      if (my_thread_num == 0)
      {
         HYPRE_BigInt big_Fpts;
         n_Fpts = fpt_array[num_threads];
         big_Fpts = n_Fpts;

         hypre_MPI_Scan(&big_Fpts, fpts_starts + 1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
         fpts_starts[0] = fpts_starts[1] - big_Fpts;
         if (my_id == num_procs - 1)
         {
            total_global_fpts = fpts_starts[1];
            total_global_cpts = cpts_starts[1];
         }
         hypre_MPI_Bcast(&total_global_fpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
         hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Phase 5: global index of every local row (C rows get a global C index,
       * F rows a global F index) */
      for (i = start; i < stop; i++)
      {
         if (CF_marker[i] > 0)
         {
            big_convert[i] = (HYPRE_BigInt)fine_to_coarse[i] + cpts_starts[0];
         }
         else
         {
            big_convert[i] = (HYPRE_BigInt)fine_to_fine[i] + fpts_starts[0];
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Phase 6 (thread 0): exchange CF_marker and global indices for the
       * off-processor columns, then build the off-diag column maps */
      if (my_thread_num == 0)
      {
         if (num_cols_A_offd)
         {
            CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
            big_convert_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST);
            fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
            fine_to_fine_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
         }
         index = 0;
         num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
         int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                      HYPRE_MEMORY_HOST);
         big_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                      HYPRE_MEMORY_HOST);
         for (i = 0; i < num_sends; i++)
         {
            startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            {
               int_buf_data[index] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
               big_buf_data[index++] = big_convert[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
            }
         }
         comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);
         comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, big_convert_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);

         /* mark only the off-diag columns actually referenced by a strong
          * connection of some F row; unreferenced columns are dropped from
          * the output column maps */
         marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
         for (i = 0; i < n_fine; i++)
         {
            if (CF_marker[i] < 0)
            {
               for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++)
               {
                  marker_offd[S_offd_j[j]] = 1;
               }
            }
         }
         num_cols_offd_A_FC = 0;
         num_cols_offd_A_FF = 0;
         if (num_cols_A_offd)
         {
            for (i = 0; i < num_cols_A_offd; i++)
            {
               if (CF_marker_offd[i] > 0 && marker_offd[i] > 0)
               {
                  fine_to_coarse_offd[i] = num_cols_offd_A_FC++;
                  fine_to_fine_offd[i] = -1;
               }
               else if (CF_marker_offd[i] < 0 && marker_offd[i] > 0)
               {
                  fine_to_fine_offd[i] = num_cols_offd_A_FF++;
                  fine_to_coarse_offd[i] = -1;
               }
            }
            col_map_offd_A_FF = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A_FF, HYPRE_MEMORY_HOST);
            col_map_offd_A_FC = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A_FC, HYPRE_MEMORY_HOST);
            cpt = 0;
            fpt = 0;
            for (i = 0; i < num_cols_A_offd; i++)
            {
               if (CF_marker_offd[i] > 0 && marker_offd[i] > 0)
               {
                  col_map_offd_A_FC[cpt++] = big_convert_offd[i];
               }
               else if (CF_marker_offd[i] < 0 && marker_offd[i] > 0)
               {
                  col_map_offd_A_FF[fpt++] = big_convert_offd[i];
               }
            }
         }
         A_FF_diag_i = hypre_CTAlloc(HYPRE_Int, n_Fpts + 1, memory_location_P);
         A_FC_diag_i = hypre_CTAlloc(HYPRE_Int, n_Fpts + 1, memory_location_P);
         A_FF_offd_i = hypre_CTAlloc(HYPRE_Int, n_Fpts + 1, memory_location_P);
         A_FC_offd_i = hypre_CTAlloc(HYPRE_Int, n_Fpts + 1, memory_location_P);
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Phase 7: per-thread counting pass — number of entries per F row that
       * land in A_FF/A_FC (diag and offd); row pointers hold thread-local
       * running counts for now */
      d_count_FC = 0;
      d_count_FF = 0;
      o_count_FC = 0;
      o_count_FF = 0;
      row = fpt_array[my_thread_num];
      for (i = start; i < stop; i++)
      {
         if (CF_marker[i] < 0)
         {
            row++;
            d_count_FF++; /* account for diagonal element */
            for (j = S_diag_i[i] + skip_diag; j < S_diag_i[i + 1]; j++)
            {
               jj = S_diag_j[j];
               if (CF_marker[jj] > 0)
               {
                  d_count_FC++;
               }
               else
               {
                  d_count_FF++;
               }
            }
            A_FF_diag_i[row] = d_count_FF;
            A_FC_diag_i[row] = d_count_FC;
            for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++)
            {
               jj = S_offd_j[j];
               if (CF_marker_offd[jj] > 0)
               {
                  o_count_FC++;
               }
               else
               {
                  o_count_FF++;
               }
            }
            A_FF_offd_i[row] = o_count_FF;
            A_FC_offd_i[row] = o_count_FC;
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Phase 8 (thread 0): turn the thread-local counts into global row
       * pointers by adding each thread's last count to the next thread's
       * block, then allocate the j/data arrays at their final sizes */
      if (my_thread_num == 0)
      {
         HYPRE_Int fpt2;
         for (i = 1; i < num_threads + 1; i++)
         {
            fpt = fpt_array[i];
            fpt2 = fpt_array[i - 1];
            if (fpt == fpt2) { continue; } /* thread i-1 owned no F rows */
            A_FC_diag_i[fpt] += A_FC_diag_i[fpt2];
            A_FF_diag_i[fpt] += A_FF_diag_i[fpt2];
            A_FC_offd_i[fpt] += A_FC_offd_i[fpt2];
            A_FF_offd_i[fpt] += A_FF_offd_i[fpt2];
         }
         row = fpt_array[num_threads];
         d_count_FC = A_FC_diag_i[row];
         d_count_FF = A_FF_diag_i[row];
         o_count_FC = A_FC_offd_i[row];
         o_count_FF = A_FF_offd_i[row];
         A_FF_diag_j = hypre_CTAlloc(HYPRE_Int, d_count_FF, memory_location_P);
         A_FC_diag_j = hypre_CTAlloc(HYPRE_Int, d_count_FC, memory_location_P);
         A_FF_offd_j = hypre_CTAlloc(HYPRE_Int, o_count_FF, memory_location_P);
         A_FC_offd_j = hypre_CTAlloc(HYPRE_Int, o_count_FC, memory_location_P);
         A_FF_diag_data = hypre_CTAlloc(HYPRE_Real, d_count_FF, memory_location_P);
         A_FC_diag_data = hypre_CTAlloc(HYPRE_Real, d_count_FC, memory_location_P);
         A_FF_offd_data = hypre_CTAlloc(HYPRE_Real, o_count_FF, memory_location_P);
         A_FC_offd_data = hypre_CTAlloc(HYPRE_Real, o_count_FC, memory_location_P);
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Phase 9: fill pass — for every entry in S's pattern, locate the
       * matching entry of A in the same row and copy its value.
       * NOTE(review): the while-loop search assumes every column in S's row
       * also appears in A's row, in the same relative order (S pattern is a
       * subset of A's) — there is no bounds check; confirm against how S is
       * built. */
      row = fpt_array[my_thread_num];
      d_count_FC = A_FC_diag_i[row];
      d_count_FF = A_FF_diag_i[row];
      o_count_FC = A_FC_offd_i[row];
      o_count_FF = A_FF_offd_i[row];
      for (i = start; i < stop; i++)
      {
         if (CF_marker[i] < 0)
         {
            HYPRE_Int jS, jA;
            row++;
            /* A's diagonal entry is stored first in the diag part; it always
             * goes into A_FF */
            jA = A_diag_i[i];
            A_FF_diag_j[d_count_FF] = fine_to_fine[A_diag_j[jA]];
            A_FF_diag_data[d_count_FF++] = A_diag_data[jA++];
            for (j = S_diag_i[i] + skip_diag; j < S_diag_i[i + 1]; j++)
            {
               jA = A_diag_i[i] + 1;
               jS = S_diag_j[j];
               while (A_diag_j[jA] != jS) { jA++; }
               if (CF_marker[S_diag_j[j]] > 0)
               {
                  A_FC_diag_j[d_count_FC] = fine_to_coarse[A_diag_j[jA]];
                  A_FC_diag_data[d_count_FC++] = A_diag_data[jA++];
               }
               else
               {
                  A_FF_diag_j[d_count_FF] = fine_to_fine[A_diag_j[jA]];
                  A_FF_diag_data[d_count_FF++] = A_diag_data[jA++];
               }
            }
            A_FF_diag_i[row] = d_count_FF;
            A_FC_diag_i[row] = d_count_FC;
            for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++)
            {
               jA = A_offd_i[i];
               jS = S_offd_j[j];
               while (jS != A_offd_j[jA]) { jA++; }
               if (CF_marker_offd[S_offd_j[j]] > 0)
               {
                  A_FC_offd_j[o_count_FC] = fine_to_coarse_offd[A_offd_j[jA]];
                  A_FC_offd_data[o_count_FC++] = A_offd_data[jA++];
               }
               else
               {
                  A_FF_offd_j[o_count_FF] = fine_to_fine_offd[A_offd_j[jA]];
                  A_FF_offd_data[o_count_FF++] = A_offd_data[jA++];
               }
            }
            A_FF_offd_i[row] = o_count_FF;
            A_FC_offd_i[row] = o_count_FC;
         }
      }
   } /*end parallel region */

   /* Assemble the two ParCSR matrices from the arrays built above; ownership
    * of the i/j/data arrays and column maps transfers to the matrices */
   A_FC = hypre_ParCSRMatrixCreate(comm,
                                   total_global_fpts,
                                   total_global_cpts,
                                   fpts_starts,
                                   cpts_starts,
                                   num_cols_offd_A_FC,
                                   A_FC_diag_i[n_Fpts],
                                   A_FC_offd_i[n_Fpts]);
   A_FF = hypre_ParCSRMatrixCreate(comm,
                                   total_global_fpts,
                                   total_global_fpts,
                                   fpts_starts,
                                   fpts_starts,
                                   num_cols_offd_A_FF,
                                   A_FF_diag_i[n_Fpts],
                                   A_FF_offd_i[n_Fpts]);

   A_FC_diag = hypre_ParCSRMatrixDiag(A_FC);
   hypre_CSRMatrixData(A_FC_diag) = A_FC_diag_data;
   hypre_CSRMatrixI(A_FC_diag) = A_FC_diag_i;
   hypre_CSRMatrixJ(A_FC_diag) = A_FC_diag_j;
   A_FC_offd = hypre_ParCSRMatrixOffd(A_FC);
   hypre_CSRMatrixData(A_FC_offd) = A_FC_offd_data;
   hypre_CSRMatrixI(A_FC_offd) = A_FC_offd_i;
   hypre_CSRMatrixJ(A_FC_offd) = A_FC_offd_j;
   hypre_ParCSRMatrixColMapOffd(A_FC) = col_map_offd_A_FC;
   hypre_CSRMatrixMemoryLocation(A_FC_diag) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(A_FC_offd) = memory_location_P;

   A_FF_diag = hypre_ParCSRMatrixDiag(A_FF);
   hypre_CSRMatrixData(A_FF_diag) = A_FF_diag_data;
   hypre_CSRMatrixI(A_FF_diag) = A_FF_diag_i;
   hypre_CSRMatrixJ(A_FF_diag) = A_FF_diag_j;
   A_FF_offd = hypre_ParCSRMatrixOffd(A_FF);
   hypre_CSRMatrixData(A_FF_offd) = A_FF_offd_data;
   hypre_CSRMatrixI(A_FF_offd) = A_FF_offd_i;
   hypre_CSRMatrixJ(A_FF_offd) = A_FF_offd_j;
   hypre_ParCSRMatrixColMapOffd(A_FF) = col_map_offd_A_FF;
   hypre_CSRMatrixMemoryLocation(A_FF_diag) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(A_FF_offd) = memory_location_P;

   /* free all host-side scratch arrays */
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_fine, HYPRE_MEMORY_HOST);
   hypre_TFree(big_convert, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_fine_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(big_convert_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
   hypre_TFree(fpt_array, HYPRE_MEMORY_HOST);

   *A_FC_ptr = A_FC;
   *A_FF_ptr = A_FF;

   return hypre_error_flag;
}

/* -----------------------------------------------------------------------------
 * generate AFF, AFC, for 2 stage extended interpolation
 * ----------------------------------------------------------------------------- */

/**
 * Like hypre_ParCSRMatrixGenerateFFFC, but for two-stage extended
 * interpolation: A_FC keeps a row for every F point (CF_marker < 0), while
 * A_FF keeps rows only for the "new" F points marked CF_marker == -2
 * (columns of A_FF still span all F points).  S must be non-NULL here, and
 * all off-diag columns are kept in the column maps (no marker filtering).
 */
HYPRE_Int
hypre_ParCSRMatrixGenerateFFFC3( hypre_ParCSRMatrix  *A,
                                 HYPRE_Int           *CF_marker,
                                 HYPRE_BigInt        *cpts_starts,
                                 hypre_ParCSRMatrix  *S,
                                 hypre_ParCSRMatrix **A_FC_ptr,
                                 hypre_ParCSRMatrix **A_FF_ptr)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   HYPRE_MemoryLocation    memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   /* diag part of A */
   hypre_CSRMatrix *A_diag      = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex   *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i    = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j    = hypre_CSRMatrixJ(A_diag);
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd      = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex   *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i    = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j    = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int        n_fine          = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   /* diag part of S */
   hypre_CSRMatrix *S_diag   = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);
   /* off-diag part of S */
   hypre_CSRMatrix *S_offd   = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);

   hypre_ParCSRMatrix *A_FC;
   hypre_CSRMatrix    *A_FC_diag, *A_FC_offd;
   HYPRE_Int          *A_FC_diag_i, *A_FC_diag_j, *A_FC_offd_i, *A_FC_offd_j = NULL;
   HYPRE_Complex      *A_FC_diag_data, *A_FC_offd_data = NULL;
   HYPRE_Int           num_cols_offd_A_FC;
   HYPRE_BigInt       *col_map_offd_A_FC = NULL;
   hypre_ParCSRMatrix *A_FF;
   hypre_CSRMatrix    *A_FF_diag, *A_FF_offd;
   HYPRE_Int          *A_FF_diag_i, *A_FF_diag_j, *A_FF_offd_i, *A_FF_offd_j;
   HYPRE_Complex      *A_FF_diag_data, *A_FF_offd_data;
   HYPRE_Int           num_cols_offd_A_FF;
   HYPRE_BigInt       *col_map_offd_A_FF = NULL;
   HYPRE_Int          *fine_to_coarse;            /* local row -> local C index, -1 for F rows */
   HYPRE_Int          *fine_to_fine;              /* local row -> local F index, -1 for C rows */
   HYPRE_Int          *fine_to_coarse_offd = NULL;
   HYPRE_Int          *fine_to_fine_offd = NULL;
   HYPRE_Int           i, j, jj;
   HYPRE_Int           startc, index;
   /* row counts A_FF rows (only CF_marker == -2), rowc counts A_FC rows (all F) */
   HYPRE_Int           cpt, fpt, new_fpt, row, rowc;
   HYPRE_Int          *CF_marker_offd = NULL;
   HYPRE_Int          *int_buf_data = NULL;
   HYPRE_BigInt       *big_convert;               /* local row -> global F or C index */
   HYPRE_BigInt       *big_convert_offd = NULL;
   HYPRE_BigInt       *big_buf_data = NULL;
   HYPRE_BigInt        total_global_fpts, total_global_cpts, total_global_new_fpts;
   HYPRE_BigInt        fpts_starts[2], new_fpts_starts[2];
   HYPRE_Int           my_id, num_procs, num_sends;
   HYPRE_Int           d_count_FF, d_count_FC, o_count_FF, o_count_FC;
   HYPRE_Int           n_Fpts;                    /* all F points (CF_marker < 0) */
   HYPRE_Int           n_new_Fpts;                /* "new" F points (CF_marker == -2) */
   HYPRE_Int          *cpt_array, *fpt_array, *new_fpt_array;
   HYPRE_Int           start, stop;
   HYPRE_Int           num_threads;

   num_threads = hypre_NumThreads();

   /* MPI size and rank*/
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   fine_to_fine   = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   big_convert    = hypre_CTAlloc(HYPRE_BigInt, n_fine, HYPRE_MEMORY_HOST);
   cpt_array      = hypre_CTAlloc(HYPRE_Int, num_threads + 1, HYPRE_MEMORY_HOST);
   fpt_array      = hypre_CTAlloc(HYPRE_Int, num_threads + 1, HYPRE_MEMORY_HOST);
   new_fpt_array  = hypre_CTAlloc(HYPRE_Int, num_threads + 1, HYPRE_MEMORY_HOST);

   /* One parallel region; barriers separate phases, thread 0 does the serial
    * steps (prefix sums, MPI communication, allocations) */
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel private(i,j,jj,start,stop,row,rowc,cpt,new_fpt,fpt,d_count_FC,d_count_FF,o_count_FC,o_count_FF)
#endif
   {
      HYPRE_Int my_thread_num = hypre_GetThreadNum();

      /* static row partition; last thread takes the remainder */
      start = (n_fine / num_threads) * my_thread_num;
      if (my_thread_num == num_threads - 1)
      {
         stop = n_fine;
      }
      else
      {
         stop = (n_fine / num_threads) * (my_thread_num + 1);
      }

      /* Phase 1: per-thread counts of C points, F points, and new F points
       * (a -2 row is both an F point and a new F point) */
      for (i = start; i < stop; i++)
      {
         if (CF_marker[i] > 0)
         {
            cpt_array[my_thread_num + 1]++;
         }
         else if (CF_marker[i] == -2)
         {
            new_fpt_array[my_thread_num + 1]++;
            fpt_array[my_thread_num + 1]++;
         }
         else
         {
            fpt_array[my_thread_num + 1]++;
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Phase 2 (thread 0): prefix sums -> per-thread starting indices */
      if (my_thread_num == 0)
      {
         for (i = 1; i < num_threads; i++)
         {
            cpt_array[i + 1] += cpt_array[i];
            fpt_array[i + 1] += fpt_array[i];
            new_fpt_array[i + 1] += new_fpt_array[i];
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Phase 3: consecutive local C/F numbering of the rows */
      cpt = cpt_array[my_thread_num];
      fpt = fpt_array[my_thread_num];
      for (i = start; i < stop; i++)
      {
         if (CF_marker[i] > 0)
         {
            fine_to_coarse[i] = cpt++;
            fine_to_fine[i] = -1;
         }
         else
         {
            fine_to_fine[i] = fpt++;
            fine_to_coarse[i] = -1;
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Phase 4 (thread 0): global partitionings for F and new-F points via
       * exclusive scans; broadcast global totals from the last rank */
      if (my_thread_num == 0)
      {
         HYPRE_BigInt big_Fpts, big_new_Fpts;
         n_Fpts = fpt_array[num_threads];
         n_new_Fpts = new_fpt_array[num_threads];
         big_Fpts = n_Fpts;
         big_new_Fpts = n_new_Fpts;

         hypre_MPI_Scan(&big_Fpts, fpts_starts + 1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
         hypre_MPI_Scan(&big_new_Fpts, new_fpts_starts + 1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
         fpts_starts[0] = fpts_starts[1] - big_Fpts;
         new_fpts_starts[0] = new_fpts_starts[1] - big_new_Fpts;
         if (my_id == num_procs - 1)
         {
            total_global_new_fpts = new_fpts_starts[1];
            total_global_fpts = fpts_starts[1];
            total_global_cpts = cpts_starts[1];
         }
         hypre_MPI_Bcast(&total_global_new_fpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
         hypre_MPI_Bcast(&total_global_fpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
         hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Phase 5: global index of every local row */
      for (i = start; i < stop; i++)
      {
         if (CF_marker[i] > 0)
         {
            big_convert[i] = (HYPRE_BigInt)fine_to_coarse[i] + cpts_starts[0];
         }
         else
         {
            big_convert[i] = (HYPRE_BigInt)fine_to_fine[i] + fpts_starts[0];
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Phase 6 (thread 0): exchange CF_marker and global indices for
       * off-processor columns; build off-diag column maps (unlike
       * GenerateFFFC, every off-diag column is kept — no usage filter) */
      if (my_thread_num == 0)
      {
         if (num_cols_A_offd)
         {
            CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
            big_convert_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST);
            fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
            fine_to_fine_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
         }
         index = 0;
         num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
         int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                      HYPRE_MEMORY_HOST);
         big_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                      HYPRE_MEMORY_HOST);
         for (i = 0; i < num_sends; i++)
         {
            startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            {
               int_buf_data[index] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
               big_buf_data[index++] = big_convert[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
            }
         }
         comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);
         comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, big_convert_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);
         num_cols_offd_A_FC = 0;
         num_cols_offd_A_FF = 0;
         if (num_cols_A_offd)
         {
            for (i = 0; i < num_cols_A_offd; i++)
            {
               if (CF_marker_offd[i] > 0)
               {
                  fine_to_coarse_offd[i] = num_cols_offd_A_FC++;
                  fine_to_fine_offd[i] = -1;
               }
               else
               {
                  fine_to_fine_offd[i] = num_cols_offd_A_FF++;
                  fine_to_coarse_offd[i] = -1;
               }
            }
            col_map_offd_A_FF = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A_FF, HYPRE_MEMORY_HOST);
            col_map_offd_A_FC = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A_FC, HYPRE_MEMORY_HOST);
            cpt = 0;
            fpt = 0;
            for (i = 0; i < num_cols_A_offd; i++)
            {
               if (CF_marker_offd[i] > 0)
               {
                  col_map_offd_A_FC[cpt++] = big_convert_offd[i];
               }
               else
               {
                  col_map_offd_A_FF[fpt++] = big_convert_offd[i];
               }
            }
         }
         /* A_FF has one row per new F point, A_FC one row per F point */
         A_FF_diag_i = hypre_CTAlloc(HYPRE_Int, n_new_Fpts + 1, memory_location_P);
         A_FC_diag_i = hypre_CTAlloc(HYPRE_Int, n_Fpts + 1, memory_location_P);
         A_FF_offd_i = hypre_CTAlloc(HYPRE_Int, n_new_Fpts + 1, memory_location_P);
         A_FC_offd_i = hypre_CTAlloc(HYPRE_Int, n_Fpts + 1, memory_location_P);
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Phase 7: counting pass.  -2 rows contribute to both A_FF and A_FC;
       * other F rows contribute only their C couplings to A_FC */
      d_count_FC = 0;
      d_count_FF = 0;
      o_count_FC = 0;
      o_count_FF = 0;
      row = new_fpt_array[my_thread_num];
      rowc = fpt_array[my_thread_num];
      for (i = start; i < stop; i++)
      {
         if (CF_marker[i] == -2)
         {
            row++;
            rowc++;
            d_count_FF++; /* account for diagonal element */
            for (j = S_diag_i[i]; j < S_diag_i[i + 1]; j++)
            {
               jj = S_diag_j[j];
               if (CF_marker[jj] > 0)
               {
                  d_count_FC++;
               }
               else
               {
                  d_count_FF++;
               }
            }
            A_FF_diag_i[row] = d_count_FF;
            A_FC_diag_i[rowc] = d_count_FC;
            for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++)
            {
               jj = S_offd_j[j];
               if (CF_marker_offd[jj] > 0)
               {
                  o_count_FC++;
               }
               else
               {
                  o_count_FF++;
               }
            }
            A_FF_offd_i[row] = o_count_FF;
            A_FC_offd_i[rowc] = o_count_FC;
         }
         else if (CF_marker[i] < 0)
         {
            rowc++;
            for (j = S_diag_i[i]; j < S_diag_i[i + 1]; j++)
            {
               jj = S_diag_j[j];
               if (CF_marker[jj] > 0)
               {
                  d_count_FC++;
               }
            }
            A_FC_diag_i[rowc] = d_count_FC;
            for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++)
            {
               jj = S_offd_j[j];
               if (CF_marker_offd[jj] > 0)
               {
                  o_count_FC++;
               }
            }
            A_FC_offd_i[rowc] = o_count_FC;
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Phase 8 (thread 0): stitch the thread-local counts into global row
       * pointers, then allocate j/data arrays at their final sizes */
      if (my_thread_num == 0)
      {
         HYPRE_Int fpt2, new_fpt2;
         for (i = 1; i < num_threads + 1; i++)
         {
            fpt = fpt_array[i];
            new_fpt = new_fpt_array[i];
            fpt2 = fpt_array[i - 1];
            new_fpt2 = new_fpt_array[i - 1];
            if (new_fpt != new_fpt2)
            {
               A_FF_diag_i[new_fpt] += A_FF_diag_i[new_fpt2];
               A_FF_offd_i[new_fpt] += A_FF_offd_i[new_fpt2];
            }
            if (fpt != fpt2)
            {
               A_FC_diag_i[fpt] += A_FC_diag_i[fpt2];
               A_FC_offd_i[fpt] += A_FC_offd_i[fpt2];
            }
         }
         row = new_fpt_array[num_threads];
         rowc = fpt_array[num_threads];
         d_count_FC = A_FC_diag_i[rowc];
         d_count_FF = A_FF_diag_i[row];
         o_count_FC = A_FC_offd_i[rowc];
         o_count_FF = A_FF_offd_i[row];
         A_FF_diag_j = hypre_CTAlloc(HYPRE_Int, d_count_FF, memory_location_P);
         A_FC_diag_j = hypre_CTAlloc(HYPRE_Int, d_count_FC, memory_location_P);
         A_FF_offd_j = hypre_CTAlloc(HYPRE_Int, o_count_FF, memory_location_P);
         A_FC_offd_j = hypre_CTAlloc(HYPRE_Int, o_count_FC, memory_location_P);
         A_FF_diag_data = hypre_CTAlloc(HYPRE_Real, d_count_FF, memory_location_P);
         A_FC_diag_data = hypre_CTAlloc(HYPRE_Real, d_count_FC, memory_location_P);
         A_FF_offd_data = hypre_CTAlloc(HYPRE_Real, o_count_FF, memory_location_P);
         A_FC_offd_data = hypre_CTAlloc(HYPRE_Real, o_count_FC, memory_location_P);
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Phase 9: fill pass — copy values from A following S's pattern.
       * NOTE(review): as in GenerateFFFC, the while-loop search assumes S's
       * row pattern is an ordered subset of A's row pattern (no bounds
       * check) — confirm against how S is built. */
      row = new_fpt_array[my_thread_num];
      rowc = fpt_array[my_thread_num];
      d_count_FC = A_FC_diag_i[rowc];
      d_count_FF = A_FF_diag_i[row];
      o_count_FC = A_FC_offd_i[rowc];
      o_count_FF = A_FF_offd_i[row];
      for (i = start; i < stop; i++)
      {
         if (CF_marker[i] == -2)
         {
            HYPRE_Int jS, jA;
            row++;
            rowc++;
            /* A's diagonal entry (stored first in the diag part) goes to A_FF */
            jA = A_diag_i[i];
            A_FF_diag_j[d_count_FF] = fine_to_fine[A_diag_j[jA]];
            A_FF_diag_data[d_count_FF++] = A_diag_data[jA++];
            for (j = S_diag_i[i]; j < S_diag_i[i + 1]; j++)
            {
               jA = A_diag_i[i] + 1;
               jS = S_diag_j[j];
               while (A_diag_j[jA] != jS) { jA++; }
               if (CF_marker[S_diag_j[j]] > 0)
               {
                  A_FC_diag_j[d_count_FC] = fine_to_coarse[A_diag_j[jA]];
                  A_FC_diag_data[d_count_FC++] = A_diag_data[jA++];
               }
               else
               {
                  A_FF_diag_j[d_count_FF] = fine_to_fine[A_diag_j[jA]];
                  A_FF_diag_data[d_count_FF++] = A_diag_data[jA++];
               }
            }
            A_FF_diag_i[row] = d_count_FF;
            A_FC_diag_i[rowc] = d_count_FC;
            for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++)
            {
               jA = A_offd_i[i];
               jS = S_offd_j[j];
               while (jS != A_offd_j[jA]) { jA++; }
               if (CF_marker_offd[S_offd_j[j]] > 0)
               {
                  A_FC_offd_j[o_count_FC] = fine_to_coarse_offd[A_offd_j[jA]];
                  A_FC_offd_data[o_count_FC++] = A_offd_data[jA++];
               }
               else
               {
                  A_FF_offd_j[o_count_FF] = fine_to_fine_offd[A_offd_j[jA]];
                  A_FF_offd_data[o_count_FF++] = A_offd_data[jA++];
               }
            }
            A_FF_offd_i[row] = o_count_FF;
            A_FC_offd_i[rowc] = o_count_FC;
         }
         else if (CF_marker[i] < 0)
         {
            /* plain F row: only its strong C couplings enter A_FC */
            HYPRE_Int jS, jA;
            rowc++;
            for (j = S_diag_i[i]; j < S_diag_i[i + 1]; j++)
            {
               jA = A_diag_i[i] + 1;
               jS = S_diag_j[j];
               while (A_diag_j[jA] != jS) { jA++; }
               if (CF_marker[S_diag_j[j]] > 0)
               {
                  A_FC_diag_j[d_count_FC] = fine_to_coarse[A_diag_j[jA]];
                  A_FC_diag_data[d_count_FC++] = A_diag_data[jA++];
               }
            }
            A_FC_diag_i[rowc] = d_count_FC;
            for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++)
            {
               jA = A_offd_i[i];
               jS = S_offd_j[j];
               while (jS != A_offd_j[jA]) { jA++; }
               if (CF_marker_offd[S_offd_j[j]] > 0)
               {
                  A_FC_offd_j[o_count_FC] = fine_to_coarse_offd[A_offd_j[jA]];
                  A_FC_offd_data[o_count_FC++] = A_offd_data[jA++];
               }
            }
            A_FC_offd_i[rowc] = o_count_FC;
         }
      }
   } /*end parallel region */

   /* Assemble outputs: A_FC is (global F) x (global C), A_FF is
    * (global new-F) x (global F) */
   A_FC = hypre_ParCSRMatrixCreate(comm,
                                   total_global_fpts,
                                   total_global_cpts,
                                   fpts_starts,
                                   cpts_starts,
                                   num_cols_offd_A_FC,
                                   A_FC_diag_i[n_Fpts],
                                   A_FC_offd_i[n_Fpts]);
   A_FF = hypre_ParCSRMatrixCreate(comm,
                                   total_global_new_fpts,
                                   total_global_fpts,
                                   new_fpts_starts,
                                   fpts_starts,
                                   num_cols_offd_A_FF,
                                   A_FF_diag_i[n_new_Fpts],
                                   A_FF_offd_i[n_new_Fpts]);

   A_FC_diag = hypre_ParCSRMatrixDiag(A_FC);
   hypre_CSRMatrixData(A_FC_diag) = A_FC_diag_data;
   hypre_CSRMatrixI(A_FC_diag) = A_FC_diag_i;
   hypre_CSRMatrixJ(A_FC_diag) = A_FC_diag_j;
   A_FC_offd = hypre_ParCSRMatrixOffd(A_FC);
   hypre_CSRMatrixData(A_FC_offd) = A_FC_offd_data;
   hypre_CSRMatrixI(A_FC_offd) = A_FC_offd_i;
   hypre_CSRMatrixJ(A_FC_offd) = A_FC_offd_j;
   hypre_ParCSRMatrixColMapOffd(A_FC) = col_map_offd_A_FC;
   hypre_CSRMatrixMemoryLocation(A_FC_diag) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(A_FC_offd) = memory_location_P;

   A_FF_diag = hypre_ParCSRMatrixDiag(A_FF);
   hypre_CSRMatrixData(A_FF_diag) = A_FF_diag_data;
   hypre_CSRMatrixI(A_FF_diag) = A_FF_diag_i;
   hypre_CSRMatrixJ(A_FF_diag) = A_FF_diag_j;
   A_FF_offd = hypre_ParCSRMatrixOffd(A_FF);
   hypre_CSRMatrixData(A_FF_offd) = A_FF_offd_data;
   hypre_CSRMatrixI(A_FF_offd) = A_FF_offd_i;
   hypre_CSRMatrixJ(A_FF_offd) = A_FF_offd_j;
   hypre_ParCSRMatrixColMapOffd(A_FF) = col_map_offd_A_FF;
   hypre_CSRMatrixMemoryLocation(A_FF_diag) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(A_FF_offd) = memory_location_P;

   /* free host-side scratch arrays */
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_fine, HYPRE_MEMORY_HOST);
   hypre_TFree(big_convert, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_fine_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(big_convert_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
   hypre_TFree(fpt_array, HYPRE_MEMORY_HOST);
   hypre_TFree(new_fpt_array, HYPRE_MEMORY_HOST);

   *A_FC_ptr = A_FC;
   *A_FF_ptr = A_FF;

   return hypre_error_flag;
}

/* -----------------------------------------------------------------------------
 * generate AFF, AFC, AFFC for 2 stage extended+i(e)interpolation
 *
----------------------------------------------------------------------------- */ HYPRE_Int hypre_ParCSRMatrixGenerateFFFCD3( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *cpts_starts, hypre_ParCSRMatrix *S, hypre_ParCSRMatrix **A_FC_ptr, hypre_ParCSRMatrix **A_FF_ptr, HYPRE_Real **D_lambda_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); /* diag part of S */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); /* off-diag part of S */ hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Real *D_lambda; hypre_ParCSRMatrix *A_FC; hypre_CSRMatrix *A_FC_diag, *A_FC_offd; HYPRE_Int *A_FC_diag_i, *A_FC_diag_j, *A_FC_offd_i, *A_FC_offd_j = NULL; HYPRE_Complex *A_FC_diag_data, *A_FC_offd_data = NULL; HYPRE_Int num_cols_offd_A_FC; HYPRE_BigInt *col_map_offd_A_FC = NULL; hypre_ParCSRMatrix *A_FF; hypre_CSRMatrix *A_FF_diag, *A_FF_offd; HYPRE_Int *A_FF_diag_i, *A_FF_diag_j, *A_FF_offd_i, *A_FF_offd_j; HYPRE_Complex *A_FF_diag_data, *A_FF_offd_data; HYPRE_Int num_cols_offd_A_FF; HYPRE_BigInt *col_map_offd_A_FF = NULL; HYPRE_Int 
*fine_to_coarse; HYPRE_Int *fine_to_fine; HYPRE_Int *fine_to_coarse_offd = NULL; HYPRE_Int *fine_to_fine_offd = NULL; HYPRE_Int i, j, jj; HYPRE_Int startc, index; HYPRE_Int cpt, fpt, new_fpt, row, rowc; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *int_buf_data = NULL; HYPRE_BigInt *big_convert; HYPRE_BigInt *big_convert_offd = NULL; HYPRE_BigInt *big_buf_data = NULL; HYPRE_BigInt total_global_fpts, total_global_cpts, total_global_new_fpts; HYPRE_BigInt fpts_starts[2], new_fpts_starts[2]; HYPRE_Int my_id, num_procs, num_sends; HYPRE_Int d_count_FF, d_count_FC, o_count_FF, o_count_FC; HYPRE_Int n_Fpts; HYPRE_Int n_new_Fpts; HYPRE_Int *cpt_array, *fpt_array, *new_fpt_array; HYPRE_Int start, stop; HYPRE_Int num_threads; num_threads = hypre_NumThreads(); /* MPI size and rank*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); fine_to_fine = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); big_convert = hypre_CTAlloc(HYPRE_BigInt, n_fine, HYPRE_MEMORY_HOST); cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads + 1, HYPRE_MEMORY_HOST); fpt_array = hypre_CTAlloc(HYPRE_Int, num_threads + 1, HYPRE_MEMORY_HOST); new_fpt_array = hypre_CTAlloc(HYPRE_Int, num_threads + 1, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,jj,start,stop,row,rowc,cpt,new_fpt,fpt,d_count_FC,d_count_FF,o_count_FC,o_count_FF) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); start = (n_fine / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { stop = n_fine; } else { stop = (n_fine / num_threads) * (my_thread_num + 1); } for (i = start; i < stop; i++) { if (CF_marker[i] > 0) { cpt_array[my_thread_num + 1]++; } else if (CF_marker[i] == -2) { new_fpt_array[my_thread_num + 1]++; fpt_array[my_thread_num + 1]++; } else { fpt_array[my_thread_num + 1]++; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { for (i = 1; i < 
num_threads; i++) { cpt_array[i + 1] += cpt_array[i]; fpt_array[i + 1] += fpt_array[i]; new_fpt_array[i + 1] += new_fpt_array[i]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif cpt = cpt_array[my_thread_num]; fpt = fpt_array[my_thread_num]; for (i = start; i < stop; i++) { if (CF_marker[i] > 0) { fine_to_coarse[i] = cpt++; fine_to_fine[i] = -1; } else { fine_to_fine[i] = fpt++; fine_to_coarse[i] = -1; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { HYPRE_BigInt big_Fpts, big_new_Fpts; n_Fpts = fpt_array[num_threads]; n_new_Fpts = new_fpt_array[num_threads]; big_Fpts = n_Fpts; big_new_Fpts = n_new_Fpts; hypre_MPI_Scan(&big_Fpts, fpts_starts + 1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); hypre_MPI_Scan(&big_new_Fpts, new_fpts_starts + 1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); fpts_starts[0] = fpts_starts[1] - big_Fpts; new_fpts_starts[0] = new_fpts_starts[1] - big_new_Fpts; if (my_id == num_procs - 1) { total_global_new_fpts = new_fpts_starts[1]; total_global_fpts = fpts_starts[1]; total_global_cpts = cpts_starts[1]; } hypre_MPI_Bcast(&total_global_new_fpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); hypre_MPI_Bcast(&total_global_fpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i = start; i < stop; i++) { if (CF_marker[i] > 0) { big_convert[i] = (HYPRE_BigInt)fine_to_coarse[i] + cpts_starts[0]; } else { big_convert[i] = (HYPRE_BigInt)fine_to_fine[i] + fpts_starts[0]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { if (num_cols_A_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); big_convert_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); fine_to_fine_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, 
HYPRE_MEMORY_HOST); } index = 0; num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); big_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); for (i = 0; i < num_sends; i++) { startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { int_buf_data[index] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; big_buf_data[index++] = big_convert[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, big_convert_offd); hypre_ParCSRCommHandleDestroy(comm_handle); num_cols_offd_A_FC = 0; num_cols_offd_A_FF = 0; if (num_cols_A_offd) { for (i = 0; i < num_cols_A_offd; i++) { if (CF_marker_offd[i] > 0) { fine_to_coarse_offd[i] = num_cols_offd_A_FC++; fine_to_fine_offd[i] = -1; } else { fine_to_fine_offd[i] = num_cols_offd_A_FF++; fine_to_coarse_offd[i] = -1; } } col_map_offd_A_FF = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A_FF, HYPRE_MEMORY_HOST); col_map_offd_A_FC = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A_FC, HYPRE_MEMORY_HOST); cpt = 0; fpt = 0; for (i = 0; i < num_cols_A_offd; i++) { if (CF_marker_offd[i] > 0) { col_map_offd_A_FC[cpt++] = big_convert_offd[i]; } else { col_map_offd_A_FF[fpt++] = big_convert_offd[i]; } } } A_FF_diag_i = hypre_CTAlloc(HYPRE_Int, n_new_Fpts + 1, memory_location_P); A_FC_diag_i = hypre_CTAlloc(HYPRE_Int, n_Fpts + 1, memory_location_P); A_FF_offd_i = hypre_CTAlloc(HYPRE_Int, n_new_Fpts + 1, memory_location_P); A_FC_offd_i = hypre_CTAlloc(HYPRE_Int, n_Fpts + 1, memory_location_P); D_lambda = hypre_CTAlloc(HYPRE_Real, n_Fpts, memory_location_P); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif 
d_count_FC = 0; d_count_FF = 0; o_count_FC = 0; o_count_FF = 0; row = new_fpt_array[my_thread_num]; rowc = fpt_array[my_thread_num]; for (i = start; i < stop; i++) { if (CF_marker[i] == -2) { row++; rowc++; d_count_FF++; /* account for diagonal element */ for (j = S_diag_i[i]; j < S_diag_i[i + 1]; j++) { jj = S_diag_j[j]; if (CF_marker[jj] > 0) { d_count_FC++; } else { d_count_FF++; } } A_FF_diag_i[row] = d_count_FF; A_FC_diag_i[rowc] = d_count_FC; for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++) { jj = S_offd_j[j]; if (CF_marker_offd[jj] > 0) { o_count_FC++; } else { o_count_FF++; } } A_FF_offd_i[row] = o_count_FF; A_FC_offd_i[rowc] = o_count_FC; } else if (CF_marker[i] < 0) { rowc++; for (j = S_diag_i[i]; j < S_diag_i[i + 1]; j++) { jj = S_diag_j[j]; if (CF_marker[jj] > 0) { d_count_FC++; } } A_FC_diag_i[rowc] = d_count_FC; for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++) { jj = S_offd_j[j]; if (CF_marker_offd[jj] > 0) { o_count_FC++; } } A_FC_offd_i[rowc] = o_count_FC; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { HYPRE_Int fpt2, new_fpt2; for (i = 1; i < num_threads + 1; i++) { fpt = fpt_array[i]; new_fpt = new_fpt_array[i]; fpt2 = fpt_array[i - 1]; new_fpt2 = new_fpt_array[i - 1]; if (fpt != fpt2) { A_FC_diag_i[fpt] += A_FC_diag_i[fpt2]; A_FC_offd_i[fpt] += A_FC_offd_i[fpt2]; } if (new_fpt != new_fpt2) { A_FF_diag_i[new_fpt] += A_FF_diag_i[new_fpt2]; A_FF_offd_i[new_fpt] += A_FF_offd_i[new_fpt2]; } } row = new_fpt_array[num_threads]; rowc = fpt_array[num_threads]; d_count_FC = A_FC_diag_i[rowc]; d_count_FF = A_FF_diag_i[row]; o_count_FC = A_FC_offd_i[rowc]; o_count_FF = A_FF_offd_i[row]; A_FF_diag_j = hypre_CTAlloc(HYPRE_Int, d_count_FF, memory_location_P); A_FC_diag_j = hypre_CTAlloc(HYPRE_Int, d_count_FC, memory_location_P); A_FF_offd_j = hypre_CTAlloc(HYPRE_Int, o_count_FF, memory_location_P); A_FC_offd_j = hypre_CTAlloc(HYPRE_Int, o_count_FC, memory_location_P); A_FF_diag_data = hypre_CTAlloc(HYPRE_Real, d_count_FF, 
memory_location_P); A_FC_diag_data = hypre_CTAlloc(HYPRE_Real, d_count_FC, memory_location_P); A_FF_offd_data = hypre_CTAlloc(HYPRE_Real, o_count_FF, memory_location_P); A_FC_offd_data = hypre_CTAlloc(HYPRE_Real, o_count_FC, memory_location_P); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif row = new_fpt_array[my_thread_num]; rowc = fpt_array[my_thread_num]; d_count_FC = A_FC_diag_i[rowc]; d_count_FF = A_FF_diag_i[row]; o_count_FC = A_FC_offd_i[rowc]; o_count_FF = A_FF_offd_i[row]; for (i = start; i < stop; i++) { if (CF_marker[i] == -2) { HYPRE_Int jS, jA; HYPRE_Real sum = 0; row++; jA = A_diag_i[i]; A_FF_diag_j[d_count_FF] = fine_to_fine[A_diag_j[jA]]; A_FF_diag_data[d_count_FF++] = A_diag_data[jA++]; for (j = S_diag_i[i]; j < S_diag_i[i + 1]; j++) { jA = A_diag_i[i] + 1; jS = S_diag_j[j]; while (A_diag_j[jA] != jS) { jA++; } if (CF_marker[S_diag_j[j]] > 0) { A_FC_diag_j[d_count_FC] = fine_to_coarse[A_diag_j[jA]]; A_FC_diag_data[d_count_FC++] = A_diag_data[jA++]; } else { sum += 1; D_lambda[rowc] += A_diag_data[jA]; A_FF_diag_j[d_count_FF] = fine_to_fine[A_diag_j[jA]]; A_FF_diag_data[d_count_FF++] = A_diag_data[jA++]; } } for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++) { jA = A_offd_i[i]; jS = S_offd_j[j]; while (jS != A_offd_j[jA]) { jA++; } if (CF_marker_offd[S_offd_j[j]] > 0) { A_FC_offd_j[o_count_FC] = fine_to_coarse_offd[A_offd_j[jA]]; A_FC_offd_data[o_count_FC++] = A_offd_data[jA++]; } else { sum += 1; D_lambda[rowc] += A_offd_data[jA]; A_FF_offd_j[o_count_FF] = fine_to_fine_offd[A_offd_j[jA]]; A_FF_offd_data[o_count_FF++] = A_offd_data[jA++]; } } if (sum) { D_lambda[rowc] = D_lambda[rowc] / sum; } rowc++; A_FF_diag_i[row] = d_count_FF; A_FC_diag_i[rowc] = d_count_FC; A_FF_offd_i[row] = o_count_FF; A_FC_offd_i[rowc] = o_count_FC; } else if (CF_marker[i] < 0) { HYPRE_Int jS, jA; HYPRE_Real sum = 0; for (j = S_diag_i[i]; j < S_diag_i[i + 1]; j++) { jA = A_diag_i[i] + 1; jS = S_diag_j[j]; while (A_diag_j[jA] != jS) { jA++; } if (CF_marker[S_diag_j[j]] 
> 0) { A_FC_diag_j[d_count_FC] = fine_to_coarse[A_diag_j[jA]]; A_FC_diag_data[d_count_FC++] = A_diag_data[jA++]; } else { sum += 1; D_lambda[rowc] += A_diag_data[jA]; } } for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++) { jA = A_offd_i[i]; jS = S_offd_j[j]; while (jS != A_offd_j[jA]) { jA++; } if (CF_marker_offd[S_offd_j[j]] > 0) { A_FC_offd_j[o_count_FC] = fine_to_coarse_offd[A_offd_j[jA]]; A_FC_offd_data[o_count_FC++] = A_offd_data[jA++]; } else { sum += 1; D_lambda[rowc] += A_offd_data[jA]; } } if (sum) { D_lambda[rowc] = D_lambda[rowc] / sum; } rowc++; A_FC_diag_i[rowc] = d_count_FC; A_FC_offd_i[rowc] = o_count_FC; } } } /*end parallel region */ A_FC = hypre_ParCSRMatrixCreate(comm, total_global_fpts, total_global_cpts, fpts_starts, cpts_starts, num_cols_offd_A_FC, A_FC_diag_i[n_Fpts], A_FC_offd_i[n_Fpts]); A_FF = hypre_ParCSRMatrixCreate(comm, total_global_new_fpts, total_global_fpts, new_fpts_starts, fpts_starts, num_cols_offd_A_FF, A_FF_diag_i[n_new_Fpts], A_FF_offd_i[n_new_Fpts]); A_FC_diag = hypre_ParCSRMatrixDiag(A_FC); hypre_CSRMatrixData(A_FC_diag) = A_FC_diag_data; hypre_CSRMatrixI(A_FC_diag) = A_FC_diag_i; hypre_CSRMatrixJ(A_FC_diag) = A_FC_diag_j; A_FC_offd = hypre_ParCSRMatrixOffd(A_FC); hypre_CSRMatrixData(A_FC_offd) = A_FC_offd_data; hypre_CSRMatrixI(A_FC_offd) = A_FC_offd_i; hypre_CSRMatrixJ(A_FC_offd) = A_FC_offd_j; hypre_ParCSRMatrixColMapOffd(A_FC) = col_map_offd_A_FC; hypre_CSRMatrixMemoryLocation(A_FC_diag) = memory_location_P; hypre_CSRMatrixMemoryLocation(A_FC_offd) = memory_location_P; A_FF_diag = hypre_ParCSRMatrixDiag(A_FF); hypre_CSRMatrixData(A_FF_diag) = A_FF_diag_data; hypre_CSRMatrixI(A_FF_diag) = A_FF_diag_i; hypre_CSRMatrixJ(A_FF_diag) = A_FF_diag_j; A_FF_offd = hypre_ParCSRMatrixOffd(A_FF); hypre_CSRMatrixData(A_FF_offd) = A_FF_offd_data; hypre_CSRMatrixI(A_FF_offd) = A_FF_offd_i; hypre_CSRMatrixJ(A_FF_offd) = A_FF_offd_j; hypre_ParCSRMatrixColMapOffd(A_FF) = col_map_offd_A_FF; hypre_CSRMatrixMemoryLocation(A_FF_diag) = 
memory_location_P; hypre_CSRMatrixMemoryLocation(A_FF_offd) = memory_location_P; hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_fine, HYPRE_MEMORY_HOST); hypre_TFree(big_convert, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_fine_offd, HYPRE_MEMORY_HOST); hypre_TFree(big_convert_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(cpt_array, HYPRE_MEMORY_HOST); hypre_TFree(fpt_array, HYPRE_MEMORY_HOST); hypre_TFree(new_fpt_array, HYPRE_MEMORY_HOST); *A_FC_ptr = A_FC; *A_FF_ptr = A_FF; *D_lambda_ptr = D_lambda; return hypre_error_flag; }
paint.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/cache.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. % However, in many cases two colors may differ by a small amount. The % fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now % interpreted as the same color for the purposes of the floodfill. % % The format of the FloodfillPaintImage method is: % % MagickBooleanType FloodfillPaintImage(Image *image, % const ChannelType channel,const DrawInfo *draw_info, % const MagickPixelPacket target,const ssize_t x_offset, % const ssize_t y_offset,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel(s). % % o draw_info: the draw info. % % o target: the RGB value of the target color. 
%
%    o x_offset,y_offset: the starting location of the operation.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
  const ChannelType channel,const DrawInfo *draw_info,
  const MagickPixelPacket *target,const ssize_t x_offset,const ssize_t y_offset,
  const MagickBooleanType invert)
{
/*
  Iterative scanline flood fill.  Pending row segments are kept on an explicit
  fixed-size stack (instead of recursing) so arbitrarily large regions cannot
  overflow the call stack.  Each stack entry encodes:  x1=left column,
  y1=parent row, x2=right column, y2=direction to the child row (+1 or -1).
  PushSegmentStack discards segments that would fall outside the image rows
  and raises SegmentStackOverflow if more than MaxStacksize segments are
  pending.
*/
#define MaxStacksize  131072UL
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}

  CacheView
    *floodplane_view,
    *image_view;

  ExceptionInfo
    *exception;

  Image
    *floodplane_image;

  MagickBooleanType
    skip;

  MagickPixelPacket
    fill,
    pixel;

  MemoryInfo
    *segment_info;

  PixelPacket
    fill_color;

  register SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x,
    x1,
    x2,
    y;

  /*
    Check boundary conditions:  the seed point must lie inside the image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickSignature);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  /* a translucent fill requires an alpha channel on the target image */
  if ((image->matte == MagickFalse) &&
      (draw_info->fill.opacity != OpaqueOpacity))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Set floodfill state.  The filled region is first marked (as transparent
    opacity) in a scratch clone, floodplane_image; the fill color is applied
    to the target image in a second pass below.
  */
  floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
  segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack));
  if (segment_info == (MemoryInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info);
  /*
    Push initial segment on stack:  seed the row containing the start pixel
    and the row above it (direction -1).
  */
  exception=(&image->exception);
  x=x_offset;
  y=y_offset;
  start=0;
  s=segment_stack;
  PushSegmentStack(y,x,x,1);
  PushSegmentStack(y+1,x,x,-1);
  GetMagickPixelPacket(image,&fill);
  GetMagickPixelPacket(image,&pixel);
  image_view=AcquireVirtualCacheView(image,exception);
  floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception);
  while (s > segment_stack)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels:  walk left from x1 marking matching pixels
      transparent in the floodplane; stop at the first non-matching or
      already-marked pixel.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
    q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    p+=x1;
    q+=x1;
    for (x=x1; x >= 0; x--)
    {
      if (q->opacity == (Quantum) TransparentOpacity)
        break;
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if (IsMagickColorSimilar(&pixel,target) == invert)
        break;
      q->opacity=(Quantum) TransparentOpacity;
      p--;
      q--;
    }
    if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
      break;
    /* skip==MagickTrue means the leftward scan marked nothing at x1 */
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          if (x < (ssize_t) image->columns)
            {
              /*
                Extend the current run to the right of x1.
              */
              p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
                exception);
              q=GetCacheViewAuthenticPixels(floodplane_view,x,y,
                image->columns-x,1,exception);
              if ((p == (const PixelPacket *) NULL) ||
                  (q == (PixelPacket *) NULL))
                break;
              indexes=GetCacheViewVirtualIndexQueue(image_view);
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (q->opacity == (Quantum) TransparentOpacity)
                  break;
                SetMagickPixelPacket(image,p,indexes+x,&pixel);
                if (IsMagickColorSimilar(&pixel,target) == invert)
                  break;
                q->opacity=(Quantum) TransparentOpacity;
                p++;
                q++;
              }
              if (SyncCacheViewAuthenticPixels(floodplane_view,exception) ==
                  MagickFalse)
                break;
            }
          /*
            Queue the row in the travel direction above/below the run just
            painted, plus the overshoot past x2 in the opposite direction.
          */
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      if (x <= x2)
        {
          /*
            Skip over non-matching pixels up to x2 to find the start of the
            next run within this segment.
          */
          p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
            exception);
          q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),
            1,exception);
          if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
            break;
          indexes=GetCacheViewVirtualIndexQueue(image_view);
          for ( ; x <= x2; x++)
          {
            if (q->opacity == (Quantum) TransparentOpacity)
              break;
            SetMagickPixelPacket(image,p,indexes+x,&pixel);
            if (IsMagickColorSimilar(&pixel,target) != invert)
              break;
            p++;
            q++;
          }
        }
      start=x;
    } while (x <= x2);
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    /*
      Tile fill color onto floodplane:  every pixel marked non-opaque in the
      floodplane receives the (possibly patterned) fill color, restricted to
      the requested channels.
    */
    p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        {
          (void) GetFillColor(draw_info,x,y,&fill_color);
          SetMagickPixelPacket(image,&fill_color,(IndexPacket *) NULL,&fill);
          if (image->colorspace == CMYKColorspace)
            ConvertRGBToCMYK(&fill);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(fill.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(fill.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(fill.blue));
          if (((channel & OpacityChannel) != 0) ||
              (draw_info->fill.opacity != OpaqueOpacity))
            SetPixelOpacity(q,ClampToQuantum(fill.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,ClampToQuantum(fill.index));
        }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  floodplane_view=DestroyCacheView(floodplane_view);
  image_view=DestroyCacheView(image_view);
  segment_info=RelinquishVirtualMemory(segment_info);
  floodplane_image=DestroyImage(floodplane_image);
  /* MagickTrue only if every row was recolored without a cache failure */
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     G r a d i e n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GradientImage() applies continuously smooth color transitions along a
%  vector from one color to another.
%
%  Note, the interface of this method will change in the future to support
%  more than one transition.
%
%  The format of the GradientImage method is:
%
%      MagickBooleanType GradientImage(Image *image,const GradientType type,
%        const SpreadMethod method,const PixelPacket *start_color,
%        const PixelPacket *stop_color)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the gradient type: linear or radial.
%
%    o spread: the gradient spread method: pad, reflect, or repeat.
%
%    o start_color: the start color.
%
%    o stop_color: the stop color.
%
%  This provides a good example of making use of the DrawGradientImage
%  function and the gradient structure in draw_info.
%
*/

/* Return the larger of two doubles (used for the radial gradient radius). */
static inline double MagickMax(const double x,const double y)
{
  return(x > y ? x : y);
}

MagickExport MagickBooleanType GradientImage(Image *image,
  const GradientType type,const SpreadMethod method,
  const PixelPacket *start_color,const PixelPacket *stop_color)
{
  DrawInfo
    *draw_info;

  GradientInfo
    *gradient;

  MagickBooleanType
    status;

  register ssize_t
    i;

  /*
    Set gradient start-stop end points.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(start_color != (const PixelPacket *) NULL);
  assert(stop_color != (const PixelPacket *) NULL);
  draw_info=AcquireDrawInfo();
  gradient=(&draw_info->gradient);
  gradient->type=type;
  gradient->bounding_box.width=image->columns;
  gradient->bounding_box.height=image->rows;
  gradient->gradient_vector.x2=(double) image->columns-1.0;
  gradient->gradient_vector.y2=(double) image->rows-1.0;
  /* a linear gradient on a multi-row image runs top-to-bottom */
  if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
    gradient->gradient_vector.x2=0.0;
  gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
  gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
  gradient->radius=MagickMax(gradient->center.x,gradient->center.y);
  gradient->spread=method;
  /*
    Define the gradient to fill between the stops.
  */
  gradient->number_stops=2;
  gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
    sizeof(*gradient->stops));
  if (gradient->stops == (StopInfo *) NULL)
    {
      /* release draw_info before throwing; previously it leaked here */
      draw_info=DestroyDrawInfo(draw_info);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) ResetMagickMemory(gradient->stops,0,gradient->number_stops*
    sizeof(*gradient->stops));
  for (i=0; i < (ssize_t) gradient->number_stops; i++)
    GetMagickPixelPacket(image,&gradient->stops[i].color);
  SetMagickPixelPacket(image,start_color,(IndexPacket *) NULL,
    &gradient->stops[0].color);
  gradient->stops[0].offset=0.0;
  SetMagickPixelPacket(image,stop_color,(IndexPacket *) NULL,
    &gradient->stops[1].color);
  gradient->stops[1].offset=1.0;
  /*
    Draw a gradient on the image.
  */
  status=DrawGradientImage(image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O i l P a i n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OilPaintImage() applies a special effect filter that simulates an oil
%  painting.  Each pixel is replaced by the most frequent color occurring
%  in a circular region defined by radius.
%
%  The format of the OilPaintImage method is:
%
%      Image *OilPaintImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the circular neighborhood.
%
%    o exception: return any errors or warnings in this structure.
% */ static size_t **DestroyHistogramThreadSet(size_t **histogram) { register ssize_t i; assert(histogram != (size_t **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (histogram[i] != (size_t *) NULL) histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]); histogram=(size_t **) RelinquishMagickMemory(histogram); return(histogram); } static size_t **AcquireHistogramThreadSet(const size_t count) { register ssize_t i; size_t **histogram, number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); histogram=(size_t **) AcquireQuantumMemory(number_threads, sizeof(*histogram)); if (histogram == (size_t **) NULL) return((size_t **) NULL); (void) ResetMagickMemory(histogram,0,number_threads*sizeof(*histogram)); for (i=0; i < (ssize_t) number_threads; i++) { histogram[i]=(size_t *) AcquireQuantumMemory(count, sizeof(**histogram)); if (histogram[i] == (size_t *) NULL) return(DestroyHistogramThreadSet(histogram)); } return(histogram); } MagickExport Image *OilPaintImage(const Image *image,const double radius, ExceptionInfo *exception) { #define NumberPaintBins 256 #define OilPaintImageTag "OilPaint/Image" CacheView *image_view, *paint_view; Image *linear_image, *paint_image; MagickBooleanType status; MagickOffsetType progress; size_t **restrict histograms, width; ssize_t y; /* Initialize painted image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); width=GetOptimalKernelWidth2D(radius,0.5); linear_image=CloneImage(image,0,0,MagickTrue,exception); paint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL)) { if (linear_image != (Image *) NULL) linear_image=DestroyImage(linear_image); if (paint_image != (Image *) NULL) linear_image=DestroyImage(paint_image); return((Image *) NULL); } if (SetImageStorageClass(paint_image,DirectClass) == MagickFalse) { InheritException(exception,&paint_image->exception); linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); return((Image *) NULL); } histograms=AcquireHistogramThreadSet(NumberPaintBins); if (histograms == (size_t **) NULL) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Oil paint image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(linear_image,exception); paint_view=AcquireAuthenticCacheView(paint_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(linear_image,paint_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict paint_indexes; register ssize_t x; register PixelPacket *restrict q; register size_t *histogram; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (width/2L),linear_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); paint_indexes=GetCacheViewAuthenticIndexQueue(paint_view); histogram=histograms[GetOpenMPThreadId()]; for (x=0; x < (ssize_t) linear_image->columns; x++) { register ssize_t i, u; size_t count; ssize_t j, k, v; /* Assign most frequent color. 
*/ i=0; j=0; count=0; (void) ResetMagickMemory(histogram,0,NumberPaintBins*sizeof(*histogram)); for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { k=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity( linear_image,p+u+i))); histogram[k]++; if (histogram[k] > count) { j=i+u; count=histogram[k]; } } i+=(ssize_t) (linear_image->columns+width); } *q=(*(p+j)); if (linear_image->colorspace == CMYKColorspace) SetPixelIndex(paint_indexes+x,GetPixelIndex(indexes+x+j)); p++; q++; } if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OilPaintImage) #endif proceed=SetImageProgress(image,OilPaintImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } paint_view=DestroyCacheView(paint_view); image_view=DestroyCacheView(image_view); histograms=DestroyHistogramThreadSet(histograms); linear_image=DestroyImage(linear_image); if (status == MagickFalse) paint_image=DestroyImage(paint_image); return(paint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p a q u e P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpaquePaintImage() changes any pixel that matches color with the color % defined by fill. % % By default color must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. Fuzz defines % how much tolerance is acceptable to consider two colors as the same. % For example, set fuzz to 10 and the color red at intensities of 100 and % 102 respectively are now interpreted as the same color. 
%
%  The format of the OpaquePaintImage method is:
%
%      MagickBooleanType OpaquePaintImage(Image *image,
%        const MagickPixelPacket *target,const MagickPixelPacket *fill,
%        const MagickBooleanType invert)
%      MagickBooleanType OpaquePaintImageChannel(Image *image,
%        const ChannelType channel,const MagickPixelPacket *target,
%        const MagickPixelPacket *fill,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel(s).
%
%    o target: the RGB value of the target color.
%
%    o fill: the replacement color.
%
%    o invert: paint any pixel that does not match the target color.
%
*/

/*
  Convenience wrapper:  paint on the default composite channels.
*/
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
  const MagickPixelPacket *target,const MagickPixelPacket *fill,
  const MagickBooleanType invert)
{
  return(OpaquePaintImageChannel(image,CompositeChannels,target,fill,invert));
}

MagickExport MagickBooleanType OpaquePaintImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *target,
  const MagickPixelPacket *fill,const MagickBooleanType invert)
{
#define OpaquePaintImageTag  "Opaque/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(target != (MagickPixelPacket *) NULL);
  assert(fill != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* a non-gray fill on a gray image forces promotion to sRGB */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsMagickGray(fill) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /* a translucent fill requires an alpha channel */
  if ((fill->opacity != OpaqueOpacity) && (image->matte == MagickFalse))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Make image color opaque:  each row is processed independently, so the
    loop parallelizes over rows.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* invert flips the match sense: paint non-matching pixels instead */
      if (IsMagickColorSimilar(&pixel,target) != invert)
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(fill->red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(fill->green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(fill->blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(fill->opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,ClampToQuantum(fill->index));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_OpaquePaintImageChannel)
#endif
        proceed=SetImageProgress(image,OpaquePaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImage() changes the opacity value associated with any pixel
%  that matches color to the value defined by opacity.
%
%  By default color must match a particular pixel color exactly.  However,
%  in many cases two colors may differ by a small amount.  Fuzz defines
%  how much tolerance is acceptable to consider two colors as the same.
%  For example, set fuzz to 10 and the color red at intensities of 100 and
%  102 respectively are now interpreted as the same color.
%
%  The format of the TransparentPaintImage method is:
%
%      MagickBooleanType TransparentPaintImage(Image *image,
%        const MagickPixelPacket *target,const Quantum opacity,
%        const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
  const MagickPixelPacket *target,const Quantum opacity,
  const MagickBooleanType invert)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(target != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* writing opacity requires an alpha channel on the image */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Make image color transparent:  rows are independent, so the loop
    parallelizes over rows.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* invert flips the match sense: paint non-matching pixels instead */
      if (IsMagickColorSimilar(&pixel,target) != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImage)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e C h r o m a                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImageChroma() changes the opacity value associated with any
%  pixel that matches color to the value defined by opacity.
%
%  As there is one fuzz value for all of the channels, the
%  TransparentPaintImage() API is not suitable for operations like chroma,
%  where the tolerance for similarity of two color components (RGB) can be
%  different.  Thus we define this method to take two target pixels (one
%  low and one high) and all the pixels of an image which lie between
%  these two pixels are made transparent.
%
%  The format of the TransparentPaintImageChroma method is:
%
%      MagickBooleanType TransparentPaintImageChroma(Image *image,
%        const MagickPixelPacket *low,const MagickPixelPacket *high,
%        const Quantum opacity,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o low: the low target color.
%
%    o high: the high target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
  const MagickPixelPacket *low,const MagickPixelPacket *high,
  const Quantum opacity,const MagickBooleanType invert)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(high != (MagickPixelPacket *) NULL);
  assert(low != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* The image must be DirectClass so every pixel carries its own opacity. */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* NOTE(review): unlike TransparentPaintImage() this resets (rather than
     opaques) a missing alpha channel -- presumably intentional; confirm
     against callers before changing. */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,ResetAlphaChannel);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      match;

    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    /* Another row already failed; skip work but keep the loop count intact
       (OpenMP loops cannot break early). */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* Per-channel inclusive range test between the low and high targets. */
      match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
        (pixel.green >= low->green) && (pixel.green <= high->green) &&
        (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue :
        MagickFalse;
      if (match != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImageChroma)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
1237.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp target teams distribute private(j) for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp target teams distribute parallel for schedule(static, 14) num_threads(14) for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
binnedBase.h
#ifndef binnedBase_h
#define binnedBase_h
#include "../../baseFunctions/fpForestBase.h"
#include <vector>
#include <stdio.h>
#include <ctime>
#include <chrono>
#include <cstdlib>
#include "binStruct.h"
#include <random>
#include <numeric>
#include <iostream>
#include <algorithm>
#include <map>

namespace fp {

	// A decision forest whose trees are grouped into "bins": each bin owns a
	// contiguous subset of the trees and is grown and queried as a unit so
	// that bins can be processed in parallel with OpenMP.  Configuration
	// (tree count, bin count, thread count, data access) all comes from the
	// global fpSingleton.
	template <typename T, typename Q>
	class binnedBase : public fpForestBase<T>
	{
		protected:
			std::vector<binStruct<T, Q> > bins;            // one entry per bin of trees
			int numBins;                                   // number of bins in use
			std::map<std::pair<int, int>, double> pairMat; // pairwise stats; only exposed via returnPairMat()
			std::vector<int> binSizes;                     // trees assigned to each bin
			std::vector<int> binSeeds;                     // RNG seed per bin (reproducible growth)

			// Clamp the configured bin count: never more bins than trees, and
			// default to one bin per thread when the count is unset (< 1).
			inline void checkParameters(){
				if(fpSingleton::getSingleton().returnNumTreeBins() > fpSingleton::getSingleton().returnNumTrees()){
					fpSingleton::getSingleton().setNumTreeBins(fpSingleton::getSingleton().returnNumTrees());
				}
				if(fpSingleton::getSingleton().returnNumTreeBins() < 1){
					fpSingleton::getSingleton().setNumTreeBins(fpSingleton::getSingleton().returnNumThreads());
				}
			}

		public:
			~binnedBase(){}
			binnedBase(){
				checkParameters();
				numBins = fpSingleton::getSingleton().returnNumTreeBins();
				generateSeedsForBins();
			}

			// Draw one independent seed per bin from the singleton's RNG.
			inline void generateSeedsForBins(){
				binSeeds.resize(numBins);
				for(int i = 0; i < numBins; ++i){
					binSeeds[i] = fpSingleton::getSingleton().genRandom(std::numeric_limits<int>::max());
				}
			}

			inline void printForestType(){
				std::cout << "This is a binned forest.\n";
			}

			inline void changeForestSize(){
				bins.reserve(numBins);
			}

			// Distribute returnNumTrees() trees over numBins bins as evenly as
			// possible: every bin gets the floor share, and the first
			// `remainder` bins get one extra tree.
			inline void calcBinSizes(){
				int minBinSize = fpSingleton::getSingleton().returnNumTrees()/numBins;
				binSizes.resize(numBins,minBinSize);
				int remainingTreesToBin = fpSingleton::getSingleton().returnNumTrees()-minBinSize*numBins;
				while(remainingTreesToBin != 0){
					++binSizes[--remainingTreesToBin];
				}
			}

			// Grow every bin in parallel; each bin only touches bins[j], so the
			// loop iterations are independent.
			inline void growBins(){
				calcBinSizes();
				fpDisplayProgress printProgress;  // NOTE(review): currently unused here
				bins.resize(numBins);
#pragma omp parallel for num_threads(fpSingleton::getSingleton().returnNumThreads())
				for(int j = 0; j < numBins; ++j){
					bins[j].createBin(binSizes[j], binSeeds[j]);
				}
				std::cout << "\n"<< std::flush;
			}

			// Out-of-bag error is not implemented for binned forests; the -1
			// sentinel signals "unavailable" to callers.
			inline float reportOOB(){
				return -1;
			}

			// Aggregate per-bin tree statistics (max depth, leaf counts/depths).
			inline std::map<std::string, int> calcBinStats(){
				int maxDepth=0;
				int totalLeafNodes=0;
				int totalLeafDepth=0;
				int tempMaxDepth;
				for(int i = 0; i < numBins; ++i){
					tempMaxDepth = bins[i].returnMaxDepth();
					maxDepth = ((maxDepth < tempMaxDepth) ? tempMaxDepth : maxDepth);
					totalLeafNodes += bins[i].returnNumLeafNodes();
					totalLeafDepth += bins[i].returnLeafDepthSum();
				}
				std::map<std::string, int> binStats;
				binStats["maxDepth"] = maxDepth;
				binStats["totalLeafDepth"] = totalLeafDepth;
				binStats["totalLeafNodes"] = totalLeafNodes;
				return binStats;
			}

			// Print a human-readable summary of the forest's structure.
			inline void binStats(){
				std::map<std::string, int> binStats = calcBinStats();
				std::cout << "max depth: " << binStats["maxDepth"] << "\n";
				std::cout << "avg leaf node depth: " << float(binStats["totalLeafDepth"])/float(binStats["totalLeafNodes"]) << "\n";
				std::cout << "avg num leaf nodes per tree: " << binStats["totalLeafNodes"]/fpSingleton::getSingleton().returnNumTrees() << "\n";
				std::cout << "num leaf nodes: " << binStats["totalLeafNodes"] << "\n";
			}

			void printBin0(){
				bins[0].printBin();
			}

			// Grow the whole forest and print its statistics.
			inline void growForest(){
				//	checkParameters();
				//TODO: change this so forest isn't grown dynamically.
				//changeForestSize();
				growBins();
				binStats();
			}

			// Predict the class of the observationNumber-th stored observation
			// by majority vote over all trees.
			// NOTE(review): all bins write into the shared `predictions` vector
			// from parallel threads -- this is only safe if
			// predictBinObservation() updates it atomically; confirm in
			// binStruct before trusting under num_threads > 1.
			// NOTE(review): assert() relies on <cassert> arriving via an
			// included header -- confirm.
			inline int predictClass(int observationNumber){
				std::vector<int> predictions(fpSingleton::getSingleton().returnNumClasses(),0);
#pragma omp parallel for num_threads(fpSingleton::getSingleton().returnNumThreads())
				for(int k = 0; k < numBins; ++k){
					bins[k].predictBinObservation(observationNumber, predictions);
				}
				assert(std::accumulate(predictions.begin(), predictions.end(),0) == fpSingleton::getSingleton().returnNumTrees());
				int bestClass = 0;
				for(int j = 1; j < fpSingleton::getSingleton().returnNumClasses(); ++j){
					if(predictions[bestClass] < predictions[j]){
						bestClass = j;
					}
				}
				return bestClass;
			}

			// Predict the class of an out-of-core observation vector by
			// majority vote (same shared-vector caveat as above).
			inline int predictClass(std::vector<T>& observation){
				std::vector<int> predictions(fpSingleton::getSingleton().returnNumClasses(),0);
#pragma omp parallel for num_threads(fpSingleton::getSingleton().returnNumThreads())
				for(int k = 0; k < numBins; ++k){
					bins[k].predictBinObservation(observation, predictions);
				}
				int bestClass = 0;
				for(int j = 1; j < fpSingleton::getSingleton().returnNumClasses(); ++j){
					if(predictions[bestClass] < predictions[j]){
						bestClass = j;
					}
				}
				return bestClass;
			}

			// Return the raw per-class vote counts instead of the argmax.
			inline std::vector<int> predictClassPost(std::vector<T>& observation){
				std::vector<int> predictions(fpSingleton::getSingleton().returnNumClasses(),0);
#pragma omp parallel for num_threads(fpSingleton::getSingleton().returnNumThreads())
				for(int k = 0; k < numBins; ++k){
					bins[k].predictBinObservation(observation, predictions);
				}
				return predictions;
			}

			// Raw-pointer overload: intentionally disabled (always returns 0);
			// the vote-counting implementation is kept commented out below.
			inline int predictClass(const T* observation){
				/*
				   std::vector<int> predictions(fpSingleton::getSingleton().returnNumClasses(),0);

#pragma omp parallel for num_threads(fpSingleton::getSingleton().returnNumThreads())
for(int k = 0; k < numBins; ++k){
bins[k].predictBinObservation(observation, predictions);
}

int bestClass = 0;
for(int j = 1; j < fpSingleton::getSingleton().returnNumClasses(); ++j){
if(predictions[bestClass] < predictions[j]){
bestClass = j;
}
}
return bestClass;
*/
				return 0;
			}

			inline std::map<std::pair<int, int>, double> returnPairMat(){
				return pairMat;
			}

			// Fraction of stored observations whose predicted class differs
			// from the test label (error rate in [0, 1]).
			inline float testForest(){
				int numTried = 0;
				int numWrong = 0;
				for (int i = 0; i <fpSingleton::getSingleton().returnNumObservations();i++){
					++numTried;
					int predClass = predictClass(i);

					if(predClass != fpSingleton::getSingleton().returnTestLabel(i)){
						++numWrong;
					}
				}
				std::cout << "\nnumWrong= " << numWrong << "\n";

				return (float)numWrong/(float)numTried;
			}
	};

}// namespace fp
#endif //binnedBase_h
GB_unaryop__ainv_fp64_int64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_fp64_int64
// op(A') function:  GB_tran__ainv_fp64_int64

// C type:   double
// A type:   int64_t
// cast:     double cij = (double) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_FP64 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise: Cx [p] = (double) (-Ax [p]) for all anz entries, split
// statically across nthreads.  Returns GrB_NO_VALUE when the operator has
// been compiled out via GB_DISABLE (caller falls back to the generic path).
GrB_Info GB_unop__ainv_fp64_int64
(
    double *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in the shared template
// GB_unaryop_transpose.c, specialized here by the GB_* macros above.
GrB_Info GB_tran__ainv_fp64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 16; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; 
t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
run.c
// Taken from https://benchmarksgame-team.pages.debian.net/benchmarksgame/program/binarytrees-gcc-3.html // The Computer Language Benchmarks Game // https://salsa.debian.org/benchmarksgame-team/benchmarksgame/ // // Contributed by Jeremy Zerfas // Based on the C++ program from Jon Harrop, Alex Mizrahi, and Bruno Coutinho. // *reset* // // License: https://benchmarksgame-team.pages.debian.net/benchmarksgame/license.html // This controls the width of lines that are output by this program. #define MAXIMUM_LINE_WIDTH 60 #include <stdint.h> #include <stdlib.h> #include <stdio.h> typedef off_t off64_t; // This is needed to keep APR happy on 32 bit systems. #include <apr_pools.h> // intptr_t should be the native integer type on most sane systems. typedef intptr_t intnative_t; typedef struct tree_node{ struct tree_node * left_Node, * right_Node; } tree_node; // Create a binary tree of depth tree_Depth in memory_Pool, set the root node's // value to root_Node_Value, and finally return a pointer to the created binary // tree. static inline tree_node * create_Tree(const intnative_t tree_Depth, apr_pool_t * const memory_Pool){ tree_node * const root_Node=apr_palloc(memory_Pool, sizeof(tree_node)); // If tree_Depth is one or more then recursively call create_Tree() in order // to create the left and right subtrees using 2*root_Node_Value-1 and // 2*root_Node_Value respectively as the root values for those subtrees. if(tree_Depth>0){ root_Node->left_Node=create_Tree(tree_Depth-1, memory_Pool); root_Node->right_Node=create_Tree(tree_Depth-1, memory_Pool); }else root_Node->left_Node=root_Node->right_Node=NULL; return root_Node; } // Compute and return the checksum for the binary tree that has root_Node as the // root node. static inline intnative_t compute_Tree_Checksum( const tree_node * const root_Node){ // If there are subtrees then recursively call compute_Tree_Checksum() on // them and factor their values into the checksum, otherwise just return // the value of root_Node. 
if(root_Node->left_Node) return compute_Tree_Checksum(root_Node->left_Node)+ compute_Tree_Checksum(root_Node->right_Node)+1; else return 1; } int main(int argc, char ** argv){ // Set minimum_Tree_Depth to 4 and maximum_Tree_Depth to the maximum of what // was specified as the argument to the program and minimum_Tree_Depth+2. const intnative_t minimum_Tree_Depth=4; intnative_t maximum_Tree_Depth=atoi(argv[1]); if(maximum_Tree_Depth < minimum_Tree_Depth+2) maximum_Tree_Depth=minimum_Tree_Depth+2; apr_initialize(); apr_pool_t * memory_Pool; // Create a memory pool, create a binary tree of depth maximum_Tree_Depth+1, // compute the checksum of the binary tree, print the statistics, and then // delete the memory pool. apr_pool_create_unmanaged(&memory_Pool); tree_node * stretch_Tree=create_Tree(maximum_Tree_Depth+1, memory_Pool); printf("stretch tree of depth %jd\t check: %jd\n", (intmax_t)maximum_Tree_Depth+1, (intmax_t)compute_Tree_Checksum(stretch_Tree)); apr_pool_destroy(memory_Pool); // Create a memory pool and then create a long-lived binary tree of depth // maximum_Tree_Depth which will be left alone for a while while // more binary trees get allocated and deallocaited as required by the // rules. We'll finish working with this later. apr_pool_create_unmanaged(&memory_Pool); tree_node * long_Lived_Tree=create_Tree(maximum_Tree_Depth, memory_Pool); // Create a lot of binary trees in parallel of depths ranging from // minimum_Tree_Depth to maximum_Tree_Depth, compute and tally up all their // checksums, destroy the trees, and then record the statistics to // output_Buffer[] so they can be displayed in order later. 
char output_Buffer[maximum_Tree_Depth+1][MAXIMUM_LINE_WIDTH+1]; intnative_t current_Tree_Depth; #pragma omp parallel for for(current_Tree_Depth=minimum_Tree_Depth; current_Tree_Depth<=maximum_Tree_Depth; current_Tree_Depth+=2){ intnative_t iterations=1<<(maximum_Tree_Depth-current_Tree_Depth+ minimum_Tree_Depth); // Create a memory pool for this thread to use. apr_pool_t * thread_Memory_Pool; apr_pool_create_unmanaged(&thread_Memory_Pool); intnative_t i=1, total_Trees_Checksum=0; for(; i<=iterations; ++i){ // Create a binary tree of depth current_Tree_Depth tree_node * const tree_1=create_Tree(current_Tree_Depth, thread_Memory_Pool); total_Trees_Checksum+=compute_Tree_Checksum(tree_1); apr_pool_clear(thread_Memory_Pool); } apr_pool_destroy(thread_Memory_Pool); // Record the statistics for the trees of depth current_Tree_Depth. sprintf(output_Buffer[current_Tree_Depth], "%jd\t trees of depth %jd\t check: %jd\n", (intmax_t)iterations, (intmax_t)current_Tree_Depth, (intmax_t)total_Trees_Checksum); } // Print the statistics for all of the various tree depths. for(current_Tree_Depth=minimum_Tree_Depth; current_Tree_Depth<=maximum_Tree_Depth; current_Tree_Depth+=2) printf("%s", output_Buffer[current_Tree_Depth]); // Compute the checksum of the long-lived binary tree that we created // earlier, print the statistics, and then delete the memory pool. printf("long lived tree of depth %jd\t check: %jd\n", (intmax_t)maximum_Tree_Depth, (intmax_t)compute_Tree_Checksum(long_Lived_Tree)); apr_pool_destroy(memory_Pool); apr_terminate(); return 0; }
conv3x3s1_winograd64_pack4_neon_permute.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "option.h" #include "mat.h" namespace ncnn{ static void conv3x3s1_winograd64_pack4_neon_permute(const Mat& bottom_blob, Mat& top_blob, const Option& opt, int outch, int inch, int outh, int outw) { size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; Mat bottom_blob_tm = bottom_blob; //Mat bottom_blob_tm2 = top_blob; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm/8 * w_tm/8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2 = top_blob; #if __aarch64__ if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles/12 + (tiles%12)/8 + (tiles%12%8)/4 + (tiles%12%4)/2 + tiles%12%2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles/8 + (tiles%8)/4 + (tiles%4)/2 + tiles%2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles/4 + (tiles%4)/2 + tiles%2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles/2 + tiles%2, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator); #else 
if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles/8 + (tiles%8)/4 + (tiles%4)/2 + tiles%2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles/4 + (tiles%4)/2 + tiles%2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles/2 + tiles%2, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r=0; r<64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i=0; #if __aarch64__ for (; i+11<tiles; i+=12) { float* tm2p = tm2.row(i/12); const float* r0 = bottom_blob_tm; r0 += (r*tiles + i) * 4; for (int q=0; q<inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v8.4s}, [%1], #16 \n" "sub %0, %0, #128 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v10.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v11.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11" ); r0 += bottom_blob_tm.cstep * 4; } } #endif for (; i+7<tiles; i+=8) { #if __aarch64__ float* tm2p = tm2.row(i/12 + (i%12)/8); #else float* tm2p = tm2.row(i/8); #endif const float* r0 = bottom_blob_tm; r0 += (r*tiles + i) * 4; for (int q=0; q<inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v4.4s, v5.4s, 
v6.4s, v7.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" "sub %0, %0, #64 \n" "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" ); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "pld [%0, #512] \n" "vldm %0, {d16-d23} \n" // transpose 8x4 "vtrn.32 q0, q1 \n" "vtrn.32 q2, q3 \n" "vtrn.32 q8, q9 \n" "vtrn.32 q10, q11 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vswp d17, d20 \n" "vswp d19, d22 \n" "vswp q1, q8 \n" "vswp q3, q10 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "sub %0, %0, #64 \n" "vst1.f32 {d4-d7}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11" ); #endif r0 += bottom_blob_tm.cstep * 4; } } for (; i+3<tiles; i+=4) { #if __aarch64__ float* tm2p = tm2.row(i/12 + (i%12)/8 + (i%12%8)/4); #else float* tm2p = tm2.row(i/8 + (i%8)/4); #endif const float* r0 = bottom_blob_tm; r0 += (r*tiles + i) * 4; for (int q=0; q<inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3" ); #else asm volatile( "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" "vstm %1!, {d0-d7} \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3" ); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i+1<tiles; i+=2) { #if __aarch64__ float* tm2p = tm2.row(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2); #else float* tm2p = tm2.row(i/8 + (i%8)/4 + (i%4)/2); #endif const float* r0 = bottom_blob_tm; r0 += (r*tiles + i) * 4; for (int q=0; q<inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1], #32 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1" ); #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0 :128] \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1" ); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i<tiles; i++) { #if __aarch64__ float* tm2p = tm2.row(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2); #else float* tm2p = tm2.row(i/8 + (i%8)/4 + (i%4)/2 + i%2); #endif const float* r0 = bottom_blob_tm; r0 += (r*tiles + i) * 4; for (int q=0; q<inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0" ); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0" ); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } } } } }
5030.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "atax.h" /* Array initialization. */ static void init_array (int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny)) { int i, j; for (i = 0; i < ny; i++) x[i] = i * M_PI; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) A[i][j] = ((DATA_TYPE) i*(j+1)) / nx; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int nx, DATA_TYPE POLYBENCH_1D(y,NX,nx)) { int i; for (i = 0; i < nx; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_atax(int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny), DATA_TYPE POLYBENCH_1D(y,NY,ny), DATA_TYPE POLYBENCH_1D(tmp,NX,nx)) { int i, j; #pragma scop #pragma omp parallel num_threads(4) { #pragma omp for schedule(static, 1) for (i = 0; i < _PB_NY; i++) y[i] = 0; #pragma omp for private (j) schedule(static, 1) for (i = 0; i < _PB_NX; i++) { tmp[i] = 0; for (j = 0; j < _PB_NY; j++) tmp[i] = tmp[i] + A[i][j] * x[j]; for (j = 0; j < _PB_NY; j++) y[j] = y[j] + A[i][j] * tmp[i]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int nx = NX; int ny = NY; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny); POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx); /* Initialize array(s). */ init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_atax (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(x); POLYBENCH_FREE_ARRAY(y); POLYBENCH_FREE_ARRAY(tmp); return 0; }
convolution_pack8to1_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Naive int8 convolution kernel using MIPS MSA intrinsics: the input blob is
// packed 8 channels per element, the output blob is 1 channel per element.
// One output value is produced per (p, i, j) by accumulating, over all input
// channels and all kernel taps, the dot product of 8 packed int8 lanes.
static void convolution_pack8to1_int8_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_int8, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets
    // space_ofs[k] is the element offset (in packed elements, not bytes) of
    // kernel tap k relative to the window's top-left element, accounting for
    // dilation in both directions.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    // num_output
    // Each output channel p is independent, so the outer loop is parallelized.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        int* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // 4 x int32 accumulator; lanes are summed at the end.
                v4i32 _sum = __msa_fill_w(0);

                const signed char* kptr = weight_data_int8.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    // 8 packed int8 values per element, hence the * 8 stride.
                    const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w * 8;

                    for (int k = 0; k < maxk; k++)
                    {
                        v16i8 _val = __msa_ld_b(sptr + space_ofs[k] * 8, 0);
                        // Sign-extend the low 8 int8 lanes to int16 by
                        // interleaving with their sign mask (clti_s_b < 0).
                        v8i16 _val16 = (v8i16)__msa_ilvr_b(__msa_clti_s_b(_val, 0), _val);

                        v16i8 _w = __msa_ld_b(kptr, 0);
                        v8i16 _w16 = (v8i16)__msa_ilvr_b(__msa_clti_s_b(_w, 0), _w);

                        // Widening multiply-accumulate: int16 products are
                        // pairwise-added into the int32 accumulator.
                        v8i16 _s0 = __msa_mulv_h(_val16, _w16);
                        _sum = __msa_addv_w(_sum, __msa_hadd_s_w(_s0, _s0));

                        kptr += 8;
                    }
                }

                // Horizontal reduction of the 4 accumulator lanes.
                outptr[j] = __msa_reduce_add_w(_sum);
            }

            outptr += outw;
        }
    }
}
set.c
/*****************************************************************************
 * set.c: h264 encoder library
 *****************************************************************************
 * Copyright (C) 2005-2008 Loren Merritt <lorenm@u.washington.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *****************************************************************************/

#include "common.h"
#include <omp.h>

/* Rounding right-shift by s (left shift when s is negative). */
#define SHIFT(x,s) ((s)<0 ? (x)<<-(s) : (s)==0 ? (x) : ((x)+(1<<((s)-1)))>>(s))
/* Rounded integer division. */
#define DIV(n,d) (((n) + ((d)>>1)) / (d))

static const int dequant4_scale[6][3] =
{
    { 10, 13, 16 },
    { 11, 14, 18 },
    { 13, 16, 20 },
    { 14, 18, 23 },
    { 16, 20, 25 },
    { 18, 23, 29 }
};
static const int quant4_scale[6][3] =
{
    { 13107, 8066, 5243 },
    { 11916, 7490, 4660 },
    { 10082, 6554, 4194 },
    {  9362, 5825, 3647 },
    {  8192, 5243, 3355 },
    {  7282, 4559, 2893 },
};

static const int quant8_scan[16] =
{
    0,3,4,3, 3,1,5,1, 4,5,2,5, 3,1,5,1
};
static const int dequant8_scale[6][6] =
{
    { 20, 18, 32, 19, 25, 24 },
    { 22, 19, 35, 21, 28, 26 },
    { 26, 23, 42, 24, 33, 31 },
    { 28, 25, 45, 26, 35, 33 },
    { 32, 28, 51, 30, 40, 38 },
    { 36, 32, 58, 34, 46, 43 },
};
static const int quant8_scale[6][6] =
{
    { 13107, 11428, 20972, 12222, 16777, 15481 },
    { 11916, 10826, 19174, 11058, 14980, 14290 },
    { 10082,  8943, 15978,  9675, 12710, 11985 },
    {  9362,  8228, 14913,  8931, 11984, 11259 },
    {  8192,  7346, 13159,  7740, 10486,  9777 },
    {  7282,  6428, 11570,  6830,  9118,  8640 }
};

/* Build the (de)quantization multiplier and bias tables from the PPS scaling
 * lists.  Returns 0 on success, -1 if the CQM overflows 16-bit quant
 * multipliers at an allowed QP.
 *
 * NOTE: the orphaned "#pragma omp for" directives that used to precede the
 * pointer-sharing and table-precompute loops were removed.  Outside a
 * parallel region they were no-ops; inside one they would have raced on the
 * shared loop variables i/j declared at function scope. */
int x264_cqm_init( x264_t *h )
{
    int def_quant4[6][16];
    int def_quant8[6][64];
    int def_dequant4[6][16];
    int def_dequant8[6][64];
    int quant4_mf[4][6][4][4];
    int quant8_mf[2][6][8][8];
    int q, i, j, i_list;
    int deadzone[4] = { 32 - h->param.analyse.i_luma_deadzone[1],
                        32 - h->param.analyse.i_luma_deadzone[0],
                        32 - 11, 32 - 21 };
    int max_qp_err = -1;

    /* Share storage between identical scaling lists; otherwise allocate. */
    for( i = 0; i < 6; i++ )
    {
        int size = i<4 ? 16 : 64;
        for( j = (i<4 ? 0 : 4); j < i; j++ )
            if( !memcmp( h->pps->scaling_list[i], h->pps->scaling_list[j], size*sizeof(uint8_t) ) )
                break;
        if( j < i )
        {
            h-> quant4_mf[i] = h-> quant4_mf[j];
            h->dequant4_mf[i] = h->dequant4_mf[j];
            h->unquant4_mf[i] = h->unquant4_mf[j];
        }
        else
        {
            h-> quant4_mf[i] = x264_malloc(52*size*sizeof(uint16_t) );
            h->dequant4_mf[i] = x264_malloc( 6*size*sizeof(int) );
            h->unquant4_mf[i] = x264_malloc(52*size*sizeof(int) );
        }

        /* Bias tables can additionally be shared only when deadzones match. */
        for( j = (i<4 ? 0 : 4); j < i; j++ )
            if( deadzone[j&3] == deadzone[i&3] &&
                !memcmp( h->pps->scaling_list[i], h->pps->scaling_list[j], size*sizeof(uint8_t) ) )
                break;
        if( j < i )
            h->quant4_bias[i] = h->quant4_bias[j];
        else
            h->quant4_bias[i] = x264_malloc(52*size*sizeof(uint16_t) );
    }

    /* Expand the per-position default scales for the 6 QP%6 classes. */
    for( q = 0; q < 6; q++ )
    {
        for( i = 0; i < 16; i++ )
        {
            int j = (i&1) + ((i>>2)&1);
            def_dequant4[q][i] = dequant4_scale[q][j];
            def_quant4[q][i]   =   quant4_scale[q][j];
        }
        for( i = 0; i < 64; i++ )
        {
            int j = quant8_scan[((i>>1)&12) | (i&3)];
            def_dequant8[q][i] = dequant8_scale[q][j];
            def_quant8[q][i]   =   quant8_scale[q][j];
        }
    }

    /* FIX: i and i_list are declared at function scope and were implicitly
     * shared across the parallel loop, causing a data race; they must be
     * privatized.  q (the loop variable) is private automatically. */
    #pragma omp parallel for private(i, i_list)
    for( q = 0; q < 6; q++ )
    {
        for( i_list = 0; i_list < 4; i_list++ )
            for( i = 0; i < 16; i++ )
            {
                h->dequant4_mf[i_list][q][0][i] = def_dequant4[q][i] * h->pps->scaling_list[i_list][i];
                     quant4_mf[i_list][q][0][i] = DIV(def_quant4[q][i] * 16, h->pps->scaling_list[i_list][i]);
            }
        for( i_list = 0; i_list < 2; i_list++ )
            for( i = 0; i < 64; i++ )
            {
                h->dequant8_mf[i_list][q][0][i] = def_dequant8[q][i] * h->pps->scaling_list[4+i_list][i];
                     quant8_mf[i_list][q][0][i] = DIV(def_quant8[q][i] * 16, h->pps->scaling_list[4+i_list][i]);
            }
    }

    /* Derive per-QP quant/unquant/bias tables.  This loop assigns through j
     * and accumulates max_qp_err, so it stays sequential. */
    for( q = 0; q < 52; q++ )
    {
        for( i_list = 0; i_list < 4; i_list++ )
            for( i = 0; i < 16; i++ )
            {
                h->unquant4_mf[i_list][q][i] = (1ULL << (q/6 + 15 + 8)) / quant4_mf[i_list][q%6][0][i];
                h-> quant4_mf[i_list][q][i] = j = SHIFT(quant4_mf[i_list][q%6][0][i], q/6 - 1);
                // round to nearest, unless that would cause the deadzone to be negative
                h->quant4_bias[i_list][q][i] = X264_MIN( DIV(deadzone[i_list]<<10, j), (1<<15)/j );
                if( j > 0xffff && q > max_qp_err )
                    max_qp_err = q;
            }
        if( h->param.analyse.b_transform_8x8 )
            for( i_list = 0; i_list < 2; i_list++ )
                for( i = 0; i < 64; i++ )
                {
                    h->unquant8_mf[i_list][q][i] = (1ULL << (q/6 + 16 + 8)) / quant8_mf[i_list][q%6][0][i];
                    h-> quant8_mf[i_list][q][i] = j = SHIFT(quant8_mf[i_list][q%6][0][i], q/6);
                    h->quant8_bias[i_list][q][i] = X264_MIN( DIV(deadzone[i_list]<<10, j), (1<<15)/j );
                    if( j > 0xffff && q > max_qp_err )
                        max_qp_err = q;
                }
    }

    if( !h->mb.b_lossless && max_qp_err >= h->param.rc.i_qp_min )
    {
        x264_log( h, X264_LOG_ERROR, "Quantization overflow.\n" );
        x264_log( h, X264_LOG_ERROR, "Your CQM is incompatible with QP < %d, but min QP is set to %d\n",
                  max_qp_err+1, h->param.rc.i_qp_min );
        return -1;
    }
    return 0;
}

/* Free the quant tables, taking care not to double-free storage that is
 * shared between identical scaling lists. */
void x264_cqm_delete( x264_t *h )
{
    int i, j;
    for( i = 0; i < 6; i++ )
    {
        for( j = 0; j < i; j++ )
            if( h->quant4_mf[i] == h->quant4_mf[j] )
                break;
        if( j == i )
        {
            x264_free( h-> quant4_mf[i] );
            x264_free( h->dequant4_mf[i] );
            x264_free( h->unquant4_mf[i] );
        }
        for( j = 0; j < i; j++ )
            if( h->quant4_bias[i] == h->quant4_bias[j] )
                break;
        if( j == i )
            x264_free( h->quant4_bias[i] );
    }
}

/* Parse one JM-style coefficient list named `name` out of `buf` into `cqm`.
 * A leading 0 coefficient selects the JVT default list `jvt`; a missing list
 * selects flat 16s.  Returns 0 on success, -1 on malformed input. */
static int x264_cqm_parse_jmlist( x264_t *h, const char *buf, const char *name,
                                  uint8_t *cqm, const uint8_t *jvt, int length )
{
    char *p;
    char *nextvar;
    int i;

    p = strstr( buf, name );
    if( !p )
    {
        memset( cqm, 16, length );
        return 0;
    }

    p += strlen( name );
    if( *p == 'U' || *p == 'V' )
        p++;

    nextvar = strstr( p, "INT" );

    for( i = 0; i < length && (p = strpbrk( p, " \t\n," )) && (p = strpbrk( p, "0123456789" )); i++ )
    {
        int coef = -1;
        sscanf( p, "%d", &coef );
        if( i == 0 && coef == 0 )
        {
            memcpy( cqm, jvt, length );
            return 0;
        }
        if( coef < 1 || coef > 255 )
        {
            x264_log( h, X264_LOG_ERROR, "bad coefficient in list '%s'\n", name );
            return -1;
        }
        cqm[i] = coef;
    }

    if( (nextvar && p > nextvar) || i != length )
    {
        x264_log( h, X264_LOG_ERROR, "not enough coefficients in list '%s'\n", name );
        return -1;
    }

    return 0;
}

/* Load a custom quantization matrix file (JM format).  Comments ('#') are
 * blanked out before parsing.  Returns 0 on success, -1 on error. */
int x264_cqm_parse_file( x264_t *h, const char *filename )
{
    char *buf, *p;
    int b_error = 0;

    h->param.i_cqm_preset = X264_CQM_CUSTOM;

    buf = x264_slurp_file( filename );
    if( !buf )
    {
        x264_log( h, X264_LOG_ERROR, "can't open file '%s'\n", filename );
        return -1;
    }

    while( (p = strchr( buf, '#' )) != NULL )
        memset( p, ' ', strcspn( p, "\n" ) );

    b_error |= x264_cqm_parse_jmlist( h, buf, "INTRA4X4_LUMA", h->param.cqm_4iy, x264_cqm_jvt4i, 16 );
    b_error |= x264_cqm_parse_jmlist( h, buf, "INTRA4X4_CHROMA", h->param.cqm_4ic, x264_cqm_jvt4i, 16 );
    b_error |= x264_cqm_parse_jmlist( h, buf, "INTER4X4_LUMA", h->param.cqm_4py, x264_cqm_jvt4p, 16 );
    b_error |= x264_cqm_parse_jmlist( h, buf, "INTER4X4_CHROMA", h->param.cqm_4pc, x264_cqm_jvt4p, 16 );
    b_error |= x264_cqm_parse_jmlist( h, buf, "INTRA8X8_LUMA", h->param.cqm_8iy, x264_cqm_jvt8i, 64 );
    b_error |= x264_cqm_parse_jmlist( h, buf, "INTER8X8_LUMA", h->param.cqm_8py, x264_cqm_jvt8p, 64 );

    x264_free( buf );
    return b_error;
}
separation_objective.h
#pragma once #ifndef OPTIMIZATION_LIB_SEPARATION_OBJECTIVE_H #define OPTIMIZATION_LIB_SEPARATION_OBJECTIVE_H // STL includes #include <vector> // Optimization lib includes #include "../data_providers/plain_data_provider.h" #include "./dense_objective_function.h" template<Eigen::StorageOptions StorageOrder_> class Separation : public DenseObjectiveFunction<StorageOrder_> { public: /** * Public type definitions */ enum class Properties : int32_t { Delta = DenseObjectiveFunction<StorageOrder_>::Properties::Count_, ValuePerEdge }; /** * Constructors and destructor */ Separation(const std::shared_ptr<MeshDataProvider>& mesh_data_provider, const std::shared_ptr<EmptyDataProvider>& empty_data_provider) : DenseObjectiveFunction(mesh_data_provider, empty_data_provider, "Separation", 0, false) { this->Initialize(); } virtual ~Separation() { } /** * Setters */ void SetDelta(const double delta) { delta_ = delta; } bool SetProperty(const int32_t property_id, const std::any property_context, const std::any property_value) override { if(DenseObjectiveFunction<StorageOrder_>::SetProperty(property_id, property_context, property_value)) { return true; } const Properties properties = static_cast<Properties>(property_id); switch (properties) { case Properties::Delta: SetDelta(std::any_cast<const double>(property_value)); return true; } return false; } /** * Getters */ double GetDelta() const { return delta_; } bool GetProperty(const int32_t property_id, const int32_t property_modifier_id, const std::any property_context, std::any& property_value) override { if (DenseObjectiveFunction<StorageOrder_>::GetProperty(property_id, property_modifier_id, property_context, property_value)) { return true; } const Properties properties = static_cast<Properties>(property_id); switch (properties) { case Properties::Delta: property_value = GetDelta(); return true; } return false; } private: /** * Overrides */ void CalculateValue(double& f) override { EsepP = Esep * X; 
EsepP_squared.resize(EsepP.rows(), 2); int rows = EsepP.rows(); #pragma omp parallel for for(int i = 0; i < rows; i++) { EsepP_squared.coeffRef(i, 0) = EsepP.coeffRef(i, 0) * EsepP.coeffRef(i, 0); EsepP_squared.coeffRef(i, 1) = EsepP.coeffRef(i, 1) * EsepP.coeffRef(i, 1); } EsepP_squared_rowwise_sum = EsepP_squared.rowwise().sum(); EsepP_squared_rowwise_sum_plus_delta = EsepP_squared_rowwise_sum.array() + delta_; f_per_pair = EsepP_squared_rowwise_sum.cwiseQuotient(EsepP_squared_rowwise_sum_plus_delta); // add edge length factor f_per_pair = f_per_pair.cwiseProduct(edge_lenghts_per_pair); // sum everything up f = f_per_pair.sum(); } void CalculateValuePerVertex(Eigen::VectorXd& f_per_vertex) override { f_per_vertex.setZero(); int64_t vertex1_index; int64_t vertex2_index; #pragma omp parallel for for (int i = 0; i < Esept.outerSize(); ++i) { // no inner loop because there are only 2 nnz values per col Eigen::SparseMatrix<double>::InnerIterator it(Esept, i); int64_t vertex1_index = it.row(); int64_t vertex2_index = (++it).row(); f_per_vertex.coeffRef(vertex1_index) += EsepP_squared_rowwise_sum[i]; f_per_vertex.coeffRef(vertex2_index) += EsepP_squared_rowwise_sum[i]; } } void CalculateValuePerEdge(Eigen::VectorXd& domain_value_per_edge, Eigen::VectorXd& image_value_per_edge) override { } void CalculateGradient(Eigen::VectorXd& g) override { Eigen::MatrixX2d ge; Eigen::VectorXd d_vec = Eigen::VectorXd::Constant(EsepP_squared_rowwise_sum.rows(), delta_); Eigen::VectorXd x_plus_d = EsepP_squared_rowwise_sum + d_vec; Eigen::VectorXd d = d_vec.cwiseQuotient(x_plus_d.cwiseAbs2()); ge = 2.0 * Esept * d.cwiseProduct(edge_lenghts_per_pair).asDiagonal() * EsepP; g = Eigen::Map<Eigen::VectorXd>(ge.data(), 2.0 * ge.rows(), 1); } void PreUpdate(const Eigen::VectorXd& x) override { X = Eigen::Map<const Eigen::MatrixX2d>(x.data(), x.rows() >> 1, 2); } void PreInitialize() override { Esep4 << 1, 0, -1, 0, 0, 1, 0, -1, -1, 0, 1, 0, 0, -1, 0, 1; Esep = 
this->mesh_data_provider_->GetCorrespondingVertexPairsCoefficients(); Esept = Esep.transpose(); edge_lenghts_per_pair = this->mesh_data_provider_->GetCorrespondingVertexPairsEdgeLength(); } void InitializeTriplets(std::vector<Eigen::Triplet<double>>& triplets) override { const int64_t outer_size = Esept.outerSize(); triplets.reserve(10 * outer_size); auto image_vertices_count = this->mesh_data_provider_->GetImageVerticesCount(); for (int i = 0; i < outer_size; ++i) { Eigen::SparseMatrix<double>::InnerIterator it(Esept, i); int idx_xi = it.row(); int idx_xj = (++it).row(); // The indices in the small hessians are setup like this: // xi, xi+n, xj, xj+n from top to bottom and left to right // we traverse only the upper diagonal of each 4x4 hessian // and thus store 10 values, gathered in column order. // First column triplets.push_back(Eigen::Triplet<double>(idx_xi, idx_xi, 0)); // Second column triplets.push_back(Eigen::Triplet<double>(idx_xi, idx_xi + image_vertices_count, 0)); triplets.push_back(Eigen::Triplet<double>(idx_xi + image_vertices_count, idx_xi + image_vertices_count, 0)); // Third column triplets.push_back(Eigen::Triplet<double>(idx_xi, idx_xj, 0)); triplets.push_back(Eigen::Triplet<double>(idx_xj, idx_xi + image_vertices_count, 0)); triplets.push_back(Eigen::Triplet<double>(idx_xj, idx_xj, 0)); // Fourth column triplets.push_back(Eigen::Triplet<double>(idx_xi, idx_xj + image_vertices_count, 0)); triplets.push_back(Eigen::Triplet<double>(idx_xi + image_vertices_count, idx_xj + image_vertices_count, 0)); triplets.push_back(Eigen::Triplet<double>(idx_xj, idx_xj + image_vertices_count, 0)); triplets.push_back(Eigen::Triplet<double>(idx_xj + image_vertices_count, idx_xj + image_vertices_count, 0)); } } void CalculateRawTriplets(std::vector<Eigen::Triplet<double>>& triplets) override { // no inner loop because there are only 2 nnz values per col #pragma omp parallel for for (int i = 0; i < Esept.outerSize(); ++i) { Eigen::Vector2d xi, xj; Eigen::Matrix4d sh; 
int idx_xi; int idx_xj; int factor; Eigen::SparseMatrix<double>::InnerIterator it(Esept, i); idx_xi = it.row(); factor = it.value(); idx_xj = (++it).row(); xi = X.row(idx_xi); xj = X.row(idx_xj); FindSingleHessian(xi, xj, sh); sh *= factor; sh *= edge_lenghts_per_pair(i); int ind = 10 * i; for (int a = 0; a < 4; ++a) { for (int b = 0; b <= a; ++b) { const_cast<double&>(triplets[ind++].value()) = sh(b, a); } } } } /** * Private methods */ void FindSingleHessian(const Eigen::Vector2d& xi, const Eigen::Vector2d& xj, Eigen::Matrix4d& H) { bool speedup = true; Eigen::Vector2d dx = xi - xj; Eigen::Vector4d dxx; dxx << dx, -dx; double t = 0.5 * dx.squaredNorm(); double fp, fpp; fp = delta_ / ((t + delta_) * (t + delta_)); Eigen::Matrix4d Esep4; Esep4 << 1, 0, -1, 0, 0, 1, 0, -1, -1, 0, 1, 0, 0, -1, 0, 1; H = fp * Esep4; } /** * Fields */ double delta_ = 1.0; Eigen::MatrixX2d X; Eigen::SparseMatrix<double> Esep; Eigen::SparseMatrix<double> Esept; Eigen::MatrixX2d EsepP; Eigen::Matrix4d Esep4; Eigen::MatrixX2d EsepP_squared; Eigen::VectorXd f_per_pair; Eigen::VectorXd edge_lenghts_per_pair; Eigen::VectorXd EsepP_squared_rowwise_sum; Eigen::VectorXd EsepP_squared_rowwise_sum_plus_delta; }; #endif
GB_unop__ainv_uint16_uint16.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__ainv_uint16_uint16
// op(A') function:  GB_unop_tran__ainv_uint16_uint16

// C type:   uint16_t
// A type:   uint16_t
// cast:     uint16_t cij = aij
// unaryop:  cij = -aij
// (note: arithmetic negation of uint16_t is well-defined and wraps mod 2^16)

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CAST(z, aij) \
    uint16_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint16_t aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    uint16_t z = aij ;               \
    Cx [pC] = -z ;        \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator element-wise over anz entries.  Cx and Ax may be
// aliased; each entry is read before its result is written, so in-place
// application is safe here.
GrB_Info GB_unop_apply__ainv_uint16_uint16
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
            // identity operator with no typecast: a plain parallel memcpy
            GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
        #else
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (p = 0 ; p < anz ; p++)
            {
                uint16_t aij = Ax [p] ;
                uint16_t z = aij ;
                Cx [p] = -z ;
            }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // entries with Ab [p] == 0 are absent and skipped
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            uint16_t z = aij ;
            Cx [p] = -z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose-and-apply loop body is supplied by GB_unop_transpose.c,
// driven by the GB_* macros defined above.
GrB_Info GB_unop_tran__ainv_uint16_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ten_tusscher_2004_epi_S2_8.c
//Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S2_8.h"

// Reports the model's initial transmembrane voltage and the number of ODE
// state variables (NEQ), as requested by the flags in the macro signature.
GET_CELL_MODEL_DATA(init_cell_model_data) {

    assert(cell_model);

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

//TODO: this should be called only once for the whole mesh, like in the GPU code
// Loads a precomputed steady-state vector into sv (one value per equation),
// replacing the commented-out textbook defaults below.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    // Default initial conditions
    /*
    sv[0]  = INITIAL_V; // V;       millivolt
    sv[1]  = 0.f;       // M
    sv[2]  = 0.75;      // H
    sv[3]  = 0.75f;     // J
    sv[4]  = 0.f;       // Xr1
    sv[5]  = 1.f;       // Xr2
    sv[6]  = 0.f;       // Xs
    sv[7]  = 1.f;       // S
    sv[8]  = 0.f;       // R
    sv[9]  = 0.f;       // D
    sv[10] = 1.f;       // F
    sv[11] = 1.f;       // FCa
    sv[12] = 1.f;       // G
    sv[13] = 0.0002;    // Cai
    sv[14] = 0.2f;      // CaSR
    sv[15] = 11.6f;     // Nai
    sv[16] = 138.3f;    // Ki
    */

    // Elnaz's steady-state initial conditions
    real sv_sst[]={-86.5842952036377,0.00128565944387103,0.780099820495480,0.779891429745287,0.000174393739918514,0.485256917711117,0.00293695180405064,0.999998353377971,1.92835627097292e-08,1.88702895818503e-05,0.999771688553227,1.00752249338212,0.999998992047043,3.50314526199846e-05,0.385259168087778,10.9299732336017,138.769394935141};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}

// Advances every requested cell by num_steps explicit steps of size dt.
// Cells are independent, so the outer loop is parallelized; sv_id selects
// the cell's NEQ-sized slice of the state array.
// NOTE(review): stim_currents is indexed by the loop counter i, not by
// sv_id — presumably stimuli are supplied per position in
// cells_to_solve; confirm against the caller.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One time step for one cell: copy the state, evaluate the update in rDY,
// and write the new state back.  RHS_cpu returns the already-stepped values
// (not derivatives), so they are assigned directly.
void solve_model_ode_cpu(real dt, real *sv, real stim_current)  {

    assert(sv);

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// Ten Tusscher 2004 epicardial cell model right-hand side.  Computes all
// membrane currents from the current state sv, updates the ionic
// concentrations and gating variables over one step dt (Rush-Larsen for the
// gates, forward Euler for voltage and concentrations), and writes the new
// state into rDY_.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];
    real sm    = sv[1];
    real sh    = sv[2];
    real sj    = sv[3];
    real sxr1  = sv[4];
    real sxr2  = sv[5];
    real sxs   = sv[6];
    real ss    = sv[7];
    real sr    = sv[8];
    real sd    = sv[9];
    real sf    = sv[10];
    real sfca  = sv[11];
    real sg    = sv[12];
    real Cai   = sv[13];
    real CaSR  = sv[14];
    real Nai   = sv[15];
    real Ki    = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    ///    real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    ///    real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    //#ifdef EPI
    real Gto=0.294;
    //#endif
    // #ifdef ENDO
    //    real Gto=0.073;
    //#endif
    //#ifdef MCELL
    //    real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted parameter set (overrides the defaults above); "S2_8" variant.
    real parameters []={14.5907103927890,0.000229174696551543,0.000135600743214705,0.000244198204310948,0.234645147026502,0.147156616775923,0.132319220918311,4.50163672674724,0.0134440815134434,1.00008939093995,1100,0.000411655953120344,0.507706195372124,0.0197898387698969,0.00549475817614002,3.13375532573724e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    // Nernst/reversal potentials and rectification factors.
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // SR calcium: buffered update solved via the quadratic formula.
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // Cytosolic calcium: same buffered quadratic update.
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;

    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;

    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));

    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen exponential integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g gates may only relax (not reactivate) above -37 mV.
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler on the total current)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
scatter_int_avx2.c
// Scatter demo: copy numbers[mask[i]] into result[mask[i]] for the first
// SCALE random indices, once serially and once with an OpenMP SIMD loop,
// then verify that both result buffers agree element-wise.
#include <stdio.h>
#include <stdlib.h>
#include <time.h>   /* time() — was missing; time(NULL) was implicitly declared */

#define N 32000
#define SCALE 8

int main()
{
    srand(time(NULL));

    int *numbers = malloc(sizeof(int) * N);
    int *result1 = malloc(sizeof(int) * N);
    int *result2 = malloc(sizeof(int) * N);
    int *mask    = malloc(sizeof(int) * N);
    /* The original never checked the allocations. */
    if (!numbers || !result1 || !result2 || !mask) {
        fprintf(stderr, "allocation failure\n");
        free(numbers); free(result1); free(result2); free(mask);
        return EXIT_FAILURE;
    }

    /* Init: small random values, zeroed results, and random scatter
     * indices in [0, N) so every mask[i] is a valid array index. */
    for (int i = 0; i < N; i++) numbers[i] = rand() % 10;
    for (int i = 0; i < N; i++) { result1[i] = 0; result2[i] = 0; }
    for (int i = 0; i < N; i++) mask[i] = rand() % N;

    for (int i = 0; i < SCALE; i++) printf("%d ", numbers[i]);
    puts("\n---");
    for (int i = 0; i < SCALE; i++) printf("%d ", mask[i]);
    puts("\n---");
    puts("---------------------------------------------");

    /* Serial reference scatter. */
    for (int i = 0; i < SCALE; i++) {
        result1[mask[i]] = numbers[mask[i]];
    }

    /* SIMD scatter.  mask[] may contain duplicate indices, but both loops
     * write the same value per index, so the buffers must still match. */
#pragma omp simd simdlen(SCALE)
    for (int i = 0; i < SCALE; i++) {
        result2[mask[i]] = numbers[mask[i]];
    }

    /* Note: only the first SCALE slots are printed/compared, matching the
     * original program's behavior. */
    for (int i = 0; i < SCALE; i++) printf("%d ", result1[i]);
    puts("\n---");
    for (int i = 0; i < SCALE; i++) printf("%d ", result2[i]);
    puts("\n---");

    int errors = 0;
    for (int i = 0; i < SCALE; i++) {
        if (result1[i] != result2[i]) ++errors;
    }
    printf("Errors: %d\n", errors);

    /* All four buffers were leaked in the original. */
    free(numbers);
    free(result1);
    free(result2);
    free(mask);
    return 0;
}
outputdep-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* DataRaceBench positive test case: the data race on x is INTENTIONAL and
 * must not be "fixed" — this file exists to exercise race detectors. */
// This x should be not lastprivate since it is live-in
// x is both live-in and live-out, and written, cannot be reduction
//
// So, the loop cannot be parallelized
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char* argv[])
{
  int len=100;
  /* Optional first CLI argument overrides the array length. */
  if (argc>1)
    len = atoi(argv[1]);
  int a[len];   /* variable-length array sized at runtime */
  int i,x=10;

  /* RACE: x is shared by default under this pragma.  Every iteration both
   * reads x (into a[i]) and writes it, so a[i] may observe the value left
   * by any other iteration, and the final x is nondeterministic. */
#pragma omp parallel for
  for (i=0;i<len;i++)
  {
    a[i] = x;
    x=i;
  }
  /* Output is nondeterministic because of the race above. */
  printf("x=%d, a[0]=%d\n",x,a[0]);
  return 0;
}
HardTanh.h
// --------------------------------------------------------------------------
//  Binary Brain  -- binary neural net framework
//
//                                Copyright (C) 2018-2019 by Ryuji Fuchikami
//                                https://github.com/ryuz
//                                ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------

#pragma once

#include "bb/Manager.h"
#include "bb/Binarize.h"

namespace bb {

// Hard-Tanh activation: clamps each input to [hardtanh_min, hardtanh_max].
// Derives from Binarize and delegates to it when running in binary mode
// (BinType == Bit, or "binary" command enabled), where the activation
// degenerates to a threshold at the midpoint of the clamp range.
template <typename BinType = float, typename RealType = float>
class HardTanh : public Binarize<BinType, RealType>
{
    using _super = Binarize<BinType, RealType>;

public:
    static inline std::string ModelName(void) { return "HardTanh"; }
    static inline std::string ObjectName(void){ return ModelName() + "_" + DataType<BinType>::Name() + "_" + DataType<RealType>::Name(); }

    std::string GetModelName(void)  const override { return ModelName(); }
    std::string GetObjectName(void) const override { return ObjectName(); }

protected:
    // inherited from Binarize
    using _super::m_host_only;
    using _super::m_binary_th;
    using _super::m_hardtanh_min;
    using _super::m_hardtanh_max;

    // when true, all work is forwarded to the Binarize base class
    bool m_binary_mode = false;

public:
    // creation parameters
    struct create_t
    {
        RealType hardtanh_min = (RealType)-1.0;   // lower clamp bound
        RealType hardtanh_max = (RealType)+1.0;   // upper clamp bound
    };

protected:
    // Constructor: stores the clamp range and places the binarization
    // threshold at the midpoint of that range.
    HardTanh(create_t const &create)
    {
        m_hardtanh_min = create.hardtanh_min;
        m_hardtanh_max = create.hardtanh_max;
        m_binary_th = (m_hardtanh_min + m_hardtanh_max) / (RealType)2;
    }

    /**
     * @brief  command processing
     * @detail handles runtime configuration commands
     * @param  args command tokens
     */
    void CommandProc(std::vector<std::string> args) override
    {
        // set binary mode: "binary <true|false>"
        if ( args.size() == 2 && args[0] == "binary" )
        {
            m_binary_mode = EvalBool(args[1]);
        }

        // set host-only mode (disable CUDA path): "host_only <true|false>"
        if (args.size() == 2 && args[0] == "host_only")
        {
            m_host_only = EvalBool(args[1]);
        }
    }

public:
    ~HardTanh() {}

    static std::shared_ptr<HardTanh> Create(create_t const &create)
    {
        return std::shared_ptr<HardTanh>(new HardTanh(create));
    }

    static std::shared_ptr<HardTanh> Create(RealType hardtanh_min = (RealType)-1, RealType hardtanh_max = (RealType)+1)
    {
        create_t create;
        create.hardtanh_min = hardtanh_min;
        create.hardtanh_max = hardtanh_max;
        return Create(create);
    }

#ifdef BB_PYBIND11
    // Python binding helper: takes doubles and narrows to RealType.
    static std::shared_ptr<HardTanh> CreatePy(double hardtanh_min = -1.0, double hardtanh_max = +1.0)
    {
        create_t create;
        create.hardtanh_min = (RealType)hardtanh_min;
        create.hardtanh_max = (RealType)hardtanh_max;
        return Create(create);
    }
#endif

    // Forward computation for a single node only (used for inspection/debug).
    // In binary mode this defers to the Binarize base class.
    std::vector<double> ForwardNode(index_t node, std::vector<double> x_vec) const override
    {
        if ( m_binary_mode ) {
            return _super::ForwardNode(node, x_vec);
        }

        // clamp each value into [m_hardtanh_min, m_hardtanh_max]
        for ( auto& x : x_vec ) {
            if ( x <= m_hardtanh_min ) { x = (double)m_hardtanh_min; }
            if ( x >= m_hardtanh_max ) { x = (double)m_hardtanh_max; }
        }
        return x_vec;
    }

    /**
     * @brief  forward computation
     * @detail performs the forward pass (element-wise clamp)
     * @param  x_buf input data
     * @param  train pass true during training (keeps x for backward)
     * @return forward result
     */
    inline FrameBuffer Forward(FrameBuffer x_buf, bool train = true) override
    {
        // binary mode: delegate to Binarize
        if ( DataType<BinType>::type == BB_TYPE_BIT || m_binary_mode ) {
            return _super::Forward(x_buf, train);
        }

        BB_ASSERT(x_buf.GetType() == DataType<RealType>::type);

        // save input for the backward pass
        if ( train ) {
            this->PushFrameBuffer(x_buf);
        }

        // allocate the return buffer with the same geometry/type as the input
        FrameBuffer y_buf(x_buf.GetFrameSize(), x_buf.GetShape(), x_buf.GetType());

#ifdef BB_WITH_CUDA
        if ( DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
            // CUDA version
            auto ptr_x = x_buf.LockDeviceMemoryConst();
            auto ptr_y = y_buf.LockDeviceMemory();
            bbcu_fp32_HardTanh_Forward(
                (float const *)ptr_x.GetAddr(),
                (float       *)ptr_y.GetAddr(),
                (float        )m_hardtanh_min,
                (float        )m_hardtanh_max,
                (int          )y_buf.GetNodeSize(),
                (int          )y_buf.GetFrameSize(),
                (int          )(y_buf.GetFrameStride() / sizeof(float))
            );
            return y_buf;
        }
#endif
        {
            // generic (CPU) version
            index_t frame_size = x_buf.GetFrameSize();
            index_t node_size  = x_buf.GetNodeSize();

            auto x_ptr = x_buf.template LockConst<RealType>();
            // NOTE(review): y_buf was created with x_buf's type (RealType)
            // but is locked as BinType here; this is fine when
            // BinType == RealType (the default) — confirm for mixed
            // instantiations reaching this non-binary path.
            auto y_ptr = y_buf.template Lock<BinType>();

            // Hard-Tanh: element-wise clamp
            #pragma omp parallel for
            for (index_t node = 0; node < node_size; ++node) {
                for (index_t frame = 0; frame < frame_size; ++frame) {
                    auto x = x_ptr.Get(frame, node);
                    if ( x <= m_hardtanh_min ) { x = m_hardtanh_min; }
                    if ( x >= m_hardtanh_max ) { x = m_hardtanh_max; }
                    y_ptr.Set(frame, node, x);
                }
            }
            return y_buf;
        }
    }

    /**
     * @brief  backward computation
     * @detail performs the backward pass: gradient passes through where the
     *         input was strictly inside the clamp range, zero elsewhere
     * @param  dy_buf incoming gradient
     * @return backward result (gradient w.r.t. the input)
     */
    inline FrameBuffer Backward(FrameBuffer dy_buf) override
    {
        if (dy_buf.Empty()) {
            return dy_buf;
        }

        // binary mode: delegate to Binarize
        if ( DataType<BinType>::type == BB_TYPE_BIT || m_binary_mode) {
            return _super::Backward(dy_buf);
        }

        BB_ASSERT(dy_buf.GetType() == DataType<RealType>::type);

        // allocate the return buffer with the gradient's geometry/type
        FrameBuffer dx_buf(dy_buf.GetFrameSize(), dy_buf.GetShape(), dy_buf.GetType());

        // retrieve the input saved by Forward
        FrameBuffer x_buf = this->PopFrameBuffer();

#ifdef BB_WITH_CUDA
        if ( DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only && x_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && dy_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
            // GPU version
            auto ptr_x  = x_buf.LockDeviceMemoryConst();
            auto ptr_dy = dy_buf.LockDeviceMemoryConst();
            auto ptr_dx = dx_buf.LockDeviceMemory(true);
            bbcu_fp32_HardTanh_Backward(
                (float const *)ptr_x.GetAddr(),
                (float const *)ptr_dy.GetAddr(),
                (float       *)ptr_dx.GetAddr(),
                (float        )m_hardtanh_min,
                (float        )m_hardtanh_max,
                (int          )dx_buf.GetNodeSize(),
                (int          )dx_buf.GetFrameSize(),
                (int          )(dx_buf.GetFrameStride() / sizeof(float))
            );
            return dx_buf;
        }
#endif
        {
            // generic (CPU) version
            index_t frame_size = dx_buf.GetFrameSize();
            index_t node_size  = dx_buf.GetNodeSize();

            auto x_ptr  = x_buf.template LockConst<RealType>();
            auto dy_ptr = dy_buf.template LockConst<RealType>();
            auto dx_ptr = dx_buf.template Lock<RealType>();

            // Hard-Tanh gradient: zero outside the clamp range
            #pragma omp parallel for
            for (index_t node = 0; node < node_size; ++node) {
                for (index_t frame = 0; frame < frame_size; ++frame) {
                    auto x  = x_ptr.Get(frame, node);
                    auto dy = dy_ptr.Get(frame, node);
                    if ( x <= m_hardtanh_min ) { dy = (RealType)0; }
                    if ( x >= m_hardtanh_max ) { dy = (RealType)0; }
                    dx_ptr.Set(frame, node, dy);
                }
            }
            return dx_buf;
        }
    }

    // serialization
protected:
    void DumpObjectData(std::ostream &os) const override
    {
        // version tag
        std::int64_t ver = 1;
        bb::SaveValue(os, ver);

        // parent class
        _super::DumpObjectData(os);

        // members
        bb::SaveValue(os, m_binary_mode);
    }

    void LoadObjectData(std::istream &is) override
    {
        // version tag
        std::int64_t ver;
        bb::LoadValue(is, ver);

        BB_ASSERT(ver == 1);

        // parent class
        _super::LoadObjectData(is);

        // members
        bb::LoadValue(is, m_binary_mode);
    }
};

}

// end of file
Vector.h
/* * Vector.h * * Created on: 12.03.2014 * Author: Michael Wegner (michael.wegner@student.kit.edu) */ #ifndef VECTOR_H_ #define VECTOR_H_ #include <vector> #include "../Globals.h" #include "AlgebraicGlobals.h" #include <cassert> namespace NetworKit { // forward declaration of DynamicMatrix class class DynamicMatrix; /** * @ingroup algebraic * The Vector class represents a basic vector with double coefficients. */ class Vector { private: std::vector<double> values; bool transposed; public: /** Default constructor */ Vector(); /** * Constructs the Vector with @a dimension elements with value @a initialValue. * @param dimension The dimension of this vector. * @param initialValue All coefficients will be initialized to @a initialValue. * @param transpose Indicates whether this vector is transposed (row vector) or not (column vector). */ Vector(const count dimension, const double initialValue = 0, const bool transpose = false); /** * Constructs the Vector with the contents of @a values. * @param values The values of this Vector. * @param transpose Indicates whether this vector is transposed (row vector) or not (column vector). */ Vector(const std::vector<double> &values, const bool transpose = false); /** * Constructs the Vector from the contents of the initializer list @a list. * @param list The initializer list. */ Vector(const std::initializer_list<double> &list); /** Default copy constructor */ Vector(const Vector &other) = default; /** Default move constructor */ Vector(Vector &&other) = default; /** Default destructor */ virtual ~Vector() = default; /** Default copy assignment operator */ Vector& operator=(const Vector &other) = default; /** Default move assignment operator */ Vector& operator=(Vector &&other) = default; /** * @return dimension of vector */ inline count getDimension() const { return values.size(); } /** * A transposed vector is a row vector. * @return True, if this vector is transposed, otherwise false. 
*/ bool isTransposed() const; /** * @return Transposed copy of this vector. */ Vector transpose() const; /** * Calculates and returns the Euclidean length of this vector * @return The Euclidean length of this vector. */ double length() const; /** * Calculates and returns the arithmetic mean of this vector * @return The arithmetic mean of this vector. */ double mean() const; /** * Returns a reference to the element at index @a idx without checking the range of this vector. * @param idx The index of the element. * @return Reference to the element at index @a idx. */ inline double& operator[](const index idx) { assert(idx < values.size()); return values[idx]; } /** * Returns a constant reference to the element at index @a idx without checking the range of this vector. * @a idx The index of the element. * @return Constant reference to the element at index @a idx. */ inline const double& operator[](const index idx) const { assert(idx < values.size()); return values[idx]; } /** * Returns a reference to the element at index @a idx. If @a idx is not a valid index an exception is thrown. * @param idx The index of the element. * @return Reference to the element at index @a idx. */ double &at(const index idx) { if (idx >= values.size()) { throw std::runtime_error("index out of range"); } else { return values[idx]; } } /** * Compares this vector and @a other element-wise. * @return True, if this vector is element-wise equal to @a other, otherwise false. */ bool operator==(const Vector &other) const; /** * Compares this vector and @a other element-wise. * @return True, if this vector is element-wise unequal to @a other, otherwise false. */ bool operator!=(const Vector &other) const; /** * Computes the outer product of @a v1 and @a v2. * @param v1 First Vector. * @param v2 Second Vector. * @return The resulting matrix from the outer product. 
*/ template<class Matrix = DynamicMatrix> static Matrix outerProduct(const Vector& v1, const Vector& v2); /** * Computes the inner product (dot product) of the vectors @a v1 and @a v2. * @return The result of the inner product. */ static double innerProduct(const Vector &v1, const Vector &v2); /** * Computes the inner product (dot product) of this vector and @a other. * @return The result of the inner product. */ double operator*(const Vector &other) const; /** * Multiplies this vector with @a matrix and returns the result. * @return The result of multiplying this vector with @a matrix. */ template<typename Matrix = DynamicMatrix> Vector operator*(const Matrix& matrix) const; /** * Multiplies this vector with a scalar specified in @a scalar and returns the result in a new vector. * @return The result of multiplying this vector with @a scalar. */ Vector operator*(const double &scalar) const; /** * Multiplies this vector with a scalar specified in @a scalar. * @return Reference to this vector. */ Vector& operator*=(const double &scalar); /** * Divides this vector by a divisor specified in @a divisor and returns the result in a new vector. * @return The result of dividing this vector by @a divisor. */ Vector operator/(const double &divisor) const; /** * Divides this vector by a divisor specified in @a divisor. * @return Reference to this vector. */ Vector& operator/=(const double &divisor); /** * Adds this vector to @a other and returns the result. * Note that the dimensions of the vectors have to be the same. * @return The sum of this vector and @a other. */ Vector operator+(const Vector &other) const; /** * Adds @a value to each element of this vector and returns the result. */ Vector operator+(const double value) const; /** * Adds @a other to this vector. * Note that the dimensions of the vectors have to be the same. * @return Reference to this vector. */ Vector& operator+=(const Vector &other); /** * Adds @a value to each element of this vector. 
*/ Vector& operator+=(const double value); /** * Subtracts @a other from this vector and returns the result. * Note that the dimensions of the vectors have to be the same. * @return The difference of this vector and @a other. * */ Vector operator-(const Vector &other) const; /** * Subtracts @a value from each element of this vector and returns the result. */ Vector operator-(const double value) const; /** * Subtracts @a other from this vector. * Note that the dimensions of the vectors have to be the same. * @return Reference to this vector. */ Vector& operator-=(const Vector &other); /** * Subtracts @a value from each element of this vector. */ Vector& operator-=(const double value); /** * Applies the unary function @a unaryElementFunction to each value in the Vector. Note that it must hold that the * function applied to the zero element of this matrix returns the zero element. * @param unaryElementFunction */ template<typename F> void apply(const F unaryElementFunction); /** * Iterate over all elements of the vector and call handler (lambda closure). */ template<typename L> void forElements(L handle); /** * Iterate over all elements of the vector and call handler (lambda closure). */ template<typename L> void forElements(L handle) const; /** * Iterate in parallel over all elements of the vector and call handler (lambda closure). * */ template<typename L> void parallelForElements(L handle); /** * Iterate in parallel over all elements of the vector and call handler (lambda closure). */ template<typename L> void parallelForElements(L handle) const; }; /** * Multiplies the vector @a v with a scalar specified in @a scalar and returns the result. * @return The result of multiplying this vector with @a scalar. 
*/ inline Vector operator*(const double &scalar, const Vector &v) { return v.operator*(scalar); } template<class Matrix> Matrix Vector::outerProduct(const Vector& v1, const Vector& v2) { std::vector<Triplet> triplets; for (index i = 0; i < v1.getDimension(); ++i) { for (index j = 0; j < v2.getDimension(); ++j) { double result = v1[i] * v2[j]; if (fabs(result) >= FLOAT_EPSILON) { triplets.push_back({i,j,result}); } } } return Matrix(v1.getDimension(), v2.getDimension(), triplets); } template<class Matrix> Vector Vector::operator*(const Matrix& matrix) const { assert(isTransposed()); // vector must be of the form 1xn assert(getDimension() == matrix.numberOfRows()); // dimensions of vector and matrix must match Vector result(matrix.numberOfColumns(), 0.0, true); #pragma omp parallel for for (count k = 0; k < matrix.numberOfColumns(); ++k) { Vector column = matrix.column(k); result[k] = (*this) * column; } return result; } template<typename F> void Vector::apply(const F unaryElementFunction) { #pragma omp parallel for for (index i = 0; i < getDimension(); ++i) { values[i] = unaryElementFunction(values[i]); } } template<typename L> inline void Vector::forElements(L handle) { for (uint64_t i = 0; i < getDimension(); i++) { handle(values[i]); } } template<typename L> inline void Vector::forElements(L handle) const { for (uint64_t i = 0; i < getDimension(); i++) { handle(values[i]); } } template<typename L> inline void Vector::parallelForElements(L handle) { #pragma omp parallel for for (uint64_t i = 0; i < getDimension(); i++) { handle(i, values[i]); } } template<typename L> inline void Vector::parallelForElements(L handle) const { #pragma omp parallel for for (uint64_t i = 0; i < getDimension(); i++) { handle(i, values[i]); } } } /* namespace NetworKit */ #endif /* VECTOR_H_ */
Fig_7.11_fibonacciTasks.c
/* Naive Fibonacci using OpenMP tasks: each recursive call becomes a task,
 * and taskwait joins the two children before summing. */
int fib(int n)
{
    int left, right;

    if (n < 2)
        return n;

#pragma omp task shared(left)
    left = fib(n - 1);
#pragma omp task shared(right)
    right = fib(n - 2);
#pragma omp taskwait

    return left + right;
}

int main()
{
    const int n_terms = 30;

    /* A single thread seeds the task tree; the rest of the team
     * executes the generated tasks. */
#pragma omp parallel
    {
#pragma omp single
        fib(n_terms);
    }
    return 0;
}
GB_binop__isgt_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this translation unit instantiates the ISGT ("is greater
// than") operator for uint8 operands: z = (x > y), yielding 0 or 1 as a
// uint8_t result.  All kernels below are macro-configured expansions of the
// shared template files they #include.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isgt_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_08__isgt_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__isgt_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_04__isgt_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isgt_uint8)
// A*D function (colscale):         GB (_AxD__isgt_uint8)
// D*A function (rowscale):         GB (_DxB__isgt_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__isgt_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__isgt_uint8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isgt_uint8)
// C=scalar+B                       GB (_bind1st__isgt_uint8)
// C=scalar+B'                      GB (_bind1st_tran__isgt_uint8)
// C=A+scalar                       GB (_bind2nd__isgt_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__isgt_uint8)

// C type:   uint8_t
// A type:   uint8_t
// A pattern? 0
// B type:   uint8_t
// B pattern? 0

// BinaryOp: cij = (aij > bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x > y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGT || GxB_NO_UINT8 || GxB_NO_ISGT_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (ISGT is none of these, so this variant is compiled out.)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__isgt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isgt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isgt_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isgt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isgt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isgt_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true (GxB_eWiseUnion)
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isgt_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isgt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isgt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isgt_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isgt_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // GBB tests the bitmap; entries absent from B are skipped
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isgt_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // GBB tests the bitmap; entries absent from A are skipped
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x > aij) ;                       \
}

GrB_Info GB (_bind1st_tran__isgt_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij > y) ;                       \
}

GrB_Info GB (_bind2nd_tran__isgt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
BIN.h
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include <immintrin.h> #include <algorithm> #include "utility.h" /* * Manage the group * prepare hash table for each group */ template <class IT, class NT> class BIN { public: BIN(): total_intprod(0), max_intprod(0), max_nz(0), thread_num(omp_get_max_threads()) { } BIN(IT rows): total_intprod(0), max_intprod(0), max_nz(0), thread_num(omp_get_max_threads()), min_ht_size(8) { assert(rows != 0); row_nz = my_malloc<IT>(rows); rows_offset = my_malloc<IT>(thread_num + 1); bin_id = my_malloc<char>(rows); local_hash_table_id = my_malloc<IT*>(thread_num); local_hash_table_val = my_malloc<NT*>(thread_num); } BIN(IT rows, IT ht_size): total_intprod(0), max_intprod(0), max_nz(0), thread_num(omp_get_max_threads()), min_ht_size(ht_size) { assert(rows != 0); row_nz = my_malloc<IT>(rows); rows_offset = my_malloc<IT>(thread_num + 1); bin_id = my_malloc<char>(rows); local_hash_table_id = my_malloc<IT*>(thread_num); local_hash_table_val = my_malloc<NT*>(thread_num); } ~BIN() { my_free<IT>(row_nz); my_free<IT>(rows_offset); my_free<char>(bin_id); #pragma omp parallel { int tid = omp_get_thread_num(); my_free<IT>(local_hash_table_id[tid]); my_free<NT>(local_hash_table_val[tid]); } my_free<IT*>(local_hash_table_id); my_free<NT*>(local_hash_table_val); } void set_max_bin(const IT *arpt, const IT *acol, const IT *brpt, const IT rows, const IT cols); void set_min_bin(const IT rows, const IT cols); void create_local_hash_table(const IT cols); void set_intprod_num(const IT *arpt, const IT *acol, const IT *brpt, const IT rows); void set_rows_offset(const IT rows); void set_bin_id(const IT rows, const IT cols, const IT min); long long int total_intprod; IT max_intprod; IT max_nz; IT thread_num; IT min_ht_size; IT *row_nz; // the number of flop or non-zero elements of output matrix IT *rows_offset; // offset for row_nz char *bin_id; IT **local_hash_table_id; NT **local_hash_table_val; }; /* Count the number of intermediate products per row 
(= flop / 2) */ template <class IT, class NT> inline void BIN<IT, NT>::set_intprod_num(const IT *arpt, const IT *acol, const IT *brpt, const IT rows) { #pragma omp parallel { IT each_int_prod = 0; #pragma omp for for (IT i = 0; i < rows; ++i) { IT nz_per_row = 0; for (IT j = arpt[i]; j < arpt[i + 1]; ++j) { nz_per_row += brpt[acol[j] + 1] - brpt[acol[j]]; } row_nz[i] = nz_per_row; each_int_prod += nz_per_row; } #pragma omp atomic total_intprod += each_int_prod; } } /* Get total number of floating operations and average * then, use it for assigning rows to thread as the amount of work is equally distributed */ template <class IT, class NT> inline void BIN<IT, NT>::set_rows_offset(const IT rows) { IT *ps_row_nz = my_malloc<IT>(rows + 1); /* Prefix sum of #intermediate products */ scan(row_nz, ps_row_nz, rows + 1); IT average_intprod = (total_intprod + thread_num - 1) / thread_num; // long long int average_intprod = total_intprod / thread_num; /* Search end point of each range */ rows_offset[0] = 0; #pragma omp parallel { int tid = omp_get_thread_num(); long long int end_itr = (lower_bound(ps_row_nz, ps_row_nz + rows + 1, average_intprod * (tid + 1))) - ps_row_nz; rows_offset[tid + 1] = end_itr; // if (tid == thread_num - 1) rows_offset[tid + 1] = rows; } rows_offset[thread_num] = rows; my_free<IT>(ps_row_nz); } /* * Prepare hash table for each thread_num * once allocate memory space for hash table, the thread reuse it for each row */ template <class IT, class NT> inline void BIN<IT, NT>::create_local_hash_table(const IT cols) { #pragma omp parallel { int tid = omp_get_thread_num(); IT ht_size = 0; /* Get max size of hash table */ for (IT j = rows_offset[tid]; j < rows_offset[tid + 1]; ++j) { if (ht_size < row_nz[j]) ht_size = row_nz[j]; } /* the size of hash table is aligned as 2^n */ if (ht_size > 0) { if (ht_size > cols) ht_size = cols; int k = min_ht_size; while (k < ht_size) { k <<= 1; } ht_size = k; } local_hash_table_id[tid] = my_malloc<IT>(ht_size); 
local_hash_table_val[tid] = my_malloc<NT>(ht_size); } } /* * Precompute how many entries each row requires for the hash table * the size is 2^bin_id */ template <class IT, class NT> inline void BIN<IT, NT>::set_bin_id(const IT rows, const IT cols, const IT min) { IT i; #pragma omp parallel for for (i = 0; i < rows; ++i) { IT j; IT nz_per_row = row_nz[i]; if (nz_per_row > cols) nz_per_row = cols; if (nz_per_row == 0) { bin_id[i] = 0; } else { j = 0; while (nz_per_row > (min << j)) { j++; } bin_id[i] = j + 1; } } } /* grouping and preparing hash table based on the number of floating operations */ template <class IT, class NT> inline void BIN<IT, NT>::set_max_bin(const IT *arpt, const IT *acol, const IT *brpt, const IT rows, const IT cols) { set_intprod_num(arpt, acol, brpt, rows); set_rows_offset(rows); set_bin_id(rows, cols, min_ht_size); } /* Reset the size of hash table which each row requires */ template <class IT, class NT> inline void BIN<IT, NT>::set_min_bin(const IT rows, const IT cols) { set_bin_id(rows, cols, min_ht_size); }
nbody_mkl.c
#include <stdlib.h>
#include <stdio.h>

#include <mkl.h>
#include <mkl_extensions.h>

#include <string.h>

#include <vec.h>

#include "nbody.h"
#include "nbody_mkl.h"

/** Computes Sum(G * pm / r ** 2 * (dx / r)).
 *
 * Diagonal elements are not counted in the sum.
 *
 */
void compute_force(MKL_INT n,
        double *dx, double *pm, double *r,
        double *tmp1,
        double *output) {
    MKL_INT size = n * n;

    // tmp1 = G * pm           (vdMuli: element-wise multiply by scalar G)
    vdMuli(size, pm, G, tmp1);
    // output = r^2
    vdPowx(size, r, 2.0, output);
    // tmp1 = G * pm / r^2
    vdDiv(size, tmp1, output, tmp1);
    // output = dx / r         (direction cosine along this axis)
    // NOTE(review): diagonal entries compute 0/0 (r is 0 on the diagonal),
    // producing NaN/Inf in tmp1; those entries are skipped by the reduction
    // below, so they never reach the result.
    vdDiv(size, dx, r, output);
    // tmp1 = G * pm / r^2 * (dx / r): pairwise force contributions
    vdMul(size, tmp1, output, tmp1);

    // Reduce each row of the n x n matrix tmp1 into output[0..n),
    // skipping the diagonal (no self-interaction).
    memset(output, 0, sizeof(double) * n);

    #pragma omp parallel for
    for (MKL_INT i = 0; i < n; i++) {
        double sum = 0.0;
        for (MKL_INT j = 0; j < n; j++) {
            // Ignore diagonal elements.
            if (i != j) {
                // Causes some imprecision compared to reference?
                sum += tmp1[i*n + j];
            }
        }
        output[i] += sum;
    }
}

/* One leapfrog-style step: recompute pairwise geometry, then per axis
 * update velocity (v += F/m * dt) and position (pos += v * dt).
 * The temporaries alias heavily and the MKL call order below is load-
 * bearing: r must be final before the first compute_force, and tmp2
 * carries each axis force only until the following vdDiv consumes it. */
void move(MKL_INT n,
        double *m,
        double *x, double *y, double *z,
        double *vx, double *vy, double *vz,
        // Temporaries that have n * n space.
        double *dx, double *dy, double *dz,
        double *pm, double *r, double *tmp1, double *tmp2) {

    // Pairwise coordinate deltas and pairwise mass products.
    set_delta(n, x, dx);
    set_delta(n, y, dy);
    set_delta(n, z, dz);
    set_pm(n, m, pm);

    MKL_INT size = n * n;

    // r = sqrt(dx**2 + dy**2 + dz**2)
    vdPowx(size, dx, 2.0, tmp1);
    vdPowx(size, dy, 2.0, tmp2);
    vdAdd(size, tmp1, tmp2, tmp1);
    vdPowx(size, dz, 2.0, tmp2);
    vdAdd(size, tmp1, tmp2, tmp1);
    vdSqrt(size, tmp1, r);

    // x axis: tmp2 = force (length n), tmp1 = acceleration * dt
    compute_force(n, dx, pm, r, tmp1, tmp2);
    vdDiv(n, tmp2, m, tmp1);
    vdMuli(n, tmp1, dt, tmp1);
    vdAdd(n, vx, tmp1, vx);
    vdMuli(n, vx, dt, tmp1);
    vdAdd(n, x, tmp1, x);

    // y axis
    compute_force(n, dy, pm, r, tmp1, tmp2);
    vdDiv(n, tmp2, m, tmp1);
    vdMuli(n, tmp1, dt, tmp1);
    vdAdd(n, vy, tmp1, vy);
    vdMuli(n, vy, dt, tmp1);
    vdAdd(n, y, tmp1, y);

    // z axis
    compute_force(n, dz, pm, r, tmp1, tmp2);
    vdDiv(n, tmp2, m, tmp1);
    vdMuli(n, tmp1, dt, tmp1);
    vdAdd(n, vz, tmp1, vz);
    vdMuli(n, vz, dt, tmp1);
    vdAdd(n, z, tmp1, z);
}

/* Driver: allocates the n*n scratch buffers once and reuses them across
 * all iterations.
 * NOTE(review): the vec_t buffers are never freed here -- fine if the
 * benchmark exits right after, but confirm against vec.h whether a
 * matching free routine should be called. */
void run_mkl(int iterations, MKL_INT n,
        double *m,
        double *x, double *y, double *z,
        double *vx, double *vy, double *vz) {
    vec_t dx = new_vec(n * n, 0);
    vec_t dy = new_vec(n * n, 0);
    vec_t dz = new_vec(n * n, 0);
    vec_t pm = new_vec(n * n, 0);
    vec_t r = new_vec(n * n, 0);
    vec_t tmp1 = new_vec(n * n, 0);
    vec_t tmp2 = new_vec(n * n, 0);

    for (int i = 0; i < iterations; i++) {
        printf("iteration %d\n", i);
        move(n, m, x, y, z, vx, vy, vz,
                dx.data, dy.data, dz.data, pm.data, r.data, tmp1.data, tmp2.data);
    }
}
GB_unaryop__identity_int64_int16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int64_int16
// op(A') function:  GB_tran__identity_int64_int16

// C type:   int64_t
// A type:   int16_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = x ;

// casting
#define GB_CASTING(z, x)   \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_int64_int16
(
    int64_t *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // identity with typecast: Cx [p] = (int64_t) Ax [p]
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_int64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
serial_tree_learner.h
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_

#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/cuda/vector_cudahost.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/random.h>

#include <string>
#include <cmath>
#include <cstdio>
#include <memory>
#include <random>
#include <vector>

#include "col_sampler.hpp"
#include "data_partition.hpp"
#include "feature_histogram.hpp"
#include "leaf_splits.hpp"
#include "monotone_constraints.hpp"
#include "split_info.hpp"

#ifdef USE_GPU
// Use 4KBytes aligned allocator for ordered gradients and ordered hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif

namespace LightGBM {

using json11::Json;

/*! \brief forward declaration */
class CostEfficientGradientBoosting;

/*!
 * \brief Used for learning a tree by single machine
 */
class SerialTreeLearner: public TreeLearner {
 public:
  friend CostEfficientGradientBoosting;
  explicit SerialTreeLearner(const Config* config);

  ~SerialTreeLearner();

  void Init(const Dataset* train_data, bool is_constant_hessian) override;

  // Rebind to a new dataset; always resets the multi-value bin (see
  // ResetTrainingDataInner for the partial-reset variant).
  void ResetTrainingData(const Dataset* train_data,
                         bool is_constant_hessian) override {
    ResetTrainingDataInner(train_data, is_constant_hessian, true);
  }

  void ResetIsConstantHessian(bool is_constant_hessian) override {
    share_state_->is_constant_hessian = is_constant_hessian;
  }

  virtual void ResetTrainingDataInner(const Dataset* train_data,
                                      bool is_constant_hessian,
                                      bool reset_multi_val_bin);

  void ResetConfig(const Config* config) override;

  // Store the forced-split JSON (nullptr or a JSON null clears it).
  inline void SetForcedSplit(const Json* forced_split_json) override {
    if (forced_split_json != nullptr && !forced_split_json->is_null()) {
      forced_split_json_ = forced_split_json;
    } else {
      forced_split_json_ = nullptr;
    }
  }

  Tree* Train(const score_t* gradients, const score_t *hessians, bool is_first_tree) override;

  Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;

  Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
                          const score_t* gradients, const score_t* hessians) const override;

  // subset == nullptr: bagging by index list over the full dataset.
  // subset != nullptr: train on a copied subset ("subrow") of the data.
  void SetBaggingData(const Dataset* subset, const data_size_t* used_indices, data_size_t num_data) override {
    if (subset == nullptr) {
      data_partition_->SetUsedDataIndices(used_indices, num_data);
      share_state_->SetUseSubrow(false);
    } else {
      ResetTrainingDataInner(subset, share_state_->is_constant_hessian, false);
      share_state_->SetUseSubrow(true);
      share_state_->SetSubrowCopied(false);
      share_state_->bagging_use_indices = used_indices;
      share_state_->bagging_indices_cnt = num_data;
    }
  }

  // Adds each leaf's output to the scores of the data points in that leaf.
  // Parallelized per leaf; data indices of distinct leaves come from the
  // data partition, so the out_score writes are presumably disjoint
  // across iterations -- confirm against DataPartition.
  void AddPredictionToScore(const Tree* tree, double* out_score) const override {
    CHECK_LE(tree->num_leaves(), data_partition_->num_leaves());
    if (tree->num_leaves() <= 1) {
      return;
    }
    #pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < tree->num_leaves(); ++i) {
      double output = static_cast<double>(tree->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
        out_score[tmp_idx[j]] += output;
      }
    }
  }

  void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, std::function<double(const label_t*, int)> residual_getter,
                       data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;

  /*! \brief Get output of parent node, used for path smoothing */
  double GetParentOutput(const Tree* tree, const LeafSplits* leaf_splits) const;

 protected:
  void ComputeBestSplitForFeature(FeatureHistogram* histogram_array_, int feature_index, int real_fidx,
                                  int8_t is_feature_used, int num_data, const LeafSplits* leaf_splits,
                                  SplitInfo* best_split, double parent_output);

  void GetShareStates(const Dataset* dataset, bool is_constant_hessian, bool is_first_time);

  void RecomputeBestSplitForLeaf(Tree* tree, int leaf, SplitInfo* split);

  /*!
   * \brief Some initial works before training
   */
  virtual void BeforeTrain();

  /*!
   * \brief Some initial works before FindBestSplit
   */
  virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);

  virtual void FindBestSplits(const Tree* tree);

  virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);

  virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract, const Tree*);

  /*!
   * \brief Partition tree and data according best split.
   * \param tree Current tree, will be splitted on this function.
   * \param best_leaf The index of leaf that will be splitted.
   * \param left_leaf The index of left leaf after splitted.
   * \param right_leaf The index of right leaf after splitted.
   */
  inline virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf) {
    SplitInner(tree, best_leaf, left_leaf, right_leaf, true);
  }

  void SplitInner(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf,
                  bool update_cnt);

  /* Force splits with forced_split_json dict and then return num splits forced.*/
  int32_t ForceSplits(Tree* tree, int* left_leaf, int* right_leaf, int* cur_depth);

  /*!
   * \brief Get the number of data in a leaf
   * \param leaf_idx The index of leaf
   * \return The number of data in the leaf_idx leaf
   */
  inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;

  /*! \brief number of data */
  data_size_t num_data_;
  /*! \brief number of features */
  int num_features_;
  /*! \brief training data */
  const Dataset* train_data_;
  /*! \brief gradients of current iteration */
  const score_t* gradients_;
  /*! \brief hessians of current iteration */
  const score_t* hessians_;
  /*! \brief training data partition on leaves */
  std::unique_ptr<DataPartition> data_partition_;
  /*! \brief pointer to histograms array of parent of current leaves */
  FeatureHistogram* parent_leaf_histogram_array_;
  /*! \brief pointer to histograms array of smaller leaf */
  FeatureHistogram* smaller_leaf_histogram_array_;
  /*! \brief pointer to histograms array of larger leaf */
  FeatureHistogram* larger_leaf_histogram_array_;

  /*! \brief store best split points for all leaves */
  std::vector<SplitInfo> best_split_per_leaf_;
  /*! \brief store best split per feature for all leaves */
  std::vector<SplitInfo> splits_per_leaf_;
  /*! \brief stores minimum and maximum constraints for each leaf */
  std::unique_ptr<LeafConstraintsBase> constraints_;

  /*! \brief stores best thresholds for all feature for smaller leaf */
  std::unique_ptr<LeafSplits> smaller_leaf_splits_;
  /*! \brief stores best thresholds for all feature for larger leaf */
  std::unique_ptr<LeafSplits> larger_leaf_splits_;

#ifdef USE_GPU
  /*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#elif USE_CUDA
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t, CHAllocator<score_t>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t, CHAllocator<score_t>> ordered_hessians_;
#else
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_hessians_;
#endif

  /*! \brief used to cache historical histogram to speed up*/
  HistogramPool histogram_pool_;
  /*! \brief config of tree learner*/
  const Config* config_;
  ColSampler col_sampler_;
  const Json* forced_split_json_;
  std::unique_ptr<TrainingShareStates> share_state_;
  std::unique_ptr<CostEfficientGradientBoosting> cegb_;
};

inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
  if (leaf_idx >= 0) {
    return data_partition_->leaf_count(leaf_idx);
  } else {
    // negative index means "no such leaf"; treated as empty
    return 0;
  }
}

}  // namespace LightGBM
#endif  // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
GB_binop__bclr_uint16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bclr_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_01__bclr_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__bclr_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_03__bclr_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bclr_uint16)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bclr_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__bclr_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bclr_uint16)
// C=scalar+B                       GB (_bind1st__bclr_uint16)
// C=scalar+B'                      GB (_bind1st_tran__bclr_uint16)
// C=A+scalar                       GB (_bind2nd__bclr_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__bclr_uint16)

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_BITCLR (aij, bij, uint16_t, 16)
// (GxB_BCLR: the result is aij with the bit selected by bij cleared;
// see the GB_BITCLR macro in GB.h for the exact out-of-range handling)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    uint16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    uint16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITCLR (x, y, uint16_t, 16) ;

// true if the binop must be flipped
// (BITCLR is not commutative: bitclr(x,y) != bitclr(y,x))
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BCLR || GxB_NO_UINT16 || GxB_NO_BCLR_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__bclr_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bclr_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bclr_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bclr_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__bclr_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bclr_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__bclr_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bclr_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bclr_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap (Bb NULL means all present)
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        // Cx [p] = x with the bit selected by bij cleared
        Cx [p] = GB_BITCLR (x, bij, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bclr_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (Ab NULL means all present)
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        // Cx [p] = aij with the bit selected by y cleared
        Cx [p] = GB_BITCLR (aij, y, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                                   \
    uint16_t aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = GB_BITCLR (x, aij, uint16_t, 16) ;        \
}

GrB_Info GB (_bind1st_tran__bclr_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                                   \
    uint16_t aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = GB_BITCLR (aij, y, uint16_t, 16) ;        \
}

GrB_Info GB (_bind2nd_tran__bclr_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__tanh_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__tanh_fp32_fp32 // op(A') function: GB_unop_tran__tanh_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = tanhf (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = tanhf (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = tanhf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TANH || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__tanh_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased const float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = tanhf (z) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__tanh_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
thread_scale.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 * See COPYRIGHT in top-level directory.
 */
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include "zmtest_absqueue.h"

#define TEST_NELEMTS 64
#define NITER (1024*32)

/*-------------------------------------------------------------------------
 * Function: run
 *
 * Purpose: Test the correctness of queue operations by counting the number
 * of dequeued elements to the expected number
 *
 * Return: Success: 0
 * Failure: 1
 *-------------------------------------------------------------------------
 */
/* Benchmark driver: for each thread count from 2 up to omp_get_max_threads(),
 * splits the team into producers and consumers (split chosen by the
 * ZMTEST_MPMC / ZMTEST_MPSC / ZMTEST_SPMC compile-time mode) and reports
 * enqueue/dequeue throughput.
 *
 * NOTE(review): test_counter is declared once and never reset between
 * thread-count iterations (or between the NITER repetitions), so consumers
 * stop dequeuing after the first batch reaches nelem_deq while producers keep
 * enqueuing -- presumably acceptable for a throughput benchmark, but verify
 * against the stated "correctness test" purpose.
 * NOTE(review): the `while(test_counter < nelem_deq)` read is not atomic;
 * the increment is. Looks like a benign benchmark race -- confirm.
 * NOTE(review): if none of the ZMTEST_* mode macros is defined, nelem_enq,
 * nelem_deq and producer_b are left uninitialized; the build system is
 * assumed to always define exactly one mode.
 */
static inline void run() {
    unsigned test_counter = 0;      /* total elements successfully dequeued */
    zm_absqueue_t queue;
    double t1, t2;                  /* wall-clock start/end per thread count */
    printf("#threads \t throughput ops/s\n");
    int nthreads;
    for (nthreads = 2; nthreads <= omp_get_max_threads(); nthreads ++) {
        zm_absqueue_init(&queue);
        int nelem_enq, nelem_deq;
        /* per-producer enqueue count and total expected dequeue count,
         * depending on the producer/consumer split */
#if defined(ZMTEST_MPMC)
        nelem_enq = TEST_NELEMTS/(nthreads/2);
        nelem_deq = (nthreads/2)*nelem_enq;
#elif defined(ZMTEST_MPSC)
        nelem_enq = TEST_NELEMTS/(nthreads-1);
        nelem_deq = (nthreads-1)*nelem_enq;
#elif defined(ZMTEST_SPMC)
        nelem_enq = TEST_NELEMTS;
        nelem_deq = nelem_enq;
#endif
        t1 = omp_get_wtime();
        #pragma omp parallel num_threads(nthreads)
        {
            int tid, producer_b;    /* producer_b: nonzero if this thread produces */
#if defined(ZMTEST_ALLOC_QELEM)
            int *input;             /* heap-allocated payload, freed by consumer */
#else
            int input = 1;          /* shared sentinel payload on the stack */
#endif
            tid = omp_get_thread_num();
            /* role assignment per mode: even/odd split, all-but-one producers,
             * or a single producer */
#if defined(ZMTEST_MPMC)
            producer_b = (tid % 2 == 0);
#elif defined(ZMTEST_MPSC)
            producer_b = (tid != 0);
#elif defined(ZMTEST_SPMC)
            producer_b = (tid == 0);
#endif
            int elem;               /* producer loop index (shadowed by the
                                     * consumer's pointer of the same name) */
            for(int i = 0; i<NITER; i++) {
                if(producer_b) { /* producer */
                    for(elem=0; elem < nelem_enq; elem++) {
#if defined(ZMTEST_ALLOC_QELEM)
                        input = malloc(sizeof *input);
                        *input = 1;
                        zm_absqueue_enqueue(&queue, (void*) input);
#else
                        zm_absqueue_enqueue(&queue, (void*) &input);
#endif
                    }
                } else { /* consumer */
                    /* spin until the expected number of elements has been
                     * dequeued across all consumers */
                    while(test_counter < nelem_deq) {
                        int* elem = NULL;
                        zm_absqueue_dequeue(&queue, (void**)&elem);
                        if ((elem != NULL) && (*elem == 1)) {
                            #pragma omp atomic
                            test_counter++;
#if defined(ZMTEST_ALLOC_QELEM)
                            free(elem);
#endif
                        }
                    }
                }
            }
        }
        t2 = omp_get_wtime();
        /* ops/s = total dequeues attempted over the timed region */
        printf("%d \t %lf\n", nthreads, (double)nelem_deq*NITER/(t2-t1));
    }
} /* end run() */

/* Entry point; implicit return 0 under C99. */
int main(int argc, char **argv) {
    run();
} /* end main() */
8952.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "atax.h" /* Array initialization. */ static void init_array (int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny)) { int i, j; for (i = 0; i < ny; i++) x[i] = i * M_PI; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) A[i][j] = ((DATA_TYPE) i*(j+1)) / nx; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int nx, DATA_TYPE POLYBENCH_1D(y,NX,nx)) { int i; for (i = 0; i < nx; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_atax(int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny), DATA_TYPE POLYBENCH_1D(y,NY,ny), DATA_TYPE POLYBENCH_1D(tmp,NX,nx)) { int i, j; #pragma scop { #pragma omp target teams distribute schedule(dynamic, 16) for (i = 0; i < _PB_NY; i++) { y[i] = 0; } #pragma omp target teams distribute schedule(dynamic, 16) for (i = 0; i < _PB_NX; i++) { tmp[i] = 0; for (j = 0; j < _PB_NY; j++) tmp[i] = tmp[i] + A[i][j] * x[j]; for (j = 0; j < _PB_NY; j++) y[j] = y[j] + A[i][j] * tmp[i]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int nx = NX; int ny = NY; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny); POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx); /* Initialize array(s). */ init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_atax (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(x); POLYBENCH_FREE_ARRAY(y); POLYBENCH_FREE_ARRAY(tmp); return 0; }
loop-16.c
/* { dg-do run } */ extern void abort (void); volatile int count; static int test (void) { return ++count > 0; } int i; int main () { #pragma omp for lastprivate (i) for (i = 0; i < 10; ++i) { int *p = &i; if (test ()) continue; abort (); } if (i != count) abort (); return 0; }
dof_updater.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:		 BSD License
//					 Kratos default license: kratos/license.txt
//
//  Main authors:    Jordi Cotela
//

#if !defined(KRATOS_DOF_UPDATER_H_INCLUDED )
#define  KRATOS_DOF_UPDATER_H_INCLUDED

// Project includes
#include "includes/define.h"
#include "includes/model_part.h"

namespace Kratos
{
///@addtogroup KratosCore
///@{

///@name Kratos Classes
///@{

/// Utility class to update the values of degree of freedom (Dof) variables after solving the system.
/** This class encapsulates the operation of updating nodal degrees of freedom after a system solution.
 *  In pseudo-code, the operation to be performed is
 *  for each dof: dof.variable += dx[dof.equation_id]
 *  This operation is a simple loop in shared memory, but requires additional infrastructure in MPI,
 *  to obtain out-of-process update data. DofUpdater takes care of both the operation and the eventual
 *  auxiliary infrastructure.
 *  @see TrilinosDofUpdater for the trilinos version.
 */
template< class TSparseSpace >
class DofUpdater
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of DofUpdater
    KRATOS_CLASS_POINTER_DEFINITION(DofUpdater);

    using DofType = Dof<typename TSparseSpace::DataType>;
    using DofsArrayType = PointerVectorSet<
        DofType,
        SetIdentityFunction<DofType>,
        std::less<typename SetIdentityFunction<DofType>::result_type>,
        std::equal_to<typename SetIdentityFunction<DofType>::result_type>,
        DofType* >;

    using SystemVectorType = typename TSparseSpace::VectorType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Default constructor.
    DofUpdater(){}

    /// Deleted copy constructor
    DofUpdater(DofUpdater const& rOther) = delete;

    /// Destructor.
    virtual ~DofUpdater(){}

    /// Deleted assignment operator
    DofUpdater& operator=(DofUpdater const& rOther) = delete;

    ///@}
    ///@name Operations
    ///@{

    /// Create a new instance of this class.
    /** This function is used by the SparseSpace class to create new
     *  DofUpdater instances of the appropriate type.
     *  @return a std::unique_pointer to the new instance.
     *  @see UblasSpace::CreateDofUpdater(), TrilinosSpace::CreateDofUpdater().
     */
    virtual typename DofUpdater::UniquePointer Create() const
    {
        return Kratos::make_unique<DofUpdater>();
    }

    /// Initialize the DofUpdater in preparation for a subsequent UpdateDofs call.
    /** Note that the base DofUpdater does not have internal data, so this does nothing.
     *  @param[in] rDofSet The list of degrees of freedom.
     *  @param[in] rDx The update vector.
     */
    virtual void Initialize(
        const DofsArrayType& rDofSet,
        const SystemVectorType& rDx)
    {}

    /// Free internal storage to reset the instance and/or optimize memory consumption.
    /** Note that the base DofUpdater does not have internal data, so this does nothing.
     */
    virtual void Clear() {}

    /// Calculate new values for the problem's degrees of freedom using the update vector rDx.
    /** For each Dof in rDofSet, this function calculates the updated value for the corresponding
     *  variable as value += rDx[dof.EquationId()]. Only free (unfixed) Dofs are modified.
     *  The loop is OpenMP-parallel; it assumes random access into rDofSet via iterator + offset.
     *  @param[in,out] rDofSet The list of degrees of freedom.
     *  @param[in] rDx The update vector.
     *  Note: this shared-memory base class needs no prior Initialize() call; distributed
     *  implementations (e.g. TrilinosDofUpdater) may require one.
     */
    virtual void UpdateDofs(
        DofsArrayType& rDofSet,
        const SystemVectorType& rDx)
    {
        const int num_dof = static_cast<int>(rDofSet.size());

        #pragma omp parallel for
        for(int i = 0;  i < num_dof; ++i) {
            auto it_dof = rDofSet.begin() + i;

            if (it_dof->IsFree())
                it_dof->GetSolutionStepValue() += TSparseSpace::GetValue(rDx,it_dof->EquationId());
        }
    }

    /// Assign new values for the problem's degrees of freedom using the vector rX.
    /** For each Dof in rDofSet, this function assigns the value for the corresponding
     *  variable as value = rX[dof.EquationId()]. Only free (unfixed) Dofs are modified.
     *  @param[in,out] rDofSet The list of degrees of freedom.
     *  @param[in] rX The solution vector.
     *  Note: this shared-memory base class needs no prior Initialize() call; distributed
     *  implementations may require one.
     */
    virtual void AssignDofs(DofsArrayType& rDofSet, const SystemVectorType& rX)
    {
        const int num_dof = static_cast<int>(rDofSet.size());

        #pragma omp parallel for
        for(int i = 0;  i < num_dof; ++i) {
            auto it_dof = rDofSet.begin() + i;

            if (it_dof->IsFree())
                it_dof->GetSolutionStepValue() = TSparseSpace::GetValue(rX,it_dof->EquationId());
        }
    }

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    virtual std::string Info() const
    {
        std::stringstream buffer;
        buffer << "DofUpdater" ;
        return buffer.str();
    }

    /// Print information about this object.
    virtual void PrintInfo(std::ostream& rOStream) const
    {
        rOStream << this->Info() << std::endl;
    }

    /// Print object's data.
    virtual void PrintData(std::ostream& rOStream) const
    {
        rOStream << this->Info() << std::endl;
    }

    ///@}

}; // Class DofUpdater

///@}
///@name Input and output
///@{

/// input stream function
template< class TSparseSpace >
inline std::istream& operator >> (
    std::istream& rIStream,
    DofUpdater<TSparseSpace>& rThis)
{
    return rIStream;
}

/// output stream function
template< class TSparseSpace >
inline std::ostream& operator << (
    std::ostream& rOStream,
    const DofUpdater<TSparseSpace>& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);

    return rOStream;
}

///@}

///@} addtogroup block

}  // namespace Kratos.

#endif // KRATOS_DOF_UPDATER_H_INCLUDED  defined
multiply.h
#pragma once #include <vector> #include <unordered_map> #include <algorithm> #include <cmath> #include <omp.h> #include "_cuda.h" using std::vector; using std::unordered_map; using std::max; using std::abs; // MULTIPLY-VALUE // -------------- template <class T> void multiplyValue(T *a, int N, T v) { for (int i=0; i<N; i++) a[i] *= v; } template <class T> void multiplyValue(vector<T>& a, T v) { multiplyValue(a.data(), a.size(), v); } template <class K, class T> void multiplyValue(unordered_map<K, T>& a, T v) { for (auto& p : a) p.second *= v; } // MULTIPLY-VALUE-AT // ----------------- template <class T, class I> void multiplyValueAt(T *a, T v, I&& is) { for (int i : is) a[i] *= v; } template <class T, class I> void multiplyValueAt(vector<T>& a, T v, I&& is) { multiplyValueAt(a.data(), v, is); } template <class K, class T, class I> void multiplyValueAt(unordered_map<K, T>& a, T v, I&& ks) { for (auto&& k : ks) a[k] *= v; } // MULTIPLY // -------- template <class T> void multiply(T *a, T *x, T *y, int N) { for (int i=0; i<N; i++) a[i] = x[i] * y[i]; } template <class T> void multiply(vector<T>& a, vector<T>& x, vector<T>& y) { multiply(a.data(), x.data(), y.data(), x.size()); } template <class K, class T> void multiply(unordered_map<K, T>& a, unordered_map<K, T>& x, unordered_map<K, T>& y) { for (auto&& p : x) a[p.first] = x[p.first] * y[p.first]; } // MULTIPLY-ABS // ------------ template <class T> void multiplyAbs(T *a, T *x, T *y, int N) { for (int i=0; i<N; i++) a[i] = abs(x[i] * y[i]); } template <class T> void multiplyAbs(vector<T>& a, vector<T>& x, vector<T>& y) { multiplyAbs(a.data(), x.data(), y.data(), x.size()); } template <class K, class T> void multiplyAbs(unordered_map<K, T>& a, unordered_map<K, T>& x, unordered_map<K, T>& y) { for (auto&& p : x) a[p.first] = abs(x[p.first] * y[p.first]); } // MULTIPLY-VALUE (OMP) // -------------------- template <class T> void multiplyValueOmp(T *a, int N, T v) { #pragma omp parallel for for (int i=0; i<N; i++) a[i] *= 
v; } template <class T> void multiplyValueOmp(vector<T>& a, T v) { multiplyValueOmp(a.data(), a.size(), v); } // MULTIPLY-VALUE (CUDA) // --------------------- template <class T> __device__ void multiplyValueKernelLoop(T *a, int N, T v, int i, int DI) { for (; i<N; i+=DI) a[i] *= v; } template <class T> __global__ void multiplyValueKernel(T *a, int N, T v) { DEFINE(t, b, B, G); multiplyValueKernelLoop(a, N, v, B*b+t, G*B); } template <class T> void multiplyValueCuda(T *a, int N, T v) { int B = BLOCK_DIM; int G = min(ceilDiv(N, B), GRID_DIM); size_t N1 = N * sizeof(T); T *aD; TRY( cudaMalloc(&aD, N1) ); TRY( cudaMemcpy(aD, a, N1, cudaMemcpyHostToDevice) ); multiplyValueKernel<<<G, B>>>(aD, N, v); TRY( cudaMemcpy(a, aD, N1, cudaMemcpyDeviceToHost) ); TRY( cudaFree(aD) ); } template <class T> void multiplyValueCuda(vector<T>& a, T v) { multiplyValueCuda(a.data(), a.size(), v); } // MULTIPLY (CUDA) // --------------- template <class T> __device__ void multiplyKernelLoop(T *a, T *x, T *y, int N, int i, int DI) { for (; i<N; i+=DI) a[i] = x[i] * y[i]; } template <class T> __global__ void multiplyKernel(T *a, T *x, T* y, int N) { DEFINE(t, b, B, G); multiplyKernelLoop(a, x, y, N, B*b+t, G*B); } template <class T> void multiplyCuda(T *a, T *x, T *y, int N, T v) { int B = BLOCK_DIM; int G = min(ceilDiv(N, B), GRID_DIM); size_t N1 = N * sizeof(T); T *xD, *yD; TRY( cudaMalloc(&xD, N1) ); TRY( cudaMalloc(&yD, N1) ); TRY( cudaMemcpy(xD, x, N1, cudaMemcpyHostToDevice) ); TRY( cudaMemcpy(yD, y, N1, cudaMemcpyHostToDevice) ); multiplyKernel<<<G, B>>>(xD, xD, yD, N); TRY( cudaMemcpy(a, xD, N1, cudaMemcpyDeviceToHost) ); TRY( cudaFree(xD) ); TRY( cudaFree(yD) ); } template <class T> void multiplyCuda(vector<T>& a, vector<T>& x, vector<T>& y) { multiplyCuda(a.data(), x.data(), y.data(), x.size()); } // MULTIPLY-ABS (CUDA) // ------------------- template <class T> __device__ void multiplyAbsKernelLoop(T *a, T *x, T *y, int N, int i, int DI) { for (; i<N; i+=DI) a[i] = abs(x[i] * 
y[i]); } template <class T> __global__ void multiplyAbsKernel(T *a, T *x, T* y, int N) { DEFINE(t, b, B, G); multiplyAbsKernelLoop(a, x, y, N, B*b+t, G*B); } template <class T> void multiplyAbsCuda(T *a, T *x, T *y, int N, T v) { int B = BLOCK_DIM; int G = min(ceilDiv(N, B), GRID_DIM); size_t N1 = N * sizeof(T); T *xD, *yD; TRY( cudaMalloc(&xD, N1) ); TRY( cudaMalloc(&yD, N1) ); TRY( cudaMemcpy(xD, x, N1, cudaMemcpyHostToDevice) ); TRY( cudaMemcpy(yD, y, N1, cudaMemcpyHostToDevice) ); multiplyAbsKernel<<<G, B>>>(xD, xD, yD, N); TRY( cudaMemcpy(a, xD, N1, cudaMemcpyDeviceToHost) ); TRY( cudaFree(xD) ); TRY( cudaFree(yD) ); } template <class T> void multiplyAbsCuda(vector<T>& a, vector<T>& x, vector<T>& y) { multiplyAbsCuda(a.data(), x.data(), y.data(), x.size()); }
tcp_md5_fmt_plug.c
/*
 * Cracker for TCP MD5 Signatures, http://www.ietf.org/rfc/rfc2385.txt
 *
 * This software is Copyright (c) 2013, Dhiru Kholia <dhiru [at] openwall.com>,
 * and it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

/* Hash format: $tcpmd5$<hex TCP pseudo-header + segment data>$<hex MD5 digest>.
 * RFC 2385 computes MD5(segment data || password), so the "salt" is prepended
 * and the key appended in crypt_all() below. */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_tcpmd5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_tcpmd5);
#else

#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 8192
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 32768 // scaled K8-dual HT
#endif
#endif
#endif

#include "arch.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"

#define FORMAT_LABEL "tcp-md5"
#define FORMAT_NAME "TCP MD5 Signatures, BGP, MSDP"
#define FORMAT_TAG "$tcpmd5$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
// Linux Kernel says "#define TCP_MD5SIG_MAXKEYLEN 80"
#define PLAINTEXT_LENGTH 80
#define BINARY_SIZE 16
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define MAX_SALT 1500

static struct fmt_tests tests[] = {
	/* BGP TCP_MD5SIG hashes */
	{"$tcpmd5$c0a83814c0a838280006002800b3d10515f72762291b6878a010007300000000$eaf8d1f1da3f03c90b42709e9508fc73", "lolcats"},
	{"$tcpmd5$c0a83828c0a8381400060034d12100b36e73c1c300000000d002390800000000$9a75888344bf20488ebef3ee5b16dd2a", "longbutstilllamepassword"},
	{NULL}
};

/* per-candidate buffers, sized in init() for the OpenMP key batch */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];

static struct custom_salt {
	int length;
	unsigned char salt[MAX_SALT]; // fixed length, but should be OK
} *cur_salt;

/* Allocate the key/length/output arrays; with OpenMP the batch size is scaled
 * by thread count * OMP_SCALE. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_len));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
}

/* Accept only well-formed ciphertexts: optional tag, lowercase-hex salt of at
 * most MAX_SALT bytes, '$', then exactly BINARY_SIZE*2 hex digest chars. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q = NULL;
	int len;

	p = ciphertext;

	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	q = strrchr(ciphertext, '$');
	if (!q)
		return 0;
	q = q + 1;
	/* q - p - 1 == number of salt hex characters */
	if ((q - p - 1) > MAX_SALT * 2)
		return 0;
	len = strspn(q, HEXCHARS_lc);
	if (len != BINARY_SIZE * 2 || len != strlen(q))
		return 0;
	if (strspn(p, HEXCHARS_lc) != q - p - 1)
		return 0;
	return 1;
}

/* Decode the hex salt (TCP pseudo-header + data) into a static custom_salt. */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	int i, len;

	memset(&cs, 0, SALT_SIZE);
	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;

	len = (strrchr(ciphertext, '$') - ciphertext) / 2;

	for (i = 0; i < len; i++)
		cs.salt[i] = (atoi16[ARCH_INDEX(ciphertext[2 * i])] << 4) |
			atoi16[ARCH_INDEX(ciphertext[2 * i + 1])];
	cs.length = len;
	return &cs;
}

/* Decode the hex digest after the last '$' into raw bytes. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* MD5(salt || key) per candidate, per RFC 2385; parallel over candidates. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		MD5_CTX ctx;

		MD5_Init(&ctx);
		MD5_Update(&ctx, cur_salt->salt, cur_salt->length);
		MD5_Update(&ctx, saved_key[index], saved_len[index]);
		MD5_Final((unsigned char*)crypt_out[index], &ctx);
	}
	return count;
}

/* Quick reject: compare only the first 32 bits of each digest.
 * Without OpenMP MAX_KEYS_PER_CRYPT stays 1, so only index 0 exists and the
 * loop is compiled out. */
static int cmp_all(void *binary, int count)
{
	int index = 0;

#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (((uint32_t*)binary)[0] == crypt_out[index][0])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

/* saved_key rows are PLAINTEXT_LENGTH+1 bytes, so strncpy always leaves a
 * terminating NUL even for maximum-length keys. */
static void tcpmd5_set_key(char *key, int index)
{
	saved_len[index] = strlen(key);
	/* strncpy will pad with zeros, which is needed */
	strncpy(saved_key[index], key, sizeof(saved_key[0]));
}

static char *get_key(int index)
{
	return saved_key[index];
}

struct fmt_main fmt_tcpmd5 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		tcpmd5_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
move_particle_utility_pfem2.h
/* ============================================================================== KratosIncompressibleFluidApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi pooyan@cimne.upc.edu rrossi@cimne.upc.edu - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
============================================================================== */ // // Project Name: Kratos // Last Modified by: $Author: pbecker $ // Date: $Date: 2011-09-21 12:30:32 $ // Revision: $Revision: 1.0 $ // // #if !defined(KRATOS_MOVE_PARTICLE_UTILITY_PFEM2_INCLUDED) #define KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_INCLUDED // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/node.h" /// #include "includes/dof.h" #include "includes/variables.h" #include "includes/cfd_variables.h" #include "includes/deprecated_variables.h" #include "containers/array_1d.h" #include "containers/data_value_container.h" #include "includes/mesh.h" #include "utilities/math_utils.h" #include "processes/node_erase_process.h" /// #include "utilities/geometry_utilities.h" #include "includes/model_part.h" #include "spatial_containers/spatial_containers.h" #include "spatial_containers/bounding_box.h" #include "spatial_containers/cell.h" #include "spatial_containers/bins_dynamic_objects.h" #include "utilities/spatial_containers_configure.h" #include "geometries/line_2d_2.h" #include "geometries/triangle_2d_3.h" #include "geometries/triangle_3d_3.h" #include "geometries/point.h" #include "pfem_2_application.h" #include "pfem_particle_fluidonly.h" //#include "utilities/enrich_2d_2dofs.h" #include "utilities/enrichment_utilities.h" #include "utilities/openmp_utils.h" #include "time.h" //#include "processes/process.h" namespace Kratos { //this class is to be modified by the user to customize the interpolation process template< unsigned int TDim> class MoveParticleUtilityPFEM2 { public: typedef SpatialContainersConfigure<TDim> Configure; typedef typename Configure::PointType PointType; //typedef PointType::CoordinatesArrayType CoordinatesArrayType; typedef typename Configure::ContainerType ContainerType; //typedef Configure::PointerType PointerType; typedef typename 
Configure::IteratorType IteratorType; typedef typename Configure::ResultContainerType ResultContainerType; //typedef Configure::ResultPointerType ResultPointerType; typedef typename Configure::ResultIteratorType ResultIteratorType; typedef PointerVector< PFEM_Particle_Fluid, PFEM_Particle_Fluid*, std::vector<PFEM_Particle_Fluid*> > ParticlePointerVector; //typedef Configure::ContactPairType ContactPairType; //typedef Configure::ContainerContactType ContainerContactType; //typedef Configure::IteratorContactType IteratorContactType; //typedef Configure::PointerContactType PointerContactType; //typedef Configure::PointerTypeIterator PointerTypeIterator; KRATOS_CLASS_POINTER_DEFINITION(MoveParticleUtilityPFEM2); //template<unsigned int TDim> MoveParticleUtilityPFEM2(ModelPart& model_part, int maximum_number_of_particles) : mr_model_part(model_part) , mmaximum_number_of_particles(maximum_number_of_particles) { std::cout << "initializing moveparticle utility" << std::endl; Check(); //tools to move the domain, in case we are using a moving domain approach. mintialized_transfer_tool=false; mcalculation_domain_complete_displacement=ZeroVector(3); mcalculation_domain_added_displacement=ZeroVector(3); //storing water and air density and their inverses, just in case it is needed for the streamline integration ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); mDENSITY_AIR = CurrentProcessInfo[DENSITY_AIR]; mDENSITY_WATER = CurrentProcessInfo[DENSITY_WATER]; //mmaximum_number_of_particles = maximum_number_of_particles; //loop in elements to change their ID to their position in the array. Easier to get information later. //DO NOT PARALELIZE THIS! IT MUST BE SERIAL!!!!!!!!!!!!!!!!!!!!!! 
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; ielem->SetId(ii+1); } mlast_elem_id= (mr_model_part.ElementsEnd()-1)->Id(); int node_id=0; // we look for the smallest edge. could be used as a weighting function when going lagrangian->eulerian instead of traditional shape functions(method currently used) ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator pnode = inodebegin+ii; array_1d<double,3> position_node; double distance=0.0; position_node = pnode->Coordinates(); WeakPointerVector< Node<3> >& rneigh = pnode->GetValue(NEIGHBOUR_NODES); //we loop all the nodes to check all the edges const double number_of_neighbours = double(rneigh.size()); for( WeakPointerVector<Node<3> >::iterator inode = rneigh.begin(); inode!=rneigh.end(); inode++) { array_1d<double,3> position_difference; position_difference = inode->Coordinates() - position_node; double current_distance= sqrt(pow(position_difference[0],2)+pow(position_difference[1],2)+pow(position_difference[2],2)); //if (current_distance>distance) // distance=current_distance; distance += current_distance / number_of_neighbours; } //and we save the largest edge. 
pnode->FastGetSolutionStepValue(MEAN_SIZE)=distance; node_id=pnode->GetId(); } } mlast_node_id=node_id; //we also calculate the element mean size in the same way, for the courant number //also we set the right size to the LHS column for the pressure enrichments, in order to recover correctly the enrichment pressure vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition); //before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element. #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; double elem_size; array_1d<double,3> Edge(3,0.0); Edge = ielem->GetGeometry()[1].Coordinates() - ielem->GetGeometry()[0].Coordinates(); elem_size = Edge[0]*Edge[0]; for (unsigned int d = 1; d < TDim; d++) elem_size += Edge[d]*Edge[d]; for (unsigned int i = 2; i < (TDim+1); i++) for(unsigned int j = 0; j < i; j++) { Edge = ielem->GetGeometry()[i].Coordinates() - ielem->GetGeometry()[j].Coordinates(); double Length = Edge[0]*Edge[0]; for (unsigned int d = 1; d < TDim; d++) Length += Edge[d]*Edge[d]; if (Length < elem_size) elem_size = Length; } elem_size = sqrt(elem_size); ielem->SetValue(MEAN_SIZE, elem_size); //and the matrix column for the enrichments in the pressure. 
if (TDim==3) ielem->SetValue(ENRICH_LHS_ROW_3D, ZeroVector(4)); // { // Vector & lhs_enrich = ielem->GetValue(ENRICH_LHS_ROW_3D); // lhs_enrich.resize(4); // lhs_enrich=ZeroVector(4); // } else ielem->SetValue(ENRICH_LHS_ROW, ZeroVector(3)); //KRATOS_WATCH(mElemSize) } } //matrix containing the position of the 4/15/45 particles that we will seed at the beggining BoundedMatrix<double, 5*(1+TDim), 3 > pos; BoundedMatrix<double, 5*(1+TDim), (1+TDim) > N; int particle_id=0; mnelems = mr_model_part.Elements().size(); std::cout << "about to resize vectors" << std::endl; //setting the right size to the vector containing the particles assigned to each element //particles vector. this vector contains ALL the particles in the simulation. mparticles_vector.resize(mnelems*mmaximum_number_of_particles); //and this vector contains the current number of particles that are in each element (currently zero) mnumber_of_particles_in_elems.resize(mnelems); mnumber_of_particles_in_elems=ZeroVector(mnelems); //when moving the particles, an auxiliary vector is necessary (to store the previous number) mnumber_of_particles_in_elems_aux.resize(mnelems); //each element will have a list of pointers to all the particles that are inside. //this vector contains the pointers to the vector of (particle) pointers of each element. mpointers_to_particle_pointers_vectors.resize(mnelems); //int artz; //std::cin >> artz; int i_int=0; //careful! it's not the id, but the position inside the array! std::cout << "about to create particles" << std::endl; //now we seed: LOOP IN ELEMENTS //using loop index, DO NOT paralelize this! 
change lines : mparticles_in_elems_pointers((ii*mmaximum_number_of_particles)+mparticles_in_elems_integers(ii)) = pparticle; and the next one for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; (ielem->GetValue(FLUID_PARTICLE_POINTERS)) = ParticlePointerVector( mmaximum_number_of_particles*2);//, &firstparticle ); ParticlePointerVector& particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS)); //now we link the mpointers_to_particle_pointers_vectors to the corresponding element mpointers_to_particle_pointers_vectors(ii) = &particle_pointers; //now we resize the vector of particle pointers. it is double sized because we move the particles from an initial position (first half) to a final position (second half). //for(int j=0; j<(mmaximum_number_of_particles*2); j++) // particle_pointers.push_back(&firstparticle); int & number_of_particles = ielem->GetValue(NUMBER_OF_FLUID_PARTICLES); number_of_particles=0; //int & number_of_water_particles = ielem->GetValue(NUMBER_OF_WATER_PARTICLES); Geometry< Node<3> >& geom = ielem->GetGeometry(); //unsigned int elem_id = ielem->Id(); //mareas_vector[i_int]=CalculateArea(geom); UNUSED SO COMMENTED ComputeGaussPointPositions_initial(geom, pos, N); //we also have the standard (4), and 45 //now we seed the particles in the current element for (unsigned int j = 0; j < pos.size1(); j++) { ++particle_id; PFEM_Particle_Fluid& pparticle =mparticles_vector[particle_id-1]; pparticle.X()=pos(j,0); pparticle.Y()=pos(j,1); pparticle.Z()=pos(j,2); pparticle.GetEraseFlag()=false; array_1d<float, 3 > & vel = pparticle.GetVelocity(); float & distance= pparticle.GetDistance(); noalias(vel) = ZeroVector(3); distance=0.0; for (unsigned int k = 0; k < (TDim+1); k++) { noalias(vel) += (N(j, k) * geom[k].FastGetSolutionStepValue(VELOCITY)); distance += N(j, k) * geom[k].FastGetSolutionStepValue(DISTANCE); } if( ii % 100000 == 0) KRATOS_WATCH(particle_id); if 
                (distance<=0.0)
                {
                    //particle seeded in the water phase (negative distance)
                    distance=-1.0;
                }
                //else if(distance<2.0)
                //{
                //	distance=1.0;
                //}
                else
                {
                    //particle seeded in the air phase (positive distance)
                    distance=1.0;
                }
                //register the new particle in this element's pointer list and bump its counter
                particle_pointers(j) = &pparticle;
                number_of_particles++ ;
            }
            ++i_int;
        }

        bool nonzero_mesh_velocity = false;
        //seeing if we have to use the mesh_velocity or not
        for(ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode!=mr_model_part.NodesEnd(); inode++)
        {
            const array_1d<double, 3 > velocity = inode->FastGetSolutionStepValue(MESH_VELOCITY);
            for(unsigned int i = 0; i!=3; i++)
            {
                if (fabs(velocity[i])>1.0e-9)
                    nonzero_mesh_velocity=true;
            }
            if( nonzero_mesh_velocity==true)
                break; //one nonzero component is enough, stop scanning nodes
        }

        if ( nonzero_mesh_velocity==true)
            muse_mesh_velocity_to_convect = true; // if there is mesh velocity, then we have to take it into account when moving the particles
        else
            muse_mesh_velocity_to_convect = false; //otherwise, we can avoid reading the values since we know it is zero everywhere (to save time!)

        m_nparticles=particle_id; //we save the last particle created as the total number of particles we have. For the moment this is true.
        KRATOS_WATCH(m_nparticles);
        //KRATOS_WATCH(mlast_elem_id);
        mparticle_printing_tool_initialized=false;
        //std::cin >> artz;
    }

    /// Destructor: members release their own resources.
    ~MoveParticleUtilityPFEM2()
    {}

    /// Builds the dynamic-bins spatial search structure over the elements of mr_model_part.
    /// Must be run before any particle search/convection that queries mpBinsObjectDynamic.
    void MountBin()
    {
        KRATOS_TRY

        //copy the elements to a new container, as the list will
        //be shuffled during the construction of the tree
        ContainerType& rElements = mr_model_part.ElementsArray();
        IteratorType it_begin = rElements.begin();
        IteratorType it_end = rElements.end();
        //const int number_of_elem = rElements.size();
        typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end ) );
        paux.swap(mpBinsObjectDynamic); //the freshly built bins replace any previous structure
        //BinsObjectDynamic<Configure> mpBinsObjectDynamic(it_begin, it_end );
        std::cout << "finished mounting Bins" << std::endl;

        KRATOS_CATCH("")
    }

    //TOOL TO TRANSFER INFORMATION INITIALLY FROM ONE DOMAIN TO OTHER.
    /// One-time setup of the topographic-domain transfer tool.
    /// Stores the topographic model part pointer and the initial offset between domains,
    /// builds a bins search structure over the topographic elements and, if requested,
    /// overwrites the data of every live particle using the topographic domain.
    /// @param topographic_model_part  source domain supplying the data
    /// @param initial_domains_offset  translation between calculation and topographic domains
    /// @param ovewrite_particle_data  when true, particle data is replaced from the topographic domain
    void IntializeTransferTool(ModelPart* topographic_model_part, array_1d<double, 3 > initial_domains_offset, bool ovewrite_particle_data)
    //mtopographic_model_part(topographic_model_part)
    {
        KRATOS_TRY

        mintialized_transfer_tool=true;
        const unsigned int max_results = 1000;
        std::cout << "initializing transfer utility" << std::endl;
        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        mcalculation_domain_complete_displacement=initial_domains_offset;

        mtopographic_model_part_pointer = topographic_model_part; //copying the pointer.

        //CONSTRUCTING BIN STRUCTURE (over the topographic domain, to locate particles there)
        ContainerType& rElements_topo = mtopographic_model_part_pointer->ElementsArray();
        IteratorType it_begin_topo = rElements_topo.begin();
        IteratorType it_end_topo = rElements_topo.end();
        typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin_topo, it_end_topo ) );
        paux.swap(mpTopographicBinsObjectDynamic);

        std::cout << "Gathering Information From Topographic Domain for the first time" << std::endl;

        if(ovewrite_particle_data==false)
        {
            std::cout << "Not overwriting particle data (assuming correct initial conditions in calculation domain)" << std::endl;
        }
        else
        {
            std::cout << "Replacing particle information using the Topographic domain" << std::endl;

            const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET]; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
            //KRATOS_WATCH(offset)
            //(flag managed only by MoveParticles

            ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

            vector<unsigned int> element_partition;
            #ifdef _OPENMP
                int number_of_threads = omp_get_max_threads();
            #else
                int number_of_threads = 1;
            #endif
            OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

            #pragma omp parallel for
            for(int kkk=0; kkk<number_of_threads; kkk++)
            {
                //per-thread scratch buffer for the spatial searches
                ResultContainerType results(max_results);
                ResultIteratorType result_begin = results.begin();
                for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
                {
                    if (results.size()!=max_results)
                        results.resize(max_results);
                    //const int & elem_id = ielem->Id();
                    ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
                    Element::Pointer pelement(*it_begin_topo); //we have no idea in which element it might be from the topographic domain, so we just set it in the first element.
                    //Geometry<Node<3> >& geom = ielem->GetGeometry();
                    //array_1d<double,TDim+1> N;

                    ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));
                    int & number_of_particles_in_elem = ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
                    //std::cout << "elem " << ii << " with " << (unsigned int)number_of_particles_in_elem << " particles" << std::endl;
                    for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
                    {
                        //KRATOS_WATCH(iii)
                        if (iii>mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
                            break;

                        PFEM_Particle_Fluid & pparticle = element_particle_pointers[offset+iii];

                        bool erase_flag= pparticle.GetEraseFlag();
                        if (erase_flag==false)
                        {
                            //replace this particle's data with the values interpolated from the topographic domain
                            OverwriteParticleDataUsingTopographicDomain(pparticle,pelement,mcalculation_domain_complete_displacement,result_begin, max_results);
                        }
                    }
                }
            }
        }

        KRATOS_CATCH("")
    }

    //TOOL TO TRANSFER INFORMATION FROM ONE DOMAIN TO OTHER when necessary. (to be done)
    /// Reseeds under-populated elements using data interpolated from the topographic domain.
    /// Also accumulates the additional displacement between domains for this step.
    /// @param minimum_number_of_particles  elements with fewer particles than this get reseeded
    /// @param domains_added_displacement   extra offset added this step between the two domains
    /// @throws if IntializeTransferTool was never called
    void PreReseedUsingTopographicDomain(const int minimum_number_of_particles, array_1d<double, 3 > domains_added_displacement)
    //mtopographic_model_part(topographic_model_part)
    {
        KRATOS_TRY

        if(mintialized_transfer_tool==false)
            KRATOS_THROW_ERROR(std::logic_error, "TRANSFER TOOL NOT INITIALIZED!", "");
        const unsigned int max_results = 1000;
        std::cout << "executing transfer tool" << std::endl;
        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        mcalculation_domain_added_displacement = domains_added_displacement;
        mcalculation_domain_complete_displacement += domains_added_displacement;

        ContainerType& rElements_topo = mtopographic_model_part_pointer->ElementsArray();
        IteratorType it_begin_topo = rElements_topo.begin();

        const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET]; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
        //KRATOS_WATCH(offset)
        //(flag managed only by MoveParticles

        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

        vector<unsigned int> element_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            ResultContainerType results(max_results);
            ResultIteratorType result_begin = results.begin();
            Element::Pointer pelement(*it_begin_topo); //we have no idea in which element it might be from the topographic domain, so we just set it in the first element.
            BoundedMatrix<double, (TDim+1), 3 > pos;
            BoundedMatrix<double, (TDim+1) , (TDim+1) > N;
            unsigned int freeparticle=0; //we start with the first position in the particles array

            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                if (results.size()!=max_results)
                    results.resize(max_results);
                //const int & elem_id = ielem->Id();
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
                ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));
                int & number_of_particles_in_elem = ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);

                if (number_of_particles_in_elem<(minimum_number_of_particles))// && (ielem->GetGeometry())[0].Y()<0.10 )
                {
                    //KRATOS_WATCH("elem with little particles")
                    Geometry< Node<3> >& geom = ielem->GetGeometry();
                    ComputeGaussPointPositionsForPreReseed(geom, pos, N);
                    //double conductivity = ielem->GetProperties()[CONDUCTIVITY];
                    //KRATOS_WATCH(conductivity);
                    for (unsigned int j = 0; j < (pos.size1()); j++) //i am dropping the last one, the one in the middle of the element
                    {
                        //scan the global particle array for a slot whose particle was erased;
                        //the claim must be done inside a critical section since other threads scan too
                        bool keep_looking = true;
                        while(keep_looking)
                        {
                            if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                            {
                                #pragma omp critical
                                {
                                    //re-check under the lock: another thread may have claimed it meanwhile
                                    if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                                    {
                                        mparticles_vector[freeparticle].GetEraseFlag()=false;
                                        keep_looking=false;
                                    }
                                }
                                if (keep_looking==false)
                                    break;
                                /*
                                else if (freeparticle<(it_end_particle_model_part-1))
                                    freeparticle++;
                                */
                                else
                                    freeparticle++;
                                //break;
                            }
                            else
                            {
                                //if (freeparticle<(it_end_particle_model_part-1))
                                freeparticle++;
                                //else
                                //break; //we finished the list and we couldnt find a free space
                            }
                        }

                        //build the new particle at the Gauss-point position
                        PFEM_Particle_Fluid pparticle(pos(j,0),pos(j,1),pos(j,2));
                        /*
                        PFEM_Particle_Fluid & pparticle = mparticles_vector[freeparticle];
                        pparticle.X() = pos(j,0);
                        pparticle.Y() = pos(j,1);
                        pparticle.Z() = pos(j,2);
                        */
                        array_1d<double,TDim+1> aux2_N;
                        //sanity check: the seeded position must lie inside the element
                        bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N);
                        if (is_found==false)
                        {
                            KRATOS_WATCH(aux2_N);
                        }

                        pparticle.GetEraseFlag()=false;
                        //fill the particle data from the topographic domain
                        OverwriteParticleDataUsingTopographicDomain(pparticle,pelement,mcalculation_domain_complete_displacement,result_begin, max_results);
                        //and we copy it to the array:
                        mparticles_vector[freeparticle] = pparticle;
                        element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
                        number_of_particles_in_elem++;
                        //KRATOS_WATCH(number_of_particles_in_elem);
                        //KRATOS_WATCH(mparticles_vector[freeparticle])
                        //KRATOS_WATCH(geom)
                    }
                }
            }
        }

        KRATOS_CATCH("")
    }

    /// Computes, for every element, the mean (convective) velocity magnitude divided by the
    /// element MEAN_SIZE and stores it in VELOCITY_OVER_ELEM_SIZE (used for the Courant-type substepping).
    /// When muse_mesh_velocity_to_convect is true, MESH_VELOCITY is subtracted from VELOCITY node by node.
    void CalculateVelOverElemSize()
    {
        KRATOS_TRY

        //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();

        const double nodal_weight = 1.0/ (1.0 + double (TDim) );

        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

        vector<unsigned int> element_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        if (muse_mesh_velocity_to_convect==false)
        {
            #pragma omp parallel for
            for(int kkk=0; kkk<number_of_threads; kkk++)
            {
                for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
                {
                    ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
                    Geometry<Node<3> >& geom = ielem->GetGeometry();

                    //nodal average of VELOCITY over the element
                    array_1d<double, 3 >vector_mean_velocity=ZeroVector(3);
                    for (unsigned int i=0; i != (TDim+1) ; i++)
                        vector_mean_velocity += geom[i].FastGetSolutionStepValue(VELOCITY);
                    vector_mean_velocity *= nodal_weight;

                    const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) );
                    ielem->SetValue(VELOCITY_OVER_ELEM_SIZE, mean_velocity / ( ielem->GetValue(MEAN_SIZE) ) );
                }
            }
        }
        else
        {
            #pragma omp parallel for
            for(int kkk=0; kkk<number_of_threads; kkk++)
            {
                for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
                {
                    ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
                    Geometry<Node<3> >& geom = ielem->GetGeometry();

                    //nodal average of the relative (convective) velocity: VELOCITY - MESH_VELOCITY
                    array_1d<double, 3 >vector_mean_velocity=ZeroVector(3);
                    for (unsigned int i=0; i != (TDim+1) ; i++)
                        vector_mean_velocity += geom[i].FastGetSolutionStepValue(VELOCITY)-geom[i].FastGetSolutionStepValue(MESH_VELOCITY);
                    vector_mean_velocity *= nodal_weight;

                    const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) );
                    ielem->SetValue(VELOCITY_OVER_ELEM_SIZE, mean_velocity / ( ielem->GetValue(MEAN_SIZE) ) );
                }
            }
        }
        KRATOS_CATCH("")
    }

    //name self explained
    /// Restores prescribed (fixed) degrees of freedom after the particle stage.
    /// @param fully_reset_nodes  true: every fixed VELOCITY component and PRESSURE is copied back
    ///                           from the previous time step.
    ///                           false (fractional step): only the component of the velocity along
    ///                           the node NORMAL is removed on fixed-velocity nodes.
    void ResetBoundaryConditions(bool fully_reset_nodes)
    {
        KRATOS_TRY

        if (fully_reset_nodes)
        {
            ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
            vector<unsigned int> node_partition;
            #ifdef _OPENMP
                int number_of_threads = omp_get_max_threads();
            #else
                int number_of_threads = 1;
            #endif
            OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

            #pragma omp parallel for
            for(int kkk=0; kkk<number_of_threads; kkk++)
            {
                for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
                {
                    ModelPart::NodesContainerType::iterator inode = inodebegin+ii;

                    //fixed components simply recover the value of the previous time step
                    if (inode->IsFixed(VELOCITY_X))
                    {
                        inode->FastGetSolutionStepValue(VELOCITY_X)=inode->GetSolutionStepValue(VELOCITY_X,1);
                    }
                    if (inode->IsFixed(VELOCITY_Y))
                    {
                        inode->FastGetSolutionStepValue(VELOCITY_Y)=inode->GetSolutionStepValue(VELOCITY_Y,1);
                    }
                    if (TDim==3)
                        if (inode->IsFixed(VELOCITY_Z))
                        {
                            inode->FastGetSolutionStepValue(VELOCITY_Z)=inode->GetSolutionStepValue(VELOCITY_Z,1);
                        }

                    if (inode->IsFixed(PRESSURE))
                        inode->FastGetSolutionStepValue(PRESSURE)=inode->GetSolutionStepValue(PRESSURE,1);
                    //keep the pressure history consistent with the current value
                    inode->GetSolutionStepValue(PRESSURE,1)=inode->FastGetSolutionStepValue(PRESSURE);
                }
            }
        }
        else //for fractional step only!
        {
            ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
            vector<unsigned int> node_partition;
            #ifdef _OPENMP
                int number_of_threads = omp_get_max_threads();
            #else
                int number_of_threads = 1;
            #endif
            OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

            #pragma omp parallel for
            for(int kkk=0; kkk<number_of_threads; kkk++)
            {
                for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
                {
                    ModelPart::NodesContainerType::iterator inode = inodebegin+ii;

                    const array_1d<double, 3 > original_velocity = inode->FastGetSolutionStepValue(VELOCITY);

                    if (inode->IsFixed(VELOCITY_X) || inode->IsFixed(VELOCITY_Y) || inode->IsFixed(VELOCITY_Z) )
                    {
                        const array_1d<double, 3 > & normal = inode->FastGetSolutionStepValue(NORMAL);
                        const double normal_scalar_sq = normal[0]*normal[0]+normal[1]*normal[1]+normal[2]*normal[2];
                        const array_1d<double, 3 > normal_adimensionalized = normal / sqrt(normal_scalar_sq);

                        array_1d<double, 3 > & velocity = inode->FastGetSolutionStepValue(VELOCITY);

                        //componentwise projection of the velocity on the (unit) normal direction
                        array_1d<double, 3 > normal_velocity;
                        for (unsigned int j=0; j!=3; j++)
                            normal_velocity[j] = fabs(normal_adimensionalized[j])*original_velocity[j];

                        //remove the normal contribution only from the fixed components
                        if (inode->IsFixed(VELOCITY_X))
                        {
                            velocity[0] = original_velocity[0] - normal_velocity[0];
                        }
                        if (inode->IsFixed(VELOCITY_Y))
                        {
                            velocity[1] = original_velocity[1] - normal_velocity[1];
                        }
                        if (TDim==3)
                            if (inode->IsFixed(VELOCITY_Z))
                            {
                                velocity[2] = original_velocity[2] - normal_velocity[2];
                            }
                    }

                    if (inode->IsFixed(PRESSURE))
                        inode->FastGetSolutionStepValue(PRESSURE)=inode->GetSolutionStepValue(PRESSURE,1);
                }
            }
        }
        KRATOS_CATCH("")
    }

    //setting the normal component of the velocity to zero
    /// Slip boundary treatment: on IS_STRUCTURE nodes the velocity component along the node
    /// NORMAL is removed; on fully fixed (x and y) nodes the previous-step velocity is restored.
    void ResetBoundaryConditionsSlip()
    {
        KRATOS_TRY
        {
            ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
            vector<unsigned int> node_partition;
            #ifdef _OPENMP
                int number_of_threads = omp_get_max_threads();
            #else
                int number_of_threads = 1;
            #endif
            OpenMPUtils::CreatePartition(number_of_threads,
                mr_model_part.Nodes().size(), node_partition);

            #pragma omp parallel for
            for(int kkk=0; kkk<number_of_threads; kkk++)
            {
                for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
                {
                    ModelPart::NodesContainerType::iterator inode = inodebegin+ii;

                    if(inode->FastGetSolutionStepValue(IS_STRUCTURE)!=0.0)
                    {
                        array_1d<double, 3 >& velocity = inode->FastGetSolutionStepValue(VELOCITY);
                        const array_1d<double, 3 > & normal = inode->FastGetSolutionStepValue(NORMAL);
                        const double normal_scalar_sq = normal[0]*normal[0]+normal[1]*normal[1]+normal[2]*normal[2];
                        const array_1d<double, 3 > normal_adimensionalized = normal / sqrt(normal_scalar_sq);

                        //calculating the normal component of the velocity
                        array_1d<double, 3 > normal_velocity;
                        for (unsigned int j=0; j!=3; j++)
                            normal_velocity[j] = normal_adimensionalized[j]*velocity[j];

                        const double dot_prod = normal_velocity[0]*velocity[0] + normal_velocity[1]*velocity[1] + normal_velocity[2]*velocity[2];

                        //if the dot product of velocity * normal velocity is lower than zero, then they have opposite signs and we must invert the direction:
                        if (dot_prod<0.0)
                            normal_velocity*= -1.0;

                        velocity -= normal_velocity; //subtracting the normal component
                    }
                    else if (inode->IsFixed(VELOCITY_X) && inode->IsFixed(VELOCITY_Y) )
                    {
                        //fully prescribed node: recover the previous time step value
                        inode->FastGetSolutionStepValue(VELOCITY) = inode->GetSolutionStepValue(VELOCITY,1);
                    }
                }
            }
        }
        KRATOS_CATCH("")
    }

    /// Stores DELTA_VELOCITY = VELOCITY - PROJECTED_VELOCITY on every node
    /// (the correction to be transferred back to the particles).
    void CalculateDeltaVelocity()
    {
        KRATOS_TRY

        ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
        vector<unsigned int> node_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                inode->FastGetSolutionStepValue(DELTA_VELOCITY) = inode->FastGetSolutionStepValue(VELOCITY) - inode->FastGetSolutionStepValue(PROJECTED_VELOCITY) ;
            }
        }
        KRATOS_CATCH("")
    }

    /// Copies a nodal vector variable into its previous-time-step buffer for all nodes in rNodes.
    /// @param OriginVariable  3-component variable to copy (step 0 -> step 1)
    /// @param rNodes          nodes to process
    void CopyVectorVarToPreviousTimeStep(const Variable< array_1d<double, 3 > >& OriginVariable,
                                         ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY
        ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();
        vector<unsigned int> node_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                noalias(inode->GetSolutionStepValue(OriginVariable,1)) = inode->FastGetSolutionStepValue(OriginVariable);
            }
        }
        KRATOS_CATCH("")
    }

    /// Copies a nodal scalar variable into its previous-time-step buffer for all nodes in rNodes.
    /// @param OriginVariable  scalar variable to copy (step 0 -> step 1)
    /// @param rNodes          nodes to process
    void CopyScalarVarToPreviousTimeStep(const Variable<double>& OriginVariable,
                                         ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY
        ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();
        vector<unsigned int> node_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                inode->GetSolutionStepValue(OriginVariable,1) = inode->FastGetSolutionStepValue(OriginVariable);
            }
        }
        KRATOS_CATCH("")
    }

    //to move all the particles across the streamlines. heavy task!
    /// Convects every live particle along the streamlines of the current velocity field and
    /// re-registers it in the element where it lands. Particle pointers are double-buffered:
    /// this step reads from the half selected by WATER_PARTICLE_POINTERS_OFFSET and writes the
    /// destination element lists into the other half, then flips the flag.
    /// @param discriminate_streamlines  forwarded to MoveParticle (phase-dependent convection)
    void MoveParticles(const bool discriminate_streamlines) //,const bool pressure_gradient_integrate)
    {
        KRATOS_TRY

        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();

        const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET]; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
        //moveparticlesdiff reads from the pointers of one part (ie odd) and saves into the other part (ie even part)
        //since it is the only function in the whole procedure that does this, it must use alternatively one part and the other.
        //KRATOS_WATCH(offset)

        bool even_timestep;
        if (offset!=0)
            even_timestep=false;
        else
            even_timestep=true;

        const int post_offset = mmaximum_number_of_particles*int(even_timestep); //and we also save the offset to know the location in which we will save the pointers after we've moved the particles
        //KRATOS_WATCH(post_offset)

        double delta_t = CurrentProcessInfo[DELTA_TIME];

        const array_1d<double,3> gravity= CurrentProcessInfo[GRAVITY];

        array_1d<double,TDim+1> N;
        const unsigned int max_results = 10000;

        //double integration_distance= 2.0;

        max_nsubsteps = 10;
        max_substep_dt=delta_t/double(max_nsubsteps);

        vector<unsigned int> element_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

        //before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;

                int & number_of_particles = old_element->GetValue(NUMBER_OF_FLUID_PARTICLES);

                mnumber_of_particles_in_elems_aux(ii)=number_of_particles;
                mnumber_of_particles_in_elems(ii)=0;
                //we reset the local vectors for a faster access;
            }
        }

        bool nonzero_mesh_velocity = false;
        //seeing if we have to use the mesh_velocity or not
        for(ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode!=mr_model_part.NodesEnd(); inode++)
        {
            const array_1d<double, 3 > velocity = inode->FastGetSolutionStepValue(MESH_VELOCITY);
            for(unsigned int i = 0; i!=3; i++)
            {
                if (fabs(velocity[i])>1.0e-9)
                    nonzero_mesh_velocity=true;
            }
            if( nonzero_mesh_velocity==true)
                break; //one nonzero component is enough
        }

        if ( nonzero_mesh_velocity==true)
            muse_mesh_velocity_to_convect = true; // if there is mesh velocity, then we have to take it into account when moving the particles
        else
            muse_mesh_velocity_to_convect = false; //otherwise, we can avoid reading the values since we know it is zero everywhere (to save time!)

        std::cout << "convecting particles" << std::endl;
        //We move the particles across the fixed mesh and saving change data into them (using the function MoveParticle)
        const bool local_use_mesh_velocity_to_convect = muse_mesh_velocity_to_convect;

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            const array_1d<double,3> mesh_displacement = mcalculation_domain_added_displacement; //if it is a standard problem, displacements are zero and therefore nothing is added.

            //per-thread scratch structures for the searches along the trajectory
            ResultContainerType results(max_results);
            WeakPointerVector< Element > elements_in_trajectory;
            elements_in_trajectory.resize(20);

            for(unsigned int ielem=element_partition[kkk]; ielem<element_partition[kkk+1]; ielem++)
            {
                //for(unsigned int ielem=0; ielem<mr_model_part.Elements().size(); ielem++)
                //{
                ModelPart::ElementsContainerType::iterator old_element = ielembegin+ielem;
                const int old_element_id = old_element->Id();

                ParticlePointerVector& old_element_particle_pointers = *mpointers_to_particle_pointers_vectors(old_element_id-1);

                if ( (results.size()) !=max_results)
                    results.resize(max_results);

                unsigned int number_of_elements_in_trajectory=0; //excluding the origin one (current one, ielem)

                for(int ii=0; ii<(mnumber_of_particles_in_elems_aux(ielem)); ii++)
                {
                    PFEM_Particle_Fluid & pparticle = old_element_particle_pointers[offset+ii];

                    Element::Pointer pcurrent_element( *old_element.base() );
                    ResultIteratorType result_begin = results.begin();
                    bool & erase_flag=pparticle.GetEraseFlag();
                    if (erase_flag==false)
                    {
                        // N was removed from the arguments: the particle ALWAYS starts at a node,
                        // so the local coordinates of the final position are not needed here.
                        MoveParticle(pparticle,pcurrent_element,elements_in_trajectory,number_of_elements_in_trajectory,result_begin,max_results, mesh_displacement, discriminate_streamlines, local_use_mesh_velocity_to_convect);

                        const int current_element_id = pcurrent_element->Id();

                        int & number_of_particles_in_current_elem = mnumber_of_particles_in_elems(current_element_id-1);
                        //int & number_of_water_particles_in_current_elem = mnumber_of_water_particles_in_elems(current_element_id-1);

                        if (number_of_particles_in_current_elem<mmaximum_number_of_particles && erase_flag==false)
                        {
                            {
                                ParticlePointerVector& current_element_particle_pointers = *mpointers_to_particle_pointers_vectors(current_element_id-1);

                                #pragma omp critical
                                {
                                    //re-check under the lock: another thread may have filled the element meanwhile.
                                    // we cant go over this node, there's no room. otherwise we would be in the position of the first particle of the next element!!
                                    if (number_of_particles_in_current_elem<mmaximum_number_of_particles)
                                    {
                                        current_element_particle_pointers(post_offset+number_of_particles_in_current_elem) = &pparticle;
                                        number_of_particles_in_current_elem++ ;
                                        if (number_of_particles_in_current_elem>mmaximum_number_of_particles)
                                            KRATOS_WATCH("MAL");
                                    }
                                    else
                                        pparticle.GetEraseFlag()=true; //so we just delete it!
                                }
                            }
                        }
                        else
                            pparticle.GetEraseFlag()=true; //so we just delete it!
                    }
                }
            }
        }

        //now we pass info from the local vector to the elements:
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
                old_element->GetValue(NUMBER_OF_FLUID_PARTICLES) = mnumber_of_particles_in_elems(ii);
                //old_element->GetValue(NUMBER_OF_WATER_PARTICLES) = mnumber_of_water_particles_in_elems(ii);
            }
        }

        //after having changed everything we change the status of the modd_timestep flag:
        CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET] = post_offset;; //NOTE(review): stray second ';' kept verbatim (harmless empty statement)

        KRATOS_CATCH("")
    }

    /// Projects the particle (Lagrangian) data onto the Eulerian mesh nodes.
    void TransferLagrangianToEulerian() //explicit
    {
        KRATOS_TRY

        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        //const double delta_t =CurrentProcessInfo[DELTA_TIME];
        const double threshold= 0.0/(double(TDim)+1.0);

        std::cout << "projecting info to mesh" << std::endl;

        const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET]; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//KRATOS_WATCH(offset) //(flag managed only by MoveParticles //we must project data from the particles (lagrangian) into the eulerian mesh //ValuesVectorType eulerian_nodes_old_temperature; //int nnodes = mr_model_part.Nodes().size(); //array_1d<double,(n_nodes)> eulerian_nodes_sumweights; //we save data from previous time step of the eulerian mesh in case we must reuse it later cos no particle was found around the nodes //though we could've use a bigger buffer, to be changed later! //after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles. ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->FastGetSolutionStepValue(DISTANCE)=0.0; inode->FastGetSolutionStepValue(PROJECTED_VELOCITY)=ZeroVector(3); inode->FastGetSolutionStepValue(YP)=0.0; } } //adding contribution, loop on elements, since each element has stored the particles found inside of it vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition); ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; array_1d<double,3*(TDim+1)> nodes_positions; array_1d<double,3*(TDim+1)> nodes_addedvel = ZeroVector(3*(TDim+1)); array_1d<double,(TDim+1)> 
nodes_added_distance = ZeroVector((TDim+1)); array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1)); //array_1d<double,(TDim+1)> weighting_inverse_divisor; Geometry<Node<3> >& geom = ielem->GetGeometry(); for (int i=0 ; i!=(TDim+1) ; ++i) { nodes_positions[i*3+0]=geom[i].X(); nodes_positions[i*3+1]=geom[i].Y(); nodes_positions[i*3+2]=geom[i].Z(); //weighting_inverse_divisor[i]=1.0/((geom[i].FastGetSolutionStepValue(MEAN_SIZE))*1.01); } ///KRATOS_WATCH(ielem->Id()) ///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size()); int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_FLUID_PARTICLES); ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS)); for (int iii=0; iii<number_of_particles_in_elem ; iii++ ) { if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop! break; PFEM_Particle_Fluid & pparticle = element_particle_pointers[offset+iii]; if (pparticle.GetEraseFlag()==false) { array_1d<double,3> & position = pparticle.Coordinates(); const array_1d<float,3>& velocity = pparticle.GetVelocity(); const float& particle_distance = pparticle.GetDistance(); // -1 if water, +1 if air array_1d<double,TDim+1> N; bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N); if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element. 
{ KRATOS_WATCH(N); for (int j=0 ; j!=(TDim+1); j++) if (N[j]<0.0 && N[j]> -1e-5) N[j]=1e-10; } for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element { //double sq_dist = 0; //these lines for a weighting function based on the distance (or square distance) from the node insteadof the shape functions //for (int k=0 ; k!=(TDim); k++) sq_dist += ((position[k] - nodes_positions[j*3+k])*(position[k] - nodes_positions[j*3+k])); //double weight = (1.0 - (sqrt(sq_dist)*weighting_inverse_divisor[j] ) ); double weight=N(j); //weight=N(j)*N(j)*N(j); if (weight<threshold) weight=1e-10; if (weight<0.0) {KRATOS_WATCH(weight)}//;weight=0.0;KRATOS_WATCH(velocity);KRATOS_WATCH(N);KRATOS_WATCH(number_of_particles_in_elem);}//{KRATOS_WATCH(weight); KRATOS_WATCH(geom[j].Id()); KRATOS_WATCH(position);} else { nodes_addedweights[j]+= weight; //nodes_addedtemp[j] += weight * particle_temp; nodes_added_distance[j] += weight*particle_distance; //nodes_added_oxygen[j] += weight*particle_oxygen; for (int k=0 ; k!=(TDim); k++) //x,y,(z) { nodes_addedvel[j*3+k] += weight * double(velocity[k]); } }// } } } for (int i=0 ; i!=(TDim+1) ; ++i) { geom[i].SetLock(); geom[i].FastGetSolutionStepValue(DISTANCE) +=nodes_added_distance[i]; geom[i].FastGetSolutionStepValue(PROJECTED_VELOCITY_X) +=nodes_addedvel[3*i+0]; geom[i].FastGetSolutionStepValue(PROJECTED_VELOCITY_Y) +=nodes_addedvel[3*i+1]; geom[i].FastGetSolutionStepValue(PROJECTED_VELOCITY_Z) +=nodes_addedvel[3*i+2]; //we are updating info to the previous time step!! 
geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i]; geom[i].UnSetLock(); } } } #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; double sum_weights = inode->FastGetSolutionStepValue(YP); if (sum_weights>0.00001) { //inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT)=(inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT))/sum_weights; //resetting the temperature double & dist = inode->FastGetSolutionStepValue(DISTANCE); dist /=sum_weights; //resetting the density inode->FastGetSolutionStepValue(PROJECTED_VELOCITY)=(inode->FastGetSolutionStepValue(PROJECTED_VELOCITY))/sum_weights; //resetting the velocity } else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case.. { inode->FastGetSolutionStepValue(DISTANCE)=3.0; //resetting the temperature //inode->FastGetSolutionStepValue(DISTANCE)=inode->GetSolutionStepValue(DISTANCE,1); //resetting the temperature inode->FastGetSolutionStepValue(PROJECTED_VELOCITY)=inode->GetSolutionStepValue(VELOCITY,1); } ///finally, if there was an inlet that had a fixed position for the distance function, that has to remain unchanged: if (inode->IsFixed(DISTANCE)) inode->FastGetSolutionStepValue(DISTANCE)=inode->GetSolutionStepValue(DISTANCE,1); } } KRATOS_CATCH("") } void TransferLagrangianToEulerianImp() //semi implicit { KRATOS_TRY ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); std::cout << "projecting info to mesh (semi implicit)" << std::endl; const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET]; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones. 
    //KRATOS_WATCH(offset)
    //(flag managed only by MoveParticles
    //we must project data from the particles (lagrangian) into the eulerian mesh
    //ValuesVectorType eulerian_nodes_old_temperature;
    //int nnodes = mr_model_part.Nodes().size();
    //array_1d<double,(n_nodes)> eulerian_nodes_sumweights;

    //we save data from previous time step of the eulerian mesh in case we must reuse it later cos no particle was found around the nodes
    //though we could've use a bigger buffer, to be changed later!
    //after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles.
    ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
    vector<unsigned int> node_partition;
    #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
    #else
        int number_of_threads = 1;
    #endif
    OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

    // Reset the nodal projection variables before accumulating.
    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
        {
            ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
            inode->FastGetSolutionStepValue(DISTANCE)=0.0;
            inode->FastGetSolutionStepValue(PROJECTED_VELOCITY)=ZeroVector(3);
            inode->FastGetSolutionStepValue(YP)=0.0;
        }
    }

    //adding contribution, loop on elements, since each element has stored the particles found inside of it
    vector<unsigned int> element_partition;
    OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

    ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        //creating a matrix for each of the problems.
        BoundedMatrix<double, TDim+1 , TDim+1 > mass_matrix; // WE ONLY NEED ONE! they are the same for all the variables!
        //_x,mass_matrix_y,mass_matrix_z,mass_matrix_d; //mass matrices for the projected vel (x,y,z) and the distance
        array_1d<double,(TDim+1)> rhs_x,rhs_y,rhs_z,rhs_d;

        array_1d<double,3*(TDim+1)> nodes_positions;
        array_1d<double,3*(TDim+1)> nodes_addedvel = ZeroVector(3*(TDim+1));
        array_1d<double,(TDim+1)> nodes_added_distance = ZeroVector((TDim+1));
        array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1));

        for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;

            nodes_addedvel = ZeroVector(3*(TDim+1)); //resetting vectors
            nodes_added_distance = ZeroVector((TDim+1)); //resetting vectors
            nodes_addedweights = ZeroVector((TDim+1)); //resetting vectors
            mass_matrix = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices. WE ONLY NEED ONE! they are the same for all the variable. only the rhs changes.
            //mass_matrix_y = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
            //mass_matrix_z = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
            //mass_matrix_d = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
            rhs_x = ZeroVector((TDim+1)); //resetting vectors
            rhs_y = ZeroVector((TDim+1)); //resetting vectors
            rhs_z = ZeroVector((TDim+1)); //resetting vectors
            rhs_d = ZeroVector((TDim+1)); //resetting vectors

            Geometry<Node<3> >& geom = ielem->GetGeometry();
            const double elem_volume = geom.Area();

            for (int i=0 ; i!=(TDim+1) ; ++i) //saving the nodal positions for faster access
            {
                nodes_positions[i*3+0]=geom[i].X();
                nodes_positions[i*3+1]=geom[i].Y();
                nodes_positions[i*3+2]=geom[i].Z();
            }
            ///KRATOS_WATCH(ielem->Id())
            ///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size());

            int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
            ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));

            for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
            {
                if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
                    break;

                PFEM_Particle_Fluid & pparticle = element_particle_pointers[offset+iii];

                if (pparticle.GetEraseFlag()==false)
                {
                    array_1d<double,3> & position = pparticle.Coordinates();
                    const array_1d<float,3>& velocity = pparticle.GetVelocity();
                    const float& particle_distance = pparticle.GetDistance();  // -1 if water, +1 if air
                    array_1d<double,TDim+1> N;
                    bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
                    if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element.
                    {
                        KRATOS_WATCH(N);
                        for (int j=0 ; j!=(TDim+1); j++)
                            if (N[j]<0.0 && N[j]> -1e-5)
                                N[j]=1e-10;
                    }

                    for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
                    {
                        double weight=N(j);
                        for (int k=0 ; k!=(TDim+1); k++) //building the mass matrix
                            mass_matrix(j,k) += weight*N(k);

                        rhs_x[j] += weight * double(velocity[0]);
                        rhs_y[j] += weight * double(velocity[1]);
                        rhs_z[j] += weight * double(velocity[2]);
                        rhs_d[j] += weight * double(particle_distance);

                        //adding also a part with the lumped mass matrix to reduce overshoots and undershoots
                        if(true)
                        {
                            double this_particle_weight = weight*elem_volume/(double(number_of_particles_in_elem))*0.1; //can be increased or reduced to change the lumped mass contrubtion
                            nodes_addedweights[j]+= this_particle_weight;
                            nodes_added_distance[j] += this_particle_weight*particle_distance;
                            for (int k=0 ; k!=(TDim); k++) //x,y,(z)
                            {
                                nodes_addedvel[j*3+k] += this_particle_weight * double(velocity[k]);
                            }
                        }
                    }
                }
            }

            //now we invert the matrix
            BoundedMatrix<double, TDim+1 , TDim+1 > inverse_mass_matrix=ZeroMatrix(TDim+1 , TDim+1);
            if(TDim==3)
                InvertMatrix( mass_matrix, inverse_mass_matrix);
            else
                InvertMatrix3x3( mass_matrix, inverse_mass_matrix);
            //and now compute the elemental contribution to the gobal system:
            if(number_of_particles_in_elem>(TDim*3)) //otherwise it's impossible to define a correctly the gradients, therefore the results inside the element are useless.
            {
                for (int i=0 ; i!=(TDim+1); i++)
                {
                    for (int j=0 ; j!=(TDim+1); j++)
                    {
                        nodes_addedvel[3*i+0] += inverse_mass_matrix(i,j)*rhs_x[j]*elem_volume*(1.0/(double(1+TDim)));
                        nodes_addedvel[3*i+1] += inverse_mass_matrix(i,j)*rhs_y[j]*elem_volume*(1.0/(double(1+TDim)));
                        nodes_addedvel[3*i+2] += inverse_mass_matrix(i,j)*rhs_z[j]*elem_volume*(1.0/(double(1+TDim)));
                        nodes_added_distance[i] += inverse_mass_matrix(i,j)*rhs_d[j]*elem_volume*(1.0/(double(1+TDim)));
                    }
                }
                //and also to the mass matrix. LUMPED (but for the contribution of the grandient at elemental level.
                for (int i=0 ; i!=(TDim+1); i++)
                    nodes_addedweights[i] += elem_volume*(1.0/(double(1+TDim)));
            }

            // Scatter the elemental result to the shared nodes under per-node locks.
            for (int i=0 ; i!=(TDim+1) ; ++i)
            {
                geom[i].SetLock();
                geom[i].FastGetSolutionStepValue(DISTANCE) +=nodes_added_distance[i];
                geom[i].FastGetSolutionStepValue(PROJECTED_VELOCITY_X) +=nodes_addedvel[3*i+0];
                geom[i].FastGetSolutionStepValue(PROJECTED_VELOCITY_Y) +=nodes_addedvel[3*i+1];
                geom[i].FastGetSolutionStepValue(PROJECTED_VELOCITY_Z) +=nodes_addedvel[3*i+2];  //we are updating info to the previous time step!!
                geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i];
                geom[i].UnSetLock();
            }
        }
    }

    // Normalize the nodal sums by the accumulated weight (YP).
    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
        {
            ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
            double sum_weights = inode->FastGetSolutionStepValue(YP);
            if (sum_weights>0.00001)
            {
                //inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT)=(inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT))/sum_weights; //resetting the temperature
                double & dist = inode->FastGetSolutionStepValue(DISTANCE);
                dist /=sum_weights; //resetting the density
                inode->FastGetSolutionStepValue(PROJECTED_VELOCITY)=(inode->FastGetSolutionStepValue(PROJECTED_VELOCITY))/sum_weights; //resetting the velocity
            }
            else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case..
            {
                inode->FastGetSolutionStepValue(DISTANCE)=3.0; //resetting the temperature
                //inode->FastGetSolutionStepValue(DISTANCE)=inode->GetSolutionStepValue(DISTANCE,1); //resetting the temperature
                inode->FastGetSolutionStepValue(PROJECTED_VELOCITY)=inode->GetSolutionStepValue(VELOCITY,1);
            }

            ///finally, if there was an inlet that had a fixed position for the distance function, that has to remain unchanged:
            if (inode->IsFixed(DISTANCE))
                inode->FastGetSolutionStepValue(DISTANCE)=inode->GetSolutionStepValue(DISTANCE,1);
        }
    }

    KRATOS_CATCH("")
}

// Updates the velocity of every (non-erased) particle from the nodal velocity
// increment, without moving the particles themselves.
void AccelerateParticlesWithoutMovingUsingDeltaVelocity()
{
    KRATOS_TRY
    //std::cout << "updating particles" << std::endl;
    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();

    const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET]; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
    //(flag managed only by MoveParticles
    //KRATOS_WATCH(offset)
    ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
    vector<unsigned int> element_partition;
    #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
    #else
        int number_of_threads = 1;
    #endif
    OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
        {
            //const int & elem_id = ielem->Id();
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            Element::Pointer pelement(*ielem.base());
            Geometry<Node<3> >& geom = ielem->GetGeometry();

            ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));
            int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
            //std::cout << "elem " << ii << " with " << (unsigned int)number_of_particles_in_elem << " particles" << std::endl;
            for (int iii=0;
iii<number_of_particles_in_elem ; iii++ )
            {
                //KRATOS_WATCH(iii)
                if (iii>mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
                    break;

                PFEM_Particle_Fluid & pparticle = element_particle_pointers[offset+iii];

                bool erase_flag= pparticle.GetEraseFlag();
                if (erase_flag==false)
                {
                    AccelerateParticleUsingDeltaVelocity(pparticle,pelement,geom); //'lite' version, we pass by reference the geometry, so much cheaper
                }
            }
        }
    }
    KRATOS_CATCH("")
}

//**************************************************************************************************************
//**************************************************************************************************************

// Appends "candidate" to the weak-pointer vector only if no entry with the same Id
// is already present (linear search by Id).
template< class TDataType >
void AddUniqueWeakPointer (WeakPointerVector< TDataType >& v, const typename TDataType::WeakPointer candidate)
{
    typename WeakPointerVector< TDataType >::iterator i = v.begin();
    typename WeakPointerVector< TDataType >::iterator endit = v.end();
    while ( i != endit && (i)->Id() != (candidate.lock())->Id())
    {
        i++;
    }
    if( i == endit )
    {
        v.push_back(candidate);
    }
}

//**************************************************************************************************************
//**************************************************************************************************************

// Adds new particles to elements whose particle count dropped below
// "minimum_number_of_particles": the new particles are seeded at interior
// Gauss-point positions and convected backwards (inverse way) to pick up their data.
void PreReseed(int minimum_number_of_particles)
{
    KRATOS_TRY

    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET];
    const int max_results = 1000;

    //tools for the paralelization
    unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
    vector<unsigned int> elem_partition;
    int number_of_rows=mr_model_part.Elements().size();
    elem_partition.resize(number_of_threads + 1);
    int elem_partition_size = number_of_rows / number_of_threads;
    elem_partition[0] = 0;
    elem_partition[number_of_threads] = number_of_rows;
    //KRATOS_WATCH(elem_partition_size);
    for (unsigned int i = 1; i < number_of_threads; i++)
        elem_partition[i] = elem_partition[i - 1] + elem_partition_size;

    const bool local_use_mesh_velocity_to_convect = muse_mesh_velocity_to_convect;

    #pragma omp parallel firstprivate(elem_partition)
    {
        ResultContainerType results(max_results);
        int k = OpenMPUtils::ThisThread();
        ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() + elem_partition[k];
        ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ;
        //ModelPart::NodesContainerType local_list=aux[k];
        //PointerVectorSet<PFEM_Particle_Fluid, IndexedObject> & list=aux[k];
        //KRATOS_WATCH(k);
        BoundedMatrix<double, (TDim+1), 3 > pos;
        BoundedMatrix<double, (TDim+1) , (TDim+1) > N;
        unsigned int freeparticle=0; //we start with the first position in the particles array

        //int local_id=1;
        for (ModelPart::ElementsContainerType::iterator ielem = it_begin; ielem != it_end; ielem++)
        {
            results.resize(max_results);
            //const int & elem_id = ielem->Id();
            ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));
            int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
            if (number_of_particles_in_elem<(minimum_number_of_particles))// && (ielem->GetGeometry())[0].Y()<0.10 )
            {
                //KRATOS_WATCH("elem with little particles")
                Geometry< Node<3> >& geom = ielem->GetGeometry();
                ComputeGaussPointPositionsForPreReseed(geom, pos, N);
                //double conductivity = ielem->GetProperties()[CONDUCTIVITY];
                //KRATOS_WATCH(conductivity);
                for (unsigned int j = 0; j < (pos.size1()); j++) //i am dropping the last one, the one in the middle of the element
                {
                    // Find a free slot (an erased particle) in the global particle array.
                    // The erase flag is re-checked inside the critical section so two
                    // threads cannot claim the same slot.
                    bool keep_looking = true;
                    while(keep_looking)
                    {
                        if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                        {
                            #pragma omp critical
                            {
                                if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                                {
                                    mparticles_vector[freeparticle].GetEraseFlag()=false;
                                    keep_looking=false;
                                }
                            }
                            if (keep_looking==false)
                                break;
                            else
                                freeparticle++;
                        }
                        else
                        {
                            freeparticle++;
                        }
                    }

                    PFEM_Particle_Fluid pparticle(pos(j,0),pos(j,1),pos(j,2));

                    array_1d<double,TDim+1>aux2_N;
                    bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N);
                    if (is_found==false)
                    {
                        KRATOS_WATCH(aux2_N);
                    }

                    pparticle.GetEraseFlag()=false;

                    ResultIteratorType result_begin = results.begin();
                    Element::Pointer pelement( *ielem.base() );
                    MoveParticle_inverse_way(pparticle, pelement, result_begin, max_results, local_use_mesh_velocity_to_convect);

                    //and we copy it to the array:
                    mparticles_vector[freeparticle] = pparticle;
                    element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
                    pparticle.GetEraseFlag()=false;
                    number_of_particles_in_elem++;
                }
            }
        }
    }
    KRATOS_CATCH("")
}

//**************************************************************************************************************
//**************************************************************************************************************

// Reseeds under-populated elements after convection ("pooyan's way"): seeds
// 3+2*TDim particles per element and decides which of them are water/air from the
// nodal distance field, biased by a (clamped) mass-correction factor.
void PostReseed(int minimum_number_of_particles, double mass_correction_factor ) //pooyan's way
{
    KRATOS_TRY

    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET];

    // Clamp the correction factor to [-0.5, 0.5].
    if (mass_correction_factor>0.5) mass_correction_factor=0.5;
    if (mass_correction_factor<-0.5) mass_correction_factor=-0.5;
    //mass_correction_factor=0.0;
    //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    //const double delta_t = CurrentProcessInfo[DELTA_TIME];
    //array_1d<double,3> & gravity= CurrentProcessInfo[GRAVITY];
    //const int max_results = 1000;
    const double threshold = mass_correction_factor*0.5;

    //TOOLS FOR THE PARALELIZATION
    //int last_id= (mr_linea_model_part.NodesEnd()-1)->Id();
    unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
    //KRATOS_WATCH(number_of_threads);
    vector<unsigned int> elem_partition;
    int number_of_rows=mr_model_part.Elements().size();
    //KRATOS_WATCH(number_of_threads);
    //KRATOS_THROW_ERROR(std::logic_error, "Add ----NODAL_H---- variable!!!!!! ERROR", "");
    elem_partition.resize(number_of_threads + 1);
    int elem_partition_size = number_of_rows / number_of_threads;
    elem_partition[0] = 0;
    elem_partition[number_of_threads] = number_of_rows;
    //KRATOS_WATCH(elem_partition_size);
    for (unsigned int i = 1; i < number_of_threads; i++)
        elem_partition[i] = elem_partition[i - 1] + elem_partition_size;

    //typedef Node < 3 > PointType;
    //std::vector<ModelPart::NodesContainerType> aux;// aux;
    //aux.resize(number_of_threads);

    //ModelPart::NodesContainerType::iterator it_begin_particle_model_part = mr_linea_model_part.NodesBegin();
    //ModelPart::NodesContainerType::iterator it_end_particle_model_part = mr_linea_model_part.NodesEnd();

    #pragma omp parallel firstprivate(elem_partition) // firstprivate(results)//we will add the nodes in different parts of aux and later assemple everything toghether, remaming particles ids to get consecutive ids
    {
        unsigned int reused_particles=0;

        unsigned int freeparticle = 0; //we start by the first position;

        int k = OpenMPUtils::ThisThread();
        ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() + elem_partition[k];
        ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ;

        BoundedMatrix<double, (3+2*TDim), 3 > pos; //7 particles (2D) or 9 particles (3D)
        BoundedMatrix<double, (3+2*TDim), (TDim+1) > N;

        array_1d<double, 3 > vel_complete, vel_without_air_nodes;
        double sum_Ns_without_air_nodes;
        double mesh_distance;

        array_1d<double, (3+2*TDim) > distances;
        array_1d<int, (3+2*TDim) > positions;
        array_1d<bool, (3+2*TDim) > is_water_particle; //for both

        unsigned int number_of_reseeded_particles;
        //unsigned int number_of_water_reseeded_particles;

        //array_1d<double, 3 > nodes_distances;

        //int local_id=1;
        for (ModelPart::ElementsContainerType::iterator ielem = it_begin; ielem != it_end; ielem++)
        {
            //results.resize(max_results);

            int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
            ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));
            Geometry< Node<3> >& geom = ielem->GetGeometry();

            if ( (number_of_particles_in_elem<(minimum_number_of_particles)))// && (geom[0].Y()<0.10) ) || (number_of_water_particles_in_elem>2 && number_of_particles_in_elem<(minimum_number_of_particles) ) )
            {
                //bool reseed_more=false;
                number_of_reseeded_particles=0;

                //reseed_more=true;
                number_of_reseeded_particles= 3+2*TDim;
                ComputeGaussPointPositionsForPostReseed(geom, pos, N);

                distances = ZeroVector(3+2*TDim);

                bool has_water_node=false;
                bool has_air_node=false;
                double mean_element_distance = 0.0;

                for (unsigned int j = 0; j < (TDim+1); j++)
                {
                    mean_element_distance += (1.0/double(TDim+1))*(geom[j].FastGetSolutionStepValue(DISTANCE));
                    if ((geom[j].FastGetSolutionStepValue(DISTANCE))<0.0)
                        has_water_node=true;
                    else
                        has_air_node=true;
                }

                //first we check the particle distance according to the nodal values
                for (unsigned int j = 0; j < number_of_reseeded_particles; j++) //first we order particles
                {
                    positions[j]=j+1; //just creating a vector from 1 to 7 or whathever our lenght is (7 for 2d, 9 for 3d)
                    for (unsigned int l = 0; l < (TDim+1); l++)
                    {
                        distances[j] += N(j, l) * geom[l].FastGetSolutionStepValue(DISTANCE);
                    }
                }

                if ( (has_air_node && has_water_node) ) //for slit elements we use the distance function
                {
                    for (unsigned int j = 0; j < number_of_reseeded_particles ; j++) //first we order particles
                    {
                        if (distances[j]>threshold)
                            is_water_particle[j]=false;
                        else
                            is_water_particle[j]=true;
                    }
                }
                else if (has_air_node) // all nodal distances are non-negative (pure air by nodal values)
                {
                    double water_fraction = 0.5 - 0.5*(mean_element_distance);
                    if (water_fraction>0.9 && mass_correction_factor<0.0) //to avoid seeding air particles when we are in a pure water element
                        mass_correction_factor = 0.0;
                    unsigned int number_of_water_reseeded_particles = double(number_of_reseeded_particles)*(1.01+mass_correction_factor*1.0)*water_fraction;

                    BubbleSort(distances, positions, number_of_reseeded_particles);
                    //ok. now we have the particles ordered from the "watermost" to "airmost". therefore we will fill the water particles and later the air ones using that order
                    for (unsigned int j = 0; j < number_of_reseeded_particles ; j++) //first we order particles
                    {
                        int array_position = positions[j]-1;
                        if (array_position>3 && number_of_reseeded_particles==4)
                        {
                            KRATOS_WATCH("error in reseeding")
                        }
                        if ( (j+1) <= number_of_water_reseeded_particles ) //means it is a water particle
                            is_water_particle[array_position]=true;
                        else
                            is_water_particle[array_position]=false;
                    }
                }
                else //only water particles
                {
                    for (unsigned int j = 0; j < number_of_reseeded_particles ; j++) //first we order particles
                        is_water_particle[j]=true;
                }

                bool fix_distance = false;
                unsigned int node_with_fixed_distance = 0;
                for (unsigned int j = 0; j < (TDim+1) ; j++) //we go over the 3/4 nodes:
                {
                    if ((geom[j].IsFixed(DISTANCE)))
                    {
                        fix_distance = true;
                        node_with_fixed_distance = j;
                    }
                }

                // so now if the 3 were fixed, we assign the sign of the first node to all the particles:
                if (fix_distance)
                {
                    bool is_water_for_all_particles=true;
                    if ((geom[node_with_fixed_distance].FastGetSolutionStepValue(DISTANCE))>0.0)
                        is_water_for_all_particles=false;
                    for (unsigned int j = 0; j < number_of_reseeded_particles ; j++) //first we order particles
                        is_water_particle[j]=is_water_for_all_particles;
                }

                for (unsigned int j = 0; j < number_of_reseeded_particles; j++)
                {
                    //now we have to find an empty space ( a particle that was about to be deleted) in the particles model part. once found. there will be our renewed particle:
                    bool keep_looking = true;
                    while(keep_looking)
                    {
                        if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                        {
                            #pragma omp critical
                            {
                                if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                                {
                                    mparticles_vector[freeparticle].GetEraseFlag()=false;
                                    keep_looking=false;
                                }
                            }
                            if (keep_looking==false)
                                break;
                            else
                                freeparticle++;
                        }
                        else
                        {
                            freeparticle++;
                        }
                    }

                    PFEM_Particle_Fluid pparticle(pos(j,0),pos(j,1),pos(j,2));

                    array_1d<float, 3 > & vel = pparticle.GetVelocity();
                    float& distance= pparticle.GetDistance();

                    array_1d<double,TDim+1>aux_N;
                    bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux_N);
                    if (is_found==false)
                    {
                        KRATOS_WATCH(aux_N);
                        KRATOS_WATCH(j)
                        KRATOS_WATCH(ielem->Id())
                    }

                    noalias(vel_complete)=ZeroVector(3);
                    noalias(vel_without_air_nodes)=ZeroVector(3);
                    sum_Ns_without_air_nodes=0.0;

                    noalias(vel) = ZeroVector(3);
                    distance=0.0;
                    mesh_distance = 0.0;
                    //oxygen = 0.0;

                    for (unsigned int l = 0; l < (TDim+1); l++)
                    {
                        noalias(vel_complete) += N(j, l) * geom[l].FastGetSolutionStepValue(VELOCITY);
                        mesh_distance += N(j,l) * geom[l].FastGetSolutionStepValue(DISTANCE);
                        if ((geom[l].FastGetSolutionStepValue(DISTANCE))<0.0)
                        {
                            sum_Ns_without_air_nodes+=N(j, l);
                            noalias(vel_without_air_nodes) += N(j, l) * geom[l].FastGetSolutionStepValue(VELOCITY);
                        }
                    }

                    ///COMMENT TO GET A CONTINOUS DISTANCE FUNCTION FIELD
                    if (is_water_particle[j])
                    {
                        distance=-1.0;
                    }
                    else
                    {
                        //if (mesh_distance<2.0)
                            distance=1.0;
                        //else
                        //	distance=3.0;
                    }

                    // Water particles take the velocity interpolated from water nodes only (when available).
                    if (distance<0.0 && sum_Ns_without_air_nodes>0.01)
                        vel = vel_without_air_nodes / sum_Ns_without_air_nodes ;
                    else
                        vel = vel_complete;

                    pparticle.GetEraseFlag()=false;

                    mparticles_vector[freeparticle]=pparticle;
                    element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
                    number_of_particles_in_elem++;

                    // NOTE(review): keep_looking is always false here (the search loop above only
                    // exits after clearing it), so this throw looks unreachable — confirm.
                    if (keep_looking)
                    {
                        KRATOS_THROW_ERROR(std::logic_error, "FINISHED THE LIST AND COULDNT FIND A FREE CELL FOR THE NEW PARTICLE!", "");
                    }
                    else
                    {
                        reused_particles++;
                    }
                }
            }
        }
    }
KRATOS_CATCH("") } void ExecuteParticlesPritingTool( ModelPart& lagrangian_model_part, int input_filter_factor ) { KRATOS_TRY //mfilter_factor; //we will only print one out of every "filter_factor" particles of the total particle list if(mparticle_printing_tool_initialized==false) { mfilter_factor=input_filter_factor; if(lagrangian_model_part.NodesBegin()-lagrangian_model_part.NodesEnd()>0) KRATOS_THROW_ERROR(std::logic_error, "AN EMPTY MODEL PART IS REQUIRED FOR THE PRINTING OF PARTICLES", ""); lagrangian_model_part.AddNodalSolutionStepVariable(VELOCITY); lagrangian_model_part.AddNodalSolutionStepVariable(DISPLACEMENT); lagrangian_model_part.AddNodalSolutionStepVariable(DISTANCE); for (unsigned int i=0; i!=((mmaximum_number_of_particles*mnelems)/mfilter_factor)+mfilter_factor; i++) { Node < 3 > ::Pointer pnode = lagrangian_model_part.CreateNewNode( i+mlast_node_id+1 , 0.0, 0.0, 0.0); //recordar que es el nueevo model part!! //pnode->SetBufferSize(mr_model_part.NodesBegin()->GetBufferSize()); pnode->SetBufferSize(1); } mparticle_printing_tool_initialized=true; } //resetting data of the unused particles const double inactive_particle_position= -10.0; array_1d<double,3>inactive_particle_position_vector; inactive_particle_position_vector(0)=inactive_particle_position; inactive_particle_position_vector(1)=inactive_particle_position; inactive_particle_position_vector(2)=inactive_particle_position; ModelPart::NodesContainerType::iterator inodebegin = lagrangian_model_part.NodesBegin(); for(unsigned int ii=0; ii<lagrangian_model_part.Nodes().size(); ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->FastGetSolutionStepValue(DISTANCE) = 0.0; inode->FastGetSolutionStepValue(VELOCITY) = ZeroVector(3); inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector; } int counter=0; //ModelPart::NodesContainerType::iterator it_begin = lagrangian_model_part.NodesBegin(); for (int i=0; i!=mmaximum_number_of_particles*mnelems; i++) { 
PFEM_Particle_Fluid& pparticle =mparticles_vector[i]; if(pparticle.GetEraseFlag()==false && i%mfilter_factor==0) { ModelPart::NodesContainerType::iterator inode = inodebegin+counter; //copying info from the particle to the (printing) node. inode->FastGetSolutionStepValue(DISTANCE) = pparticle.GetDistance(); inode->FastGetSolutionStepValue(VELOCITY) = pparticle.GetVelocity(); inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates(); counter++; } } KRATOS_CATCH("") } void ExecuteParticlesPritingToolForDroppletsOnly( ModelPart& lagrangian_model_part, int input_filter_factor ) { KRATOS_TRY //mfilter_factor; //we will only print one out of every "filter_factor" particles of the total particle list const int first_particle_id=1000000; if(mparticle_printing_tool_initialized==false) { mfilter_factor=input_filter_factor; if(lagrangian_model_part.NodesBegin()-lagrangian_model_part.NodesEnd()>0) KRATOS_THROW_ERROR(std::logic_error, "AN EMPTY MODEL PART IS REQUIRED FOR THE PRINTING OF PARTICLES", ""); lagrangian_model_part.AddNodalSolutionStepVariable(VELOCITY); lagrangian_model_part.AddNodalSolutionStepVariable(DISPLACEMENT); lagrangian_model_part.AddNodalSolutionStepVariable(DISTANCE); for (unsigned int i=0; i!=((mmaximum_number_of_particles*mnelems)/mfilter_factor)+mfilter_factor; i++) { Node < 3 > ::Pointer pnode = lagrangian_model_part.CreateNewNode( i+first_particle_id+1 , 0.0, 0.0, 0.0); //recordar que es el nueevo model part!! 
//pnode->SetBufferSize(mr_model_part.NodesBegin()->GetBufferSize()); pnode->SetBufferSize(1); } mparticle_printing_tool_initialized=true; } //resetting data of the unused particles const double inactive_particle_position= -10.0; array_1d<double,3>inactive_particle_position_vector; inactive_particle_position_vector(0)=inactive_particle_position; inactive_particle_position_vector(1)=inactive_particle_position; inactive_particle_position_vector(2)=inactive_particle_position; ModelPart::NodesContainerType::iterator inodebegin = lagrangian_model_part.NodesBegin(); for(unsigned int ii=0; ii<lagrangian_model_part.Nodes().size(); ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->FastGetSolutionStepValue(DISTANCE) = 0.0; inode->FastGetSolutionStepValue(VELOCITY) = ZeroVector(3); inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector; } const int max_number_of_printed_particles=lagrangian_model_part.Nodes().size(); ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET]; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones. 
//(flag managed only by MoveParticles //KRATOS_WATCH(offset) ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); int counter=0; for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; Element::Pointer pelement(*ielem.base()); Geometry<Node<3> >& geom = ielem->GetGeometry(); //double mean_elem_dist=0.0; bool pure_air_elem=true; for(unsigned int j=0; j<(TDim+1); j++) { if (geom[j].FastGetSolutionStepValue(DISTANCE)<0.0) pure_air_elem=false; //mean_elem_dist += geom[j].FastGetSolutionStepValue(DISTANCE); } //if (mean_elem_dist>0.0) //only air elements if (pure_air_elem==true) { ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS)); int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_FLUID_PARTICLES); //std::cout << "elem " << ii << " with " << (unsigned int)number_of_particles_in_elem << " particles" << std::endl; for (int iii=0; iii<number_of_particles_in_elem ; iii++ ) { //KRATOS_WATCH(iii) if (iii>mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop! break; PFEM_Particle_Fluid & pparticle = element_particle_pointers[offset+iii]; bool erase_flag= pparticle.GetEraseFlag(); if (erase_flag==false && pparticle.GetDistance()<0.0) { ModelPart::NodesContainerType::iterator inode = inodebegin+counter; //copying info from the particle to the (printing) node. inode->FastGetSolutionStepValue(DISTANCE) = pparticle.GetDistance(); inode->FastGetSolutionStepValue(VELOCITY) = pparticle.GetVelocity(); inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates(); counter++; } } } if (counter>(max_number_of_printed_particles-30)) //we are approaching the end of the model part. so we stop before it's too late break; } KRATOS_CATCH("") } void AssignNodalVelocityUsingInletConditions(const double inlet_vel) { KRATOS_TRY //first we are going to delete all the velocities! 
	// --- body of AssignNodalVelocityUsingInletConditions (declaration precedes this span) ---
	// Overwrites VELOCITY on every node of a condition flagged IS_INLET with a vector of
	// magnitude inlet_vel opposite to the condition normal (i.e. pointing into the domain).
	// Work is split over OpenMP threads by contiguous condition chunks.
	ModelPart::ConditionsContainerType::iterator iconditionbegin = mr_model_part.ConditionsBegin();
	vector<unsigned int> condition_partition;
	#ifdef _OPENMP
	int number_of_threads = omp_get_max_threads();
	#else
	int number_of_threads = 1;
	#endif
	OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Conditions().size(), condition_partition);
	#pragma omp parallel for
	for(int kkk=0; kkk<number_of_threads; kkk++)
	{
		for(unsigned int ii=condition_partition[kkk]; ii<condition_partition[kkk+1]; ii++)
		{
			ModelPart::ConditionsContainerType::iterator icondition = iconditionbegin+ii;
			if ( icondition->GetValue(IS_INLET) > 0.5 )
			{
				Geometry<Node<3> >& geom = icondition->GetGeometry();
				array_1d<double,3> normal = ZeroVector(3);
				this->CalculateNormal(geom,normal);
				const double normal_lenght = sqrt(normal[0]*normal[0] + normal[1]*normal[1] + normal[2]*normal[2]);
				//minus sign: the normal points outwards, inlet flow points inwards
				const array_1d<double,3> velocity = - inlet_vel/normal_lenght * normal;
				for (unsigned int l = 0; l < (TDim); l++)
				{
					//nodes can be shared by conditions processed on different threads
					geom[l].SetLock();
					geom[l].FastGetSolutionStepValue(VELOCITY) = velocity;
					geom[l].UnSetLock();
				}
			}
		}
	}
	KRATOS_CATCH("")
}

/// Rotates both the particle velocities and the (non-fixed) nodal velocities by the
/// angle rotations[2] around the Z axis. Only Z-rotations are supported: nonzero
/// X or Y components throw.
/// @param rotations  rotation vector; only component [2] (radians, around Z) may be nonzero.
void RotateParticlesAndDomainVelocities(array_1d<double, 3 > rotations)
{
	KRATOS_TRY

	//rotations around X or Y are not implemented
	if(fabs(rotations[0])>0.000000001 || fabs(rotations[1])>0.000000001)
		KRATOS_THROW_ERROR(std::invalid_argument,"ROTATIONS ONLY IMPLEMENTED AROUND Z AXIS! (xy plane) ","");

	const double cosinus_theta = cos(rotations[2]);
	const double sinus_theta = sin(rotations[2]);

	//std::cout << "updating particles" << std::endl;
	ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
	const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET];	//the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
	//(flag managed only by MoveParticles
	//KRATOS_WATCH(offset)
	ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

	//first pass: rotate the velocity stored on every active particle, element by element
	vector<unsigned int> element_partition;
	#ifdef _OPENMP
	int number_of_threads = omp_get_max_threads();
	#else
	int number_of_threads = 1;
	#endif
	OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
	#pragma omp parallel for
	for(int kkk=0; kkk<number_of_threads; kkk++)
	{
		for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
		{
			//const int & elem_id = ielem->Id();
			ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
			Element::Pointer pelement(*ielem.base());

			ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));
			int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
			//std::cout << "elem " << ii << " with " << (unsigned int)number_of_particles_in_elem << " particles" << std::endl;
			for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
			{
				//KRATOS_WATCH(iii)
				if (iii>mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
					break;

				PFEM_Particle_Fluid & pparticle = element_particle_pointers[offset+iii];
				bool erase_flag= pparticle.GetEraseFlag();
				if (erase_flag==false)
				{
					//2D rotation matrix applied in place (particle velocities are stored as float)
					array_1d<float, 3 > & vel = pparticle.GetVelocity();
					const float vel_x = vel[0];
					const float vel_y = vel[1];
					vel[0] = cosinus_theta*vel_x + sinus_theta*vel_y;
					vel[1] = cosinus_theta*vel_y - sinus_theta*vel_x;
				}
			}
		}
	}

	//second pass: rotate the mesh nodal velocities, skipping nodes with fixed VELOCITY_X
	ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
	vector<unsigned int> node_partition;
	OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
	#pragma omp parallel for
	for(int kkk=0; kkk<number_of_threads; kkk++)
	{
		for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
		{
			ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
			if (inode->IsFixed(VELOCITY_X)==false)
			{
				array_1d<double, 3 > & vel = inode->FastGetSolutionStepValue(VELOCITY);
				const double vel_x = vel[0];
				const double vel_y = vel[1];
				vel[0] = cosinus_theta*vel_x + sinus_theta*vel_y;
				vel[1] = cosinus_theta*vel_y - sinus_theta*vel_x;
			}
		}
	}
	KRATOS_CATCH("")
}

protected:

private:

	/// Verifies that every nodal solution-step variable required by this utility has
	/// been registered on the model part; throws std::invalid_argument otherwise.
	void Check()
	{
		if(mr_model_part.NodesBegin()->SolutionStepsDataHas(DISTANCE) == false)
			KRATOS_THROW_ERROR(std::invalid_argument,"missing DISTANCE variable on solution step data","");
		if(mr_model_part.NodesBegin()->SolutionStepsDataHas(PRESS_PROJ) == false)
			KRATOS_THROW_ERROR(std::invalid_argument,"missing PRESS_PROJ variable on solution step data","");
		if(mr_model_part.NodesBegin()->SolutionStepsDataHas(VELOCITY) == false)
			KRATOS_THROW_ERROR(std::invalid_argument,"missing VELOCITY variable on solution step data","");
		if(mr_model_part.NodesBegin()->SolutionStepsDataHas(PRESSURE) == false)
			KRATOS_THROW_ERROR(std::invalid_argument,"missing PRESSURE variable on solution step data","");
		if(mr_model_part.NodesBegin()->SolutionStepsDataHas(PROJECTED_VELOCITY) == false)
			KRATOS_THROW_ERROR(std::invalid_argument,"missing PROJECTED_VELOCITY variable on solution step data","");
if(mr_model_part.NodesBegin()->SolutionStepsDataHas(DELTA_VELOCITY) == false) KRATOS_THROW_ERROR(std::invalid_argument,"missing DELTA_VELOCITY variable on solution step data",""); if(mr_model_part.NodesBegin()->SolutionStepsDataHas(MESH_VELOCITY) == false) KRATOS_THROW_ERROR(std::invalid_argument,"missing MESH_VELOCITY variable on solution step data",""); if(mr_model_part.NodesBegin()->SolutionStepsDataHas(YP) == false) KRATOS_THROW_ERROR(std::invalid_argument,"missing YP variable on solution step data",""); if(mr_model_part.NodesBegin()->SolutionStepsDataHas(NORMAL) == false) KRATOS_THROW_ERROR(std::invalid_argument,"missing NORMAL variable on solution step data",""); if(mr_model_part.NodesBegin()->SolutionStepsDataHas(NODAL_AREA) == false) KRATOS_THROW_ERROR(std::invalid_argument,"missing NODAL_AREA variable on solution step data",""); } ///this function moves a particle according to the "velocity" given ///by "rVariable". The movement is performed in nsubsteps, during a total time ///of Dt void MoveParticle( PFEM_Particle_Fluid & pparticle, Element::Pointer & pelement, WeakPointerVector< Element >& elements_in_trajectory, unsigned int & number_of_elements_in_trajectory, ResultIteratorType result_begin, const unsigned int MaxNumberOfResults, const array_1d<double,3> mesh_displacement, const bool discriminate_streamlines, const bool use_mesh_velocity_to_convect) { ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); double delta_t = CurrentProcessInfo[DELTA_TIME]; array_1d<double,3> & gravity= CurrentProcessInfo[GRAVITY]; unsigned int nsubsteps; double substep_dt; bool KEEP_INTEGRATING=false; bool is_found; //bool have_air_node; //bool have_water_node; array_1d<double,3> vel; array_1d<double,3> vel_without_other_phase_nodes=ZeroVector(3); array_1d<double,3> position; array_1d<double,3> mid_position; array_1d<double,TDim+1> N; //we start with the first position, then it will enter the loop. 
		position = pparticle.Coordinates(); //initial coordinates

		const float particle_distance = pparticle.GetDistance(); //phase marker: <0 water, >0 air
		array_1d<float,3> particle_velocity = pparticle.GetVelocity();

		//double distance=0.0;
		array_1d<double,3> last_useful_vel;
		double sum_Ns_without_other_phase_nodes;
		//double pressure=0.0;
		///*****
		//bool flying_water_particle=true; //if a water particle does not find a water element in its whole path, then we add the gravity*dt

		double only_integral = 0.0 ;

		is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults);
		//good, now we know where this point is:
		if(is_found == true)
		{
			KEEP_INTEGRATING=true;
			Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
			vel=ZeroVector(3);
			vel_without_other_phase_nodes = ZeroVector(3);
			sum_Ns_without_other_phase_nodes=0.0;
			//distance=0.0;

			//water particle with streamline discrimination: interpolate the velocity
			//using only the water nodes (DISTANCE<0) of the element, when they carry
			//enough shape-function weight.
			if (particle_distance<0.0 && discriminate_streamlines==true)
			{
				for(unsigned int j=0; j<(TDim+1); j++)
				{
					if ((geom[j].FastGetSolutionStepValue(DISTANCE))<0.0) //ok. useful info!
					{
						sum_Ns_without_other_phase_nodes += N[j];
						noalias(vel_without_other_phase_nodes) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
						if (use_mesh_velocity_to_convect)
							noalias(vel_without_other_phase_nodes) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
					}
					noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
					if (use_mesh_velocity_to_convect)
						noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
				}

				if (sum_Ns_without_other_phase_nodes>0.01)
				{
					//renormalize so the water-only shape functions sum to one
					vel = vel_without_other_phase_nodes / sum_Ns_without_other_phase_nodes;
					//flying_water_particle=false;
				}
				else
				{
					//no useful water information here: keep the particle's own velocity
					vel = particle_velocity;
					if (use_mesh_velocity_to_convect)
					{
						for(unsigned int j=0; j<(TDim+1); j++)
							noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
					}
				}
			}
			else // air particle or we are not following streamlines
			{
				for(unsigned int j=0; j<(TDim+1); j++)
				{
					noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
					if (use_mesh_velocity_to_convect)
						noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
				}
				//flying_water_particle=false;
			}

			//calculating substep to get +- courant(substep) = 0.1
			nsubsteps = 10.0 * (delta_t * pelement->GetValue(VELOCITY_OVER_ELEM_SIZE));
			if (nsubsteps<1)
				nsubsteps=1;
			substep_dt = delta_t / double(nsubsteps);

			only_integral = 1.0;// weight;//*double(nsubsteps);
			position += vel*substep_dt;//weight;

			///*****
			last_useful_vel=vel;
			///*****

			//DONE THE FIRST LOCATION OF THE PARTICLE, NOW WE PROCEED TO STREAMLINE INTEGRATION USING THE MESH VELOCITY
			//////////////////////////////////////////////////////////////////////////////////////////////////////
			unsigned int check_from_element_number=0;
			for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
			{
				if (KEEP_INTEGRATING==true)
				{
					//trajectory-aware overload: reuses elements crossed by earlier particles from the same start element
					is_found = FindNodeOnMesh(position, N ,pelement,elements_in_trajectory,number_of_elements_in_trajectory,check_from_element_number,result_begin,MaxNumberOfResults);
					//good, now we know where this point is:
					if(is_found == true)
					{
						Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in

						sum_Ns_without_other_phase_nodes=0.0;

						if (particle_distance<0.0 && discriminate_streamlines==true)
						{
							vel_without_other_phase_nodes = ZeroVector(3);
							for(unsigned int j=0; j<TDim+1; j++)
							{
								if ((geom[j].FastGetSolutionStepValue(DISTANCE))<0.0) //ok. useful info!
								{
									sum_Ns_without_other_phase_nodes += N[j];
									noalias(vel_without_other_phase_nodes) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
									if (use_mesh_velocity_to_convect)
										noalias(vel_without_other_phase_nodes) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
								}
								noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
								if (use_mesh_velocity_to_convect)
									noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
							}
							//if (have_water_node)
							//if (distance<0.0)
							if (sum_Ns_without_other_phase_nodes>0.01)
							{
								vel = vel_without_other_phase_nodes / sum_Ns_without_other_phase_nodes;
								//flying_water_particle=false;
							}
							else
							{
								//"flying" water particle inside air: ballistic update with gravity
								particle_velocity += substep_dt * gravity;
								vel = particle_velocity;
								if (use_mesh_velocity_to_convect)
								{
									for(unsigned int j=0; j<(TDim+1); j++)
										noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
								}
							}
						}
						else //air particle or we are not discriminating streamlines
						{
							vel_without_other_phase_nodes = ZeroVector(3);
							vel = ZeroVector(3);
							for(unsigned int j=0; j<(TDim+1); j++)
							{
								noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
								if (use_mesh_velocity_to_convect)
									noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
							}
							//flying_water_particle=false;
						}

						only_integral += 1.0; //values saved for the current time step
						position+=vel*substep_dt;//weight;
					}
					else
					{
						KEEP_INTEGRATING=false;
						break;
					}
				}
				else
					break;
			}
		}

		//if there's a mesh velocity, we add it at the end in a single step:
		position-=mesh_displacement;

		//particles that left the domain (or never located) are flagged for erasure
		if (KEEP_INTEGRATING==false)
			(pparticle.GetEraseFlag()=true);
		else
			is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //we must save the pointer of the last element that we're in (inside the pointervector pelement)

		if (is_found==false) (
			pparticle.GetEraseFlag()=true);

		pparticle.Coordinates() = position;
	}

	/// Adds the interpolated nodal DELTA_VELOCITY to the particle velocity.
	/// Water particles prefer the contribution of water nodes only; if none carries
	/// enough weight, a buoyancy-corrected gravity increment is applied instead.
	/// @param pparticle  particle whose velocity is updated in place
	/// @param pelement   element containing the particle
	/// @param geom       geometry of that element
	void AccelerateParticleUsingDeltaVelocity(
						 PFEM_Particle_Fluid & pparticle,
						 Element::Pointer & pelement,
						 Geometry< Node<3> >& geom)
	{
		array_1d<double,TDim+1> N;

		ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
		const double delta_t = CurrentProcessInfo[DELTA_TIME];
		array_1d<double,3> gravity = CurrentProcessInfo[GRAVITY];

		//we start with the first position, then it will enter the loop.
		array_1d<double,3> coords = pparticle.Coordinates();
		float & particle_distance = pparticle.GetDistance();
		//double distance=0.0;
		array_1d<double,3> delta_velocity = ZeroVector(3);
		array_1d<double,3> delta_velocity_without_air = ZeroVector(3);
		array_1d<double,3> delta_velocity_without_water = ZeroVector(3);
		double sum_Ns_without_water_nodes = 0.0;
		double sum_Ns_without_air_nodes = 0.0;

		bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
		if(is_found == false)
		{
			//particle slightly outside its element: clamp negative shape functions to ~0
			KRATOS_WATCH(N)
			for (int j=0 ; j!=(TDim+1); j++)
				if (N[j]<0.0 )
					N[j]=1e-10;
		}

		if (particle_distance>0.0) //no problem. air
		{
			for(unsigned int j=0; j<(TDim+1); j++)
			{
				//just for air
				if ((geom[j].FastGetSolutionStepValue(DISTANCE))>0.0)
				{
					noalias(delta_velocity_without_water) += geom[j].FastGetSolutionStepValue(DELTA_VELOCITY)*N[j];
					// NOTE(review): this accumulates into sum_Ns_without_air_nodes while the
					// check below reads sum_Ns_without_water_nodes (which is never updated).
					// Harmless today because the guarded assignment is commented out, but
					// confirm the intended accumulator before re-enabling it.
					sum_Ns_without_air_nodes += N[j];
				}
				//both air and water
				noalias(delta_velocity) += geom[j].FastGetSolutionStepValue(DELTA_VELOCITY)*N[j];
			}
			if (sum_Ns_without_water_nodes>0.01)
			{
				//delta_velocity = delta_velocity_without_water/sum_Ns_without_water_nodes ; //commented = using all the velocities always!
} //else we use the complete field } else //water particle { for(unsigned int j=0; j<(TDim+1); j++) { if ((geom[j].FastGetSolutionStepValue(DISTANCE))<0.0) { noalias(delta_velocity_without_air) += geom[j].FastGetSolutionStepValue(DELTA_VELOCITY)*N[j]; sum_Ns_without_air_nodes += N[j]; } noalias(delta_velocity) += geom[j].FastGetSolutionStepValue(DELTA_VELOCITY)*N[j]; } if (sum_Ns_without_air_nodes>0.01) { delta_velocity = delta_velocity_without_air/sum_Ns_without_air_nodes ; } else { if (mDENSITY_WATER>(10.0*mDENSITY_AIR)) { delta_velocity=gravity*(1.0-mDENSITY_AIR/mDENSITY_WATER)*delta_t; } } } pparticle.GetVelocity() = pparticle.GetVelocity() + delta_velocity; } void MoveParticle_inverse_way( PFEM_Particle_Fluid & pparticle, Element::Pointer & pelement, //NOT A REFERENCE!! WE SHALL NOT OVERWRITE THE ELEMENT IT BELONGS TO! ResultIteratorType result_begin, const unsigned int MaxNumberOfResults, const bool use_mesh_velocity_to_convect) { ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); double delta_t = CurrentProcessInfo[DELTA_TIME]; unsigned int nsubsteps; double substep_dt; bool KEEP_INTEGRATING=false; bool is_found; array_1d<double,3> vel; array_1d<double,3> particle_vel; array_1d<double,3> position; array_1d<double,3> mid_position; array_1d<double,TDim+1> N; //we start with the first position, then it will enter the loop. 
		position = pparticle.Coordinates(); // + (pparticle)->FastGetSolutionStepValue(DISPLACEMENT); //initial coordinates

		float & distance = pparticle.GetDistance(); //overwritten below with the upstream value

		double only_integral = 0.0 ;

		is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults);
		//good, now we know where this point is:
		if(is_found == true)
		{
			KEEP_INTEGRATING=true;
			Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
			vel=ZeroVector(3);
			particle_vel=ZeroVector(3);
			distance=0.0;

			//interpolate velocity and distance at the current position
			for(unsigned int j=0; j<(TDim+1); j++)
			{
				distance += geom[j].FastGetSolutionStepValue(DISTANCE)*N(j);
				noalias(particle_vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
				noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
				if (use_mesh_velocity_to_convect)
					noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
			}

			//calculating substep to get +- courant(substep) = 1/4
			nsubsteps = 10.0 * (delta_t * pelement->GetValue(VELOCITY_OVER_ELEM_SIZE));
			if (nsubsteps<1)
				nsubsteps=1;
			substep_dt = delta_t / double(nsubsteps);

			only_integral = 1.0;// weight;//*double(nsubsteps);
			position -= vel*substep_dt;//weight;  note the MINUS: we integrate backwards

			for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
			{
				if (KEEP_INTEGRATING==true)
				{
					is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults);
					//good, now we know where this point is:
					if(is_found == true)
					{
						Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
						vel=ZeroVector(3);
						particle_vel=ZeroVector(3);
						distance=0.0;

						for(unsigned int j=0; j<(TDim+1); j++)
						{
							noalias(particle_vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j] ;
							noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j] ;
							distance += geom[j].FastGetSolutionStepValue(DISTANCE)*N(j);
							if (use_mesh_velocity_to_convect)
								noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
						}

						only_integral += 1.0;//weight ; //values saved for the current time step
						position-=vel*substep_dt;//weight;
					}
					else
						KEEP_INTEGRATING=false;
				}
			}

			//snap the interpolated distance to a sharp +1/-1 phase marker
			///COMMENT TO GET A A CONTINOUS DISTANCE FUNCTION FIELD!!!!!
			if(distance>0.0)
			{
				//if(distance<2.0)
				distance=1.0;
				//else
				//	distance=3.0;
			}
			else
				distance=-1.0;

			pparticle.GetVelocity()=particle_vel;
		}
		//else {KRATOS_WATCH(position); }
	}

	/// Overwrites the phase information of a particle by testing it against a
	/// second ("topographic") mesh shifted by domains_offset: particles found
	/// inside the topographic domain are marked solid/water (DISTANCE=-1),
	/// the rest are marked air (DISTANCE=+1). Velocity is always reset to zero.
	void OverwriteParticleDataUsingTopographicDomain(
						 PFEM_Particle_Fluid & pparticle,
						 Element::Pointer & pelement,
						 array_1d<double,3> domains_offset,
						 ResultIteratorType result_begin,
						 const unsigned int MaxNumberOfResults)
	{
		array_1d<double,TDim+1> N;
		//we start with the first position, then it will enter the loop.
		array_1d<double,3> coords = pparticle.Coordinates()+domains_offset; //probe point in the topographic mesh frame
		float & particle_distance = pparticle.GetDistance();

		bool is_found = FindNodeOnTopographicMesh(coords, N ,pelement,result_begin,MaxNumberOfResults);
		//good, now we know where this point is:
		if (is_found) //it is part of the solid topographic domain
		{
			particle_distance= -1.0;
		}
		else //it is outside the topographic domain, therefore it is air or whatever it means
		{
			particle_distance= 1.0;
		}
		pparticle.GetVelocity() = ZeroVector(3);
	}

	///this function should find the element into which a given node is located
	///and return a pointer to the element and the vector containing the
	///shape functions that define the postion within the element
	///if "false" is devolved the element is not found
	///
	///Search order: (1) the element the particle was last in, (2) its neighbours,
	///(3) the spatial bins structure. pelement and N are updated on success.
	bool FindNodeOnMesh( array_1d<double,3>& position,
						 array_1d<double,TDim+1>& N,
						 Element::Pointer & pelement,
						 ResultIteratorType result_begin,
						 const unsigned int MaxNumberOfResults)
	{
		typedef std::size_t SizeType;

		const array_1d<double,3>& coords = position;
		array_1d<double,TDim+1> aux_N;
		//before using the bin to search for possible elements we check first the last element in which the particle was.
		Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry();
		bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
		if(is_found_1 == true) //that was easy!
		{
			return true;
		}

		//to begin with we check the neighbour elements; it is a bit more expensive
		WeakPointerVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
		//the first we check is the one that has negative shape function, because it means it went outside in this direction:
		//commented, it is not faster than simply checking all the neighbours (branching)
		/*
		unsigned int checked_element=0;
		for (unsigned int i=0;i!=(TDim+1);i++)
		{
			if (N[i]<0.0)
			{
				checked_element=i;
				Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
				bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
				if (is_found_2)
				{
					pelement=Element::Pointer(((neighb_elems(i))));
					N=aux_N;
					return true;
				}
				break;
			}
		}
		*/
		//we check all the neighbour elements
		for (unsigned int i=0;i!=(neighb_elems.size());i++)
		{
			Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
			bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
			if (is_found_2)
			{
				pelement=Element::Pointer(((neighb_elems(i))));
				return true;
			}
		}

		//if checking all the neighbour elements did not work, we have to use the bins
		//ask to the container for the list of candidate elements
		SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );

		if(results_found>0){
			//loop over the candidate elements and check if the particle falls within
			for(SizeType i = 0; i< results_found; i++)
			{
				Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();
				//find local position
				bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
				if(is_found == true)
				{
					pelement=Element::Pointer((*(result_begin+i)));
					return true;
				}
			}
		}

		//if nothing worked, then:
		//not found case
		return false;
	}

	// VERSION INCLUDING PREDEFINED ELEMENTS FOLLOWING A TRAJECTORY
	//Search order here: last element, cached trajectory elements, neighbours, bins.
	//Elements found via neighbours/bins are appended to the trajectory cache (max 20).
	bool FindNodeOnMesh( array_1d<double,3>& position,
						 array_1d<double,TDim+1>& N,
						 Element::Pointer & pelement,
						 WeakPointerVector< Element >& elements_in_trajectory,
						 unsigned int &
	                     number_of_elements_in_trajectory,
						 unsigned int & check_from_element_number,
						 ResultIteratorType result_begin,
						 const unsigned int MaxNumberOfResults)
	{
		typedef std::size_t SizeType;

		const array_1d<double,3>& coords = position;
		array_1d<double,TDim+1> aux_N;
		//before using the bin to search for possible elements we check first the last element in which the particle was.
		Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry();
		bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
		if(is_found_1 == true)
		{
			return true; //that was easy!
		}

		//if it was not found in the first element, we can proceed to check in the following elements (in the trajectory defined by previous particles that started from the same element.
		for (unsigned int i=(check_from_element_number);i!=number_of_elements_in_trajectory;i++)
		{
			Geometry<Node<3> >& geom = elements_in_trajectory[i].GetGeometry();
			bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
			if (is_found_2)
			{
				pelement=Element::Pointer(((elements_in_trajectory(i))));
				N=aux_N;
				check_from_element_number = i+1 ; //now i element matches pelement, so to avoid cheching twice the same element we send the counter to the following element.
				return true;
			}
		}

		//now we check the neighbour elements:
		WeakPointerVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
		//the first we check is the one that has negative shape function, because it means it went outside in this direction:
		//commented, it is not faster than simply checking all the neighbours (branching)
		/*
		unsigned int checked_element=0;
		for (unsigned int i=0;i!=(TDim+1);i++)
		{
			if (N[i]<0.0)
			{
				checked_element=i;
				Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
				bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
				if (is_found_2)
				{
					pelement=Element::Pointer(((neighb_elems(i))));
					N=aux_N;
					return true;
				}
				break;
			}
		}
		*/
		//we check all the neighbour elements
		for (unsigned int i=0;i!=(neighb_elems.size());i++)
		{
			Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
			bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
			if (is_found_2)
			{
				pelement=Element::Pointer(((neighb_elems(i))));
				//remember this element in the shared trajectory cache (bounded at 20 entries)
				if (number_of_elements_in_trajectory<20)
				{
					elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
					number_of_elements_in_trajectory++;
					check_from_element_number = number_of_elements_in_trajectory;  //we do it after doing the ++ to the counter, so we woudlnt enter the loop that searches in the elements_in_trajectory list. we are the particle that is adding elements to the list
				}
				return true;
			}
		}

		//if checking all the neighbour elements did not work, we have to use the bins
		//ask to the container for the list of candidate elements
		SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );

		if(results_found>0)
		{
			//loop over the candidate elements and check if the particle falls within
			for(SizeType i = 0; i< results_found; i++)
			{
				Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();
				//find local position
				bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
				if(is_found == true)
				{
					pelement=Element::Pointer((*(result_begin+i)));
					if (number_of_elements_in_trajectory<20)
					{
						elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
						number_of_elements_in_trajectory++;
						check_from_element_number = number_of_elements_in_trajectory; //we do it after doing the ++ to the counter, so we woudlnt enter the loop that searches in the elements_in_trajectory list. we are the particle that is adding elements to the list
					}
					return true;
				}
			}
		}

		//not found case
		return false;
	}

	///this function should find the element into which a given node is located
	///and return a pointer to the element and the vector containing the
	///shape functions that define the postion within the element
	///if "false" is devolved the element is not found
	///
	///Same search strategy as FindNodeOnMesh, but against the topographic mesh
	///(mpTopographicBinsObjectDynamic).
	bool FindNodeOnTopographicMesh( array_1d<double,3>& position,
						 array_1d<double,TDim+1>& N,
						 Element::Pointer & pelement,
						 ResultIteratorType result_begin,
						 const unsigned int MaxNumberOfResults)
	{
		typedef std::size_t SizeType;

		const array_1d<double,3>& coords = position;
		array_1d<double,TDim+1> aux_N;
		//before using the bin to search for possible elements we check first the last element in which the particle was.
//ModelPart::ElementsContainerType::iterator i = mr_model_part.ElementsBegin()+last_element; Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry(); bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N); if(is_found_1 == true) { //pelement = (*(i)); return true; } //to begin with we check the neighbour elements: WeakPointerVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS); for (unsigned int i=0;i!=(neighb_elems.size());i++) { Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry(); bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N); if (is_found_2) { pelement=Element::Pointer(((neighb_elems(i)))); return true; } } //ask to the container for the list of candidate elements SizeType results_found = mpTopographicBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults ); //KRATOS_WATCH(results_found) if(results_found>0){ //loop over the candidate elements and check if the particle falls within for(SizeType i = 0; i< results_found; i++) { Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry(); //find local position bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N); if(is_found == true) { pelement=Element::Pointer((*(result_begin+i))); return true; } } } //not found case return false; } //*************************************** //*************************************** inline bool CalculatePosition(Geometry<Node < 3 > >&geom, const double xc, const double yc, const double zc, array_1d<double, 3 > & N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double area = CalculateVol(x0, y0, x1, y1, x2, y2); double inv_area = 0.0; if (area == 0.0) { KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", ""); } else { inv_area = 1.0 / area; } N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area; N[1] = 
CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area; N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area; //KRATOS_WATCH(N); if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true return true; return false; } //////////// //using the pre loaded nodal coordinates inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions, const double xc, const double yc, const double zc, array_1d<double, 3 > & N ) { const double& x0 = nodes_positions[0]; const double& y0 = nodes_positions[1]; const double& x1 = nodes_positions[3]; const double& y1 = nodes_positions[4]; const double& x2 = nodes_positions[6]; const double& y2 = nodes_positions[7]; double area = CalculateVol(x0, y0, x1, y1, x2, y2); double inv_area = 0.0; if (area == 0.0) { KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", ""); } else { inv_area = 1.0 / area; } N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area; N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area; N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area; //KRATOS_WATCH(N); if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true return true; return false; } //*************************************** //*************************************** inline bool CalculatePosition(Geometry<Node < 3 > >&geom, const double xc, const double yc, const double zc, array_1d<double, 4 > & N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double z0 = geom[0].Z(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double z1 = geom[1].Z(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double z2 = geom[2].Z(); double x3 = geom[3].X(); double y3 = geom[3].Y(); double z3 = geom[3].Z(); double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3); double inv_vol = 0.0; if (vol < 0.000000000000000000000000000001) { 
KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", ""); } else { inv_vol = 1.0 / vol; } N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol; N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol; N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol; N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol; if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0) //if the xc yc zc is inside the tetrahedron return true return true; return false; } /////////////////// //using the pre loaded nodal coordinates inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions, const double xc, const double yc, const double zc, array_1d<double, 4 > & N ) { const double& x0 = nodes_positions[0]; const double& y0 = nodes_positions[1]; const double& z0 = nodes_positions[2]; const double& x1 = nodes_positions[3]; const double& y1 = nodes_positions[4]; const double& z1 = nodes_positions[5]; const double& x2 = nodes_positions[6]; const double& y2 = nodes_positions[7]; const double& z2 = nodes_positions[8]; const double& x3 = nodes_positions[9]; const double& y3 = nodes_positions[10]; const double& z3 = nodes_positions[11]; double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3); double inv_vol = 0.0; if (vol < 0.000000000000000000000000000001) { KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", ""); } else { inv_vol = 1.0 / vol; } N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol; N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol; N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol; N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol; if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] 
<= 1.0) //if the xc yc zc is inside the tetrahedron return true return true; return false; } inline double CalculateVol(const double x0, const double y0, const double x1, const double y1, const double x2, const double y2 ) { return 0.5 * ((x1 - x0)*(y2 - y0)- (y1 - y0)*(x2 - x0)); } //*************************************** //*************************************** inline double CalculateVol(const double x0, const double y0, const double z0, const double x1, const double y1, const double z1, const double x2, const double y2, const double z2, const double x3, const double y3, const double z3 ) { double x10 = x1 - x0; double y10 = y1 - y0; double z10 = z1 - z0; double x20 = x2 - x0; double y20 = y2 - y0; double z20 = z2 - z0; double x30 = x3 - x0; double y30 = y3 - y0; double z30 = z3 - z0; double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30; return detJ * 0.1666666666666666666667; } void ComputeGaussPointPositions_4(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N) { double one_third = 1.0 / 3.0; double one_sixt = 0.15; //1.0 / 6.0; double two_third = 0.7; //2.0 * one_third; N(0, 0) = one_sixt; N(0, 1) = one_sixt; N(0, 2) = two_third; N(1, 0) = two_third; N(1, 1) = one_sixt; N(1, 2) = one_sixt; N(2, 0) = one_sixt; N(2, 1) = two_third; N(2, 2) = one_sixt; N(3, 0) = one_third; N(3, 1) = one_third; N(3, 2) = one_third; //first pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X(); pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y(); pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z(); //second pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X(); pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y(); pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z(); 
//third pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X(); pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y(); pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z(); //fourth pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X(); pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y(); pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z(); } void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N) //2d { double one_third = 1.0 / 3.0; double one_eight = 0.12; //1.0 / 6.0; double three_quarters = 0.76; //2.0 * one_third; N(0, 0) = one_eight; N(0, 1) = one_eight; N(0, 2) = three_quarters; N(1, 0) = three_quarters; N(1, 1) = one_eight; N(1, 2) = one_eight; N(2, 0) = one_eight; N(2, 1) = three_quarters; N(2, 2) = one_eight; N(3, 0) = one_third; N(3, 1) = one_third; N(3, 2) = one_third; N(4, 0) = one_eight; N(4, 1) = 0.44; N(4, 2) = 0.44; N(5, 0) = 0.44; N(5, 1) = one_eight; N(5, 2) = 0.44; N(6, 0) = 0.44; N(6, 1) = 0.44; N(6, 2) = one_eight; //first pos(0, 0) = one_eight * geom[0].X() + one_eight * geom[1].X() + three_quarters * geom[2].X(); pos(0, 1) = one_eight * geom[0].Y() + one_eight * geom[1].Y() + three_quarters * geom[2].Y(); pos(0, 2) = one_eight * geom[0].Z() + one_eight * geom[1].Z() + three_quarters * geom[2].Z(); //second pos(1, 0) = three_quarters * geom[0].X() + one_eight * geom[1].X() + one_eight * geom[2].X(); pos(1, 1) = three_quarters * geom[0].Y() + one_eight * geom[1].Y() + one_eight * geom[2].Y(); pos(1, 2) = three_quarters * geom[0].Z() + one_eight * geom[1].Z() + one_eight * geom[2].Z(); //third pos(2, 0) = one_eight * geom[0].X() + three_quarters * geom[1].X() + one_eight * geom[2].X(); pos(2, 1) = one_eight * geom[0].Y() + three_quarters * geom[1].Y() + 
one_eight * geom[2].Y(); pos(2, 2) = one_eight * geom[0].Z() + three_quarters * geom[1].Z() + one_eight * geom[2].Z(); //fourth pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X(); pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y(); pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z(); //fifth pos(4, 0) = one_eight * geom[0].X() + 0.44 * geom[1].X() + 0.44 * geom[2].X(); pos(4, 1) = one_eight * geom[0].Y() + 0.44 * geom[1].Y() + 0.44 * geom[2].Y(); pos(4, 2) = one_eight * geom[0].Z() + 0.44 * geom[1].Z() + 0.44 * geom[2].Z(); //sixth pos(5, 0) = 0.44 * geom[0].X() + one_eight * geom[1].X() + 0.44 * geom[2].X(); pos(5, 1) = 0.44 * geom[0].Y() + one_eight * geom[1].Y() + 0.44 * geom[2].Y(); pos(5, 2) = 0.44 * geom[0].Z() + one_eight * geom[1].Z() + 0.44 * geom[2].Z(); //seventh pos(6, 0) = 0.44 * geom[0].X() + 0.44 * geom[1].X() + one_eight * geom[2].X(); pos(6, 1) = 0.44 * geom[0].Y() + 0.44 * geom[1].Y() + one_eight * geom[2].Y(); pos(6, 2) = 0.44 * geom[0].Z() + 0.44 * geom[1].Z() + one_eight * geom[2].Z(); } void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 9, 3 > & pos,BoundedMatrix<double, 9, 4 > & N) //3D { double one_quarter = 0.25; double small_fraction = 0.1; //1.0 / 6.0; double big_fraction = 0.7; //2.0 * one_third; double mid_fraction = 0.3; //2.0 * one_third; N(0, 0) = big_fraction; N(0, 1) = small_fraction; N(0, 2) = small_fraction; N(0, 3) = small_fraction; N(1, 0) = small_fraction; N(1, 1) = big_fraction; N(1, 2) = small_fraction; N(1, 3) = small_fraction; N(2, 0) = small_fraction; N(2, 1) = small_fraction; N(2, 2) = big_fraction; N(2, 3) = small_fraction; N(3, 0) = small_fraction; N(3, 1) = small_fraction; N(3, 2) = small_fraction; N(3, 3) = big_fraction; N(4, 0) = one_quarter; N(4, 1) = one_quarter; N(4, 2) = one_quarter; N(4, 3) = one_quarter; N(5, 0) = small_fraction; N(5, 1) = mid_fraction; 
N(5, 2) = mid_fraction; N(5, 3) = mid_fraction; N(6, 0) = mid_fraction; N(6, 1) = small_fraction; N(6, 2) = mid_fraction; N(6, 3) = mid_fraction; N(7, 0) = mid_fraction; N(7, 1) = mid_fraction; N(7, 2) = small_fraction; N(7, 3) = mid_fraction; N(8, 0) = mid_fraction; N(8, 1) = mid_fraction; N(8, 2) = mid_fraction; N(8, 3) = small_fraction; pos=ZeroMatrix(9,3); for (unsigned int i=0; i!=4; i++) //going through the 4 nodes { array_1d<double, 3 > & coordinates = geom[i].Coordinates(); for (unsigned int j=0; j!=9; j++) //going through the 9 particles { for (unsigned int k=0; k!=3; k++) //x,y,z pos(j,k) += N(j,i) * coordinates[k]; } } } void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 3, 3 > & pos,BoundedMatrix<double, 3, 3 > & N) //2D { N(0, 0) = 0.5; N(0, 1) = 0.25; N(0, 2) = 0.25; N(1, 0) = 0.25; N(1, 1) = 0.5; N(1, 2) = 0.25; N(2, 0) = 0.25; N(2, 1) = 0.25; N(2, 2) = 0.5; //first pos(0, 0) = 0.5 * geom[0].X() + 0.25 * geom[1].X() + 0.25 * geom[2].X(); pos(0, 1) = 0.5 * geom[0].Y() + 0.25 * geom[1].Y() + 0.25 * geom[2].Y(); pos(0, 2) = 0.5 * geom[0].Z() + 0.25 * geom[1].Z() + 0.25 * geom[2].Z(); //second pos(1, 0) = 0.25 * geom[0].X() + 0.5 * geom[1].X() + 0.25 * geom[2].X(); pos(1, 1) = 0.25 * geom[0].Y() + 0.5 * geom[1].Y() + 0.25 * geom[2].Y(); pos(1, 2) = 0.25 * geom[0].Z() + 0.5 * geom[1].Z() + 0.25 * geom[2].Z(); //third pos(2, 0) = 0.25 * geom[0].X() + 0.25 * geom[1].X() + 0.5 * geom[2].X(); pos(2, 1) = 0.25 * geom[0].Y() + 0.25 * geom[1].Y() + 0.5 * geom[2].Y(); pos(2, 2) = 0.25 * geom[0].Z() + 0.25 * geom[1].Z() + 0.5 * geom[2].Z(); } void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 4, 3 > & pos,BoundedMatrix<double, 4, 4 > & N) //3D { //creating 4 particles, each will be closer to a node and equidistant to the other nodes N(0, 0) = 0.4; N(0, 1) = 0.2; N(0, 2) = 0.2; N(0, 3) = 0.2; N(1, 0) = 0.2; N(1, 1) = 0.4; N(1, 2) = 0.2; N(1, 3) = 0.2; N(2, 0) = 0.2; N(2, 1) = 
0.2; N(2, 2) = 0.4; N(2, 3) = 0.2; N(3, 0) = 0.2; N(3, 1) = 0.2; N(3, 2) = 0.2; N(3, 3) = 0.4; pos=ZeroMatrix(4,3); for (unsigned int i=0; i!=4; i++) //going through the 4 nodes { array_1d<double, 3 > & coordinates = geom[i].Coordinates(); for (unsigned int j=0; j!=4; j++) //going through the 4 particles { for (unsigned int k=0; k!=3; k++) //x,y,z pos(j,k) += N(j,i) * coordinates[k]; } } } void ComputeGaussPointPositions_45(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 45, 3 > & pos,BoundedMatrix<double, 45, 3 > & N) { //std::cout << "NEW ELEMENT" << std::endl; unsigned int counter=0; for (unsigned int i=0; i!=9;i++) { for (unsigned int j=0; j!=(9-i);j++) { N(counter,0)=0.05+double(i)*0.1; N(counter,1)=0.05+double(j)*0.1; N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z(); //std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl; counter++; } } } void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 15, 3 > & pos,BoundedMatrix<double, 15, 3 > & N) //2D { //std::cout << "NEW ELEMENT" << std::endl; unsigned int counter=0; for (unsigned int i=0; i!=5;i++) { for (unsigned int j=0; j!=(5-i);j++) { N(counter,0)=0.05+double(i)*0.2; N(counter,1)=0.05+double(j)*0.2; N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z(); //std::cout << N(counter,0) << " " << N(counter,1) << " " << 
N(counter,2) << " " << std::endl; counter++; } } } void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 20, 3 > & pos,BoundedMatrix<double, 20, 4 > & N) //3D { //std::cout << "NEW ELEMENT" << std::endl; //double total; double fraction_increment; unsigned int counter=0; for (unsigned int i=0; i!=4;i++) //going to build a particle "pyramid"(tetrahedra) by layers. the first layer will be made by a triangle of 4 base X 4 height. since it is a triangle, it means it will have 10 particles { //std::cout << "inside i" << i << std::endl; for (unsigned int j=0; j!=(4-i);j++) { //std::cout << "inside j" << j << std::endl; for (unsigned int k=0; k!=(4-i-j);k++) { //std::cout << "inside k" << k << std::endl; N(counter,0)= 0.27 * ( 0.175 + double(i) ) ; //this is our "surface" in which we will build each layer, so we must construct a triangle using what's left of the shape functions total (a total of 1) //total = 1.0 - N(counter,0); fraction_increment = 0.27; // N(counter,1)=fraction_increment * (0.175 + double(j)); N(counter,2)=fraction_increment * (0.175 + double(k)); N(counter,3)=1.0 - ( N(counter,0)+ N(counter,1) + N(counter,2) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X() + N(counter,3) * geom[3].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y() + N(counter,3) * geom[3].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z() + N(counter,3) * geom[3].Z(); //std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl; counter++; } } } } // Bubble Sort Function for Descending Order void BubbleSort(array_1d<double,7> &distances , array_1d<int,7 > &positions, unsigned int & arrange_number) { int i, j; bool flag = true; // set flag to 1 to start first pass double temp; // holding variable int temp_position; int numLength = arrange_number; 
for(i = 1; (i <= numLength) && flag; i++) { flag = false; for (j=0; j < (numLength -1); j++) { if (distances[j+1] < distances[j]) // descending order simply changes to > { temp = distances[j]; // swap elements distances[j] = distances[j+1]; distances[j+1] = temp; temp_position = positions[j]; //swap positions positions[j] = positions[j+1]; positions[j+1] = temp_position; flag = true; // indicates that a swap occurred. } } } return; //arrays are passed to functions by address; nothing is returned } void BubbleSort(array_1d<double,9> &distances , array_1d<int,9 > &positions, unsigned int & arrange_number) { int i, j; bool flag = true; // set flag to 1 to start first pass double temp; // holding variable int temp_position; int numLength = arrange_number; for(i = 1; (i <= numLength) && flag; i++) { flag = false; for (j=0; j < (numLength -1); j++) { if (distances[j+1] < distances[j]) // descending order simply changes to > { temp = distances[j]; // swap elements distances[j] = distances[j+1]; distances[j+1] = temp; temp_position = positions[j]; //swap positions positions[j] = positions[j+1]; positions[j+1] = temp_position; flag = true; // indicates that a swap occurred. 
} } } return; //arrays are passed to functions by address; nothing is returned } template<class T> bool InvertMatrix(const T& input, T& inverse) { typedef permutation_matrix<std::size_t> pmatrix; // create a working copy of the input T A(input); // create a permutation matrix for the LU-factorization pmatrix pm(A.size1()); // perform LU-factorization int res = lu_factorize(A, pm); if (res != 0) return false; // create identity matrix of "inverse" inverse.assign(identity_matrix<double> (A.size1())); // backsubstitute to get the inverse lu_substitute(A, pm, inverse); return true; } bool InvertMatrix3x3(const BoundedMatrix<double, TDim+1 , TDim+1 >& A, BoundedMatrix<double, TDim+1 , TDim+1 >& result) { double determinant = +A(0,0)*(A(1,1)*A(2,2)-A(2,1)*A(1,2)) -A(0,1)*(A(1,0)*A(2,2)-A(1,2)*A(2,0)) +A(0,2)*(A(1,0)*A(2,1)-A(1,1)*A(2,0)); double invdet = 1/determinant; result(0,0) = (A(1,1)*A(2,2)-A(2,1)*A(1,2))*invdet; result(1,0) = -(A(0,1)*A(2,2)-A(0,2)*A(2,1))*invdet; result(2,0) = (A(0,1)*A(1,2)-A(0,2)*A(1,1))*invdet; result(0,1) = -(A(1,0)*A(2,2)-A(1,2)*A(2,0))*invdet; result(1,1) = (A(0,0)*A(2,2)-A(0,2)*A(2,0))*invdet; result(2,1) = -(A(0,0)*A(1,2)-A(1,0)*A(0,2))*invdet; result(0,2) = (A(1,0)*A(2,1)-A(2,0)*A(1,1))*invdet; result(1,2) = -(A(0,0)*A(2,1)-A(2,0)*A(0,1))*invdet; result(2,2) = (A(0,0)*A(1,1)-A(1,0)*A(0,1))*invdet; return true; } ModelPart& mr_model_part; ModelPart* mtopographic_model_part_pointer; array_1d<double, 3 > mcalculation_domain_complete_displacement; array_1d<double, 3 > mcalculation_domain_added_displacement; bool mintialized_transfer_tool; bool muse_mesh_velocity_to_convect; int m_nparticles; int mnelems; double mDENSITY_WATER; double mDENSITY_AIR; //vector<double> mareas_vector; UNUSED SO COMMENTED int max_nsubsteps; double max_substep_dt; int mmaximum_number_of_particles; std::vector< PFEM_Particle_Fluid > mparticles_vector; //Point<3> int mlast_elem_id; bool modd_timestep; bool mparticle_printing_tool_initialized; unsigned int 
mfilter_factor; unsigned int mlast_node_id; //ModelPart& mr_particle_model_part; vector<int> mnumber_of_particles_in_elems; vector<int> mnumber_of_particles_in_elems_aux; vector<ParticlePointerVector*> mpointers_to_particle_pointers_vectors; typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic; typename BinsObjectDynamic<Configure>::Pointer mpTopographicBinsObjectDynamic; void CalculateNormal(Geometry<Node<3> >& pGeometry, array_1d<double,3>& An ); }; template<> void MoveParticleUtilityPFEM2<2>::CalculateNormal(Geometry<Node<3> >& pGeometry, array_1d<double,3>& An ) { array_1d<double,2> v1; v1[0] = pGeometry[1].X() - pGeometry[0].X(); v1[1] = pGeometry[1].Y() - pGeometry[0].Y(); An[0] = -v1[1]; An[1] = v1[0]; An[2] = 0.0; //now checking orientation using the normal: const unsigned int NumNodes = 2; array_1d<double,3> nodal_normal = ZeroVector(3); for (unsigned int iNode = 0; iNode < NumNodes; ++iNode) nodal_normal += pGeometry[iNode].FastGetSolutionStepValue(NORMAL); double dot_prod = nodal_normal[0]*An[0] + nodal_normal[1]*An[1]; if (dot_prod<0.0) { //std::cout << "inverting the normal" << std::endl; An *= -1.0; // inverting the direction of the normal!!! 
} } template<> void MoveParticleUtilityPFEM2<3>::CalculateNormal(Geometry<Node<3> >& pGeometry, array_1d<double,3>& An ) { array_1d<double,3> v1,v2; v1[0] = pGeometry[1].X() - pGeometry[0].X(); v1[1] = pGeometry[1].Y() - pGeometry[0].Y(); v1[2] = pGeometry[1].Z() - pGeometry[0].Z(); v2[0] = pGeometry[2].X() - pGeometry[0].X(); v2[1] = pGeometry[2].Y() - pGeometry[0].Y(); v2[2] = pGeometry[2].Z() - pGeometry[0].Z(); MathUtils<double>::CrossProduct(An,v1,v2); An *= 0.5; //now checking orientation using the normal: const unsigned int NumNodes = 3; array_1d<double,3> nodal_normal = ZeroVector(3); for (unsigned int iNode = 0; iNode < NumNodes; ++iNode) nodal_normal += pGeometry[iNode].FastGetSolutionStepValue(NORMAL); double dot_prod = nodal_normal[0]*An[0] + nodal_normal[1]*An[1] + nodal_normal[2]*An[2]; if (dot_prod<0.0) { //std::cout << "inverting the normal!!" << std::endl; An *= -1.0; // inverting the direction of the normal!!! } } } // namespace Kratos. #endif // KRATOS_MOVE_PART_UTILITY_DIFF2_INCLUDED defined
GB_binop__rdiv_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__rdiv_uint16 // A.*B function (eWiseMult): GB_AemultB__rdiv_uint16 // A*D function (colscale): GB_AxD__rdiv_uint16 // D*A function (rowscale): GB_DxB__rdiv_uint16 // C+=B function (dense accum): GB_Cdense_accumB__rdiv_uint16 // C+=b function (dense accum): GB_Cdense_accumb__rdiv_uint16 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rdiv_uint16 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rdiv_uint16 // C=scalar+B GB_bind1st__rdiv_uint16 // C=scalar+B' GB_bind1st_tran__rdiv_uint16 // C=A+scalar GB_bind2nd__rdiv_uint16 // C=A'+scalar GB_bind2nd_tran__rdiv_uint16 // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 16) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint16_t bij = Bx [pB] // declare scalar of the same type as C 
#define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_IDIV_UNSIGNED (y, x, 16) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_UINT16 || GxB_NO_RDIV_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__rdiv_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__rdiv_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__rdiv_uint16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__rdiv_uint16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__rdiv_uint16 ( 
GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__rdiv_uint16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__rdiv_uint16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__rdiv_uint16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT 
C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__rdiv_uint16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t bij = Bx [p] ; Cx [p] = GB_IDIV_UNSIGNED (bij, x, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__rdiv_uint16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t aij = Ax [p] ; Cx [p] = GB_IDIV_UNSIGNED (y, aij, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in 
spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 16) ; \ } GrB_Info GB_bind1st_tran__rdiv_uint16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 16) ; \ } GrB_Info GB_bind2nd_tran__rdiv_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
Dependency.c
#include <math.h>
#include <omp.h>
#include <string.h>

// Reference (sequential) kernel.  For i in [0,4) and j in [1,4):
//   a[i+2][j-1] = b * a[i][j] + 4
// There is a loop-carried flow dependence of distance 2 on i: iteration i
// writes row i+2, which iteration i+2 later reads (columns 1..2 overlap).
// Rows 0..5 and columns 0..3 of `a` must be valid.
void a(int **a, int b) {
    for (int i = 0; i < 4; ++i) {
        for (int j = 1; j < 4; ++j) {
            a[i + 2][j - 1] = b * a[i][j] + 4;
        }
    }
}

// Parallel version of a(), producing bitwise-identical results.
//
// The previous implementation was undefined behavior: it memcpy'd into an
// uninitialized `int **a2`, and `sizeof(a)` is only the size of a pointer.
// Even a correct shallow copy of the row-pointer array would not have helped,
// because the rows themselves would still be shared, and snapshotting the
// data would *break* the i-distance-2 flow dependence and change the result.
//
// Correct approach: keep the outer i loop sequential (it carries the
// dependence), and parallelize the inner j loop, which is independent for a
// fixed i — the write touches row i+2 while the read touches row i, and each
// j writes a distinct column.
void a_sol(int **a, int b) {
    for (int i = 0; i < 4; ++i) {
        // inner loop is dependence-free: safe to run iterations concurrently
        #pragma omp parallel for
        for (int j = 1; j < 4; ++j) {
            a[i + 2][j - 1] = b * a[i][j] + 4;
        }
    }
}
leaf.c
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "omp.h"
#include <sys/time.h>

// Wall-clock time in microseconds (gettimeofday based).
double getusec_() {
        struct timeval time;
        gettimeofday(&time, NULL);
        return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}

// Simple elapsed-time bracket: START stores the stamp, STOP prints seconds.
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
                        stamp = stamp/1e6;\
                        printf ("%s: %0.6f\n",(_m), stamp);

// N and MIN must be powers of 2
long N;
long MIN_SORT_SIZE;
long MIN_MERGE_SIZE;
int CUTOFF;

#define T int

// Provided elsewhere: sequential base-case sort and merge kernels.
void basicsort(long n, T data[n]);
void basicmerge(long n, T left[n], T right[n], T result[n*2], long start, long length);

// Recursive merge of left/right into result[start..start+length).
// "Leaf" task strategy: only the base case is spawned as an OpenMP task;
// the recursive decomposition itself runs in the spawning thread.
void merge(long n, T left[n], T right[n], T result[n*2], long start, long length) {
        if (length < MIN_MERGE_SIZE*2L) {
                // Base case
                #pragma omp task
                basicmerge(n, left, right, result, start, length);
        } else {
                // Recursive decomposition
                merge(n, left, right, result, start, length/2);
                merge(n, left, right, result, start + length/2, length/2);
        }
}

// Sort data[0..n) using tmp[0..n) as scratch.  Recursion is a plain call
// tree; tasks are only created at the leaves (basicsort/basicmerge), so the
// taskwaits below wait for all leaf tasks spawned by the preceding calls.
void multisort(long n, T data[n], T tmp[n]) {
        if (n >= MIN_SORT_SIZE*4L) {
                // Recursive decomposition
                multisort(n/4L, &data[0], &tmp[0]);
                multisort(n/4L, &data[n/4L], &tmp[n/4L]);
                multisort(n/4L, &data[n/2L], &tmp[n/2L]);
                multisort(n/4L, &data[3L*n/4L], &tmp[3L*n/4L]);
                // the four quarters must be fully sorted before merging
                #pragma omp taskwait

                merge(n/4L, &data[0], &data[n/4L], &tmp[0], 0, n/2L);
                merge(n/4L, &data[n/2L], &data[3L*n/4L], &tmp[n/2L], 0, n/2L);
                // both half-merges must finish before the final merge
                #pragma omp taskwait

                merge(n/2L, &tmp[0], &tmp[n/2L], &data[0], 0, n);
                // NOTE(review): no taskwait after the final merge here; the
                // implicit barrier at the end of the enclosing parallel
                // region in main() is what guarantees completion — confirm
                // if multisort is ever called from another context.
        } else {
                // Base case
                #pragma omp task
                basicsort(n, data);
        }
}

// Deterministic pseudo-random initialization (depends on previous element).
static void initialize(long length, T data[length]) {
        long i;
        for (i = 0; i < length; i++) {
                if (i==0) {
                        data[i] = rand();
                } else {
                        data[i] = ((data[i-1]+1) * i * 104723L) % N;
                }
        }
}

// Zero-fill the scratch buffer.
static void clear(long length, T data[length]) {
        long i;
        for (i = 0; i < length; i++) {
                data[i] = 0;
        }
}

// Count and report adjacent inversions; prints an error if any remain.
void check_sorted(long n, T data[n]) {
        int unsorted=0;
        for (int i=1; i<n; i++)
                if (data[i-1] > data[i]) unsorted++;
        if (unsorted > 0)
                printf ("\nERROR: data is NOT properly sorted. There are %d unordered positions\n\n",unsorted);
}

int main(int argc, char **argv) {
        /* Defaults for command line arguments */
        /* Important: all of them should be powers of two */
        N = 32768 * 1024;
        MIN_SORT_SIZE = 1024;
        MIN_MERGE_SIZE = 1024;
        CUTOFF = 16;

        /* Process command-line arguments */
        for (int i=1; i<argc; i++) {
                if (strcmp(argv[i], "-n")==0) {
                        N = atol(argv[++i]) * 1024;
                }
                else if (strcmp(argv[i], "-s")==0) {
                        MIN_SORT_SIZE = atol(argv[++i]);
                }
                else if (strcmp(argv[i], "-m")==0) {
                        MIN_MERGE_SIZE = atol(argv[++i]);
                }
#ifdef _OPENMP
                else if (strcmp(argv[i], "-c")==0) {
                        // NOTE(review): CUTOFF is parsed but never used by this
                        // "leaf" task strategy; it exists for CLI compatibility
                        // with the tree-task variants of this exercise.
                        CUTOFF = atoi(argv[++i]);
                }
#endif
                else {
#ifdef _OPENMP
                        fprintf(stderr, "Usage: %s [-n vector_size -s MIN_SORT_SIZE -m MIN_MERGE_SIZE] -c CUTOFF\n", argv[0]);
#else
                        fprintf(stderr, "Usage: %s [-n vector_size -s MIN_SORT_SIZE -m MIN_MERGE_SIZE]\n", argv[0]);
#endif
                        fprintf(stderr, "       -n to specify the size of the vector (in Kelements) to sort (default 32768)\n");
                        fprintf(stderr, "       -s to specify the size of the vector (in elements) that breaks recursion in the sort phase (default 1024)\n");
                        fprintf(stderr, "       -m to specify the size of the vector (in elements) that breaks recursion in the merge phase (default 1024)\n");
#ifdef _OPENMP
                        fprintf(stderr, "       -c to specify the cut off recursion level to stop task generation in OpenMP (default 16)\n");
#endif
                        return EXIT_FAILURE;
                }
        }

        fprintf(stdout, "*****************************************************************************************\n");
        fprintf(stdout, "Problem size (in number of elements): N=%ld, MIN_SORT_SIZE=%ld, MIN_MERGE_SIZE=%ld\n", N/1024, MIN_SORT_SIZE, MIN_MERGE_SIZE);
#ifdef _OPENMP
        fprintf(stdout, "Cut-off level: CUTOFF=%d\n", CUTOFF);
        fprintf(stdout, "Number of threads in OpenMP: OMP_NUM_THREADS=%d\n", omp_get_max_threads());
#endif
        fprintf(stdout, "*****************************************************************************************\n");

        T *data = malloc(N*sizeof(T));
        T *tmp = malloc(N*sizeof(T));

        double stamp;
        START_COUNT_TIME;
        initialize(N, data);
        clear(N, tmp);
        STOP_COUNT_TIME("Initialization time in seconds");

        START_COUNT_TIME;
        // one thread enters multisort; tasks are created at leaves and the
        // implicit barrier at the end of the parallel region joins them all
        #pragma omp parallel
        #pragma omp single
        multisort(N, data, tmp);
        STOP_COUNT_TIME("Multisort execution time");

        START_COUNT_TIME;
        check_sorted (N, data);
        STOP_COUNT_TIME("Check sorted data execution time");

        fprintf(stdout, "Multisort program finished\n");
        fprintf(stdout, "*****************************************************************************************\n");
        return 0;
}
arrayInStruct_openMP.c
#include "arrayInStruct_openMP.h"

// Generated-looking kernel (note the synthetic v0/v6/e0 names — presumably
// emitted by a code generator; confirm against the generator before editing).
//
// Behavior as visible here: starting from a copy of v0's buffer, it runs
// v0->length passes, each pass writing element-wise (previous + 5) into a
// second buffer and swapping the two buffers (double buffering).  The final
// buffer is deep-copied into *out, and both working buffers are freed.
// The inner element loop is OpenMP-parallel; passes are sequential because
// each reads the previous pass's output.
void arrayInStruct__openMP(struct awl_unsignedS32 * v0, struct awl_unsignedS32 * out) {
    struct s_2_1xunsignedS32_1xawl_unsignedS32 e0 = { 0 };  // current state: {passes remaining, buffer}
    struct s_2_1xunsignedS32_1xawl_unsignedS32 v6 = { 0 };  // next state (write target)
    bool v3;
    (e0).member1 = (*v0).length;
    // seed the working buffer with a deep copy of the input
    ((e0).member2).buffer = initCopyArray(((e0).member2).buffer, ((e0).member2).length, sizeof(uint32_t), (*v0).buffer, (*v0).length);
    ((e0).member2).length = (*v0).length;
    v3 = ((e0).member1 > 0);
    while (v3) {
        uint32_t len1;
        struct awl_unsignedS32 e2 = { 0 };
        (v6).member1 = ((e0).member1 - 1);          // one pass consumed
        len1 = ((e0).member2).length;
        // (re)size the destination buffer for this pass
        ((v6).member2).buffer = initArray(((v6).member2).buffer, ((v6).member2).length, sizeof(uint32_t), len1);
        ((v6).member2).length = len1;
        // element-wise +5; iterations are independent, safe to parallelize
        #pragma omp parallel for
        for (uint32_t v10 = 0; v10 < len1; v10 += 1) {
            ((v6).member2).buffer[v10] = (((e0).member2).buffer[v10] + 5);
        }
        // swap buffers: e0 becomes the freshly-written state, v6 keeps the
        // old buffer so it can be reused as the next pass's destination
        e2 = (e0).member2;
        e0 = v6;
        (v6).member2 = e2;
        v3 = ((e0).member1 > 0);
    }
    // deep-copy the result out; the two working buffers remain distinct
    // after the swap, so both frees below are safe (no double free)
    (*out).buffer = initCopyArray((*out).buffer, (*out).length, sizeof(uint32_t), ((e0).member2).buffer, ((e0).member2).length);
    (*out).length = ((e0).member2).length;
    freeArray(((e0).member2).buffer);
    freeArray(((v6).member2).buffer);
}
GB_subref_slice.c
//------------------------------------------------------------------------------
// GB_subref_slice: construct coarse/fine tasks for C = A(I,J)
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Determine the tasks for computing C=A(I,J).  The matrix C has Cnvec vectors,
// and these are divided into coarse and fine tasks.  A coarse task will
// compute one or more whole vectors of C.  A fine task operates on a slice of
// a single vector of C.  The slice can be done by the # of entries in the
// corresponding vector of A, or by the list of indices I, depending on how the
// work is done for that method.

// The (kC)th vector will access A(imin:imax,kA) in Ai,Ax [pA:pA_end-1], where
// pA = Ap_start [kC] and pA_end = Ap_end [kC].

// The computation of each vector C(:,kC) = A(I,kA) is done using one of 12
// different cases, depending on the vector, as determined by GB_subref_method.
// Not all vectors in C are computed using the same method.

// Note that J can have duplicates.  kC is unique (0:Cnvec-1) but the
// corresponding vector kA in A may repeat, if J has duplicates.  Duplicates in
// J are not exploited, since the coarse/fine tasks are constructed by slicing
// the list of vectors Ch of size Cnvec, not the vectors of A.

// Compare this function with GB_ewise_slice, which constructs coarse/fine
// tasks for the eWise operations (C=A+B, C=A.*B, and C<M>=Z).
// free transient workspace only (Cwork and the Coarse slice array)
#define GB_FREE_WORK                        \
{                                           \
    GB_WERK_POP (Coarse, int64_t) ;         \
    GB_FREE_WERK (&Cwork, Cwork_size) ;     \
}

// free everything, including the outputs, on error
#define GB_FREE_ALL                             \
{                                               \
    GB_FREE_WORK ;                              \
    GB_FREE_WERK (&TaskList, TaskList_size) ;   \
    GB_FREE_WERK (&Mark, Mark_size) ;           \
    GB_FREE_WERK (&Inext, Inext_size) ;         \
}

#include "GB_subref.h"

GrB_Info GB_subref_slice
(
    // output:
    GB_task_struct **p_TaskList,    // array of structs
    size_t *p_TaskList_size,        // size of TaskList
    int *p_ntasks,                  // # of tasks constructed
    int *p_nthreads,                // # of threads for subref operation
    bool *p_post_sort,              // true if a final post-sort is needed
    int64_t *restrict *p_Mark,      // for I inverse, if needed; size avlen
    size_t *p_Mark_size,
    int64_t *restrict *p_Inext,     // for I inverse, if needed; size nI
    size_t *p_Inext_size,
    int64_t *p_nduplicates,         // # of duplicates, if I inverse computed
    // from phase0:
    const int64_t *restrict Ap_start,   // location of A(imin:imax,kA)
    const int64_t *restrict Ap_end,
    const int64_t Cnvec,            // # of vectors of C
    const bool need_qsort,          // true if C must be sorted
    const int Ikind,                // GB_ALL, GB_RANGE, GB_STRIDE or GB_LIST
    const int64_t nI,               // length of I
    const int64_t Icolon [3],       // for GB_RANGE and GB_STRIDE
    // original input:
    const int64_t avlen,            // A->vlen
    const int64_t anz,              // nnz (A)
    const GrB_Index *I,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (p_TaskList != NULL) ;
    ASSERT (p_TaskList_size != NULL) ;
    ASSERT (p_ntasks != NULL) ;
    ASSERT (p_nthreads != NULL) ;
    ASSERT (p_post_sort != NULL) ;
    ASSERT (p_Mark != NULL) ;
    ASSERT (p_Inext != NULL) ;
    ASSERT (p_nduplicates != NULL) ;

    ASSERT ((Cnvec > 0) == (Ap_start != NULL)) ;
    ASSERT ((Cnvec > 0) == (Ap_end != NULL)) ;

    // initialize outputs so GB_FREE_ALL is safe on any error path
    (*p_TaskList) = NULL ;
    (*p_TaskList_size) = 0 ;
    (*p_Mark    ) = NULL ;
    (*p_Inext   ) = NULL ;

    int64_t *restrict Mark = NULL ; size_t Mark_size = 0 ;
    int64_t *restrict Inext = NULL ; size_t Inext_size = 0 ;
    int64_t *restrict Cwork = NULL ; size_t Cwork_size = 0 ;

    GB_WERK_DECLARE (Coarse, int64_t) ;     // size ntasks1+1
    int ntasks1 = 0 ;

    GrB_Info info ;

    //--------------------------------------------------------------------------
    // determine # of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    //--------------------------------------------------------------------------
    // allocate the initial TaskList
    //--------------------------------------------------------------------------

    // Allocate the TaskList to hold at least 2*ntask0 tasks.  It will grow
    // later, if needed.  Usually, 64*nthreads_max is enough, but in a few cases
    // fine tasks can cause this number to be exceeded.  If that occurs,
    // TaskList is reallocated.

    // When the mask is present, it is often fastest to break the work up
    // into tasks, even when nthreads_max is 1.

    GB_task_struct *restrict TaskList = NULL ; size_t TaskList_size = 0 ;
    int max_ntasks = 0 ;
    int ntasks0 = (nthreads_max == 1) ? 1 : (32 * nthreads_max) ;
    GB_REALLOC_TASK_WERK (TaskList, ntasks0, max_ntasks) ;

    //--------------------------------------------------------------------------
    // determine if I_inverse can be constructed
    //--------------------------------------------------------------------------

    // I_inverse_ok is true if I might be inverted.  If false, then I will not
    // be inverted.  I can be inverted only if the workspace for the inverse
    // does not exceed nnz(A).  Note that if I was provided on input as an
    // explicit list, but consists of a contiguous range imin:imax, then Ikind
    // is now GB_LIST and the list I is ignored.

    // If I_inverse_ok is true, the inverse of I might still not be needed.
    // need_I_inverse becomes true if any C(:,kC) = A (I,kA) computation
    // requires I inverse.

    int64_t I_inverse_limit = GB_IMAX (4096, anz) ;
    bool I_inverse_ok = (Ikind == GB_LIST &&
        ((nI > avlen / 256) || ((nI + avlen) < I_inverse_limit))) ;
    bool need_I_inverse = false ;
    bool post_sort = false ;
    int64_t iinc = Icolon [GxB_INC] ;

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    Cwork = GB_MALLOC_WERK (Cnvec+1, int64_t, &Cwork_size) ;
    if (Cwork == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // estimate the work required for each vector of C
    //--------------------------------------------------------------------------

    int nthreads_for_Cwork = GB_nthreads (Cnvec, chunk, nthreads_max) ;

    int64_t kC ;
    #pragma omp parallel for num_threads(nthreads_for_Cwork) schedule(static) \
        reduction(||:need_I_inverse)
    for (kC = 0 ; kC < Cnvec ; kC++)
    {
        // jC is the (kC)th vector of C = A(I,J)
        // int64_t jC = GBH (Ch, kC) ;
        // C(:,kC) = A(I,kA) will be constructed
        int64_t pA      = Ap_start [kC] ;
        int64_t pA_end  = Ap_end   [kC] ;
        int64_t alen = pA_end - pA ;        // nnz (A (imin:imax,j))

        int64_t work ;              // amount of work for C(:,kC) = A (I,kA)
        bool this_needs_I_inverse ; // true if this vector needs I inverse

        // ndupl in I not yet known; it is found when I is inverted.  For
        // now, assume I has no duplicate entries.  All that is needed for now
        // is the work required for each C(:,kC), and whether or not I inverse
        // must be created.  The # of duplicates has no impact on the I inverse
        // decision, and a minor effect on the work (which is ignored).

        GB_subref_method (&work, &this_needs_I_inverse, alen, avlen,
            Ikind, nI, I_inverse_ok, need_qsort, iinc, 0) ;

        // log the result
        need_I_inverse = need_I_inverse || this_needs_I_inverse ;
        Cwork [kC] = work ;
    }

    //--------------------------------------------------------------------------
    // replace Cwork with its cumulative sum
    //--------------------------------------------------------------------------

    GB_cumsum (Cwork, Cnvec, NULL, nthreads_for_Cwork, Context) ;
    double cwork = (double) Cwork [Cnvec] ;

    //--------------------------------------------------------------------------
    // determine # of threads and tasks to use for C=A(I,J)
    //--------------------------------------------------------------------------

    int nthreads = GB_nthreads (cwork, chunk, nthreads_max) ;

    ntasks1 = (nthreads == 1) ? 1 : (32 * nthreads) ;
    double target_task_size = cwork / (double) (ntasks1) ;
    target_task_size = GB_IMAX (target_task_size, chunk) ;

    //--------------------------------------------------------------------------
    // invert I if required
    //--------------------------------------------------------------------------

    int64_t ndupl = 0 ;
    if (need_I_inverse)
    {
        GB_OK (GB_I_inverse (I, nI, avlen, &Mark, &Mark_size,
            &Inext, &Inext_size, &ndupl, Context)) ;
        ASSERT (Mark != NULL) ;
        ASSERT (Inext != NULL) ;
    }

    //--------------------------------------------------------------------------
    // check for quick return for a single task
    //--------------------------------------------------------------------------

    if (Cnvec == 0 || ntasks1 == 1)
    {
        // construct a single coarse task that computes all of C
        TaskList [0].kfirst = 0 ;
        TaskList [0].klast  = Cnvec-1 ;

        // free workspace and return result
        GB_FREE_WORK ;
        (*p_TaskList  ) = TaskList ;
        (*p_TaskList_size) = TaskList_size ;
        (*p_ntasks    ) = (Cnvec == 0) ? 0 : 1 ;
        (*p_nthreads  ) = 1 ;
        (*p_post_sort ) = false ;
        (*p_Mark      ) = Mark ;
        (*p_Mark_size ) = Mark_size ;
        (*p_Inext     ) = Inext ;
        (*p_Inext_size) = Inext_size ;
        (*p_nduplicates) = ndupl ;
        return (GrB_SUCCESS) ;
    }

    //--------------------------------------------------------------------------
    // slice the work into coarse tasks
    //--------------------------------------------------------------------------

    GB_WERK_PUSH (Coarse, ntasks1 + 1, int64_t) ;
    if (Coarse == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }
    GB_pslice (Coarse, Cwork, Cnvec, ntasks1, false) ;

    //--------------------------------------------------------------------------
    // construct all tasks, both coarse and fine
    //--------------------------------------------------------------------------

    int ntasks = 0 ;

    for (int t = 0 ; t < ntasks1 ; t++)
    {

        //----------------------------------------------------------------------
        // coarse task computes C (:,k:klast)
        //----------------------------------------------------------------------

        int64_t k = Coarse [t] ;
        int64_t klast  = Coarse [t+1] - 1 ;

        if (k >= Cnvec)
        {

            //------------------------------------------------------------------
            // all tasks have been constructed
            //------------------------------------------------------------------

            break ;

        }
        else if (k < klast)
        {

            //------------------------------------------------------------------
            // coarse task has 2 or more vectors
            //------------------------------------------------------------------

            // This is a non-empty coarse-grain task that does two or more
            // entire vectors of C, vectors k:klast, inclusive.
            GB_REALLOC_TASK_WERK (TaskList, ntasks + 1, max_ntasks) ;
            TaskList [ntasks].kfirst = k ;
            TaskList [ntasks].klast  = klast ;
            ntasks++ ;

        }
        else
        {

            //------------------------------------------------------------------
            // coarse task has 0 or 1 vectors
            //------------------------------------------------------------------

            // As a coarse-grain task, this task is empty or does a single
            // vector, k.  Vector k must be removed from the work done by this
            // and any other coarse-grain task, and split into one or more
            // fine-grain tasks.

            for (int tt = t ; tt < ntasks1 ; tt++)
            {
                // remove k from the initial slice tt
                if (Coarse [tt] == k)
                {
                    // remove k from task tt
                    Coarse [tt] = k+1 ;
                }
                else
                {
                    // break, k not in task tt
                    break ;
                }
            }

            //------------------------------------------------------------------
            // determine the # of fine-grain tasks to create for vector k
            //------------------------------------------------------------------

            double ckwork = Cwork [k+1] - Cwork [k] ;
            int nfine = ckwork / target_task_size ;
            nfine = GB_IMAX (nfine, 1) ;

            // make the TaskList bigger, if needed
            GB_REALLOC_TASK_WERK (TaskList, ntasks + nfine, max_ntasks) ;

            //------------------------------------------------------------------
            // create the fine-grain tasks
            //------------------------------------------------------------------

            if (nfine == 1)
            {

                //--------------------------------------------------------------
                // this is a single coarse task for all of vector k
                //--------------------------------------------------------------

                TaskList [ntasks].kfirst = k ;
                TaskList [ntasks].klast  = k ;
                ntasks++ ;

            }
            else
            {

                //--------------------------------------------------------------
                // slice vector k into nfine fine tasks
                //--------------------------------------------------------------

                // There are two kinds of fine tasks, depending on the method
                // used to compute C(:,kC) = A(I,kA).  If the method iterates
                // across all entries in A(imin:imax,kA), then those entries
                // are sliced (of size alen).  Three methods (1, 2, and 6)
                // iterate across all entries in I instead (of size nI).

                int64_t pA     = Ap_start [k] ;
                int64_t pA_end = Ap_end   [k] ;
                int64_t alen = pA_end - pA ;    // nnz (A (imin:imax,j))

                int method = GB_subref_method (NULL, NULL, alen, avlen,
                    Ikind, nI, I_inverse_ok, need_qsort, iinc, ndupl) ;

                if (method == 10)
                {
                    // multiple fine tasks operate on a single vector C(:,kC)
                    // using method 10, and so a post-sort is needed.
                    post_sort = true ;
                }

                if (method == 1 || method == 2 || method == 6)
                {

                    // slice I for this task
                    nfine = GB_IMIN (nfine, nI) ;
                    nfine = GB_IMAX (nfine, 1) ;

                    for (int tfine = 0 ; tfine < nfine ; tfine++)
                    {
                        // flag this as a fine task, and record the method.
                        // Methods 1, 2, and 6 slice I, not A(:,kA)
                        TaskList [ntasks].kfirst = k ;
                        TaskList [ntasks].klast = -method ;
                        // do not partition A(:,kA)
                        TaskList [ntasks].pA = pA ;
                        TaskList [ntasks].pA_end = pA_end ;
                        // partition I for this task
                        GB_PARTITION (TaskList [ntasks].pB,
                            TaskList [ntasks].pB_end, nI, tfine, nfine) ;
                        // unused
                        TaskList [ntasks].pM = -1 ;
                        TaskList [ntasks].pM_end = -1 ;
                        // no post sort
                        TaskList [ntasks].len = 0 ;
                        ntasks++ ;
                    }

                }
                else
                {

                    // slice A(:,kA) for this task
                    nfine = GB_IMIN (nfine, alen) ;
                    nfine = GB_IMAX (nfine, 1) ;

                    bool reverse = (method == 8 || method == 9) ;

                    for (int tfine = 0 ; tfine < nfine ; tfine++)
                    {
                        // flag this as a fine task, and record the method.
                        // These methods slice A(:,kA).  Methods 8 and 9
                        // must do so in reverse order.
                        TaskList [ntasks].kfirst = k ;
                        TaskList [ntasks].klast = -method ;
                        // partition the items for this task
                        GB_PARTITION (TaskList [ntasks].pA,
                            TaskList [ntasks].pA_end, alen,
                            (reverse) ? (nfine-tfine-1) : tfine, nfine) ;
                        TaskList [ntasks].pA += pA ;
                        TaskList [ntasks].pA_end += pA ;
                        // do not partition I
                        TaskList [ntasks].pB = 0 ;
                        TaskList [ntasks].pB_end = nI ;
                        // unused
                        TaskList [ntasks].pM = -1 ;
                        TaskList [ntasks].pM_end = -1 ;
                        // flag the task that does the post sort
                        TaskList [ntasks].len = (tfine == 0 && method == 10) ;
                        ntasks++ ;
                    }
                }
            }
        }
    }

    ASSERT (ntasks > 0) ;

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    (*p_TaskList  ) = TaskList ;
    (*p_TaskList_size) = TaskList_size ;
    (*p_ntasks    ) = ntasks ;
    (*p_nthreads  ) = nthreads ;
    (*p_post_sort ) = post_sort ;
    (*p_Mark      ) = Mark ;
    (*p_Mark_size ) = Mark_size ;
    (*p_Inext     ) = Inext ;
    (*p_Inext_size) = Inext_size ;
    (*p_nduplicates) = ndupl ;
    return (GrB_SUCCESS) ;
}
linear_system_solver.c
//
// Created by sachetto on 04/10/17.
//

#include "../config/linear_system_solver_config.h"
#include "../config_helpers/config_helpers.h"
#include "../libraries_common/common_data_structures.h"
#include "../single_file_libraries/stb_ds.h"
#include "../models_library/model_gpu_utils.h"

bool jacobi_initialized = false;
bool bcg_initialized = false;
// solver options shared by all entry points in this translation unit
static bool use_preconditioner = false;
static int max_its = 50;
static real_cpu tol = 1e-16;

#include <cusparse_v2.h>
#include <cublas_v2.h>

#ifdef COMPILE_CUDA

// Device-resident CSR matrix and CG work vectors, allocated by
// init_gpu_conjugate_gradient and released by end_gpu_conjugate_gradient.
static int *d_col, *d_row;
static real *d_val, *d_x;
static real *d_r, *d_p, *d_Ax;
static int N = 0, nz = 0;

/* Get handle to the CUBLAS context */
static cublasHandle_t cublasHandle = 0;
static cublasStatus_t cublasStatus;

/* Get handle to the CUSPARSE context */
static cusparseHandle_t cusparseHandle = 0;
static cusparseStatus_t cusparseStatus;

cusparseMatDescr_t descr = 0;

// ILU(0)-preconditioner state (only used when use_preconditioner is set)
static int nzILU0;
static real *d_valsILU0, *d_zm1, *d_zm2, *d_rm2, *d_y;
static cusparseSolveAnalysisInfo_t infoA = 0;
static cusparseSolveAnalysisInfo_t info_u;
static cusparseMatDescr_t descrL = 0;
static cusparseMatDescr_t descrU = 0;

// Build the CSR system on the host from the grid, upload it to the GPU, and
// (optionally) prepare the ILU(0) preconditioner.
INIT_LINEAR_SYSTEM(init_gpu_conjugate_gradient) {

    int_array I = NULL, J = NULL;
    f32_array val = NULL;

    GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real_cpu, tol, config->config_data, "tolerance");
    GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(int, max_its, config->config_data, "max_iterations");
    GET_PARAMETER_BOOLEAN_VALUE_OR_USE_DEFAULT(use_preconditioner, config->config_data, "use_preconditioner");

    check_cuda_error((cudaError_t)cublasCreate(&cublasHandle));
    check_cuda_error((cudaError_t)cusparseCreate(&cusparseHandle));
    check_cuda_error((cudaError_t)cusparseCreateMatDescr(&descr));

    cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
    cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);

    grid_to_csr(the_grid, &val, &I, &J);

    uint32_t num_active_cells = the_grid->num_active_cells;
    struct cell_node** ac = the_grid->active_cells;

    nz = arrlen(val);
    N = num_active_cells;

    check_cuda_error(cudaMalloc((void **) &d_col, nz * sizeof(int)));
    check_cuda_error(cudaMalloc((void **) &d_row, (N + 1) * sizeof(int)));
    check_cuda_error(cudaMalloc((void **) &d_val, nz * sizeof(float)));
    check_cuda_error(cudaMalloc((void **) &d_x, N * sizeof(float)));
    check_cuda_error(cudaMalloc((void **) &d_r, N * sizeof(float)));
    check_cuda_error(cudaMalloc((void **) &d_p, N * sizeof(float)));
    check_cuda_error(cudaMalloc((void **) &d_Ax, N * sizeof(float)));

    cudaMemcpy(d_col, J, nz * sizeof(int), cudaMemcpyHostToDevice);      //JA
    cudaMemcpy(d_row, I, (N + 1) * sizeof(int), cudaMemcpyHostToDevice); //IA
    cudaMemcpy(d_val, val, nz * sizeof(float), cudaMemcpyHostToDevice);  //A

    // NOTE(review): `real` vs the float-sized device buffers — if `real` is
    // double, the memcpy sizes below mix element sizes; confirm `real` here.
    real *rhs = (real*) malloc(sizeof(real)*num_active_cells);

    #pragma omp parallel for
    for (uint32_t i = 0; i < num_active_cells; i++) {
        rhs[i] = ac[i]->b;
    }

    check_cuda_error(cudaMemcpy(d_x, rhs, N * sizeof(float), cudaMemcpyHostToDevice)); //Result

    if(use_preconditioner) {
        // NOTE(review): nzILU0 = 2*N-1 looks like it assumes a tridiagonal
        // pattern; confirm against grid_to_csr's actual structure.
        nzILU0 = 2*N-1;
        check_cuda_error(cudaMalloc((void **)&d_valsILU0, nz*sizeof(float)));
        check_cuda_error(cudaMalloc((void **)&d_zm1, (N)*sizeof(float)));
        check_cuda_error(cudaMalloc((void **)&d_zm2, (N)*sizeof(float)));
        check_cuda_error(cudaMalloc((void **)&d_rm2, (N)*sizeof(float)));
        check_cuda_error(cudaMalloc((void **)&d_y, N*sizeof(float)));

        cusparseStatus = cusparseCreateSolveAnalysisInfo(&infoA);
        check_cuda_error((cudaError_t)cusparseStatus);

        /* Perform the analysis for the Non-Transpose case */
        cusparseStatus = cusparseScsrsv_analysis(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                                 N, nz, descr, d_val, d_row, d_col, infoA);

        check_cuda_error((cudaError_t)cusparseStatus);

        /* Copy A data to ILU0 vals as input*/
        cudaMemcpy(d_valsILU0, d_val, nz*sizeof(float), cudaMemcpyDeviceToDevice);

        /* generate the Incomplete LU factor H for the matrix A using cudsparseScsrilu0 */
        cusparseStatus = cusparseScsrilu0(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, descr, d_valsILU0, d_row, d_col, infoA);
        check_cuda_error((cudaError_t)cusparseStatus);

        cusparseCreateSolveAnalysisInfo(&info_u);

        cusparseStatus = cusparseCreateMatDescr(&descrL);
        cusparseSetMatType(descrL,CUSPARSE_MATRIX_TYPE_GENERAL);
        cusparseSetMatIndexBase(descrL,CUSPARSE_INDEX_BASE_ZERO);
        cusparseSetMatFillMode(descrL, CUSPARSE_FILL_MODE_LOWER);
        cusparseSetMatDiagType(descrL, CUSPARSE_DIAG_TYPE_UNIT);

        cusparseStatus = cusparseCreateMatDescr(&descrU);
        cusparseSetMatType(descrU,CUSPARSE_MATRIX_TYPE_GENERAL);
        cusparseSetMatIndexBase(descrU,CUSPARSE_INDEX_BASE_ZERO);
        cusparseSetMatFillMode(descrU, CUSPARSE_FILL_MODE_UPPER);
        cusparseSetMatDiagType(descrU, CUSPARSE_DIAG_TYPE_NON_UNIT);
        cusparseStatus = cusparseScsrsv_analysis(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, nz, descrU, d_val, d_row, d_col, info_u);
    }

    free(rhs);
    arrfree(I);
    arrfree(J);
    arrfree(val);
}

// Release all device memory and library handles created by init.
END_LINEAR_SYSTEM(end_gpu_conjugate_gradient) {
    check_cuda_error( (cudaError_t)cusparseDestroy(cusparseHandle) );
    check_cuda_error( (cudaError_t)cublasDestroy(cublasHandle) );
    check_cuda_error( (cudaError_t)cusparseDestroyMatDescr(descr));
    check_cuda_error( (cudaError_t)cusparseDestroyMatDescr(descrL));
    check_cuda_error( (cudaError_t)cusparseDestroyMatDescr(descrU));

    /* Destroy parameters */
    cusparseDestroySolveAnalysisInfo(infoA);
    cusparseDestroySolveAnalysisInfo(info_u);

    check_cuda_error(cudaFree(d_row));
    check_cuda_error(cudaFree(d_val));
    check_cuda_error(cudaFree(d_x));
    check_cuda_error(cudaFree(d_r));
    check_cuda_error(cudaFree(d_p));
    check_cuda_error(cudaFree(d_Ax));
    // NOTE(review): d_col is never freed here — presumably a leak; also the
    // preconditioner buffers below are freed even when never allocated
    // (use_preconditioner false) — cudaFree(NULL)-safety relied upon. Confirm.
    check_cuda_error(cudaFree(d_y));
    check_cuda_error(cudaFree(d_valsILU0));
    check_cuda_error(cudaFree(d_zm1));
    check_cuda_error(cudaFree(d_zm2));
    check_cuda_error(cudaFree(d_rm2));
}

SOLVE_LINEAR_SYSTEM(gpu_conjugate_gradient) {

    /* Conjugate gradient without preconditioning.
       ------------------------------------------
       Follows the description by Golub & Van Loan,
       "Matrix Computations 3rd ed.", Section 10.2.6  */

    real dot;
    real a, b, na, r0, r1;
    int k;
    real alpha, beta, alpham1;

    real *rhs; //Vector B

    uint32_t num_active_cells = the_grid->num_active_cells;
    struct cell_node** ac = the_grid->active_cells;

    rhs = (real*) malloc(sizeof(real)*num_active_cells);

    #pragma omp parallel for
    for (uint32_t i = 0; i < num_active_cells; i++) {
        rhs[i] = ac[i]->b;
    }

    cudaMemcpy(d_r, rhs, N * sizeof(float), cudaMemcpyHostToDevice); //B

    alpha = 1.0;
    alpham1 = -1.0;
    beta = 0.0;
    r0 = 0.;

    real numerator, denominator;

    // r = b - A*x0
    cusparseScsrmv(cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax);
    cublasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1);
    cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1);

    k = 1;

    // NOTE(review): convergence test compares r1 = ||r||^2 (not its square
    // root) against tol — confirm tol is meant as a squared-norm tolerance.
    while (r1 >= tol && k <= max_its) {

        if(use_preconditioner) {
            // Forward Solve, we can re-use infoA since the sparsity pattern of A matches that of L
            cusparseStatus = cusparseScsrsv_solve(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, &alpha, descrL, d_valsILU0, d_row, d_col, infoA, d_r, d_y);
            check_cuda_error((cudaError_t)cusparseStatus);

            // Back Substitution
            cusparseStatus = cusparseScsrsv_solve(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, &alpha, descrU, d_valsILU0, d_row, d_col, info_u, d_y, d_zm1);
            check_cuda_error((cudaError_t)cusparseStatus);
        }

        if (k > 1) {
            if(use_preconditioner) {
                cublasSdot(cublasHandle, N, d_r, 1, d_zm1, 1, &numerator);
                cublasSdot(cublasHandle, N, d_rm2, 1, d_zm2, 1, &denominator);
                b = numerator/denominator;
                cublasSscal(cublasHandle, N, &b, d_p, 1);
                cublasSaxpy(cublasHandle, N, &alpha, d_zm1, 1, d_p, 1) ;
            }
            else {
                b = r1 / r0;
                cublasStatus = cublasSscal(cublasHandle, N, &b, d_p, 1);
                cublasStatus = cublasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1);
            }
        }
        else {
            if(use_preconditioner) {
                cublasScopy(cublasHandle, N, d_zm1, 1, d_p, 1);
            }
            else {
                cublasStatus = cublasScopy(cublasHandle, N, d_r, 1, d_p, 1);
            }
        }

        if(use_preconditioner) {
            cusparseScsrmv(cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nzILU0, &alpha, descrU, d_val, d_row, d_col, d_p, &beta, d_Ax);
            cublasSdot(cublasHandle, N, d_r, 1, d_zm1, 1, &numerator);
            cublasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &denominator);
            a = numerator / denominator;
            cublasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1);
            cublasScopy(cublasHandle, N, d_r, 1, d_rm2, 1);
            cublasScopy(cublasHandle, N, d_zm1, 1, d_zm2, 1);
            na = -a;
            cublasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1);
            r0 = r1;
            cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1);
        }
        else {
            cusparseScsrmv(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax);
            cublasStatus = cublasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot);
            a = r1 / dot;
            cublasStatus = cublasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1);
            na = -a;
            cublasStatus = cublasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1);
            r0 = r1;
            cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1);
        }

        cudaDeviceSynchronize();
        k++;
    }

    cudaMemcpy(rhs, d_x, N*sizeof(real), cudaMemcpyDeviceToHost);

    *number_of_iterations = k-1;
    *error = r1;

    // copy the solution back into the grid cells
    #pragma omp parallel for
    for (uint32_t i = 0; i < num_active_cells; i++) {
        ac[i]->v = rhs[i];
    }

    free(rhs);
}

#endif

INIT_LINEAR_SYSTEM(init_cpu_conjugate_gradient) {
    GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real_cpu, tol, config->config_data, "tolerance");
    GET_PARAMETER_BOOLEAN_VALUE_OR_USE_DEFAULT(use_preconditioner, config->config_data, "use_preconditioner");
    GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(int, max_its, config->config_data, "max_iterations");
}

END_LINEAR_SYSTEM(end_cpu_conjugate_gradient) {
    // nothing to release for the CPU solver
}

// CPU conjugate gradient over the grid's cell graph, optionally with a
// Jacobi (diagonal) preconditioner.
SOLVE_LINEAR_SYSTEM(cpu_conjugate_gradient) {

    real_cpu rTr, r1Tr1, pTAp, alpha, beta, precision = tol, rTz, r1Tz1;

    uint32_t num_active_cells = the_grid->num_active_cells;
    struct cell_node** ac = the_grid->active_cells;

    *error = 1.0;
    *number_of_iterations = 1;

    //__________________________________________________________________________
    //Computes int_vector A*x, residue r = b - Ax, scalar rTr = r^T * r and
    //sets initial search direction p.

    rTr = 0.0;
    rTz = 0.0;

    struct element element;
    uint32_t i;

    #pragma omp parallel for private (element) reduction(+:rTr,rTz)
    for (i = 0; i < num_active_cells; i++) {

        if(CG_INFO(ac[i]) == NULL) {
            INITIALIZE_CONJUGATE_GRADIENT_INFO(ac[i]);
        }

        struct element *cell_elements = ac[i]->elements;
        ac[i]->Ax = 0.0;

        size_t max_el = arrlen(cell_elements);

        // sparse row * current solution
        for(int el = 0; el < max_el; el++) {
            element = cell_elements[el];
            ac[i]->Ax += element.value * element.cell->v;
        }

        CG_R(ac[i]) = ac[i]->b - ac[i]->Ax;
        if(use_preconditioner) {
            // diagonal (Jacobi) preconditioner: z = r / a_ii
            real_cpu value = cell_elements[0].value;
            if(value == 0.0) value = 1.0;
            CG_Z(ac[i]) = (1.0/value) * CG_R(ac[i]); // preconditioner
            rTz += CG_R(ac[i]) * CG_Z(ac[i]);
            CG_P(ac[i]) = CG_Z(ac[i]);
        }
        else {
            CG_P(ac[i]) = CG_R(ac[i]);
        }

        real_cpu r = CG_R(ac[i]);
        rTr += r * r;
    }

    *error = rTr;

    //__________________________________________________________________________
    //Conjugate gradient iterations.
    if( *error >= precision ) {
        while( *number_of_iterations < max_its ) {
            //__________________________________________________________________
            // Computes Ap and pTAp. Uses Ax to store Ap.
            pTAp = 0.0;

            #pragma omp parallel for private(element) reduction(+ : pTAp)
            for (i = 0; i < num_active_cells; i++) {

                ac[i]->Ax = 0.0;
                struct element *cell_elements = ac[i]->elements;

                size_t max_el = arrlen(cell_elements);
                for(int el = 0; el < max_el; el++) {
                    element = cell_elements[el];
                    ac[i]->Ax += element.value * CG_P(element.cell);
                }

                pTAp += CG_P(ac[i]) * ac[i]->Ax;
            }

            //__________________________________________________________________
            // Computes alpha.
            if(use_preconditioner) {
                alpha = rTz/pTAp;
            }
            else {
                alpha = rTr/pTAp;
            }
            //__________________________________________________________________

            r1Tr1 = 0.0;
            r1Tz1 = 0.0;

            // Computes new value of solution: u = u + alpha*p.
#pragma omp parallel for reduction (+:r1Tr1,r1Tz1) for (i = 0; i < num_active_cells; i++) { ac[i]->v += alpha * CG_P(ac[i]); CG_R(ac[i]) -= alpha * ac[i]->Ax; real_cpu r = CG_R(ac[i]); if(use_preconditioner) { real_cpu value = ac[i]->elements[0].value; if(value == 0.0) value = 1.0; CG_Z(ac[i]) = (1.0/value) * r; r1Tz1 += CG_Z(ac[i]) * r; } r1Tr1 += r * r; } //__________________________________________________________________ //Computes beta. if(use_preconditioner) { beta = r1Tz1/rTz; } else { beta = r1Tr1/rTr; } *error = r1Tr1; *number_of_iterations = *number_of_iterations + 1; if( *error <= precision ) { break; } //__________________________________________________________________ //Computes int_vector p1 = r1 + beta*p and uses it to upgrade p. #pragma omp parallel for for (i = 0; i < num_active_cells; i++) { if(use_preconditioner) { CG_P1(ac[i]) = CG_Z(ac[i]) + beta * CG_P(ac[i]); } else { CG_P1(ac[i]) = CG_R(ac[i]) + beta * CG_P(ac[i]); } CG_P(ac[i]) = CG_P1(ac[i]); } rTz = r1Tz1; rTr = r1Tr1; } }//end of conjugate gradient iterations. }//end conjugateGradient() function. SOLVE_LINEAR_SYSTEM(conjugate_gradient) { bool gpu = false; GET_PARAMETER_BOOLEAN_VALUE_OR_USE_DEFAULT(gpu, config->config_data, "use_gpu"); if(gpu) { #ifdef COMPILE_CUDA gpu_conjugate_gradient(config, the_grid, number_of_iterations, error); #else print_to_stdout_and_file("Cuda runtime not found in this system. Fallbacking to CPU solver!!\n"); cpu_conjugate_gradient(config, the_grid, number_of_iterations, error); #endif } else { cpu_conjugate_gradient(config, the_grid, number_of_iterations, error); } } INIT_LINEAR_SYSTEM(init_conjugate_gradient) { bool gpu = false; GET_PARAMETER_BOOLEAN_VALUE_OR_USE_DEFAULT(gpu, config->config_data, "use_gpu"); if(gpu) { #ifdef COMPILE_CUDA init_gpu_conjugate_gradient(config, the_grid); #else print_to_stdout_and_file("Cuda runtime not found in this system. 
Fallbacking to CPU solver!!\n"); cpu_conjugate_gradient(config, the_grid, number_of_iterations, error); #endif } else { init_cpu_conjugate_gradient(config, the_grid); } } END_LINEAR_SYSTEM(end_conjugate_gradient) { bool gpu = false; GET_PARAMETER_BOOLEAN_VALUE_OR_USE_DEFAULT(gpu, config->config_data, "use_gpu"); if(gpu) { #ifdef COMPILE_CUDA end_gpu_conjugate_gradient(config); #else print_to_stdout_and_file("Cuda runtime not found in this system. Fallbacking to CPU solver!!\n"); cpu_conjugate_gradient(config, the_grid, number_of_iterations, error); #endif } else { end_cpu_conjugate_gradient(config); } } // Berg's code SOLVE_LINEAR_SYSTEM(jacobi) { if(!jacobi_initialized) { GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real_cpu, tol, config->config_data, "tolerance"); max_its = 500; GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(int, max_its, config->config_data, "max_iterations"); jacobi_initialized = true; } real_cpu sigma, precision = tol; uint32_t num_active_cells = the_grid->num_active_cells; struct cell_node** ac = the_grid->active_cells; *error = 1.0; *number_of_iterations = 1; struct element element; int i; if (*error >= precision) { //__________________________________________________________________________ //Jacobi iterations. 
while (*number_of_iterations < max_its) { #pragma omp parallel for private (element,sigma) for (i = 0; i < num_active_cells; i++) { if(JACOBI_INFO(ac[i]) == NULL) { INITIALIZE_JACOBI_INFO(ac[i]); } struct element *cell_elements = ac[i]->elements; sigma = 0.0; size_t max_el = arrlen(cell_elements); // Do not take the diagonal element for(int el = 1; el < max_el; el++) { element = cell_elements[el]; sigma += element.value * element.cell->v; } real_cpu value = cell_elements[0].value; JACOBI_X_AUX(ac[i]) = (1.0/value)*(ac[i]->b - sigma); } real_cpu residue = 0.0; real_cpu sum; #pragma omp parallel for private (element,sum) reduction (+:residue) for (i = 0; i < num_active_cells; i++) { struct element *cell_elements = ac[i]->elements; size_t max_el = arrlen(cell_elements); // Do not take the diagonal element sum = 0.0; for(int el = 0; el < max_el; el++) { element = cell_elements[el]; sum += element.value * JACOBI_X_AUX(element.cell); } ac[i]->v = JACOBI_X_AUX(ac[i]); residue += pow(ac[i]->b - sum,2); } // The error is norm of the residue residue = sqrt(residue); *error = residue; *number_of_iterations = *number_of_iterations + 1; if( *error <= precision ) break; } } } //// Berg's code SOLVE_LINEAR_SYSTEM(biconjugate_gradient) { if(!bcg_initialized) { GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real_cpu, tol, config->config_data, "tolerance"); char *preconditioner_char = NULL; GET_PARAMETER_VALUE_CHAR_OR_USE_DEFAULT(preconditioner_char, config->config_data, "use_preconditioner"); if (preconditioner_char != NULL) { use_preconditioner = ((strcmp (preconditioner_char, "yes") == 0) || (strcmp (preconditioner_char, "true") == 0)); } max_its = 100; GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(int, max_its, config->config_data, "max_iterations"); bcg_initialized = true; } real_cpu rTr, r1Tr1, pTAp, alpha, beta, precision = tol, rTz, r1Tz1; uint32_t num_active_cells = the_grid->num_active_cells; struct cell_node** ac = the_grid->active_cells; *error = 1.0; *number_of_iterations = 1; 
struct element element; int i; //__________________________________________________________________________ // Zero all entries on the int_vector x*A // And initialize the second guess vector x_aux #pragma omp parallel for for (i = 0; i < num_active_cells; i++) { if(BCG_INFO(ac[i]) == NULL) { INITIALIZE_BICONJUGATE_GRADIENT_INFO(ac[i]); } BCG_XA(ac[i]) = 0.0; BCG_X_AUX(ac[i]) = ac[i]->v; } //__________________________________________________________________________ //Computes int_vector A*x, x*A //xA must be fully calculated to start doing anything over the r_aux vector #pragma omp parallel for private (element) for (i = 0; i < num_active_cells; i++) { struct element *cell_elements = ac[i]->elements; ac[i]->Ax = 0.0; size_t max_el = arrlen(cell_elements); for(int el = 0; el < max_el; el++) { element = cell_elements[el]; uint32_t col = element.column; ac[i]->Ax += element.value * element.cell->v; #pragma omp critical BCG_XA(ac[col]) += element.value * BCG_X_AUX(ac[i]); } } rTr = 0.0; rTz = 0.0; //__________________________________________________________________________ //Computes residues r, r_aux //scalar rTr = r^T * r_aux and //sets initial search directions p and p_aux. #pragma omp parallel for private (element) reduction(+:rTr,rTz) for (i = 0; i < num_active_cells; i++) { struct element *cell_elements = ac[i]->elements; BCG_R(ac[i]) = ac[i]->b - ac[i]->Ax; BCG_R_AUX(ac[i]) = ac[i]->b - BCG_XA(ac[i]); if(use_preconditioner) { real_cpu value = cell_elements[0].value; if(value == 0.0) value = 1.0; BCG_Z(ac[i]) = (1.0/value) * BCG_R(ac[i]); // preconditioner BCG_Z_AUX(ac[i]) = (1.0/value) * BCG_R_AUX(ac[i]); rTz += BCG_R_AUX(ac[i]) * BCG_Z(ac[i]); BCG_P(ac[i]) = BCG_Z(ac[i]); BCG_P_AUX(ac[i]) = BCG_Z_AUX(ac[i]); } else { BCG_P(ac[i]) = BCG_R(ac[i]); BCG_P_AUX(ac[i])= BCG_R_AUX(ac[i]); } rTr += BCG_R_AUX(ac[i]) * BCG_R(ac[i]); } *error = rTr; //__________________________________________________________________________ //Biconjugate gradient iterations. 
if( *error >= precision ) { while( *number_of_iterations < max_its ) { //__________________________________________________________________ // Computes Ap, pA and pTAp. Uses Ax to store Ap and xA to store pA pTAp = 0.0; #pragma omp parallel for for (i = 0; i < num_active_cells; i++) BCG_XA(ac[i]) = 0.0; #pragma omp parallel for private(element) reduction(+ : pTAp) for (i = 0; i < num_active_cells; i++) { ac[i]->Ax = 0.0; struct element *cell_elements = ac[i]->elements; size_t max_el = arrlen(cell_elements); for(int el = 0; el < max_el; el++) { element = cell_elements[el]; uint32_t col = element.column; ac[i]->Ax += element.value * BCG_P(element.cell); #pragma omp critical BCG_XA(ac[col]) += element.value * BCG_P_AUX(ac[i]); } pTAp += BCG_P_AUX(ac[i]) * ac[i]->Ax; } //__________________________________________________________________ // Computes alpha. if(use_preconditioner) { alpha = rTz/pTAp; } else { alpha = rTr/pTAp; } //__________________________________________________________________ r1Tr1 = 0.0; r1Tz1 = 0.0; // Computes new value of solution: u = u + alpha*p. // u_aux = u_aux + alpha*p_aux #pragma omp parallel for reduction (+:r1Tr1,r1Tz1) for (i = 0; i < num_active_cells; i++) { ac[i]->v += alpha * BCG_P(ac[i]); BCG_X_AUX(ac[i]) += alpha * BCG_P_AUX(ac[i]); BCG_R(ac[i]) -= alpha * ac[i]->Ax; BCG_R_AUX(ac[i]) -= alpha * BCG_XA(ac[i]); if(use_preconditioner) { real_cpu value = ac[i]->elements[0].value; if(value == 0.0) value = 1.0; BCG_Z(ac[i]) = (1.0/value) * BCG_R(ac[i]); BCG_Z_AUX(ac[i]) = (1.0/value) * BCG_R_AUX(ac[i]); r1Tz1 += BCG_Z(ac[i]) * BCG_R_AUX(ac[i]); } r1Tr1 += BCG_R(ac[i]) * BCG_R_AUX(ac[i]); } //__________________________________________________________________ //Computes beta. 
if(use_preconditioner) { beta = r1Tz1/rTz; } else { beta = r1Tr1/rTr; } *error = r1Tr1; *number_of_iterations = *number_of_iterations + 1; if( *error <= precision ) { break; } //__________________________________________________________________ //Computes int_vector p1 = r1 + beta*p and uses it to upgrade p. #pragma omp parallel for for (i = 0; i < num_active_cells; i++) { if(use_preconditioner) { BCG_P1(ac[i]) = BCG_Z(ac[i]) + beta * BCG_P(ac[i]); BCG_P1_AUX(ac[i]) = BCG_Z_AUX(ac[i]) + beta * BCG_P_AUX(ac[i]); } else { BCG_P1(ac[i]) = BCG_R(ac[i]) + beta * BCG_P(ac[i]); BCG_P1_AUX(ac[i]) = BCG_R_AUX(ac[i]) + beta * BCG_P_AUX(ac[i]); } BCG_P(ac[i]) = BCG_P1(ac[i]); BCG_P_AUX(ac[i]) = BCG_P1_AUX(ac[i]); } rTz = r1Tz1; rTr = r1Tr1; } }//end of biconjugate gradient iterations. }//end biconjugateGradient() function.
13_sync_with_flush.c
/* Producer/consumer handshake between OpenMP threads using flush.
 *
 * Thread 0 writes a payload and raises a flag; every other thread spins
 * until it observes the flag and then reads the payload.
 *
 * The original version performed a single `flush(data, flag)` AFTER both
 * stores, so a consumer could observe flag == 1 before the payload became
 * visible (and the compiler was free to reorder the two stores). The fix,
 * following the OpenMP Examples "standalone flush" pattern, is to flush the
 * payload BEFORE raising the flag, then flush the flag separately.
 *
 * NOTE: a fully conforming version would also read/write `flag` with
 * #pragma omp atomic; plain accesses to a shared flag are formally a race. */
#include <stdio.h>
#include <omp.h>

int main(){
    int data;
    int flag;

    flag = 0;
    data = 0;

    #pragma omp parallel
    if(omp_get_thread_num() == 0)
    {
        /* Thread 0: produce the payload, make it globally visible,
         * and only then raise the flag. */
        data = 42;
        #pragma omp flush(data)   /* publish the payload before the flag */
        flag = 1;
        #pragma omp flush(flag)   /* make the raised flag visible */
    }
    else
    {
        /* All other threads: spin until the flag becomes visible. */
        while (!flag){
            #pragma omp flush(flag)
        }
        /* Refresh our view of memory so the payload written by thread 0
         * is guaranteed to be seen. */
        #pragma omp flush(data)
        printf("Thread %i got payload %i\n", omp_get_thread_num(), data);
    }
    return 0;
}
inner_mult.h
#include "CSC.h"
#include "utility.h"
#include "hash_mult_hw.h"
#include <omp.h>
#include <algorithm>
#include <iostream>

using namespace std;

/**
 ** Count flop of SpGEMM between A and B in CSC format.
 **
 ** For every column i of B, the number of scalar multiplications is the sum
 ** of nnz in the A-columns matched by B's row indices; that per-column count
 ** is also stored in maxnnzc[i] (an upper bound on nnz of column i of C).
 ** Returns 2 * multiplications (one multiply + one add per pair).
 **/
template <typename IT, typename NT>
long long int get_flop(const CSC<IT,NT> & A, const CSC<IT,NT> & B, IT *maxnnzc)
{
    long long int flop = 0; // total flop (multiplication) needed to generate C

    // An OpenMP reduction replaces the original thread-private accumulator
    // plus critical section: same result, no lock, less code.
    #pragma omp parallel for reduction(+ : flop)
    for (IT i = 0; i < B.cols; ++i) {            // for all columns of B
        long long int locmax = 0;
        for (IT j = B.colptr[i]; j < B.colptr[i+1]; ++j) { // nonzeros of column i
            IT inner = B.rowids[j];              // row id of B == column id of A
            IT npins = A.colptr[inner+1] - A.colptr[inner]; // nnz in A's matching column
            locmax += npins;
        }
        maxnnzc[i] = locmax;
        flop += locmax;
    }
    return flop * 2;
}

/** Convenience overload that discards the per-column counts. */
template <typename IT, typename NT>
long long int get_flop(const CSC<IT,NT> & A, const CSC<IT,NT> & B)
{
    IT *dummy = my_malloc<IT>(B.cols);
    long long int flop = get_flop(A, B, dummy);
    my_free<IT>(dummy);
    return flop;
}

//*TODO:: Dealing with 5 mats.
// Mask, A, B, C, C_final*
// Masked dot-product SpGEMM: C_final = (A * B) restricted to the nonzero
// pattern of the mask M. A is CSR and B is CSC, so each candidate entry is
// a sorted two-pointer merge (dot product) of one row of A with one column
// of B.
// NOTE(review): the template parameters vectorProbing and sortOutput are
// not used anywhere in this body.
template <bool vectorProbing, bool sortOutput, typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
void innerSpGEMM_nohash(const CSR<IT,NT> & A, const CSC<IT,NT> & B, CSR<IT,NT> & C_final, const CSR<IT,NT> & M, MultiplyOperation multop, AddOperation addop, unsigned threadCount)
{
    // Scratch output C sized to the mask's pattern; entries that turn out
    // to be numerically absent are squeezed out into C_final at the end.
    CSR<IT,NT> C; //*A^2*
    C.rows = M.rows;
    C.cols = M.cols; // B ?=A
    C.nnz = M.nnz;
    C.zerobased = true;
    C.rowptr = my_malloc<IT>(M.rows + 1);
    C.colids = my_malloc<IT>(M.nnz);
    C.values = my_malloc<NT>(M.nnz);

    for (IT i = 0; i < C.rows; ++i)
        C.rowptr[i] = M.rowptr[i];
    for (IT i = 0; i < C.nnz; ++i) {
        C.colids[i] = M.colids[i]; // unnecessary
        C.values[i] = 0;
    }

    BIN<IT, NT> bin(A.rows, IMB_PWMIN, threadCount);

    /* Set max bin */
    // Double check, changed 3rd param to colptr
    bin.set_max_bin(A.rowptr, A.colids, B.colptr, C.rows, C.cols);

    // NOTE(review): every thread stores into numThreads; they all write the
    // same value, but it is formally a data race.
    IT numThreads;
    #pragma omp parallel num_threads(threadCount)
    {
        numThreads = omp_get_num_threads();
    }

    vector<IT> th_nnz(numThreads, 0);  // surviving-nnz count per thread
    vector<IT> rownnz(C.rows, 0);      // surviving-nnz count per output row
    IT rowPerThread = (M.rows + numThreads -1) / numThreads;

    #pragma omp parallel num_threads(threadCount)
    {
        IT i, start_row, end_row, col;
        IT tid;
        tid = omp_get_thread_num();

        // start_row = bin.rows_offset[tid];
        // end_row = bin.rows_offset[tid + 1];
        // Static blocked row partition over the mask.
        start_row = rowPerThread * tid;
        end_row = min(rowPerThread * (tid+1), M.rows);

        // each th keeps track of active nnz in C (not all from Mask)
        //* blocks of rows the mask *
        for (i = start_row; i < end_row; ++i) {
            IT j, cur_col, nnz_r, nnz_c;
            IT cur_row = i;
            NT t_val = 0;
            bool active = false;

            //* nonzeros of the row over the mask *
            for (j = M.rowptr[i]; j < M.rowptr[i + 1]; ++j) {
                cur_col = M.colids[j];
                nnz_r = A.rowptr[cur_row];
                nnz_c = B.colptr[cur_col];
                t_val = 0;
                active = false;

                //*dot product between row of A and col of B
                // (two-pointer merge over the sorted index lists)
                while(nnz_r < A.rowptr[cur_row+1] && nnz_c < B.colptr[cur_col+1]){
                    if(A.colids[nnz_r] < B.rowids[nnz_c]) nnz_r++;
                    else if(A.colids[nnz_r] > B.rowids[nnz_c]) nnz_c++;
                    else { //A.colids[nnz_r] == B.rowids[nnz_c];
                        t_val = addop(t_val, multop(A.values[nnz_r], B.values[nnz_c]));
                        nnz_r++, nnz_c++;
                        active = true;
                    }
                }

                if(active) {// active nnz, shrink output accordingly
                    // Pack this thread's surviving entries contiguously,
                    // starting at its row block's offset in the mask layout.
                    IT loc = M.rowptr[start_row] + th_nnz[tid];
                    C.colids[loc] = M.colids[j];
                    C.values[loc] = t_val;
                    th_nnz[tid]++;
                    rownnz[i]++;
                }
            }
        }
        // NOTE(review): redundant — a parallel region already ends with an
        // implicit barrier.
        #pragma omp barrier
    }

    //shrink C
    //* sequentially create global rowptr for final shrinked C*
    // NOTE(review): C_final.nnz is accumulated with +=, so this relies on
    // the caller handing in a zero-initialized C_final — confirm.
    for (IT i = 0; i < C.rows; ++i)
        C_final.nnz += rownnz[i];

    C_final.rows = C.rows;
    C_final.cols = C.cols;
    C_final.zerobased = true;
    C_final.rowptr = my_malloc<IT>(C.rows + 1);
    C_final.colids = my_malloc<IT>(C_final.nnz);
    C_final.values = my_malloc<NT>(C_final.nnz);

    // Stitch the per-thread packed segments together into C_final.
    memcpy (C_final.colids, C.colids, th_nnz[0] * sizeof(IT)) ;
    memcpy (C_final.values, C.values, th_nnz[0] * sizeof(NT)) ;
    IT dest = 0;
    for (IT i = 1; i < numThreads; ++i) {
        IT loc = min(i * rowPerThread, A.rows);
        dest += th_nnz[i-1];
        memcpy (C_final.colids + dest, C.colids + M.rowptr[loc], th_nnz[i] * sizeof(C.colids[0]));
        memcpy (C_final.values + dest, C.values + M.rowptr[loc], th_nnz[i] * sizeof(C.values[0]));
    }

    //TODO:: optimize prefix sum
    C_final.rowptr[0] = 0;
    for (IT i = 1; i <= C_final.rows; ++i) {
        C_final.rowptr[i] = C_final.rowptr[i-1] + rownnz[i-1];//A.rowptr[rowPerThread * i];
    }

    // cout << "Dot SpGEMM with Mask C_final" << endl;
    // for (int i = 0; i < 10; ++i){
    //     cout << i << " : " << C_final.rowptr[i] << " ";
    //     for (int j = C_final.rowptr[i]; j < C_final.rowptr[i+1]; ++j)
    //         cout << C_final.colids[j] << " " << C_final.values[j] << ", ";
    //     cout << endl;
    // }
    // cout << endl;

    C.make_empty();
}
OpenMPClause.h
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // /// \file /// This file defines OpenMP AST classes for clauses. /// There are clauses for executable directives, clauses for declarative /// directives and clauses which can be used in both kinds of directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H #define LLVM_CLANG_AST_OPENMPCLAUSE_H #include "clang/AST/Decl.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/TrailingObjects.h" #include <cassert> #include <cstddef> #include <iterator> #include <utility> namespace clang { class ASTContext; //===----------------------------------------------------------------------===// // AST classes for clauses. //===----------------------------------------------------------------------===// /// This is a basic class for representing single OpenMP clause. class OMPClause { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Ending location of the clause. SourceLocation EndLoc; /// Kind of the clause. 
OpenMPClauseKind Kind; protected: OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc) : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {} public: /// Returns the starting location of the clause. SourceLocation getBeginLoc() const { return StartLoc; } /// Returns the ending location of the clause. SourceLocation getEndLoc() const { return EndLoc; } /// Sets the starting location of the clause. void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// Sets the ending location of the clause. void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// Returns kind of OpenMP clause (private, shared, reduction, etc.). OpenMPClauseKind getClauseKind() const { return Kind; } bool isImplicit() const { return StartLoc.isInvalid(); } using child_iterator = StmtIterator; using const_child_iterator = ConstStmtIterator; using child_range = llvm::iterator_range<child_iterator>; using const_child_range = llvm::iterator_range<const_child_iterator>; child_range children(); const_child_range children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } /// Get the iterator range for the expressions used in the clauses. Used /// expressions include only the children that must be evaluated at the /// runtime before entering the construct. child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *) { return true; } }; /// Class that handles pre-initialization statement for some clauses, like /// 'shedule', 'firstprivate' etc. class OMPClauseWithPreInit { friend class OMPClauseReader; /// Pre-initialization statement for the clause. Stmt *PreInit = nullptr; /// Region that captures the associated stmt. 
OpenMPDirectiveKind CaptureRegion = OMPD_unknown; protected: OMPClauseWithPreInit(const OMPClause *This) { assert(get(This) && "get is not tuned for pre-init."); } /// Set pre-initialization statement for the clause. void setPreInitStmt(Stmt *S, OpenMPDirectiveKind ThisRegion = OMPD_unknown) { PreInit = S; CaptureRegion = ThisRegion; } public: /// Get pre-initialization statement for the clause. const Stmt *getPreInitStmt() const { return PreInit; } /// Get pre-initialization statement for the clause. Stmt *getPreInitStmt() { return PreInit; } /// Get capture region for the stmt in the clause. OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; } static OMPClauseWithPreInit *get(OMPClause *C); static const OMPClauseWithPreInit *get(const OMPClause *C); }; /// Class that handles post-update expression for some clauses, like /// 'lastprivate', 'reduction' etc. class OMPClauseWithPostUpdate : public OMPClauseWithPreInit { friend class OMPClauseReader; /// Post-update expression for the clause. Expr *PostUpdate = nullptr; protected: OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) { assert(get(This) && "get is not tuned for post-update."); } /// Set pre-initialization statement for the clause. void setPostUpdateExpr(Expr *S) { PostUpdate = S; } public: /// Get post-update expression for the clause. const Expr *getPostUpdateExpr() const { return PostUpdate; } /// Get post-update expression for the clause. Expr *getPostUpdateExpr() { return PostUpdate; } static OMPClauseWithPostUpdate *get(OMPClause *C); static const OMPClauseWithPostUpdate *get(const OMPClause *C); }; /// This structure contains most locations needed for by an OMPVarListClause. struct OMPVarListLocTy { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Location of '('. SourceLocation LParenLoc; /// Ending location of the clause. 
SourceLocation EndLoc; OMPVarListLocTy() = default; OMPVarListLocTy(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : StartLoc(StartLoc), LParenLoc(LParenLoc), EndLoc(EndLoc) {} }; /// This represents clauses with the list of variables like 'private', /// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the /// '#pragma omp ...' directives. template <class T> class OMPVarListClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Number of variables in the list. unsigned NumVars; protected: /// Build a clause with \a N variables /// /// \param K Kind of the clause. /// \param StartLoc Starting location of the clause (the clause keyword). /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {} /// Fetches list of variables associated with this clause. MutableArrayRef<Expr *> getVarRefs() { return MutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } /// Sets the list of variables for this clause. 
void setVarRefs(ArrayRef<Expr *> VL) { assert(VL.size() == NumVars && "Number of variables is not the same as the preallocated buffer"); std::copy(VL.begin(), VL.end(), static_cast<T *>(this)->template getTrailingObjects<Expr *>()); } public: using varlist_iterator = MutableArrayRef<Expr *>::iterator; using varlist_const_iterator = ArrayRef<const Expr *>::iterator; using varlist_range = llvm::iterator_range<varlist_iterator>; using varlist_const_range = llvm::iterator_range<varlist_const_iterator>; unsigned varlist_size() const { return NumVars; } bool varlist_empty() const { return NumVars == 0; } varlist_range varlists() { return varlist_range(varlist_begin(), varlist_end()); } varlist_const_range varlists() const { return varlist_const_range(varlist_begin(), varlist_end()); } varlist_iterator varlist_begin() { return getVarRefs().begin(); } varlist_iterator varlist_end() { return getVarRefs().end(); } varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); } varlist_const_iterator varlist_end() const { return getVarRefs().end(); } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Fetches list of all variables in the clause. ArrayRef<const Expr *> getVarRefs() const { return llvm::makeArrayRef( static_cast<const T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } }; /// This represents 'allocator' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp allocate(a) allocator(omp_default_mem_alloc) /// \endcode /// In this example directive '#pragma omp allocate' has simple 'allocator' /// clause with the allocator 'omp_default_mem_alloc'. class OMPAllocatorClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Expression with the allocator. Stmt *Allocator = nullptr; /// Set allocator. 
void setAllocator(Expr *A) { Allocator = A; } public: /// Build 'allocator' clause with the given allocator. /// /// \param A Allocator. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_allocator, StartLoc, EndLoc), LParenLoc(LParenLoc), Allocator(A) {} /// Build an empty clause. OMPAllocatorClause() : OMPClause(OMPC_allocator, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns allocator. Expr *getAllocator() const { return cast_or_null<Expr>(Allocator); } child_range children() { return child_range(&Allocator, &Allocator + 1); } const_child_range children() const { return const_child_range(&Allocator, &Allocator + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_allocator; } }; /// This represents clause 'allocate' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel private(a) allocate(omp_default_mem_alloc :a) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// and clause 'allocate' for the variable 'a'. class OMPAllocateClause final : public OMPVarListClause<OMPAllocateClause>, private llvm::TrailingObjects<OMPAllocateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Allocator specified in the clause, or 'nullptr' if the default one is /// used. 
Expr *Allocator = nullptr; /// Position of the ':' delimiter in the clause; SourceLocation ColonLoc; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Allocator Allocator expression. /// \param ColonLoc Location of ':' delimiter. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPAllocateClause>(OMPC_allocate, StartLoc, LParenLoc, EndLoc, N), Allocator(Allocator), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPAllocateClause(unsigned N) : OMPVarListClause<OMPAllocateClause>(OMPC_allocate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } void setAllocator(Expr *A) { Allocator = A; } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Allocator Allocator expression. /// \param ColonLoc Location of ':' delimiter. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPAllocateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Returns the allocator expression or nullptr, if no allocator is specified. Expr *getAllocator() const { return Allocator; } /// Returns the location of the ':' delimiter. SourceLocation getColonLoc() const { return ColonLoc; } /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
static OMPAllocateClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPAllocateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_allocate; } }; /// This represents 'if' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel if(parallel:a > 5) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'if' clause with /// condition 'a > 5' and directive name modifier 'parallel'. class OMPIfClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Location of ':' (if any). SourceLocation ColonLoc; /// Directive name modifier for the clause. OpenMPDirectiveKind NameModifier = OMPD_unknown; /// Name modifier location. SourceLocation NameModifierLoc; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } /// Set directive name modifier for the clause. void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; } /// Set location of directive name modifier for the clause. void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; } /// Set location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Build 'if' clause with condition \a Cond. /// /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause. /// \param Cond Condition of the clause. /// \param HelperCond Helper condition for the clause. 
/// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param NameModifierLoc Location of directive name modifier. /// \param ColonLoc [OpenMP 4.1] Location of ':'. /// \param EndLoc Ending location of the clause. OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc) : OMPClause(OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc), NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPIfClause() : OMPClause(OMPC_if, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } /// Return directive name modifier associated with the clause. OpenMPDirectiveKind getNameModifier() const { return NameModifier; } /// Return the location of directive name modifier. 
SourceLocation getNameModifierLoc() const { return NameModifierLoc; }

  child_range children() { return child_range(&Condition, &Condition + 1); }

  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPIfClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_if;
  }
};

/// This represents 'final' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task final(a > 5)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'final'
/// clause with condition 'a > 5'.
class OMPFinalClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'final' clause.
  Stmt *Condition = nullptr;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

public:
  /// Build 'final' clause with condition \a Cond.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Cond Condition of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPFinalClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc)
      : OMPClause(OMPC_final, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Condition(Cond) {}

  /// Build an empty clause.
  OMPFinalClause() : OMPClause(OMPC_final, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns condition.
Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  child_range children() { return child_range(&Condition, &Condition + 1); }

  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_final;
  }
};

/// This represents 'num_threads' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel num_threads(6)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'num_threads'
/// clause with number of threads '6'.
class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number-of-threads expression of the 'num_threads' clause.
  Stmt *NumThreads = nullptr;

  /// Set the number-of-threads expression.
  void setNumThreads(Expr *NThreads) { NumThreads = NThreads; }

public:
  /// Build 'num_threads' clause with expression \a NumThreads.
  ///
  /// \param NumThreads Number of threads for the construct.
  /// \param HelperNumThreads Helper Number of threads for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads,
                      OpenMPDirectiveKind CaptureRegion,
                      SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation EndLoc)
      : OMPClause(OMPC_num_threads, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc),
        NumThreads(NumThreads) {
    setPreInitStmt(HelperNumThreads, CaptureRegion);
  }

  /// Build an empty clause.
OMPNumThreadsClause() : OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns number of threads. Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); } child_range children() { return child_range(&NumThreads, &NumThreads + 1); } const_child_range children() const { return const_child_range(&NumThreads, &NumThreads + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_num_threads; } }; /// This represents 'safelen' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd safelen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'safelen' /// with single expression '4'. /// If the safelen clause is used then no two iterations executed /// concurrently with SIMD instructions can have a greater distance /// in the logical iteration space than its value. The parameter of /// the safelen clause must be a constant positive integer expression. class OMPSafelenClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Safe iteration space distance. Stmt *Safelen = nullptr; /// Set safelen. void setSafelen(Expr *Len) { Safelen = Len; } public: /// Build 'safelen' clause. /// /// \param Len Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. 
OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc), Safelen(Len) {} /// Build an empty clause. explicit OMPSafelenClause() : OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return safe iteration space distance. Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); } child_range children() { return child_range(&Safelen, &Safelen + 1); } const_child_range children() const { return const_child_range(&Safelen, &Safelen + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_safelen; } }; /// This represents 'simdlen' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd simdlen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'simdlen' /// with single expression '4'. /// If the 'simdlen' clause is used then it specifies the preferred number of /// iterations to be executed concurrently. The parameter of the 'simdlen' /// clause must be a constant positive integer expression. class OMPSimdlenClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Safe iteration space distance. Stmt *Simdlen = nullptr; /// Set simdlen. void setSimdlen(Expr *Len) { Simdlen = Len; } public: /// Build 'simdlen' clause. /// /// \param Len Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. 
OMPSimdlenClause(Expr *Len, SourceLocation StartLoc,
                 SourceLocation LParenLoc, SourceLocation EndLoc)
    : OMPClause(OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc),
      Simdlen(Len) {}

  /// Build an empty clause.
  explicit OMPSimdlenClause()
      : OMPClause(OMPC_simdlen, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the preferred number of iterations to be executed concurrently
  /// (the 'simdlen' expression).
  Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); }

  child_range children() { return child_range(&Simdlen, &Simdlen + 1); }

  const_child_range children() const {
    return const_child_range(&Simdlen, &Simdlen + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simdlen;
  }
};

/// This represents 'collapse' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd collapse(3)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'collapse'
/// with single expression '3'.
/// The parameter must be a constant positive integer expression, it specifies
/// the number of nested loops that should be collapsed into a single iteration
/// space.
class OMPCollapseClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of for-loops.
  Stmt *NumForLoops = nullptr;

  /// Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }

public:
  /// Build 'collapse' clause.
  ///
  /// \param Num Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
OMPCollapseClause(Expr *Num, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num) {} /// Build an empty clause. explicit OMPCollapseClause() : OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the number of associated for-loops. Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } const_child_range children() const { return const_child_range(&NumForLoops, &NumForLoops + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_collapse; } }; /// This represents 'default' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel default(shared) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'default' /// clause with kind 'shared'. class OMPDefaultClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'default' clause. OpenMPDefaultClauseKind Kind = OMPC_DEFAULT_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clauses. /// /// \param K Argument of clause. void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; } /// Set argument location. /// /// \param KLoc Argument location. void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'default' clause with argument \a A ('none' or 'shared'). 
/// /// \param A Argument of the clause ('none' or 'shared'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPDefaultClause() : OMPClause(OMPC_default, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. OpenMPDefaultClauseKind getDefaultKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_default; } }; /// This represents 'proc_bind' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel proc_bind(master) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'proc_bind' /// clause with kind 'master'. class OMPProcBindClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'proc_bind' clause. OpenMPProcBindClauseKind Kind = OMPC_PROC_BIND_unknown; /// Start location of the kind in source code. 
SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Kind of clause. void setProcBindKind(OpenMPProcBindClauseKind K) { Kind = K; } /// Set clause kind location. /// /// \param KLoc Kind location. void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'proc_bind' clause with argument \a A ('master', 'close' or /// 'spread'). /// /// \param A Argument of the clause ('master', 'close' or 'spread'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPProcBindClause(OpenMPProcBindClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPProcBindClause() : OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. OpenMPProcBindClauseKind getProcBindKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_proc_bind; } }; /// This represents 'unified_address' clause in the '#pragma omp requires' /// directive. 
/// /// \code /// #pragma omp requires unified_address /// \endcode /// In this example directive '#pragma omp requires' has 'unified_address' /// clause. class OMPUnifiedAddressClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'unified_address' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_unified_address, StartLoc, EndLoc) {} /// Build an empty clause. OMPUnifiedAddressClause() : OMPClause(OMPC_unified_address, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_unified_address; } }; /// This represents 'unified_shared_memory' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires unified_shared_memory /// \endcode /// In this example directive '#pragma omp requires' has 'unified_shared_memory' /// clause. class OMPUnifiedSharedMemoryClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'unified_shared_memory' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_unified_shared_memory, StartLoc, EndLoc) {} /// Build an empty clause. 
OMPUnifiedSharedMemoryClause() : OMPClause(OMPC_unified_shared_memory, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_unified_shared_memory; } }; /// This represents 'reverse_offload' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires reverse_offload /// \endcode /// In this example directive '#pragma omp requires' has 'reverse_offload' /// clause. class OMPReverseOffloadClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'reverse_offload' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_reverse_offload, StartLoc, EndLoc) {} /// Build an empty clause. OMPReverseOffloadClause() : OMPClause(OMPC_reverse_offload, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_reverse_offload; } }; /// This represents 'dynamic_allocators' clause in the '#pragma omp requires' /// directive. 
/// /// \code /// #pragma omp requires dynamic_allocators /// \endcode /// In this example directive '#pragma omp requires' has 'dynamic_allocators' /// clause. class OMPDynamicAllocatorsClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'dynamic_allocators' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_dynamic_allocators, StartLoc, EndLoc) {} /// Build an empty clause. OMPDynamicAllocatorsClause() : OMPClause(OMPC_dynamic_allocators, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_dynamic_allocators; } }; /// This represents 'atomic_default_mem_order' clause in the '#pragma omp /// requires' directive. /// /// \code /// #pragma omp requires atomic_default_mem_order(seq_cst) /// \endcode /// In this example directive '#pragma omp requires' has simple /// atomic_default_mem_order' clause with kind 'seq_cst'. class OMPAtomicDefaultMemOrderClause final : public OMPClause { friend class OMPClauseReader; /// Location of '(' SourceLocation LParenLoc; /// A kind of the 'atomic_default_mem_order' clause. OpenMPAtomicDefaultMemOrderClauseKind Kind = OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Kind of clause. 
void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) {
    Kind = K;
  }

  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) {
    KindKwLoc = KLoc;
  }

public:
  /// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst',
  /// 'acq_rel' or 'relaxed').
  ///
  /// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A,
                                 SourceLocation ALoc, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc)
      : OMPClause(OMPC_atomic_default_mem_order, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPAtomicDefaultMemOrderClause()
      : OMPClause(OMPC_atomic_default_mem_order, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const {
    return Kind;
  }

  /// Returns location of clause kind.
SourceLocation getAtomicDefaultMemOrderKindKwLoc() const { return KindKwLoc; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_atomic_default_mem_order;
  }
};

/// This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'schedule' clause.
  OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown;

  /// Modifiers for 'schedule' clause.
  enum {FIRST, SECOND, NUM_MODIFIERS};
  OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS];

  /// Locations of modifiers.
  SourceLocation ModifiersLoc[NUM_MODIFIERS];

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }

  /// Set the first schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[FIRST] = M;
  }

  /// Set the second schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[SECOND] = M;
  }

  /// Set location of the first schedule modifier.
void setFirstScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[FIRST] = Loc;
  }

  /// Set location of the second schedule modifier.
  void setSecondScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[SECOND] = Loc;
  }

  /// Set a schedule modifier, filling the first unused modifier slot.
  ///
  /// \param M Schedule modifier.
  // NOTE(review): the identifier is misspelled ('Modifer'); kept as-is since
  // renaming would break existing callers (e.g. via the OMPClauseReader
  // friend) — confirm before fixing upstream.
  void setScheduleModifer(OpenMPScheduleClauseModifier M) {
    if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown)
      Modifiers[FIRST] = M;
    else {
      assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown);
      Modifiers[SECOND] = M;
    }
  }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'schedule' clause with schedule kind \a Kind and chunk size
  /// expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Schedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  /// \param M1 The first modifier applied to 'schedule' clause.
  /// \param M1Loc Location of the first modifier
  /// \param M2 The second modifier applied to 'schedule' clause.
/// \param M2Loc Location of the second modifier OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize, OpenMPScheduleClauseModifier M1, SourceLocation M1Loc, OpenMPScheduleClauseModifier M2, SourceLocation M2Loc) : OMPClause(OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); Modifiers[FIRST] = M1; Modifiers[SECOND] = M2; ModifiersLoc[FIRST] = M1Loc; ModifiersLoc[SECOND] = M2Loc; } /// Build an empty clause. explicit OMPScheduleClause() : OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) { Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown; Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown; } /// Get kind of the clause. OpenMPScheduleClauseKind getScheduleKind() const { return Kind; } /// Get the first modifier of the clause. OpenMPScheduleClauseModifier getFirstScheduleModifier() const { return Modifiers[FIRST]; } /// Get the second modifier of the clause. OpenMPScheduleClauseModifier getSecondScheduleModifier() const { return Modifiers[SECOND]; } /// Get location of '('. SourceLocation getLParenLoc() { return LParenLoc; } /// Get kind location. SourceLocation getScheduleKindLoc() { return KindLoc; } /// Get the first modifier location. SourceLocation getFirstScheduleModifierLoc() const { return ModifiersLoc[FIRST]; } /// Get the second modifier location. SourceLocation getSecondScheduleModifierLoc() const { return ModifiersLoc[SECOND]; } /// Get location of ','. SourceLocation getCommaLoc() { return CommaLoc; } /// Get chunk size. Expr *getChunkSize() { return ChunkSize; } /// Get chunk size. 
const Expr *getChunkSize() const { return ChunkSize; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&ChunkSize), reinterpret_cast<Stmt **>(&ChunkSize) + 1); } const_child_range children() const { auto Children = const_cast<OMPScheduleClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_schedule; } }; /// This represents 'ordered' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for ordered (2) /// \endcode /// In this example directive '#pragma omp for' has 'ordered' clause with /// parameter 2. class OMPOrderedClause final : public OMPClause, private llvm::TrailingObjects<OMPOrderedClause, Expr *> { friend class OMPClauseReader; friend TrailingObjects; /// Location of '('. SourceLocation LParenLoc; /// Number of for-loops. Stmt *NumForLoops = nullptr; /// Real number of loops. unsigned NumberOfLoops = 0; /// Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param NumLoops Number of loops, associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num), NumberOfLoops(NumLoops) {} /// Build an empty clause. explicit OMPOrderedClause(unsigned NumLoops) : OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()), NumberOfLoops(NumLoops) {} /// Set the number of associated for-loops. 
void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param NumLoops Number of loops, associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. static OMPOrderedClause *Create(const ASTContext &C, Expr *Num, unsigned NumLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Build an empty clause. static OMPOrderedClause* CreateEmpty(const ASTContext &C, unsigned NumLoops); /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the number of associated for-loops. Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } /// Set number of iterations for the specified loop. void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations); /// Get number of iterations for all the loops. ArrayRef<Expr *> getLoopNumIterations() const; /// Set loop counter for the specified loop. void setLoopCounter(unsigned NumLoop, Expr *Counter); /// Get loops counter for the specified loop. Expr *getLoopCounter(unsigned NumLoop); const Expr *getLoopCounter(unsigned NumLoop) const; child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } const_child_range children() const { return const_child_range(&NumForLoops, &NumForLoops + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_ordered; } }; /// This represents 'nowait' clause in the '#pragma omp ...' directive. 
/// /// \code /// #pragma omp for nowait /// \endcode /// In this example directive '#pragma omp for' has 'nowait' clause. class OMPNowaitClause : public OMPClause { public: /// Build 'nowait' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_nowait, StartLoc, EndLoc) {} /// Build an empty clause. OMPNowaitClause() : OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_nowait; } }; /// This represents 'untied' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task untied /// \endcode /// In this example directive '#pragma omp task' has 'untied' clause. class OMPUntiedClause : public OMPClause { public: /// Build 'untied' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_untied, StartLoc, EndLoc) {} /// Build an empty clause. 
OMPUntiedClause() : OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_untied; } }; /// This represents 'mergeable' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task mergeable /// \endcode /// In this example directive '#pragma omp task' has 'mergeable' clause. class OMPMergeableClause : public OMPClause { public: /// Build 'mergeable' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_mergeable, StartLoc, EndLoc) {} /// Build an empty clause. OMPMergeableClause() : OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_mergeable; } }; /// This represents 'read' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic read /// \endcode /// In this example directive '#pragma omp atomic' has 'read' clause. class OMPReadClause : public OMPClause { public: /// Build 'read' clause. 
/// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_read, StartLoc, EndLoc) {} /// Build an empty clause. OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_read; } }; /// This represents 'write' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic write /// \endcode /// In this example directive '#pragma omp atomic' has 'write' clause. class OMPWriteClause : public OMPClause { public: /// Build 'write' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_write, StartLoc, EndLoc) {} /// Build an empty clause. OMPWriteClause() : OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_write; } }; /// This represents 'update' clause in the '#pragma omp atomic' /// directive. 
/// /// \code /// #pragma omp atomic update /// \endcode /// In this example directive '#pragma omp atomic' has 'update' clause. class OMPUpdateClause : public OMPClause { public: /// Build 'update' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_update, StartLoc, EndLoc) {} /// Build an empty clause. OMPUpdateClause() : OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_update; } }; /// This represents 'capture' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has 'capture' clause. class OMPCaptureClause : public OMPClause { public: /// Build 'capture' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_capture, StartLoc, EndLoc) {} /// Build an empty clause. 
OMPCaptureClause() : OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_capture; } }; /// This represents 'seq_cst' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic seq_cst /// \endcode /// In this example directive '#pragma omp atomic' has 'seq_cst' clause. class OMPSeqCstClause : public OMPClause { public: /// Build 'seq_cst' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {} /// Build an empty clause. OMPSeqCstClause() : OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_seq_cst; } }; /// This represents clause 'private' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel private(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// with the variables 'a' and 'b'. 
class OMPPrivateClause final
    : public OMPVarListClause<OMPPrivateClause>,
      private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc,
                                           EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPPrivateClause(unsigned N)
      : OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(),
                                           SourceLocation(), SourceLocation(),
                                           N) {}

  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for
  /// new private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PrivateVL List of references to private copies with initializers.
  static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  ArrayRef<Expr *> PrivateVL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPPrivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_private;
  }
};

/// This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
class OMPFirstprivateClause final
    : public OMPVarListClause<OMPFirstprivateClause>,
      public OMPClauseWithPreInit,
      private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause. OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPreInit(this) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPFirstprivateClause(unsigned N) : OMPVarListClause<OMPFirstprivateClause>( OMPC_firstprivate, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPreInit(this) {} /// Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Sets the list of references to initializer variables for new /// private variables. /// \param VL List of references. void setInits(ArrayRef<Expr *> VL); /// Gets the list of references to initializer variables for new /// private variables. MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. /// \param PrivateVL List of references to private copies with initializers. /// \param InitVL List of references to auto generated variables used for /// initialization of a single array element. 
Used if firstprivate variable is /// of array type. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. static OMPFirstprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL, ArrayRef<Expr *> InitVL, Stmt *PreInit); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N); using private_copies_iterator = MutableArrayRef<Expr *>::iterator; using private_copies_const_iterator = ArrayRef<const Expr *>::iterator; using private_copies_range = llvm::iterator_range<private_copies_iterator>; using private_copies_const_range = llvm::iterator_range<private_copies_const_iterator>; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } using inits_iterator = MutableArrayRef<Expr *>::iterator; using inits_const_iterator = ArrayRef<const Expr *>::iterator; using inits_range = llvm::iterator_range<inits_iterator>; using inits_const_range = llvm::iterator_range<inits_const_iterator>; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPFirstprivateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range 
used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_firstprivate; } }; /// This represents clause 'lastprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd lastprivate(a,b) /// \endcode /// In this example directive '#pragma omp simd' has clause 'lastprivate' /// with the variables 'a' and 'b'. class OMPLastprivateClause final : public OMPVarListClause<OMPLastprivateClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPLastprivateClause, Expr *> { // There are 4 additional tail-allocated arrays at the end of the class: // 1. Contains list of pseudo variables with the default initialization for // each non-firstprivate variables. Used in codegen for initialization of // lastprivate copies. // 2. List of helper expressions for proper generation of assignment operation // required for lastprivate clause. This list represents private variables // (for arrays, single array element). // 3. List of helper expressions for proper generation of assignment operation // required for lastprivate clause. This list represents original variables // (for arrays, single array element). // 4. List of helper expressions that represents assignment operation: // \code // DstExprs = SrcExprs; // \endcode // Required for proper codegen of final assignment performed by the // lastprivate clause. friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPLastprivateClause(unsigned N) : OMPVarListClause<OMPLastprivateClause>( OMPC_lastprivate, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Get the list of helper expressions for initialization of private /// copies for lastprivate variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent private variables (for arrays, single /// array element) in the final assignment statement performed by the /// lastprivate clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent original variables (for arrays, single /// array element) in the final assignment statement performed by the /// lastprivate clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// Get the list of helper destination expressions. 
MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign private copy of the variable to original variable. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. This list represents /// private variables (for arrays, single array element). /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. This list represents /// original variables (for arrays, single array element). /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of final assignment performed by the /// lastprivate clause. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. 
static OMPLastprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N); using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; /// Set list of helper expressions, required for generation of private /// copies of original lastprivate variables. void setPrivateCopies(ArrayRef<Expr *> PrivateCopies); helper_expr_const_range private_copies() const { return helper_expr_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_range private_copies() { return helper_expr_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } child_range children() { return 
child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPLastprivateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_lastprivate; } }; /// This represents clause 'shared' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel shared(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'shared' /// with the variables 'a' and 'b'. class OMPSharedClause final : public OMPVarListClause<OMPSharedClause>, private llvm::TrailingObjects<OMPSharedClause, Expr *> { friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPSharedClause(unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. 
static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPSharedClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_shared; } }; /// This represents clause 'reduction' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'reduction' /// with operator '+' and the variables 'a' and 'b'. class OMPReductionClause final : public OMPVarListClause<OMPReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param ColonLoc Location of ':'. /// \param N Number of the variables in the clause. 
/// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPReductionClause(unsigned N) : OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent private copy of the reduction /// variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent LHS expression in the final /// reduction expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. 
MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent RHS expression in the final /// reduction expression performed by the reduction clause. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL The variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// \param Privates List of helper expressions for proper generation of /// private copies. 
/// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// LHSs of the reduction expressions. /// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. /// \param ReductionOps List of helper expressions that represents reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CutomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. 
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPReductionClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_reduction; } }; /// This represents clause 'task_reduction' in the '#pragma omp taskgroup' /// directives. 
/// /// \code /// #pragma omp taskgroup task_reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp taskgroup' has clause /// 'task_reduction' with operator '+' and the variables 'a' and 'b'. class OMPTaskReductionClause final : public OMPVarListClause<OMPTaskReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param ColonLoc Location of ':'. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPTaskReductionClause>(OMPC_task_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPTaskReductionClause(unsigned N) : OMPVarListClause<OMPTaskReductionClause>( OMPC_task_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. 
void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent private copy of the reduction variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent LHS expression in the final reduction /// expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent RHS expression in the final reduction /// expression performed by the reduction clause. Also, variables in these /// expressions are used for proper initialization of reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. 
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
static OMPTaskReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return 
helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPTaskReductionClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_task_reduction; } }; /// This represents clause 'in_reduction' in the '#pragma omp task' directives. /// /// \code /// #pragma omp task in_reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp task' has clause 'in_reduction' with /// operator '+' and the variables 'a' and 'b'. class OMPInReductionClause final : public OMPVarListClause<OMPInReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPInReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param ColonLoc Location of ':'. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. 
OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPInReductionClause>(OMPC_in_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPInReductionClause(unsigned N) : OMPVarListClause<OMPInReductionClause>( OMPC_in_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent private copy of the reduction variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent LHS expression in the final reduction /// expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. 
MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent RHS expression in the final reduction /// expression performed by the reduction clause. Also, variables in these /// expressions are used for proper initialization of reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } /// Set list of helper reduction taskgroup descriptors. void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction taskgroup descriptors. MutableArrayRef<Expr *> getTaskgroupDescriptors() { return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size()); } ArrayRef<const Expr *> getTaskgroupDescriptors() const { return llvm::makeArrayRef(getReductionOps().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. 
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param TaskgroupDescriptors List of helper taskgroup descriptors for
  /// corresponding items in parent taskgroup task_reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
static OMPInReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> TaskgroupDescriptors, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return 
helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_const_range taskgroup_descriptors() const { return helper_expr_const_range(getTaskgroupDescriptors().begin(), getTaskgroupDescriptors().end()); } helper_expr_range taskgroup_descriptors() { return helper_expr_range(getTaskgroupDescriptors().begin(), getTaskgroupDescriptors().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPInReductionClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_in_reduction; } }; /// This represents clause 'linear' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd linear(a,b : 2) /// \endcode /// In this example directive '#pragma omp simd' has clause 'linear' /// with variables 'a', 'b' and linear step '2'. class OMPLinearClause final : public OMPVarListClause<OMPLinearClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPLinearClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Modifier of 'linear' clause. OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val; /// Location of linear modifier if any. SourceLocation ModifierLoc; /// Location of ':'. SourceLocation ColonLoc; /// Sets the linear step for clause. void setStep(Expr *Step) { *(getFinals().end()) = Step; } /// Sets the expression to calculate linear step for clause. 
void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; } /// Build 'linear' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc, EndLoc, NumVars), OMPClauseWithPostUpdate(this), Modifier(Modifier), ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param NumVars Number of variables. explicit OMPLinearClause(unsigned NumVars) : OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(), SourceLocation(), SourceLocation(), NumVars), OMPClauseWithPostUpdate(this) {} /// Gets the list of initial values for linear variables. /// /// There are NumVars expressions with initial values allocated after the /// varlist, they are followed by NumVars update expressions (used to update /// the linear variable's value on current iteration) and they are followed by /// NumVars final expressions (used to calculate the linear variable's /// value after the loop body). 
After these lists, there are 2 helper
  /// expressions - linear step and a helper to calculate it before the
  /// loop body (used when the linear step is not constant):
  ///
  /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
  /// Finals[]; Step; CalcStep; }
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Gets the list of update expressions for linear variables.
  MutableArrayRef<Expr *> getUpdates() {
    return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
  }
  ArrayRef<const Expr *> getUpdates() const {
    return llvm::makeArrayRef(getInits().end(), varlist_size());
  }

  /// Gets the list of final update expressions for linear variables.
  MutableArrayRef<Expr *> getFinals() {
    return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getFinals() const {
    return llvm::makeArrayRef(getUpdates().end(), varlist_size());
  }

  /// Sets the list of the copies of original linear variables.
  /// \param PL List of expressions.
  void setPrivates(ArrayRef<Expr *> PL);

  /// Sets the list of the initial values for linear variables.
  /// \param IL List of expressions.
  void setInits(ArrayRef<Expr *> IL);

public:
  /// Creates clause with a list of variables \a VL and a linear step
  /// \a Step.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
/// \param PL List of private copies of original variables. /// \param IL List of initial values for the variables. /// \param Step Linear step. /// \param CalcStep Calculation of the linear step. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPLinearClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// Set modifier. void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; } /// Return modifier. OpenMPLinearClauseKind getModifier() const { return Modifier; } /// Set modifier location. void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; } /// Return modifier location. SourceLocation getModifierLoc() const { return ModifierLoc; } /// Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Returns the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns linear step. Expr *getStep() { return *(getFinals().end()); } /// Returns linear step. const Expr *getStep() const { return *(getFinals().end()); } /// Returns expression to calculate linear step. Expr *getCalcStep() { return *(getFinals().end() + 1); } /// Returns expression to calculate linear step. const Expr *getCalcStep() const { return *(getFinals().end() + 1); } /// Sets the list of update expressions for linear variables. /// \param UL List of expressions. 
void setUpdates(ArrayRef<Expr *> UL); /// Sets the list of final update expressions for linear variables. /// \param FL List of expressions. void setFinals(ArrayRef<Expr *> FL); using privates_iterator = MutableArrayRef<Expr *>::iterator; using privates_const_iterator = ArrayRef<const Expr *>::iterator; using privates_range = llvm::iterator_range<privates_iterator>; using privates_const_range = llvm::iterator_range<privates_const_iterator>; privates_range privates() { return privates_range(getPrivates().begin(), getPrivates().end()); } privates_const_range privates() const { return privates_const_range(getPrivates().begin(), getPrivates().end()); } using inits_iterator = MutableArrayRef<Expr *>::iterator; using inits_const_iterator = ArrayRef<const Expr *>::iterator; using inits_range = llvm::iterator_range<inits_iterator>; using inits_const_range = llvm::iterator_range<inits_const_iterator>; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } using updates_iterator = MutableArrayRef<Expr *>::iterator; using updates_const_iterator = ArrayRef<const Expr *>::iterator; using updates_range = llvm::iterator_range<updates_iterator>; using updates_const_range = llvm::iterator_range<updates_const_iterator>; updates_range updates() { return updates_range(getUpdates().begin(), getUpdates().end()); } updates_const_range updates() const { return updates_const_range(getUpdates().begin(), getUpdates().end()); } using finals_iterator = MutableArrayRef<Expr *>::iterator; using finals_const_iterator = ArrayRef<const Expr *>::iterator; using finals_range = llvm::iterator_range<finals_iterator>; using finals_const_range = llvm::iterator_range<finals_const_iterator>; finals_range finals() { return finals_range(getFinals().begin(), getFinals().end()); } finals_const_range finals() const { return finals_const_range(getFinals().begin(), getFinals().end()); } 
child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPLinearClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_linear; } }; /// This represents clause 'aligned' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd aligned(a,b : 8) /// \endcode /// In this example directive '#pragma omp simd' has clause 'aligned' /// with variables 'a', 'b' and alignment '8'. class OMPAlignedClause final : public OMPVarListClause<OMPAlignedClause>, private llvm::TrailingObjects<OMPAlignedClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. SourceLocation ColonLoc; /// Sets the alignment for clause. void setAlignment(Expr *A) { *varlist_end() = A; } /// Build 'aligned' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc, EndLoc, NumVars), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param NumVars Number of variables. 
explicit OMPAlignedClause(unsigned NumVars) : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(), SourceLocation(), SourceLocation(), NumVars) {} public: /// Creates clause with a list of variables \a VL and alignment \a A. /// /// \param C AST Context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param A Alignment. static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, Expr *A); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Returns the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns alignment. Expr *getAlignment() { return *varlist_end(); } /// Returns alignment. const Expr *getAlignment() const { return *varlist_end(); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPAlignedClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_aligned; } }; /// This represents clause 'copyin' in the '#pragma omp ...' directives. 
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
class OMPCopyinClause final
    : public OMPVarListClause<OMPCopyinClause>,
      private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
  // Class has 3 additional tail allocated arrays:
  // 1. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents sources.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents destinations.
  // 3. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of propagation of master's thread values of
  // threadprivate variables to local instances of that variables in other
  // implicit threads.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc,
                                          EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyinClause(unsigned N)
      : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyin clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  // The three helper arrays are laid out consecutively in trailing storage:
  // sources start at varlist_end(), destinations follow the sources, and
  // assignment ops follow the destinations (each varlist_size() long).
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyin clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of propagation of master's thread values of
  /// threadprivate variables to local instances of that variables in other
  /// implicit threads.
  static OMPCopyinClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }

  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }

  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }

  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }

  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }

  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPCopyinClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_copyin;
  }
};

/// This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
class OMPCopyprivateClause final
    : public OMPVarListClause<OMPCopyprivateClause>,
      private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc,
                                               LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyprivateClause(unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(
            OMPC_copyprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  // Same trailing-storage layout as OMPCopyinClause: sources at
  // varlist_end(), then destinations, then assignment ops.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// copyprivate clause.
  static OMPCopyprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }

  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }

  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }

  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }

  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }

  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPCopyprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_copyprivate;
  }
};

/// This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
class OMPFlushClause final
    : public OMPVarListClause<OMPFlushClause>,
      private llvm::TrailingObjects<OMPFlushClause, Expr *> {
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc,
                                         EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFlushClause(unsigned N)
      : OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(),
                                         SourceLocation(), SourceLocation(),
                                         N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFlushClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_flush;
  }
};

/// This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' with clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
class OMPDependClause final
    : public OMPVarListClause<OMPDependClause>,
      private llvm::TrailingObjects<OMPDependClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Dependency type (one of in, out, inout).
  OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;

  /// Dependency type location.
  SourceLocation DepLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Number of loops, associated with the depend clause.
  unsigned NumLoops = 0;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N, unsigned NumLoops)
      : OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc,
                                          EndLoc, N),
        NumLoops(NumLoops) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  explicit OMPDependClause(unsigned N, unsigned NumLoops)
      : OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N),
        NumLoops(NumLoops) {}

  /// Set dependency kind.
  void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }

  /// Set dependency kind and its location.
  void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param DepKind Dependency type.
  /// \param DepLoc Location of the dependency type.
  /// \param ColonLoc Colon location.
  /// \param VL List of references to the variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc,
                                 OpenMPDependClauseKind DepKind,
                                 SourceLocation DepLoc, SourceLocation ColonLoc,
                                 ArrayRef<Expr *> VL, unsigned NumLoops);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N,
                                      unsigned NumLoops);

  /// Get dependency type.
  OpenMPDependClauseKind getDependencyKind() const { return DepKind; }

  /// Get dependency type location.
  SourceLocation getDependencyLoc() const { return DepLoc; }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Get number of loops associated with the clause.
  unsigned getNumLoops() const { return NumLoops; }

  /// Set the loop data for the depend clauses with 'sink|source' kind of
  /// dependency.
  // Definitions live outside this header; presumably the loop data occupies
  // trailing expression slots -- confirm in the .cpp before relying on it.
  void setLoopData(unsigned NumLoop, Expr *Cnt);

  /// Get the loop data.
  Expr *getLoopData(unsigned NumLoop);
  const Expr *getLoopData(unsigned NumLoop) const;

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDependClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_depend;
  }
};

/// This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Device number.
  Stmt *Device = nullptr;

  /// Set the device number.
  ///
  /// \param E Device number.
  void setDevice(Expr *E) { Device = E; }

public:
  /// Build 'device' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDeviceClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                  SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(OMPC_device, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Device(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPDeviceClause()
      : OMPClause(OMPC_device, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return device number.
  Expr *getDevice() { return cast<Expr>(Device); }

  /// Return device number.
  // NOTE(review): const overload returns a non-const Expr *; kept as-is since
  // callers may rely on this signature.
  Expr *getDevice() const { return cast<Expr>(Device); }

  child_range children() { return child_range(&Device, &Device + 1); }

  const_child_range children() const {
    return const_child_range(&Device, &Device + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_device;
  }
};

/// This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads' clause.
class OMPThreadsClause : public OMPClause {
public:
  /// Build 'threads' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_threads, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPThreadsClause()
      : OMPClause(OMPC_threads, SourceLocation(), SourceLocation()) {}

  // Simple clause: carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_threads;
  }
};

/// This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
class OMPSIMDClause : public OMPClause {
public:
  /// Build 'simd' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_simd, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPSIMDClause() : OMPClause(OMPC_simd, SourceLocation(), SourceLocation()) {}

  // Simple clause: carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simd;
  }
};

/// Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
  /// Class that represents a component of a mappable expression. E.g.
  /// for an expression S.a, the first component is a declaration reference
  /// expression associated with 'S' and the second is a member expression
  /// associated with the field declaration 'a'. If the expression is an array
  /// subscript it may not have any associated declaration. In that case the
  /// associated declaration is set to nullptr.
  class MappableComponent {
    /// Expression associated with the component.
    Expr *AssociatedExpression = nullptr;

    /// Declaration associated with the component. If the component does
    /// not have a declaration (e.g. array subscripts or section), this is set
    /// to nullptr.
    ValueDecl *AssociatedDeclaration = nullptr;

  public:
    explicit MappableComponent() = default;
    explicit MappableComponent(Expr *AssociatedExpression,
                               ValueDecl *AssociatedDeclaration)
        : AssociatedExpression(AssociatedExpression),
          // Declarations are stored in canonical form so that equality
          // comparisons across redeclarations work.
          AssociatedDeclaration(
              AssociatedDeclaration
                  ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
                  : nullptr) {}

    Expr *getAssociatedExpression() const { return AssociatedExpression; }

    ValueDecl *getAssociatedDeclaration() const {
      return AssociatedDeclaration;
    }
  };

  // List of components of an expression. This first one is the whole
  // expression and the last one is the base expression.
  using MappableExprComponentList = SmallVector<MappableComponent, 8>;
  using MappableExprComponentListRef = ArrayRef<MappableComponent>;

  // List of all component lists associated to the same base declaration.
  // E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have
  // their component list but the same base declaration 'S'.
  using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
  using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;

protected:
  // Return the total number of elements in a list of component lists.
  static unsigned
  getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);

  // Return the total number of elements in a list of declarations.
  // All declarations are expected to be canonical.
  static unsigned
  getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations);
};

/// This structure contains all sizes needed for by an
/// OMPMappableExprListClause.
// Plain aggregate of the four counts a mappable-expression clause needs.
struct OMPMappableExprListSizeTy {
  /// Number of expressions listed.
  unsigned NumVars;

  /// Number of unique base declarations.
  unsigned NumUniqueDeclarations;

  /// Number of component lists.
  unsigned NumComponentLists;

  /// Total number of expression components.
  unsigned NumComponents;

  OMPMappableExprListSizeTy() = default;
  OMPMappableExprListSizeTy(unsigned NumVars, unsigned NumUniqueDeclarations,
                            unsigned NumComponentLists, unsigned NumComponents)
      : NumVars(NumVars), NumUniqueDeclarations(NumUniqueDeclarations),
        NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}
};

/// This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from
/// in '#pragma omp target update...' directives.
// NOTE(review): this template's definition continues past this chunk and is
// kept unchanged.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
                                  public OMPClauseMappableExprCommon {
  friend class OMPClauseReader;

  /// Number of unique declarations in this clause.
  unsigned NumUniqueDeclarations;

  /// Number of component lists in this clause.
  unsigned NumComponentLists;

  /// Total number of components in this clause.
  unsigned NumComponents;

  /// C++ nested name specifier for the associated user-defined mapper.
  NestedNameSpecifierLoc MapperQualifierLoc;

  /// The associated user-defined mapper identifier information.
  DeclarationNameInfo MapperIdInfo;

protected:
  /// Build a clause for \a NumUniqueDeclarations declarations, \a
  /// NumComponentLists total component lists, and \a NumComponents total
  /// components.
  ///
  /// \param K Kind of the clause.
  /// \param Locs Locations needed to build a mappable clause.
It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. /// \param MapperQualifierLocPtr C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperIdInfoPtr The identifier of associated user-defined mapper. OMPMappableExprListClause( OpenMPClauseKind K, const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes, NestedNameSpecifierLoc *MapperQualifierLocPtr = nullptr, DeclarationNameInfo *MapperIdInfoPtr = nullptr) : OMPVarListClause<T>(K, Locs.StartLoc, Locs.LParenLoc, Locs.EndLoc, Sizes.NumVars), NumUniqueDeclarations(Sizes.NumUniqueDeclarations), NumComponentLists(Sizes.NumComponentLists), NumComponents(Sizes.NumComponents) { if (MapperQualifierLocPtr) MapperQualifierLoc = *MapperQualifierLocPtr; if (MapperIdInfoPtr) MapperIdInfo = *MapperIdInfoPtr; } /// Get the unique declarations that are in the trailing objects of the /// class. MutableArrayRef<ValueDecl *> getUniqueDeclsRef() { return MutableArrayRef<ValueDecl *>( static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(), NumUniqueDeclarations); } /// Get the unique declarations that are in the trailing objects of the /// class. ArrayRef<ValueDecl *> getUniqueDeclsRef() const { return ArrayRef<ValueDecl *>( static_cast<const T *>(this) ->template getTrailingObjects<ValueDecl *>(), NumUniqueDeclarations); } /// Set the unique declarations that are in the trailing objects of the /// class. 
void setUniqueDecls(ArrayRef<ValueDecl *> UDs) { assert(UDs.size() == NumUniqueDeclarations && "Unexpected amount of unique declarations."); std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin()); } /// Get the number of lists per declaration that are in the trailing /// objects of the class. MutableArrayRef<unsigned> getDeclNumListsRef() { return MutableArrayRef<unsigned>( static_cast<T *>(this)->template getTrailingObjects<unsigned>(), NumUniqueDeclarations); } /// Get the number of lists per declaration that are in the trailing /// objects of the class. ArrayRef<unsigned> getDeclNumListsRef() const { return ArrayRef<unsigned>( static_cast<const T *>(this)->template getTrailingObjects<unsigned>(), NumUniqueDeclarations); } /// Set the number of lists per declaration that are in the trailing /// objects of the class. void setDeclNumLists(ArrayRef<unsigned> DNLs) { assert(DNLs.size() == NumUniqueDeclarations && "Unexpected amount of list numbers."); std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin()); } /// Get the cumulative component lists sizes that are in the trailing /// objects of the class. They are appended after the number of lists. MutableArrayRef<unsigned> getComponentListSizesRef() { return MutableArrayRef<unsigned>( static_cast<T *>(this)->template getTrailingObjects<unsigned>() + NumUniqueDeclarations, NumComponentLists); } /// Get the cumulative component lists sizes that are in the trailing /// objects of the class. They are appended after the number of lists. ArrayRef<unsigned> getComponentListSizesRef() const { return ArrayRef<unsigned>( static_cast<const T *>(this)->template getTrailingObjects<unsigned>() + NumUniqueDeclarations, NumComponentLists); } /// Set the cumulative component lists sizes that are in the trailing /// objects of the class. 
void setComponentListSizes(ArrayRef<unsigned> CLSs) { assert(CLSs.size() == NumComponentLists && "Unexpected amount of component lists."); std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin()); } /// Get the components that are in the trailing objects of the class. MutableArrayRef<MappableComponent> getComponentsRef() { return MutableArrayRef<MappableComponent>( static_cast<T *>(this) ->template getTrailingObjects<MappableComponent>(), NumComponents); } /// Get the components that are in the trailing objects of the class. ArrayRef<MappableComponent> getComponentsRef() const { return ArrayRef<MappableComponent>( static_cast<const T *>(this) ->template getTrailingObjects<MappableComponent>(), NumComponents); } /// Set the components that are in the trailing objects of the class. /// This requires the list sizes so that it can also fill the original /// expressions, which are the first component of each list. void setComponents(ArrayRef<MappableComponent> Components, ArrayRef<unsigned> CLSs) { assert(Components.size() == NumComponents && "Unexpected amount of component lists."); assert(CLSs.size() == NumComponentLists && "Unexpected amount of list sizes."); std::copy(Components.begin(), Components.end(), getComponentsRef().begin()); } /// Fill the clause information from the list of declarations and /// associated component lists. void setClauseInfo(ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists) { // Perform some checks to make sure the data sizes are consistent with the // information available when the clause was created. 
assert(getUniqueDeclarationsTotalNumber(Declarations) == NumUniqueDeclarations && "Unexpected number of mappable expression info entries!"); assert(getComponentsTotalNumber(ComponentLists) == NumComponents && "Unexpected total number of components!"); assert(Declarations.size() == ComponentLists.size() && "Declaration and component lists size is not consistent!"); assert(Declarations.size() == NumComponentLists && "Unexpected declaration and component lists size!"); // Organize the components by declaration and retrieve the original // expression. Original expressions are always the first component of the // mappable component list. llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>> ComponentListMap; { auto CI = ComponentLists.begin(); for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE; ++DI, ++CI) { assert(!CI->empty() && "Invalid component list!"); ComponentListMap[*DI].push_back(*CI); } } // Iterators of the target storage. auto UniqueDeclarations = getUniqueDeclsRef(); auto UDI = UniqueDeclarations.begin(); auto DeclNumLists = getDeclNumListsRef(); auto DNLI = DeclNumLists.begin(); auto ComponentListSizes = getComponentListSizesRef(); auto CLSI = ComponentListSizes.begin(); auto Components = getComponentsRef(); auto CI = Components.begin(); // Variable to compute the accumulation of the number of components. unsigned PrevSize = 0u; // Scan all the declarations and associated component lists. for (auto &M : ComponentListMap) { // The declaration. auto *D = M.first; // The component lists. auto CL = M.second; // Initialize the entry. *UDI = D; ++UDI; *DNLI = CL.size(); ++DNLI; // Obtain the cumulative sizes and concatenate all the components in the // reserved storage. for (auto C : CL) { // Accumulate with the previous size. PrevSize += C.size(); // Save the size. *CLSI = PrevSize; ++CLSI; // Append components after the current components iterator. 
CI = std::copy(C.begin(), C.end(), CI); } } } /// Set the nested name specifier of associated user-defined mapper. void setMapperQualifierLoc(NestedNameSpecifierLoc NNSL) { MapperQualifierLoc = NNSL; } /// Set the name of associated user-defined mapper. void setMapperIdInfo(DeclarationNameInfo MapperId) { MapperIdInfo = MapperId; } /// Get the user-defined mapper references that are in the trailing objects of /// the class. MutableArrayRef<Expr *> getUDMapperRefs() { return llvm::makeMutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>() + OMPVarListClause<T>::varlist_size(), OMPVarListClause<T>::varlist_size()); } /// Get the user-defined mappers references that are in the trailing objects /// of the class. ArrayRef<Expr *> getUDMapperRefs() const { return llvm::makeArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>() + OMPVarListClause<T>::varlist_size(), OMPVarListClause<T>::varlist_size()); } /// Set the user-defined mappers that are in the trailing objects of the /// class. void setUDMapperRefs(ArrayRef<Expr *> DMDs) { assert(DMDs.size() == OMPVarListClause<T>::varlist_size() && "Unexpected number of user-defined mappers."); std::copy(DMDs.begin(), DMDs.end(), getUDMapperRefs().begin()); } public: /// Return the number of unique base declarations in this clause. unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; } /// Return the number of lists derived from the clause expressions. unsigned getTotalComponentListNum() const { return NumComponentLists; } /// Return the total number of components in all lists derived from the /// clause. unsigned getTotalComponentsNum() const { return NumComponents; } /// Gets the nested name specifier for associated user-defined mapper. NestedNameSpecifierLoc getMapperQualifierLoc() const { return MapperQualifierLoc; } /// Gets the name info for associated user-defined mapper. 
  const DeclarationNameInfo &getMapperIdInfo() const { return MapperIdInfo; }

  /// Iterator that browse the components by lists. It also allows
  /// browsing components of a single declaration.
  class const_component_lists_iterator
      : public llvm::iterator_adaptor_base<
            const_component_lists_iterator,
            MappableExprComponentListRef::const_iterator,
            std::forward_iterator_tag, MappableComponent, ptrdiff_t,
            MappableComponent, MappableComponent> {
    // The declaration the iterator currently refers to.
    ArrayRef<ValueDecl *>::iterator DeclCur;

    // The list number associated with the current declaration.
    ArrayRef<unsigned>::iterator NumListsCur;

    // Remaining lists for the current declaration.
    unsigned RemainingLists = 0;

    // The cumulative size of the previous list, or zero if there is no previous
    // list.
    unsigned PrevListSize = 0;

    // The cumulative sizes of the current list - it will delimit the remaining
    // range of interest.
    ArrayRef<unsigned>::const_iterator ListSizeCur;
    ArrayRef<unsigned>::const_iterator ListSizeEnd;

    // Iterator to the end of the components storage.
    MappableExprComponentListRef::const_iterator End;

  public:
    /// Construct an iterator that scans all lists.
    explicit const_component_lists_iterator(
        ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
        ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator::iterator_adaptor_base(
              Components.begin()),
          DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
          ListSizeCur(CumulativeListSizes.begin()),
          ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
      assert(UniqueDecls.size() == DeclsListNum.size() &&
             "Inconsistent number of declarations and list sizes!");
      if (!DeclsListNum.empty())
        RemainingLists = *NumListsCur;
    }

    /// Construct an iterator that scan lists for a given declaration \a
    /// Declaration.
    explicit const_component_lists_iterator(
        const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
        ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator(UniqueDecls, DeclsListNum,
                                         CumulativeListSizes, Components) {
      // Look for the desired declaration. While we are looking for it, we
      // update the state so that we know the component where a given list
      // starts.
      for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
        if (*DeclCur == Declaration)
          break;

        assert(*NumListsCur > 0 && "No lists associated with declaration??");

        // Skip the lists associated with the current declaration, but save the
        // last list size that was skipped.
        std::advance(ListSizeCur, *NumListsCur - 1);
        PrevListSize = *ListSizeCur;
        ++ListSizeCur;
      }

      // If we didn't find any declaration, advance the iterator to after the
      // last component and set remaining lists to zero.
      if (ListSizeCur == CumulativeListSizes.end()) {
        this->I = End;
        RemainingLists = 0u;
        return;
      }

      // Set the remaining lists with the total number of lists of the current
      // declaration.
      RemainingLists = *NumListsCur;

      // Adjust the list size end iterator to the end of the relevant range.
      ListSizeEnd = ListSizeCur;
      std::advance(ListSizeEnd, RemainingLists);

      // Given that the list sizes are cumulative, the index of the component
      // that start the list is the size of the previous list.
      std::advance(this->I, PrevListSize);
    }

    // Return the array with the current list. The sizes are cumulative, so the
    // array size is the difference between the current size and previous one.
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator*() const {
      assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
      return std::make_pair(
          *DeclCur,
          MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize));
    }
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator->() const {
      return **this;
    }

    // Skip the components of the current list.
    const_component_lists_iterator &operator++() {
      assert(ListSizeCur != ListSizeEnd && RemainingLists &&
             "Invalid iterator!");

      // If we don't have more lists just skip all the components. Otherwise,
      // advance the iterator by the number of components in the current list.
      if (std::next(ListSizeCur) == ListSizeEnd) {
        this->I = End;
        RemainingLists = 0;
      } else {
        std::advance(this->I, *ListSizeCur - PrevListSize);
        PrevListSize = *ListSizeCur;

        // We are done with a declaration, move to the next one.
        if (!(--RemainingLists)) {
          ++DeclCur;
          ++NumListsCur;
          RemainingLists = *NumListsCur;
          assert(RemainingLists && "No lists in the following declaration??");
        }
      }

      ++ListSizeCur;
      return *this;
    }
  };

  using const_component_lists_range =
      llvm::iterator_range<const_component_lists_iterator>;

  /// Iterators for all component lists.
  const_component_lists_iterator component_lists_begin() const {
    return const_component_lists_iterator(
        getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
        getComponentsRef());
  }
  const_component_lists_iterator component_lists_end() const {
    // The end iterator carries empty declaration/size arrays and an empty
    // component range positioned at the end of the component storage.
    return const_component_lists_iterator(
        ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
        MappableExprComponentListRef(getComponentsRef().end(),
                                     getComponentsRef().end()));
  }
  const_component_lists_range component_lists() const {
    return {component_lists_begin(), component_lists_end()};
  }

  /// Iterators for component lists associated with the provided
  /// declaration.
  const_component_lists_iterator
  decl_component_lists_begin(const ValueDecl *VD) const {
    return const_component_lists_iterator(
        VD, getUniqueDeclsRef(), getDeclNumListsRef(),
        getComponentListSizesRef(), getComponentsRef());
  }
  const_component_lists_iterator decl_component_lists_end() const {
    return component_lists_end();
  }
  const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
    return {decl_component_lists_begin(VD), decl_component_lists_end()};
  }

  /// Iterators to access all the declarations, number of lists, list sizes, and
  /// components.
  using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
  using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>;

  const_all_decls_range all_decls() const {
    auto A = getUniqueDeclsRef();
    return const_all_decls_range(A.begin(), A.end());
  }

  using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
  using const_all_num_lists_range =
      llvm::iterator_range<const_all_num_lists_iterator>;

  const_all_num_lists_range all_num_lists() const {
    auto A = getDeclNumListsRef();
    return const_all_num_lists_range(A.begin(), A.end());
  }

  using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
  using const_all_lists_sizes_range =
      llvm::iterator_range<const_all_lists_sizes_iterator>;

  const_all_lists_sizes_range all_lists_sizes() const {
    auto A = getComponentListSizesRef();
    return const_all_lists_sizes_range(A.begin(), A.end());
  }

  using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
  using const_all_components_range =
      llvm::iterator_range<const_all_components_iterator>;

  const_all_components_range all_components() const {
    auto A = getComponentsRef();
    return const_all_components_range(A.begin(), A.end());
  }

  using mapperlist_iterator = MutableArrayRef<Expr *>::iterator;
  using mapperlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using mapperlist_range = llvm::iterator_range<mapperlist_iterator>;
  using mapperlist_const_range =
      llvm::iterator_range<mapperlist_const_iterator>;

  mapperlist_iterator mapperlist_begin() { return getUDMapperRefs().begin(); }
  mapperlist_iterator mapperlist_end() { return getUDMapperRefs().end(); }
  mapperlist_const_iterator mapperlist_begin() const {
    return getUDMapperRefs().begin();
  }
  mapperlist_const_iterator mapperlist_end() const {
    return getUDMapperRefs().end();
  }
  mapperlist_range mapperlists() {
    return mapperlist_range(mapperlist_begin(), mapperlist_end());
  }
  mapperlist_const_range mapperlists() const {
    return mapperlist_const_range(mapperlist_begin(), mapperlist_end());
  }
};

/// This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
                           private llvm::TrailingObjects<
                               OMPMapClause, Expr *, ValueDecl *, unsigned,
                               OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    // One entry per unique declaration (list counts) plus one per component
    // list (cumulative sizes) share the same trailing 'unsigned' array.
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Number of allowed map-type-modifiers.
  static constexpr unsigned NumberOfModifiers =
      OMPC_MAP_MODIFIER_last - OMPC_MAP_MODIFIER_unknown - 1;

private:
  /// Map-type-modifiers for the 'map' clause.
  OpenMPMapModifierKind MapTypeModifiers[NumberOfModifiers] = {
      OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
      OMPC_MAP_MODIFIER_unknown};

  /// Location of map-type-modifiers for the 'map' clause.
  SourceLocation MapTypeModifiersLoc[NumberOfModifiers];

  /// Map type for the 'map' clause.
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;

  /// Is this an implicit map type or not.
  bool MapTypeIsImplicit = false;

  /// Location of the map type.
  SourceLocation MapLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build a clause for \a NumVars listed expressions, \a
  /// NumUniqueDeclarations declarations, \a NumComponentLists total component
  /// lists, and \a NumComponents total expression components.
  ///
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Locations of map-type-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param MapType Map type.
  /// \param MapTypeIsImplicit Map type is inferred implicitly.
  /// \param MapLoc Location of the map type.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(ArrayRef<OpenMPMapModifierKind> MapModifiers,
                        ArrayRef<SourceLocation> MapModifiersLoc,
                        NestedNameSpecifierLoc MapperQualifierLoc,
                        DeclarationNameInfo MapperIdInfo,
                        OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
                        SourceLocation MapLoc, const OMPVarListLocTy &Locs,
                        const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_map, Locs, Sizes, &MapperQualifierLoc,
                                  &MapperIdInfo),
        MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit),
        MapLoc(MapLoc) {
    // The caller must supply exactly NumberOfModifiers entries in both
    // modifier arrays (unused slots carry OMPC_MAP_MODIFIER_unknown).
    assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() &&
           "Unexpected number of map type modifiers.");
    llvm::copy(MapModifiers, std::begin(MapTypeModifiers));

    assert(llvm::array_lengthof(MapTypeModifiersLoc) ==
               MapModifiersLoc.size() &&
           "Unexpected number of map type modifier locations.");
    llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_map, OMPVarListLocTy(), Sizes) {}

  /// Set map-type-modifier for the clause.
  ///
  /// \param I index for map-type-modifier.
  /// \param T map-type-modifier for the clause.
  void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) {
    assert(I < NumberOfModifiers &&
           "Unexpected index to store map type modifier, exceeds array size.");
    MapTypeModifiers[I] = T;
  }

  /// Set location for the map-type-modifier.
  ///
  /// \param I index for map-type-modifier location.
  /// \param TLoc map-type-modifier location.
  void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfModifiers &&
           "Index to store map type modifier location exceeds array size.");
    MapTypeModifiersLoc[I] = TLoc;
  }

  /// Set type for the clause.
  ///
  /// \param T Type for the clause.
  void setMapType(OpenMPMapClauseKind T) { MapType = T; }

  /// Set type location.
  ///
  /// \param TLoc Type location.
  void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Location of map-type-modifiers.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  /// \param Type Map type.
  /// \param TypeIsImplicit Map type is inferred implicitly.
  /// \param TypeLoc Location of the map type.
  static OMPMapClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs,
         ArrayRef<OpenMPMapModifierKind> MapModifiers,
         ArrayRef<SourceLocation> MapModifiersLoc,
         NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId,
         OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc);

  /// Creates an empty clause with the place for \a NumVars original
  /// expressions, \a NumUniqueDeclarations declarations, \NumComponentLists
  /// lists, and \a NumComponents expression components.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPMapClause *CreateEmpty(const ASTContext &C,
                                   const OMPMappableExprListSizeTy &Sizes);

  /// Fetches mapping kind for the clause.
  OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }

  /// Is this an implicit map type?
  /// We have to capture 'IsMapTypeImplicit' from the parser for more
  /// informative error messages. It helps distinguish map(r) from
  /// map(tofrom: r), which is important to print more helpful error
  /// messages for some target directives.
  bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }

  /// Fetches the map-type-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for map-type-modifier.
  OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MapTypeModifiers[Cnt];
  }

  /// Fetches the map-type-modifier location at 'Cnt' index of array of
  /// modifiers' locations.
  ///
  /// \param Cnt index for map-type-modifier location.
  SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MapTypeModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of map-type-modifiers.
  ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiers);
  }

  /// Fetches ArrayRef of location of map-type-modifiers.
  ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiersLoc);
  }

  /// Fetches location of clause mapping kind.
  SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPMapClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_map;
  }
};

/// This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// NumTeams number.
  Stmt *NumTeams = nullptr;

  /// Set the NumTeams number.
  ///
  /// \param E NumTeams number.
  void setNumTeams(Expr *E) { NumTeams = E; }

public:
  /// Build 'num_teams' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_num_teams, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), NumTeams(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNumTeamsClause()
      : OMPClause(OMPC_num_teams, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return NumTeams number.
  Expr *getNumTeams() { return cast<Expr>(NumTeams); }

  /// Return NumTeams number.
  Expr *getNumTeams() const { return cast<Expr>(NumTeams); }

  child_range children() { return child_range(&NumTeams, &NumTeams + 1); }

  const_child_range children() const {
    return const_child_range(&NumTeams, &NumTeams + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_teams;
  }
};

/// This represents 'thread_limit' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// ThreadLimit number.
  Stmt *ThreadLimit = nullptr;

  /// Set the ThreadLimit number.
  ///
  /// \param E ThreadLimit number.
  void setThreadLimit(Expr *E) { ThreadLimit = E; }

public:
  /// Build 'thread_limit' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPThreadLimitClause(Expr *E, Stmt *HelperE,
                       OpenMPDirectiveKind CaptureRegion,
                       SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc)
      : OMPClause(OMPC_thread_limit, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPThreadLimitClause()
      : OMPClause(OMPC_thread_limit, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return ThreadLimit number.
  Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); }

  /// Return ThreadLimit number.
  Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); }

  child_range children() {
    return child_range(&ThreadLimit, &ThreadLimit + 1);
  }

  const_child_range children() const {
    return const_child_range(&ThreadLimit, &ThreadLimit + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_thread_limit;
  }
};

/// This represents 'priority' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task priority(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'priority' with
/// single expression 'n'.
class OMPPriorityClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Priority number.
  Stmt *Priority = nullptr;

  /// Set the Priority number.
  ///
  /// \param E Priority number.
  void setPriority(Expr *E) { Priority = E; }

public:
  /// Build 'priority' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPPriorityClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_priority, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Priority(E) {}

  /// Build an empty clause.
  OMPPriorityClause()
      : OMPClause(OMPC_priority, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return Priority number.
  Expr *getPriority() { return cast<Expr>(Priority); }

  /// Return Priority number.
  Expr *getPriority() const { return cast<Expr>(Priority); }

  child_range children() { return child_range(&Priority, &Priority + 1); }

  const_child_range children() const {
    return const_child_range(&Priority, &Priority + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_priority;
  }
};

/// This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Safe iteration space distance.
  Stmt *Grainsize = nullptr;

  /// Set safelen.
  void setGrainsize(Expr *Size) { Grainsize = Size; }

public:
  /// Build 'grainsize' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                     SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_grainsize, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Grainsize(Size) {}

  /// Build an empty clause.
  explicit OMPGrainsizeClause()
      : OMPClause(OMPC_grainsize, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return safe iteration space distance. Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); } child_range children() { return child_range(&Grainsize, &Grainsize + 1); } const_child_range children() const { return const_child_range(&Grainsize, &Grainsize + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_grainsize; } }; /// This represents 'nogroup' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp taskloop nogroup /// \endcode /// In this example directive '#pragma omp taskloop' has 'nogroup' clause. class OMPNogroupClause : public OMPClause { public: /// Build 'nogroup' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_nogroup, StartLoc, EndLoc) {} /// Build an empty clause. OMPNogroupClause() : OMPClause(OMPC_nogroup, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_nogroup; } }; /// This represents 'num_tasks' clause in the '#pragma omp ...' /// directive. 
/// /// \code /// #pragma omp taskloop num_tasks(4) /// \endcode /// In this example directive '#pragma omp taskloop' has clause 'num_tasks' /// with single expression '4'. class OMPNumTasksClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Safe iteration space distance. Stmt *NumTasks = nullptr; /// Set safelen. void setNumTasks(Expr *Size) { NumTasks = Size; } public: /// Build 'num_tasks' clause. /// /// \param Size Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPNumTasksClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_num_tasks, StartLoc, EndLoc), LParenLoc(LParenLoc), NumTasks(Size) {} /// Build an empty clause. explicit OMPNumTasksClause() : OMPClause(OMPC_num_tasks, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return safe iteration space distance. Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); } child_range children() { return child_range(&NumTasks, &NumTasks + 1); } const_child_range children() const { return const_child_range(&NumTasks, &NumTasks + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_num_tasks; } }; /// This represents 'hint' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp critical (name) hint(6) /// \endcode /// In this example directive '#pragma omp critical' has name 'name' and clause /// 'hint' with argument '6'. 
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Hint expression of the 'hint' clause.
  Stmt *Hint = nullptr;

  /// Set hint expression.
  void setHint(Expr *H) { Hint = H; }

public:
  /// Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Hint(Hint) {}

  /// Build an empty clause.
  OMPHintClause() : OMPClause(OMPC_hint, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the hint expression, or null for an empty clause.
  Expr *getHint() const { return cast_or_null<Expr>(Hint); }

  child_range children() { return child_range(&Hint, &Hint + 1); }

  const_child_range children() const {
    return const_child_range(&Hint, &Hint + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_hint;
  }
};

/// This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'dist_schedule' clause.
  OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown;

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'dist_schedule' clause with schedule kind \a Kind and chunk
  /// size expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind DistSchedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation KLoc, SourceLocation CommaLoc,
                        SourceLocation EndLoc,
                        OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
                        Stmt *HelperChunkSize)
      : OMPClause(OMPC_dist_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    // The helper is stored as the pre-init statement of the clause.
    setPreInitStmt(HelperChunkSize);
  }

  /// Build an empty clause.
  explicit OMPDistScheduleClause()
      : OMPClause(OMPC_dist_schedule, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Get kind of the clause.
void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
    Modifier = M;
  }

  /// Set location of the defaultmap modifier.
  void setDefaultmapModifierLoc(SourceLocation Loc) {
    ModifierLoc = Loc;
  }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set defaultmap kind start location.
  ///
  /// \param KLoc Defaultmap kind location.
  void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

public:
  /// Build 'defaultmap' clause with defaultmap kind \a Kind
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Defaultmap kind.
  /// \param M The modifier applied to 'defaultmap' clause.
  /// \param MLoc Location of the modifier
  OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation MLoc, SourceLocation KLoc,
                      SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                      OpenMPDefaultmapClauseModifier M)
      : OMPClause(OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {}

  /// Build an empty clause.
  explicit OMPDefaultmapClause()
      : OMPClause(OMPC_defaultmap, SourceLocation(), SourceLocation()) {}

  /// Get kind of the clause.
  OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }

  /// Get the modifier of the clause.
  OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
    return Modifier;
  }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDefaultmapKindLoc() { return KindLoc; }

  /// Get the modifier location.
  SourceLocation getDefaultmapModifierLoc() const {
    return ModifierLoc;
  }

  // 'defaultmap' has no expression arguments, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_defaultmap;
  }
};

/// This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
                          private llvm::TrailingObjects<
                              OMPToClause, Expr *, ValueDecl *, unsigned,
                              OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(NestedNameSpecifierLoc MapperQualifierLoc,
                       DeclarationNameInfo MapperIdInfo,
                       const OMPVarListLocTy &Locs,
                       const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_to, Locs, Sizes, &MapperQualifierLoc,
                                  &MapperIdInfo) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_to, OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPToClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
                             ArrayRef<Expr *> Vars,
                             ArrayRef<ValueDecl *> Declarations,
                             MappableExprComponentListsRef ComponentLists,
                             ArrayRef<Expr *> UDMapperRefs,
                             NestedNameSpecifierLoc UDMQualifierLoc,
                             DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPToClause *CreateEmpty(const ASTContext &C,
                                  const OMPMappableExprListSizeTy &Sizes);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPToClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_to;
  }
};

/// This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
    : public OMPMappableExprListClause<OMPFromClause>,
      private llvm::TrailingObjects<
          OMPFromClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(NestedNameSpecifierLoc MapperQualifierLoc,
                         DeclarationNameInfo MapperIdInfo,
                         const OMPVarListLocTy &Locs,
                         const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_from, Locs, Sizes, &MapperQualifierLoc,
                                  &MapperIdInfo) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_from, OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPFromClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
                               ArrayRef<Expr *> Vars,
                               ArrayRef<ValueDecl *> Declarations,
                               MappableExprComponentListsRef ComponentLists,
                               ArrayRef<Expr *> UDMapperRefs,
                               NestedNameSpecifierLoc UDMQualifierLoc,
                               DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPFromClause *CreateEmpty(const ASTContext &C,
                                    const OMPMappableExprListSizeTy &Sizes);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFromClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_from;
  }
};

/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
    : public OMPMappableExprListClause<OMPUseDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs,
                                 const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_use_device_ptr, Locs, Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_use_device_ptr, OMPVarListLocTy(),
                                  Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // Three parallel arrays of varlist_size() expressions each: the original
    // variables, the private copies, and the private-copy initializers.
    return 3 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// Sets the list of references to private copies with initializers for new
  /// private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for new
  /// private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    // The private copies are stored immediately after the variable list.
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new private
  /// variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new private
  /// variables.
  MutableArrayRef<Expr *> getInits() {
    // The initializers are stored immediately after the private copies.
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param PrivateVars Expressions referring to private copies.
  /// \param Inits Expressions referring to private copy initializers.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars,
         ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPUseDevicePtrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }

  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPUseDevicePtrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_use_device_ptr;
  }
};

/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
class OMPIsDevicePtrClause final
    : public OMPMappableExprListClause<OMPIsDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs,
                                const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_is_device_ptr, Locs, Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_is_device_ptr, OMPVarListLocTy(),
                                  Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPIsDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPIsDevicePtrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPIsDevicePtrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_is_device_ptr;
  }
};

/// This class implements a simple visitor for OMPClause
/// subclasses.
template<class ImplClass, template <typename> class Ptr, typename RetTy>
class OMPClauseVisitorBase {
public:
#define PTR(CLASS) typename Ptr<CLASS>::type
#define DISPATCH(CLASS) \
  return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S))

  // Generate one Visit##Class method per clause; each defaults to dispatching
  // through the derived visitor (CRTP), which may override any of them.
#define OPENMP_CLAUSE(Name, Class)                              \
  RetTy Visit ## Class (PTR(Class) S) { DISPATCH(Class); }
#include "clang/Basic/OpenMPKinds.def"

  RetTy Visit(PTR(OMPClause) S) {
    // Top switch clause: visit each OMPClause.
    switch (S->getClauseKind()) {
    default: llvm_unreachable("Unknown clause kind!");
#define OPENMP_CLAUSE(Name, Class)                              \
    case OMPC_ ## Name : return Visit ## Class(static_cast<PTR(Class)>(S));
#include "clang/Basic/OpenMPKinds.def"
    }
  }
  // Base case, ignore it.
:) RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); } #undef PTR #undef DISPATCH }; template <typename T> using const_ptr = typename std::add_pointer<typename std::add_const<T>::type>; template<class ImplClass, typename RetTy = void> class OMPClauseVisitor : public OMPClauseVisitorBase <ImplClass, std::add_pointer, RetTy> {}; template<class ImplClass, typename RetTy = void> class ConstOMPClauseVisitor : public OMPClauseVisitorBase <ImplClass, const_ptr, RetTy> {}; class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> { raw_ostream &OS; const PrintingPolicy &Policy; /// Process clauses with list of variables. template <typename T> void VisitOMPClauseList(T *Node, char StartSym); public: OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy) : OS(OS), Policy(Policy) {} #define OPENMP_CLAUSE(Name, Class) void Visit##Class(Class *S); #include "clang/Basic/OpenMPKinds.def" }; } // namespace clang #endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
knn.h
/* Copyright (c) 2020, Intel Corporation

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of Intel Corporation nor the names of its contributors
      may be used to endorse or promote products derived from this software
      without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/

// This is a header (knn.h): guard against multiple inclusion.
#pragma once

#include <iostream>
#include <vector>
#include <cmath>
#include <array>
#include <algorithm>
#include <utility>   // std::pair, std::make_pair (was only available transitively)
#ifdef _OPENMP
#include <omp.h>     // only meaningful when compiled with OpenMP enabled
#endif

using dtype = double;

// Problem-size constants. Functions below are `inline` because this is a
// header: non-inline definitions would violate the ODR when the header is
// included by more than one translation unit.
constexpr size_t classesNum = 3;  // labels are assumed to lie in [0, classesNum)
constexpr size_t dataDim = 16;    // feature dimension of every sample
constexpr size_t k = 5;           // Define the number of nearest neighbors

/// Insert `new_distance` into `queue`, which is sorted ascending by distance.
/// `index` is the slot the new element may occupy (default: the last slot).
/// Classic insertion-sort step: shift larger entries one position right,
/// then store the new element exactly once. (The original stored the element
/// inside the shift loop and skipped the store entirely when no shift was
/// needed, which silently relied on every caller pre-placing the element.)
inline void push_queue(std::array<std::pair<dtype, size_t>, k> &queue,
                       std::pair<dtype, size_t> new_distance,
                       int index = k - 1) {
    while (index > 0 && new_distance.first < queue[index - 1].first) {
        queue[index] = queue[index - 1];
        --index;
    }
    queue[index] = new_distance;
}

/// Sort the fixed-size neighbor queue ascending by distance (insertion sort;
/// the queue holds only k elements, so this is cheap).
inline void sort_queue(std::array<std::pair<dtype, size_t>, k> &queue) {
    for (size_t i = 1; i < queue.size(); ++i) {
        push_queue(queue, queue[i], static_cast<int>(i));
    }
}

/// Euclidean distance between two dataDim-dimensional points.
inline dtype euclidean_dist(const std::array<dtype, dataDim> &x1,
                            const std::array<dtype, dataDim> &x2) {
    dtype distance = 0.0;
    for (std::size_t i = 0; i < dataDim; ++i) {
        const dtype diff = x1[i] - x2[i];
        distance += diff * diff;
    }
    return std::sqrt(distance);
}

/// Unweighted majority vote over the k nearest neighbors.
/// Returns the class with the most votes; ties break toward the lower index.
/// Assumes every label is < classesNum (out-of-range labels would index past
/// the vote array).
inline size_t simple_vote(const std::array<std::pair<dtype, size_t>, k> &neighbors) {
    std::array<size_t, classesNum> votes_to_classes = {};
    for (const auto &neighbor : neighbors) {
        votes_to_classes[neighbor.second]++;
    }
    size_t max_ind = 0;
    size_t max_value = 0;
    for (size_t c = 0; c < classesNum; ++c) {
        if (votes_to_classes[c] > max_value) {
            max_value = votes_to_classes[c];
            max_ind = c;
        }
    }
    return max_ind;
}

/// k-nearest-neighbors classification.
///
/// \param train        training points (must contain at least k rows — the
///                     seeding loop below reads the first k unconditionally).
/// \param train_labels class label per training point, each < classesNum.
/// \param test         points to classify.
/// \return predicted class label for each test point.
///
/// Test points are independent, so the outer loop is parallelized with
/// OpenMP. (The original used `parallel for simd`; `simd` is inappropriate
/// here — the loop body contains branches, calls and a sort — so only the
/// thread-level `parallel for` is kept.)
inline std::vector<size_t> run_knn(const std::vector<std::array<dtype, dataDim>> &train,
                                   const std::vector<size_t> &train_labels,
                                   const std::vector<std::array<dtype, dataDim>> &test) {
    const size_t train_nrows = train.size();
    std::vector<size_t> predictions(test.size());

#pragma omp parallel for
    for (size_t i = 0; i < test.size(); ++i) {
        std::array<std::pair<dtype, size_t>, k> queue_neighbors;

        // Seed the candidate queue with the first k training points, then
        // sort it ascending by distance.
        for (size_t j = 0; j < k; ++j) {
            queue_neighbors[j] = std::make_pair(
                euclidean_dist(train[j], test[i]), train_labels[j]);
        }
        sort_queue(queue_neighbors);

        // Scan the remaining points, keeping only the k smallest distances.
        // push_queue now places the candidate itself, so no pre-store into
        // the last slot is needed.
        for (size_t j = k; j < train_nrows; ++j) {
            const dtype dist = euclidean_dist(train[j], test[i]);
            if (dist < queue_neighbors[k - 1].first) {
                push_queue(queue_neighbors,
                           std::make_pair(dist, train_labels[j]));
            }
        }
        predictions[i] = simple_vote(queue_neighbors);
    }
    return predictions;
}