source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_unop__identity_uint32_uint16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint32_uint16)
// op(A') function:  GB (_unop_tran__identity_uint32_uint16)

// C type:   uint32_t
// A type:   uint16_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals input after the cast)
#define GB_OP(z, x) \
    z = x ;

// casting (widening cast from uint16_t to uint32_t; always value-preserving)
#define GB_CAST(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint32_t z = (uint32_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity operator entrywise: Cx [p] = (uint32_t) Ax [p].
// Returns GrB_NO_VALUE when the kernel is compiled out (GB_DISABLE),
// GrB_SUCCESS otherwise.
GrB_Info GB (_unop_apply__identity_uint32_uint16)
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries (or bitmap slots) to process
    int nthreads                // # of OpenMP threads for the parallel loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse/hypersparse case: every slot p in [0,anz) holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // only slots with Ab [p] != 0 hold a live entry; others are skipped
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose body lives in GB_unop_transpose.c, specialized via the
// GB_* macros defined above; Workspaces/A_slice partition the work.
GrB_Info GB (_unop_tran__identity_uint32_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
SoaDistanceTableAA.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//                    Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_DTDIMPL_AA_H
#define QMCPLUSPLUS_DTDIMPL_AA_H

#include "simd/algorithm.hpp"

namespace qmcplusplus
{

/**@ingroup nnlist
 * @brief A derived class from DistanceTableData, specialized for the dense
 * same-species (AA) case: distances between all pairs of particles of one
 * ParticleSet, stored in SoA layout with SIMD-aligned, padded rows.
 *
 * @tparam T  value type of the distances/displacements
 * @tparam D  spatial dimension
 * @tparam SC boundary-condition specialization selector for DTD_BConds
 */
template<typename T, unsigned D, int SC>
struct SoaDistanceTableAA: public DTD_BConds<T,D,SC>, public DistanceTableData
{
  // number of particles in the (single) particle set
  int Ntargets;
  // Ntargets rounded up to the SIMD alignment boundary
  int Ntargets_padded;

  // Source and visitor sets are the same set (AA table).
  SoaDistanceTableAA(ParticleSet& target)
    : DTD_BConds<T,D,SC>(target.Lattice), DistanceTableData(target,target)
  {
    resize(target.getTotalNum());
  }

#if (__cplusplus >= 201103L)
  // The table is tied to one ParticleSet; forbid default construction and copies.
  SoaDistanceTableAA()=delete;
  SoaDistanceTableAA(const SoaDistanceTableAA&)=delete;
#endif

  ~SoaDistanceTableAA() {}

  /** Number of T elements needed to store rows 0..N-1 of the displacement
   * table, with each row i padded to the alignment boundary.
   * Also serves as the pool offset of row N (see resize()).
   * NOTE(review): the closed form assumes rows of increasing length packed
   * with per-row alignment padding — confirm against getAlignedSize/getAlignment.
   */
  size_t compute_size(int N)
  {
    const size_t N_padded = getAlignedSize<T>(N);
    const size_t Alignment = getAlignment<T>();
    return (N_padded*(2*N-N_padded+1)+(Alignment-1)*N_padded)/2;
  }

  /** Allocate the distance matrix and the packed displacement pool for n
   * particles; row i of Displacements is attached at pool offset
   * compute_size(i), i.e. rows are packed back-to-back.
   */
  void resize(int n)
  {
    N[SourceIndex]=N[VisitorIndex]=Ntargets=n;
    Ntargets_padded=getAlignedSize<T>(n);
    Distances.resize(Ntargets,Ntargets_padded);
    const size_t total_size = compute_size(Ntargets);
    memoryPool.resize(total_size*D);
    Displacements.resize(Ntargets);
    for(int i=0; i<Ntargets; ++i)
      Displacements[i].attachReference(i,total_size,memoryPool.data()+compute_size(i));
    Temp_r.resize(Ntargets);
    Temp_dr.resize(Ntargets);
  }

  /// Recompute the full table; the diagonal is set to a huge value so a
  /// particle is never its own nearest neighbor.
  inline void evaluate(ParticleSet& P)
  {
    constexpr T BigR= std::numeric_limits<T>::max();
    //P.RSoA.copyIn(P.R);
    for(int iat=0; iat<Ntargets; ++iat)
    {
      DTD_BConds<T,D,SC>::computeDistances(P.R[iat], P.RSoA, Distances[iat], Displacements[iat], 0, Ntargets, iat);
      Distances[iat][iat]=BigR; //assign big distance
    }
  }

  /// Recompute only row jat of the table (after particle jat moved).
  inline void evaluate(ParticleSet& P, IndexType jat)
  {
    DTD_BConds<T,D,SC>::computeDistances(P.R[jat], P.RSoA, Distances[jat], Displacements[jat], 0, Ntargets, jat);
    Distances[jat][jat]=std::numeric_limits<T>::max(); //assign a big number
  }

  /// Compute distances from a trial position rnew into Temp_r/Temp_dr
  /// without touching the committed table.
  inline void moveOnSphere(const ParticleSet& P, const PosType& rnew)
  {
    DTD_BConds<T,D,SC>::computeDistances(rnew, P.RSoA, Temp_r.data(),Temp_dr, 0, Ntargets, P.activePtcl);
  }

  ///evaluate the temporary pair relations
  inline void move(const ParticleSet& P, const PosType& rnew)
  {
    //#pragma omp master
    moveOnSphere(P,rnew);
  }

  /** Find the nearest neighbor of particle iat (excluding iat itself).
   * @param newpos if true, search the temporary (trial-move) arrays,
   *               otherwise the committed row Distances[iat]
   * @param[out] r  distance to the nearest neighbor
   * @param[out] dr displacement to the nearest neighbor (only set if found)
   * @return index of the nearest neighbor, or -1 if none found
   */
  int get_first_neighbor(IndexType iat, RealType& r, PosType& dr, bool newpos) const
  {
    RealType min_dist = std::numeric_limits<RealType>::max();
    int index=-1;
    if(newpos)
    {
      for(int jat=0; jat<Ntargets; ++jat)
        if(Temp_r[jat]<min_dist && jat!=iat)
        {
          min_dist = Temp_r[jat];
          index = jat;
        }
      if(index>=0) dr=Temp_dr[index];
    }
    else
    {
      for(int jat=0; jat<Ntargets; ++jat)
        if(Distances[iat][jat]<min_dist && jat!=iat)
        {
          min_dist = Distances[iat][jat];
          index = jat;
        }
      if(index>=0) dr=Displacements[iat][index];
    }
    r=min_dist;
    return index;
  }

  ///update the iat-th row for iat=[0,iat-1)
  // Copies the accepted trial values into the committed table; only the
  // first iat columns are meaningful, rounded up to a full aligned chunk.
  inline void update(IndexType iat)
  {
    if(iat==0) return;
    //update by a cache line
    const int nupdate=getAlignedSize<T>(iat);
    simd::copy_n(Temp_r.data(),nupdate,Distances[iat]);
    for(int idim=0;idim<D; ++idim)
      simd::copy_n(Temp_dr.data(idim),nupdate,Displacements[iat].data(idim));
  }

};
}
#endif
SparseLinear.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SparseLinear.c"
#else

#ifdef _OPENMP
#include <omp.h>
#endif

// Pointer to the start of row r / column c of a 2-D tensor, honoring strides.
#define ROW_PTR2(t, r) (THTensor_(data)(t) + (r) * (t)->stride[0])
#define COL_PTR2(t, c) (THTensor_(data)(t) + (c) * (t)->stride[1])

// Legacy sparse layout: batchsize x nnz x 2 ([index, value] pairs per row).
static bool THNN_(checkLegacyInput)(THTensor* t)
{
  return t->nDimension == 3 && t->size[2] == 2;
}

// COO sparse layout: nnz x 3 rows of [batch index, feature index, value],
// with 1-based indices (hence the "- 1" on every read below).
static bool THNN_(checkInput)(THTensor* t)
{
  return t->nDimension == 2 && t->size[1] == 3;
}

static bool THNN_(checkSize2D)(THTensor* t, int64_t size0, int64_t size1)
{
  return t->nDimension == 2 && t->size[0] == size0 && t->size[1] == size1;
}

static bool THNN_(checkSize1D)(THTensor* t, int64_t size0)
{
  return t->nDimension == 1 && t->size[0] == size0;
}

// Strided scalar accessors (work on non-contiguous tensors).
static void THNN_(set1d)(THTensor *t, int64_t x0, real value)
{
  THStorage_(set)(t->storage, t->storageOffset + x0*t->stride[0], value);
}
static real THNN_(get3d)(const THTensor *t, int64_t x0, int64_t x1, int64_t x2)
{
  return THStorage_(get)(t->storage, t->storageOffset +
                         x0*t->stride[0] + x1*t->stride[1] + x2*t->stride[2]);
}
static real THNN_(get2d)(const THTensor *t, int64_t x0, int64_t x1)
{
  return THStorage_(get)(t->storage, t->storageOffset +
                         x0*t->stride[0] + x1*t->stride[1]);
}

// output = input * weight' + bias, with input in COO format (nnz x 3).
// Builds a CSR-style row index over the (batch-sorted) COO rows, then
// accumulates one axpy per nonzero.
void THNN_(SparseLinear_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THTensor *weight,
          THTensor *bias)
{
  int64_t h, i, j, hp0, hp1;
  int64_t outDim = THTensor_(size)(weight, 0);
  int64_t inDim = THTensor_(size)(weight, 1);
  int64_t batchSize = THTensor_(size)(output, 0);

  THArgCheck(THNN_(checkInput)(input), 2, "input must be in coo format, nnz x 3");
  THArgCheck(THTensor_(isContiguous)(output), 3, "output must be contiguous");
  THArgCheck(THNN_(checkSize1D)(bias, outDim), 5, "bias size wrong");

  int64_t nnz = THTensor_(size)(input, 0);

  // csr[h] .. csr[h+1] bounds the COO rows belonging to batch row h.
  THLongTensor * csr = THLongTensor_newWithSize1d(batchSize+1);
  THLongTensor_zero(csr);

  weight = THTensor_(newContiguous)(weight);

//#pragma omp parallel for private(i, h, hp0, hp1) schedule(static) if (nnz > 10000)
  for (i=0; i<nnz; i++) {
    hp0 = (int64_t)(THNN_(get2d)(input, i, 0)) - 1;
    hp1 = (i+1 == nnz) ?
            batchSize :
            (int64_t)(THNN_(get2d)(input, i+1, 0)) - 1;
    if (hp0 != hp1) for (h = hp0; h < hp1; h++) {
      THLongTensor_set1d(csr, h+1, i+1);
    }
  }

  // output = weight * input + bias
  THTensor_(zero)(output);
#pragma omp parallel for private(h, i) schedule(static) if (nnz > 10000)
  for (h = 0; h < batchSize; h++) {
    int64_t i_start = THLongTensor_get1d(csr, h);
    int64_t i_end = THLongTensor_get1d(csr, h+1);
    for (i = i_start; i < i_end; i++) {
      real val = THNN_(get2d)(input, i, 2);
      if (val == 0) {
        continue;
      }

      int64_t offset = (int64_t)(THNN_(get2d)(input, i, 1)) - 1;
      if (offset >= 0 && offset < inDim) {
        // output[h] += val * weight[:, offset]
        THBlas_(axpy)(outDim,
            val,
            COL_PTR2(weight, offset), weight->stride[0],
            ROW_PTR2(output, h), output->stride[1]);
      } else {
        THError("index out of bound. updateOutput: %d not between 1 and %d",
            offset + 1, inDim);
      }
    }
  }

  // Add bias to every batch row.
  THTensor* output_row = THTensor_(new)();
  for (h = 0; h < batchSize; h++) {
    THTensor_(select)(output_row, output, 0, h);
    THTensor_(cadd)(output_row, bias, 1.0, output_row);
  }
  THTensor_(free)(output_row);
  THLongTensor_free(csr);
  THTensor_(free)(weight);
}

// Same as updateOutput but for the legacy batchsize x nnz x 2 layout.
void THNN_(SparseLinear_legacyUpdateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THTensor *weight,
          THTensor *bias)
{
  int64_t h, i;
  int64_t outDim = THTensor_(size)(weight, 0);
  int64_t inDim = THTensor_(size)(weight, 1);

  THArgCheck(THNN_(checkLegacyInput)(input), 2, "input size must be batchsize x nnz x 2");
  THArgCheck(THTensor_(isContiguous)(output), 3, "output must be contiguous");
  THArgCheck(THNN_(checkSize1D)(bias, outDim), 5, "bias size wrong");

  weight = THTensor_(newContiguous)(weight);

  int64_t batchSize = THTensor_(size)(input, 0);
  int64_t nnz = THTensor_(size)(input, 1);
  THTensor_(resize2d)(output, batchSize, outDim);

  // output = weight * input + bias
  THTensor_(zero)(output);
#pragma omp parallel for private(h, i) schedule(static) if ( \
  batchSize > 1 && batchSize * nnz * outDim > 10000)
  for (h = 0; h < batchSize; h++) {
    for (i = 0; i < nnz; i++) {
      real val = THNN_(get3d)(input, h, i, 1);
      if (val == 0) {
        continue;
      }

      int64_t offset = (int64_t)(THNN_(get3d)(input, h, i, 0)) - 1;
      if (offset >= 0 && offset < inDim) {
        THBlas_(axpy)(outDim,
            val,
            COL_PTR2(weight, offset), weight->stride[0],
            ROW_PTR2(output, h), output->stride[1]);
      } else {
        THError("index out of bound. updateOutput: %d not between 1 and %d",
            offset + 1, inDim);
      }
    }
  }

  THTensor* output_row = THTensor_(new)();
  for (h = 0; h < batchSize; h++) {
    THTensor_(select)(output_row, output, 0, h);
    THTensor_(cadd)(output_row, bias, 1.0, output_row);
  }
  THTensor_(free)(output_row);
  THTensor_(free)(weight);
}

// gradWeight += scale * gradOutput' * input; gradBias += scale * sum(gradOutput).
// Builds a CSC-style column index over the (feature-sorted) COO rows so that
// each thread owns disjoint gradWeight columns (no write races).
void THNN_(SparseLinear_accGradParameters)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *weight,
          THTensor *bias,
          accreal weightDecay_,
          accreal scale_)
{
  real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  int64_t h, i, col, hp0, hp1;
  int64_t outDim = THTensor_(size)(weight, 0);
  int64_t inDim = THTensor_(size)(weight, 1);

  THArgCheck(THNN_(checkInput)(input), 2, "input must be in coo format, nnz x 3");
  THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4, "gradWeight size wrong");
  THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5, "gradBias size wrong");
  THArgCheck(THTensor_(isContiguous)(gradOutput), 1, "gradOutput must be contiguous");

  int64_t nnz = THTensor_(size)(input, 0);

  THLongTensor* csc = THLongTensor_newWithSize1d(inDim+1);
  THLongTensor_zero(csc);
  weight = THTensor_(newContiguous)(weight);

#pragma omp parallel for private(i, h, hp0, hp1) schedule(static) if (nnz > 10000)
  for (i = 0; i < nnz; i++) {
    hp0 = (int64_t)(THNN_(get2d)(input, i, 1)) - 1;
    hp1 = (i+1 == nnz) ?
            inDim :
            (int64_t)(THNN_(get2d)(input, i+1, 1)) - 1;
    if (hp0 != hp1) for (h = hp0; h < hp1; h++) {
      THLongTensor_set1d(csc, h+1, i+1);
    }
  }

  // gradWeight += gradOutput * input
#pragma omp parallel for private(h, i, col) schedule(static) if (nnz > 10000)
  for (col = 0; col < inDim; col++) {
    int64_t i_start = THLongTensor_get1d(csc, col);
    int64_t i_end = THLongTensor_get1d(csc, col+1);
    for (i = i_start; i < i_end; i++) {
      real val = scale * THNN_(get2d)(input, i, 2);

      h = (int64_t)(THNN_(get2d)(input, i, 0)) - 1;
      int64_t offset = (int64_t)(THNN_(get2d)(input, i, 1)) - 1;
      if (offset >= 0 && offset < inDim) {
        // gradWeight[:, offset] += val * gradOutput[h]
        THBlas_(axpy)(outDim,
            val,
            ROW_PTR2(gradOutput, h), gradOutput->stride[1],
            COL_PTR2(gradWeight, offset), gradWeight->stride[0]);
      } else {
        THError(
            "index out of bound. accGradParameters: %d not between 1 and %d",
            offset + 1, inDim);
      }
    }
  }

  // gradBias += gradOutput
  THTensor* buf = THTensor_(new)();
  THTensor_(sum)(buf, gradOutput, 0);
  THTensor_(cadd)(gradBias, gradBias, scale, buf);
  THTensor_(free)(buf);
  THLongTensor_free(csc);

  if (weightDecay != 0) {
    THTensor_(cadd)(gradWeight, gradWeight, weightDecay, weight);
  }
  THTensor_(free)(weight);
}

// Legacy-layout variant of accGradParameters.
void THNN_(SparseLinear_legacyAccGradParameters)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *weight,
          THTensor *bias,
          accreal weightDecay_,
          accreal scale_)
{
  real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  int64_t h, i;
  int64_t outDim = THTensor_(size)(weight, 0);
  int64_t inDim = THTensor_(size)(weight, 1);

  THArgCheck(THNN_(checkLegacyInput)(input), 2, "input size must be batchsize x nnz x 2");
  THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4, "gradWeight size wrong");
  THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5, "gradBias size wrong");
  THArgCheck(THTensor_(isContiguous)(gradOutput), 1, "gradOutput must be contiguous");

  int64_t batchSize = THTensor_(size)(input, 0);
  int64_t nnz = THTensor_(size)(input, 1);
  THTensor_(resize2d)(gradOutput, batchSize, outDim);

  // gradWeight += gradOutput * input
#pragma omp parallel for private(h, i) schedule(static) if (\
  batchSize * nnz * outDim > 10000)
  for (i = 0; i < nnz; i++) {
    for (h = 0; h < batchSize; h++) {
      real val = scale * THNN_(get3d)(input, h, i, 1);
      if (val == 0) {
        continue;
      }

      int64_t offset = (int64_t)(THNN_(get3d)(input, h, i, 0)) - 1;
      if (offset >= 0 && offset < inDim) {
        THBlas_(axpy)(outDim,
            val,
            ROW_PTR2(gradOutput, h), gradOutput->stride[1],
            COL_PTR2(gradWeight, offset), gradWeight->stride[0]);
      } else {
        THError(
            "index out of bound. accGradParameters: %d not between 1 and %d",
            offset + 1, inDim);
      }
    }
  }

  // gradBias += gradOutput
  THTensor* gradOutput_row = THTensor_(new)();
  for (h = 0; h < batchSize; h++) {
    THTensor_(select)(gradOutput_row, gradOutput, 0, h);
    THTensor_(cadd)(gradBias, gradBias, scale, gradOutput_row);
  }
  THTensor_(free)(gradOutput_row);

  if (weightDecay != 0) {
    THTensor_(cadd)(gradWeight, gradWeight, weightDecay, weight);
  }
}

// SGD step restricted to the columns touched by lastInput:
// bias -= lr * gradBias; weight[:, touched] -= lr * gradWeight[:, touched].
void THNN_(SparseLinear_updateParameters)(
          THNNState *state,
          THTensor *weight,
          THTensor *bias,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *lastInput,
          accreal learningRate_)
{
  real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_);
  int64_t h, i;
  int64_t outDim = weight->size[0];
  int64_t inDim = weight->size[1];

  THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4, "gradWeight size wrong");
  THArgCheck(THNN_(checkSize1D)(bias, outDim), 3, "bias size wrong");
  THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5, "gradBias size wrong");
  THArgCheck(THNN_(checkInput)(lastInput), 6, "input must be in coo format, nnz x 3");

  int64_t nnz = THTensor_(size)(lastInput, 0);

  // collect unique offsets of non-0 val in input
  THTensor* offsets = THTensor_(newWithSize1d)(nnz);
  int64_t cnt = 0;
  for (i = 0; i < nnz; i++) {
    real val = THNN_(get2d)(lastInput, i, 2);
    if (val == 0) {
      continue;
    }
    int64_t offset = (int64_t)(THNN_(get2d)(lastInput, i, 1)) - 1;
    if (offset >= 0 && offset < inDim) {
      THNN_(set1d)(offsets, cnt++, offset);
    } else {
      THError(
          "index out of bound. updateParameters: %d not between 1 and %d",
          offset + 1, inDim);
    }
  }
  if (cnt == 0) {
    // FIX: the early return previously leaked `offsets`.
    THTensor_(free)(offsets);
    return;
  }
  THTensor_(resize1d)(offsets, cnt);

  // sort + dedup the touched columns
  THTensor* uniqueOffsets = THTensor_(new)();
  THLongTensor* ri = THLongTensor_new();
  THTensor_(sort)(uniqueOffsets, ri, offsets, 0, 0);
  THLongTensor_free(ri);
  THTensor_(free)(offsets);

  cnt = 1;
  real* uniqueOffsets_p = THTensor_(data)(uniqueOffsets);
  for (i = 1; i < THTensor_(size)(uniqueOffsets, 0); i++) {
    if (uniqueOffsets_p[i] != uniqueOffsets_p[i - 1]) {
      uniqueOffsets_p[cnt++] = uniqueOffsets_p[i];
    }
  }
  THTensor_(resize1d)(uniqueOffsets, cnt);

  // weight += -learningRate * gradWeight
  THTensor_(cadd)(bias, bias, -learningRate, gradBias);
#pragma omp parallel for private(i) schedule(static) if (cnt * outDim > 10000)
  for (i = 0; i < cnt; i++) {
    int64_t offset = (int64_t)uniqueOffsets_p[i];
    THBlas_(axpy)(outDim,
        -learningRate,
        COL_PTR2(gradWeight, offset), gradWeight->stride[0],
        COL_PTR2(weight, offset), weight->stride[0]);
  }
  THTensor_(free)(uniqueOffsets);
}

// Legacy-layout variant of updateParameters.
void THNN_(SparseLinear_legacyUpdateParameters)(
          THNNState *state,
          THTensor *weight,
          THTensor *bias,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *lastInput,
          accreal learningRate_)
{
  real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_);
  int64_t h, i;
  int64_t outDim = weight->size[0];
  int64_t inDim = weight->size[1];

  THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4, "gradWeight size wrong");
  THArgCheck(THNN_(checkSize1D)(bias, outDim), 3, "bias size wrong");
  THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5, "gradBias size wrong");
  THArgCheck(THNN_(checkLegacyInput)(lastInput), 6, "input size must be batchsize x nnz x 2");

  int64_t batchSize = THTensor_(size)(lastInput, 0);
  int64_t nnz = THTensor_(size)(lastInput, 1);

  // collect unique offsets of non-0 val in input
  THTensor* offsets = THTensor_(newWithSize1d)(batchSize * nnz);
  int64_t cnt = 0;
  for (h = 0; h < batchSize; h++) {
    for (i = 0; i < nnz; i++) {
      real val = THNN_(get3d)(lastInput, h, i, 1);
      if (val == 0 ) {
        continue;
      }
      int64_t offset = (int64_t)(THNN_(get3d)(lastInput, h, i, 0)) - 1;
      if (offset >= 0 && offset < inDim) {
        THNN_(set1d)(offsets, cnt++, offset);
      } else {
        THError(
            "index out of bound. updateParameters: %d not between 1 and %d",
            offset + 1, inDim);
      }
    }
  }
  if (cnt == 0) {
    // FIX: mirror the guard in the non-legacy path; without it the dedup
    // below forced cnt to 1 and read an element of an empty tensor.
    THTensor_(free)(offsets);
    return;
  }
  THTensor_(resize1d)(offsets, cnt);

  THTensor* uniqueOffsets = THTensor_(new)();
  THLongTensor* ri = THLongTensor_new();
  THTensor_(sort)(uniqueOffsets, ri, offsets, 0, 0);
  THLongTensor_free(ri);
  THTensor_(free)(offsets);

  cnt = 1;
  real* uniqueOffsets_p = THTensor_(data)(uniqueOffsets);
  for (i = 1; i < THTensor_(size)(uniqueOffsets, 0); i++) {
    if (uniqueOffsets_p[i] != uniqueOffsets_p[i - 1]) {
      uniqueOffsets_p[cnt++] = uniqueOffsets_p[i];
    }
  }
  THTensor_(resize1d)(uniqueOffsets, cnt);

  // weight += -learningRate * gradWeight
  THTensor_(cadd)(bias, bias, -learningRate, gradBias);
#pragma omp parallel for private(i) schedule(static) if (cnt * outDim > 10000)
  for (i = 0; i < cnt; i++) {
    int64_t offset = (int64_t)uniqueOffsets_p[i];
    THBlas_(axpy)(outDim,
        -learningRate,
        COL_PTR2(gradWeight, offset), gradWeight->stride[0],
        COL_PTR2(weight, offset), weight->stride[0]);
  }
  THTensor_(free)(uniqueOffsets);
}

// Zero gradBias and only the gradWeight columns touched by lastInput.
void THNN_(SparseLinear_zeroGradParameters)(
          THNNState *state,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *lastInput)
{
  int64_t h, i, j;
  int64_t outDim = gradWeight->size[0];
  int64_t inDim = gradWeight->size[1];

  THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 3, "gradBias size wrong");
  THArgCheck(THNN_(checkInput)(lastInput), 4, "input must be in coo format, nnz x 3");

  THTensor_(zero)(gradBias);

  int64_t nnz = THTensor_(size)(lastInput, 0);

#pragma omp parallel for private(i, j) schedule(static) if ( \
  nnz * outDim > 10000)
  for (i = 0; i < nnz; i++) {
    if (THNN_(get2d)(lastInput, i, 2) == 0 ) {
      continue;
    }

    int64_t offset = (int64_t)(THNN_(get2d)(lastInput, i, 1)) - 1;
    if (offset >= 0 && offset < inDim) {
      real* pGradWeight = COL_PTR2(gradWeight, offset);
      if (gradWeight->stride[0] == 1) {
        // contiguous column: vectorized fill
        THVector_(fill)(pGradWeight, 0, outDim);
      } else {
        int64_t stride = gradWeight->stride[0];
        for (j = 0; j < outDim; ++j) {
          pGradWeight[j * stride] = 0;
        }
      }
    } else {
      THError(
          "index out of bound. zeroGradParameters: %d not between 1 and %d",
          offset + 1, inDim);
    }
  }
}

// Legacy-layout variant of zeroGradParameters.
void THNN_(SparseLinear_legacyZeroGradParameters)(
          THNNState *state,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *lastInput)
{
  int64_t h, i, j;
  int64_t outDim = gradWeight->size[0];
  int64_t inDim = gradWeight->size[1];

  THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 3, "gradBias size wrong");
  THArgCheck(THNN_(checkLegacyInput)(lastInput), 4, "input size must be batchsize x nnz x 2");

  THTensor_(zero)(gradBias);

  int64_t batchSize = THTensor_(size)(lastInput, 0);
  int64_t nnz = THTensor_(size)(lastInput, 1);

#pragma omp parallel for private(h, i, j) schedule(static) if ( \
  batchSize > 1 && batchSize * nnz * outDim > 10000)
  for (h = 0; h < batchSize; h++) {
    for (i = 0; i < nnz; i++) {
      if (THNN_(get3d)(lastInput, h, i, 1) == 0 ) {
        continue;
      }

      int64_t offset = (int64_t)(THNN_(get3d)(lastInput, h, i, 0)) - 1;
      if (offset >= 0 && offset < inDim) {
        real* pGradWeight = COL_PTR2(gradWeight, offset);
        if (gradWeight->stride[0] == 1) {
          THVector_(fill)(pGradWeight, 0, outDim);
        } else {
          int64_t stride = gradWeight->stride[0];
          for (j = 0; j < outDim; ++j) {
            pGradWeight[j * stride] = 0;
          }
        }
      } else {
        THError(
            "index out of bound. zeroGradParameters: %d not between 1 and %d",
            offset + 1, inDim);
      }
    }
  }
}

#undef ROW_PTR2
#undef COL_PTR2

#endif
maxwell_zeroBC.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_sstruct_ls.h"

/*--------------------------------------------------------------------------
 * Zero out boundary-condition entries of a ParVector: delegates to the
 * sequential kernel on the vector's local part.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParVectorZeroBCValues(hypre_ParVector *v,
                            HYPRE_Int       *rows,
                            HYPRE_Int        nrows)
{
   hypre_SeqVectorZeroBCValues(hypre_ParVectorLocalVector(v), rows, nrows);

   return 0;
}

/*--------------------------------------------------------------------------
 * Zero out the entries of a sequential vector listed in rows[0..nrows-1].
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorZeroBCValues(hypre_Vector *v,
                            HYPRE_Int    *rows,
                            HYPRE_Int     nrows)
{
   HYPRE_Real *data = hypre_VectorData(v);
   HYPRE_Int   i;

#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < nrows; i++)
   {
      data[rows[i]] = 0.0;
   }

   return 0;
}
NeighborhoodGraph.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #ifndef _SPTAG_COMMON_NG_H_ #define _SPTAG_COMMON_NG_H_ #include "../VectorIndex.h" #include "CommonUtils.h" #include "Dataset.h" #include "FineGrainedLock.h" #include "QueryResultSet.h" #include <chrono> #if defined(GPU) #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <typeinfo> #include <cuda_fp16.h> #include "inc/Core/Common/cuda/KNN.hxx" #include "inc/Core/Common/cuda/params.h" #endif namespace SPTAG { namespace COMMON { class NeighborhoodGraph { public: NeighborhoodGraph(): m_iTPTNumber(32), m_iTPTLeafSize(2000), m_iSamples(1000), m_numTopDimensionTPTSplit(5), m_iNeighborhoodSize(32), m_iNeighborhoodScale(2), m_iCEFScale(2), m_iRefineIter(2), m_iCEF(1000), m_iAddCEF(500), m_iMaxCheckForRefineGraph(10000), m_iGPUGraphType(2), m_iGPURefineSteps(0), m_iGPURefineDepth(2), m_iGPULeafSize(500), m_iGPUBatches(1) {} ~NeighborhoodGraph() {} virtual void InsertNeighbors(VectorIndex* index, const SizeType node, SizeType insertNode, float insertDist) = 0; virtual void RebuildNeighbors(VectorIndex* index, const SizeType node, SizeType* nodes, const BasicResult* queryResults, const int numResults) = 0; virtual float GraphAccuracyEstimation(VectorIndex* index, const SizeType samples, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) { DimensionType* correct = new DimensionType[samples]; #pragma omp parallel for schedule(dynamic) for (SizeType i = 0; i < samples; ++i) { SizeType x = COMMON::Utils::rand(m_iGraphSize); //int x = i; COMMON::QueryResultSet<void> query(nullptr, m_iCEF); for (SizeType y = 0; y < m_iGraphSize; y++) { if ((idmap != nullptr && idmap->find(y) != idmap->end())) continue; float dist = index->ComputeDistance(index->GetSample(x), index->GetSample(y)); query.AddPoint(y, dist); } query.SortResult(); SizeType * exact_rng = new SizeType[m_iNeighborhoodSize]; RebuildNeighbors(index, x, exact_rng, 
query.GetResults(), m_iCEF); correct[i] = 0; for (DimensionType j = 0; j < m_iNeighborhoodSize; ++j) { if (exact_rng[j] == -1) { correct[i] += m_iNeighborhoodSize - j; break; } for (DimensionType k = 0; k < m_iNeighborhoodSize; k++) if ((m_pNeighborhoodGraph)[x][k] == exact_rng[j]) { correct[i]++; break; } } delete[] exact_rng; } float acc = 0; for (SizeType i = 0; i < samples; ++i) acc += float(correct[i]); acc = acc / samples / m_iNeighborhoodSize; delete[] correct; return acc; } #if defined(GPU) template <typename T> void BuildInitKNNGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap) { SizeType initSize; SPTAG::Helper::Convert::ConvertStringTo(index->GetParameter("NumberOfInitialDynamicPivots").c_str(), initSize); // Build the entire RNG graph, both builds the KNN and refines it to RNG buildGraph<T>(index, m_iGraphSize, m_iNeighborhoodSize, m_iTPTNumber, (int*)m_pNeighborhoodGraph[0], m_iGPURefineSteps, m_iGPURefineDepth, m_iGPUGraphType, m_iGPULeafSize, initSize, m_iGPUBatches); if (idmap != nullptr) { std::unordered_map<SizeType, SizeType>::const_iterator iter; for (SizeType i = 0; i < m_iGraphSize; ++i) { for (DimensionType j = 0; j < m_iNeighborhoodSize; ++j) { if ((iter = idmap->find(m_pNeighborhoodGraph[i][j])) != idmap->end()) m_pNeighborhoodGraph[i][j] = iter->second; } } } } #else template <typename T> void PartitionByTptree(VectorIndex* index, std::vector<SizeType>& indices, const SizeType first, const SizeType last, std::vector<std::pair<SizeType, SizeType>> & leaves) { if (last - first <= m_iTPTLeafSize) { leaves.emplace_back(first, last); } else { std::vector<float> Mean(index->GetFeatureDim(), 0); int iIteration = 100; SizeType end = std::min(first + m_iSamples, last); SizeType count = end - first + 1; // calculate the mean of each dimension for (SizeType j = first; j <= end; ++j) { const T* v = (const T*)index->GetSample(indices[j]); for (DimensionType k = 0; k < index->GetFeatureDim(); k++) { Mean[k] += v[k]; } } for 
(DimensionType k = 0; k < index->GetFeatureDim(); k++) { Mean[k] /= count; } std::vector<BasicResult> Variance; Variance.reserve(index->GetFeatureDim()); for (DimensionType j = 0; j < index->GetFeatureDim(); ++j) { Variance.emplace_back(j, 0.0f); } // calculate the variance of each dimension for (SizeType j = first; j <= end; ++j) { const T* v = (const T*)index->GetSample(indices[j]); for (DimensionType k = 0; k < index->GetFeatureDim(); k++) { float dist = v[k] - Mean[k]; Variance[k].Dist += dist*dist; } } std::sort(Variance.begin(), Variance.end(), COMMON::Compare); std::vector<SizeType> indexs(m_numTopDimensionTPTSplit); std::vector<float> weight(m_numTopDimensionTPTSplit), bestweight(m_numTopDimensionTPTSplit); float bestvariance = Variance[index->GetFeatureDim() - 1].Dist; for (int i = 0; i < m_numTopDimensionTPTSplit; ++i) { indexs[i] = Variance[index->GetFeatureDim() - 1 - i].VID; bestweight[i] = 0; } bestweight[0] = 1; float bestmean = Mean[indexs[0]]; std::vector<float> Val(count); for (int i = 0; i < iIteration; ++i) { float sumweight = 0; for (int j = 0; j < m_numTopDimensionTPTSplit; ++j) { weight[j] = float(rand() % 10000) / 5000.0f - 1.0f; sumweight += weight[j] * weight[j]; } sumweight = sqrt(sumweight); for (int j = 0; j < m_numTopDimensionTPTSplit; ++j) { weight[j] /= sumweight; } float mean = 0; for (SizeType j = 0; j < count; ++j) { Val[j] = 0; const T* v = (const T*)index->GetSample(indices[first + j]); for (int k = 0; k < m_numTopDimensionTPTSplit; k++) { Val[j] += weight[k] * v[indexs[k]]; } mean += Val[j]; } mean /= count; float var = 0; for (SizeType j = 0; j < count; ++j) { float dist = Val[j] - mean; var += dist * dist; } if (var > bestvariance) { bestvariance = var; bestmean = mean; for (int j = 0; j < m_numTopDimensionTPTSplit; ++j) { bestweight[j] = weight[j]; } } } SizeType i = first; SizeType j = last; // decide which child one point belongs while (i <= j) { float val = 0; const T* v = (const T*)index->GetSample(indices[i]); for (int 
k = 0; k < m_numTopDimensionTPTSplit; k++) { val += bestweight[k] * v[indexs[k]]; } if (val < bestmean) { i++; } else { Kokkos::swap(indices[i], indices[j]); j--; } } // if all the points in the node are equal,equally split the node into 2 if ((i == first) || (i == last + 1)) { i = (first + last + 1) / 2; } Mean.clear(); Variance.clear(); Val.clear(); indexs.clear(); weight.clear(); bestweight.clear(); PartitionByTptree<T>(index, indices, first, i - 1, leaves); PartitionByTptree<T>(index, indices, i, last, leaves); } } template <typename T> void BuildInitKNNGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap) { COMMON::Dataset<float> NeighborhoodDists(m_iGraphSize, m_iNeighborhoodSize); std::vector<std::vector<SizeType>> TptreeDataIndices(m_iTPTNumber, std::vector<SizeType>(m_iGraphSize)); std::vector<std::vector<std::pair<SizeType, SizeType>>> TptreeLeafNodes(m_iTPTNumber, std::vector<std::pair<SizeType, SizeType>>()); for (SizeType i = 0; i < m_iGraphSize; ++i) for (DimensionType j = 0; j < m_iNeighborhoodSize; ++j) (NeighborhoodDists)[i][j] = MaxDist; auto t1 = std::chrono::high_resolution_clock::now(); LOG(Helper::LogLevel::LL_Info, "Parallel TpTree Partition begin\n"); #pragma omp parallel for schedule(dynamic) for (int i = 0; i < m_iTPTNumber; ++i) { Sleep(i * 100); std::srand(clock()); for (SizeType j = 0; j < m_iGraphSize; ++j) TptreeDataIndices[i][j] = j; std::random_shuffle(TptreeDataIndices[i].begin(), TptreeDataIndices[i].end()); PartitionByTptree<T>(index, TptreeDataIndices[i], 0, m_iGraphSize - 1, TptreeLeafNodes[i]); LOG(Helper::LogLevel::LL_Info, "Finish Getting Leaves for Tree %d\n", i); } LOG(Helper::LogLevel::LL_Info, "Parallel TpTree Partition done\n"); auto t2 = std::chrono::high_resolution_clock::now(); LOG(Helper::LogLevel::LL_Info, "Build TPTree time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count()); for (int i = 0; i < m_iTPTNumber; ++i) { #pragma omp parallel for schedule(dynamic) 
for (SizeType j = 0; j < (SizeType)TptreeLeafNodes[i].size(); ++j) { SizeType start_index = TptreeLeafNodes[i][j].first; SizeType end_index = TptreeLeafNodes[i][j].second; if ((j * 5) % TptreeLeafNodes[i].size() == 0) LOG(Helper::LogLevel::LL_Info, "Processing Tree %d %d%%\n", i, static_cast<int>(j * 1.0 / TptreeLeafNodes[i].size() * 100)); for (SizeType x = start_index; x < end_index; x++) { for (SizeType y = x + 1; y <= end_index; y++) { SizeType p1 = TptreeDataIndices[i][x]; SizeType p2 = TptreeDataIndices[i][y]; float dist = index->ComputeDistance(index->GetSample(p1), index->GetSample(p2)); if (idmap != nullptr) { p1 = (idmap->find(p1) == idmap->end()) ? p1 : idmap->at(p1); p2 = (idmap->find(p2) == idmap->end()) ? p2 : idmap->at(p2); } COMMON::Utils::AddNeighbor(p2, dist, (m_pNeighborhoodGraph)[p1], (NeighborhoodDists)[p1], m_iNeighborhoodSize); COMMON::Utils::AddNeighbor(p1, dist, (m_pNeighborhoodGraph)[p2], (NeighborhoodDists)[p2], m_iNeighborhoodSize); } } } TptreeDataIndices[i].clear(); TptreeLeafNodes[i].clear(); } TptreeDataIndices.clear(); TptreeLeafNodes.clear(); auto t3 = std::chrono::high_resolution_clock::now(); LOG(Helper::LogLevel::LL_Info, "Process TPTree time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t3 - t2).count()); } #endif template <typename T> void BuildGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) { LOG(Helper::LogLevel::LL_Info, "build RNG graph!\n"); m_iGraphSize = index->GetNumSamples(); m_iNeighborhoodSize = m_iNeighborhoodSize * m_iNeighborhoodScale; m_pNeighborhoodGraph.Initialize(m_iGraphSize, m_iNeighborhoodSize); if (m_iGraphSize < 1000) { RefineGraph<T>(index, idmap); LOG(Helper::LogLevel::LL_Info, "Build RNG Graph end!\n"); return; } auto t1 = std::chrono::high_resolution_clock::now(); BuildInitKNNGraph<T>(index, idmap); auto t2 = std::chrono::high_resolution_clock::now(); LOG(Helper::LogLevel::LL_Info, "BuildInitKNNGraph time (s): %lld\n", 
std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count()); RefineGraph<T>(index, idmap); if (idmap != nullptr) { for (auto iter = idmap->begin(); iter != idmap->end(); iter++) if (iter->first < 0) { m_pNeighborhoodGraph[-1 - iter->first][m_iNeighborhoodSize - 1] = -2 - iter->second; } } auto t3 = std::chrono::high_resolution_clock::now(); LOG(Helper::LogLevel::LL_Info, "BuildGraph time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t3 - t1).count()); } template <typename T> void RefineGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) { for (int iter = 0; iter < m_iRefineIter - 1; iter++) { auto t1 = std::chrono::high_resolution_clock::now(); #pragma omp parallel for schedule(dynamic) for (SizeType i = 0; i < m_iGraphSize; ++i) { RefineNode<T>(index, i, false, false, m_iCEF * m_iCEFScale); if ((i * 5) % m_iGraphSize == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d %d%%\n", iter, static_cast<int>(i * 1.0 / m_iGraphSize * 100)); } auto t2 = std::chrono::high_resolution_clock::now(); LOG(Helper::LogLevel::LL_Info, "Refine RNG time (s): %lld Graph Acc: %f\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count(), GraphAccuracyEstimation(index, 100, idmap)); } m_iNeighborhoodSize /= m_iNeighborhoodScale; if (m_iRefineIter > 0) { auto t1 = std::chrono::high_resolution_clock::now(); #pragma omp parallel for schedule(dynamic) for (SizeType i = 0; i < m_iGraphSize; ++i) { RefineNode<T>(index, i, false, false, m_iCEF); if ((i * 5) % m_iGraphSize == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d %d%%\n", m_iRefineIter - 1, static_cast<int>(i * 1.0 / m_iGraphSize * 100)); } auto t2 = std::chrono::high_resolution_clock::now(); LOG(Helper::LogLevel::LL_Info, "Refine RNG time (s): %lld Graph Acc: %f\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count(), GraphAccuracyEstimation(index, 100, idmap)); } } template <typename T> ErrorCode RefineGraph(VectorIndex* index, std::vector<SizeType>& 
indices, std::vector<SizeType>& reverseIndices, std::shared_ptr<Helper::DiskPriorityIO> output, NeighborhoodGraph* newGraph, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) { std::shared_ptr<NeighborhoodGraph> tmp; if (newGraph == nullptr) { tmp = NeighborhoodGraph::CreateInstance(Type()); newGraph = tmp.get(); } SizeType R = (SizeType)indices.size(); newGraph->m_pNeighborhoodGraph.Initialize(R, m_iNeighborhoodSize); newGraph->m_iGraphSize = R; newGraph->m_iNeighborhoodSize = m_iNeighborhoodSize; #pragma omp parallel for schedule(dynamic) for (SizeType i = 0; i < R; ++i) { if ((i * 5) % R == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d%%\n", static_cast<int>(i * 1.0 / R * 100)); SizeType* outnodes = newGraph->m_pNeighborhoodGraph[i]; COMMON::QueryResultSet<T> query((const T*)index->GetSample(indices[i]), m_iCEF + 1); index->RefineSearchIndex(query, false); RebuildNeighbors(index, indices[i], outnodes, query.GetResults(), m_iCEF + 1); std::unordered_map<SizeType, SizeType>::const_iterator iter; for (DimensionType j = 0; j < m_iNeighborhoodSize; ++j) { if (outnodes[j] >= 0 && outnodes[j] < reverseIndices.size()) outnodes[j] = reverseIndices[outnodes[j]]; if (idmap != nullptr && (iter = idmap->find(outnodes[j])) != idmap->end()) outnodes[j] = iter->second; } if (idmap != nullptr && (iter = idmap->find(-1 - i)) != idmap->end()) outnodes[m_iNeighborhoodSize - 1] = -2 - iter->second; } if (output != nullptr) newGraph->SaveGraph(output); return ErrorCode::Success; } template <typename T> void RefineNode(VectorIndex* index, const SizeType node, bool updateNeighbors, bool searchDeleted, int CEF) { COMMON::QueryResultSet<T> query((const T*)index->GetSample(node), CEF + 1); index->RefineSearchIndex(query, searchDeleted); RebuildNeighbors(index, node, m_pNeighborhoodGraph[node], query.GetResults(), CEF + 1); if (updateNeighbors) { // update neighbors for (int j = 0; j <= CEF; ++j) { BasicResult* item = query.GetResult(j); if (item->VID < 0) break; if (item->VID 
== node) continue; InsertNeighbors(index, item->VID, node, item->Dist); } } } inline std::uint64_t BufferSize() const { return m_pNeighborhoodGraph.BufferSize(); } ErrorCode LoadGraph(std::shared_ptr<Helper::DiskPriorityIO> input) { ErrorCode ret = ErrorCode::Success; if ((ret = m_pNeighborhoodGraph.Load(input)) != ErrorCode::Success) return ret; m_iGraphSize = m_pNeighborhoodGraph.R(); m_iNeighborhoodSize = m_pNeighborhoodGraph.C(); return ret; } ErrorCode LoadGraph(std::string sGraphFilename) { ErrorCode ret = ErrorCode::Success; if ((ret = m_pNeighborhoodGraph.Load(sGraphFilename)) != ErrorCode::Success) return ret; m_iGraphSize = m_pNeighborhoodGraph.R(); m_iNeighborhoodSize = m_pNeighborhoodGraph.C(); return ret; } ErrorCode LoadGraph(char* pGraphMemFile) { ErrorCode ret = ErrorCode::Success; if ((ret = m_pNeighborhoodGraph.Load(pGraphMemFile)) != ErrorCode::Success) return ret; m_iGraphSize = m_pNeighborhoodGraph.R(); m_iNeighborhoodSize = m_pNeighborhoodGraph.C(); return ErrorCode::Success; } ErrorCode SaveGraph(std::string sGraphFilename) const { LOG(Helper::LogLevel::LL_Info, "Save %s To %s\n", m_pNeighborhoodGraph.Name().c_str(), sGraphFilename.c_str()); auto ptr = f_createIO(); if (ptr == nullptr || !ptr->Initialize(sGraphFilename.c_str(), std::ios::binary | std::ios::out)) return ErrorCode::FailedCreateFile; return SaveGraph(ptr); } ErrorCode SaveGraph(std::shared_ptr<Helper::DiskPriorityIO> output) const { IOBINARY(output, WriteBinary, sizeof(SizeType), (char*)&m_iGraphSize); IOBINARY(output, WriteBinary, sizeof(DimensionType), (char*)&m_iNeighborhoodSize); for (int i = 0; i < m_iGraphSize; ++i) IOBINARY(output, WriteBinary, sizeof(SizeType) * m_iNeighborhoodSize, (char*)m_pNeighborhoodGraph[i]); LOG(Helper::LogLevel::LL_Info, "Save %s (%d,%d) Finish!\n", m_pNeighborhoodGraph.Name().c_str(), m_iGraphSize, m_iNeighborhoodSize); return ErrorCode::Success; } inline ErrorCode AddBatch(SizeType num) { ErrorCode ret = m_pNeighborhoodGraph.AddBatch(num); if 
(ret != ErrorCode::Success) return ret; m_iGraphSize += num; return ErrorCode::Success; } inline SizeType* operator[](SizeType index) { return m_pNeighborhoodGraph[index]; } inline const SizeType* operator[](SizeType index) const { return m_pNeighborhoodGraph[index]; } void Update(SizeType row, DimensionType col, SizeType val) { std::lock_guard<std::mutex> lock(m_dataUpdateLock[row]); m_pNeighborhoodGraph[row][col] = val; } inline void SetR(SizeType rows) { m_pNeighborhoodGraph.SetR(rows); m_iGraphSize = rows; } inline SizeType R() const { return m_iGraphSize; } inline std::string Type() const { return m_pNeighborhoodGraph.Name(); } static std::shared_ptr<NeighborhoodGraph> CreateInstance(std::string type); protected: // Graph structure SizeType m_iGraphSize; COMMON::Dataset<SizeType> m_pNeighborhoodGraph; FineGrainedLock m_dataUpdateLock; public: int m_iTPTNumber, m_iTPTLeafSize, m_iSamples, m_numTopDimensionTPTSplit; DimensionType m_iNeighborhoodSize; int m_iNeighborhoodScale, m_iCEFScale, m_iRefineIter, m_iCEF, m_iAddCEF, m_iMaxCheckForRefineGraph, m_iGPUGraphType, m_iGPURefineSteps, m_iGPURefineDepth, m_iGPULeafSize, m_iGPUBatches; }; } } #endif
rt_dplgsy.c
#include "runtime.h"

/// Runtime-dispatch wrapper for the dplgsy tile kernel (random symmetric
/// matrix generation).  Depending on the active PLASMA runtime it either
/// enqueues a QUARK task or spawns an OmpSs task that fills the tile A.
///
/// @param quark      QUARK scheduler handle (used only for PLASMA_QUARK)
/// @param task_flags QUARK task flags (used only for PLASMA_QUARK)
/// @param bump       value added to the diagonal to make the matrix diagonally
///                   dominant / definite
/// @param m, n       tile dimensions
/// @param A          output tile, lda-by-n (column major); OmpSs dependency is
///                   declared as out([lda*n]A)
/// @param lda        leading dimension of A
/// @param bigM       number of rows of the full matrix
/// @param m0, n0     global row/column offsets of this tile
/// @param seed       RNG seed shared by all tiles of the matrix
void RT_CORE_dplgsy( Quark *quark, Quark_Task_Flags *task_flags,
                     double bump, int m, int n, double *A, int lda,
                     int bigM, int m0, int n0, unsigned long long int seed )
{
    plasma_context_t *plasma;
    plasma = plasma_context_self();
    if (plasma->runtime == PLASMA_QUARK) {
        QUARK_CORE_dplgsy(quark, task_flags,
            bump, m, n, A, lda, bigM, m0, n0, seed );
    }
    else if (plasma->runtime == PLASMA_OMPSS) {
#pragma omp target device (smp) copy_deps
#pragma omp task out([lda*n]A) label(dplgsy)
        CORE_dplgsy_rt(bump, m, n, A, lda, bigM, m0, n0, seed);
    }
}

/// OmpSs task body: forwards to the sequential core kernel.
///
/// BUG FIX: `bump` was declared `int`, silently truncating the double passed
/// by RT_CORE_dplgsy (and mismatching CORE_dplgsy, which takes a double).
/// Any fractional or large diagonal shift would have been corrupted.
void CORE_dplgsy_rt(double bump, int m, int n, double *A, int lda,
                    int bigM, int m0, int n0, unsigned long long int seed)
{
    CORE_dplgsy(bump, m, n, A, lda, bigM, m0, n0, seed);
}
uts.ref.c
#include <sys/time.h> #include <time.h> #include <stdio.h> static unsigned long long current_time_ns() { #ifdef __MACH__ clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); unsigned long long s = 1000000000ULL * (unsigned long long)mts.tv_sec; return (unsigned long long)mts.tv_nsec + s; #else struct timespec t ={0,0}; clock_gettime(CLOCK_MONOTONIC, &t); unsigned long long s = 1000000000ULL * (unsigned long long)t.tv_sec; return (((unsigned long long)t.tv_nsec)) + s; #endif } /**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /**********************************************************************************************/ /* * Copyright (c) 2007 The Unbalanced Tree Search (UTS) Project Team: * ----------------------------------------------------------------- * * This file is part of the unbalanced tree search benchmark. This * project is licensed under the MIT Open Source license. See the LICENSE * file for copyright and licensing information. * * UTS is a collaborative project between researchers at the University of * Maryland, the University of North Carolina at Chapel Hill, and the Ohio * State University. * * University of Maryland: * Chau-Wen Tseng(1) <tseng at cs.umd.edu> * * University of North Carolina, Chapel Hill: * Jun Huan <huan, * Jinze Liu liu, * Stephen Olivier olivier, * Jan Prins* prins at cs.umd.edu> * * The Ohio State University: * James Dinan <dinan, * Gerald Sabin sabin, * P. Sadayappan* saday at cse.ohio-state.edu> * * Supercomputing Research Center * D. 
Pryor * * (1) - indicates project PI * * UTS Recursive Depth-First Search (DFS) version developed by James Dinan * * Adapted for OpenMP 3.0 Task-based version by Stephen Olivier * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <omp.h> #include <sys/time.h> #include "app-desc.h" #include "bots.h" #include "uts.h" /*********************************************************** * Global state * ***********************************************************/ unsigned long long nLeaves = 0; int maxTreeDepth = 0; /*********************************************************** * Tree generation strategy is controlled via various * * parameters set from the command line. The parameters * * and their default values are given below. * * Trees are generated using a Galton-Watson process, in * * which the branching factor of each node is a random * * variable. 
* * * * The random variable follow a binomial distribution. * ***********************************************************/ double b_0 = 4.0; // default branching factor at the root int rootId = 0; // default seed for RNG state at root /*********************************************************** * The branching factor at the root is specified by b_0. * The branching factor below the root follows an * identical binomial distribution at all nodes. * A node has m children with prob q, or no children with * prob (1-q). The expected branching factor is q * m. * * Default parameter values ***********************************************************/ int nonLeafBF = 4; // m double nonLeafProb = 15.0 / 64.0; // q /*********************************************************** * compute granularity - number of rng evaluations per * tree node ***********************************************************/ int computeGranularity = 1; /*********************************************************** * expected results for execution ***********************************************************/ unsigned long long exp_tree_size = 0; int exp_tree_depth = 0; unsigned long long exp_num_leaves = 0; /*********************************************************** * FUNCTIONS * ***********************************************************/ // Interpret 32 bit positive integer as value on [0,1) double rng_toProb(int n) { if (n < 0) { printf("*** toProb: rand n = %d out of range\n",n); } return ((n<0)? 0.0 : ((double) n)/2147483648.0); } void uts_initRoot(Node * root) { root->height = 0; root->numChildren = -1; // means not yet determined rng_init(root->state.state, rootId); bots_message("Root node at %p\n", root); } int uts_numChildren_bin(Node * parent) { // distribution is identical everywhere below root int v = rng_rand(parent->state.state); double d = rng_toProb(v); return (d < nonLeafProb) ? 
nonLeafBF : 0; } int uts_numChildren(Node *parent) { int numChildren = 0; /* Determine the number of children */ if (parent->height == 0) numChildren = (int) floor(b_0); else numChildren = uts_numChildren_bin(parent); // limit number of children // only a BIN root can have more than MAXNUMCHILDREN if (parent->height == 0) { int rootBF = (int) ceil(b_0); if (numChildren > rootBF) { bots_debug("*** Number of children of root truncated from %d to %d\n", numChildren, rootBF); numChildren = rootBF; } } else { if (numChildren > MAXNUMCHILDREN) { bots_debug("*** Number of children truncated from %d to %d\n", numChildren, MAXNUMCHILDREN); numChildren = MAXNUMCHILDREN; } } return numChildren; } /*********************************************************** * Recursive depth-first implementation * ***********************************************************/ unsigned long long parallel_uts ( Node *root ) { unsigned long long num_nodes = 0 ; root->numChildren = uts_numChildren(root); bots_message("Computing Unbalance Tree Search algorithm "); const unsigned long long full_program_start = current_time_ns(); { #pragma omp parallel { #pragma omp single nowait { #pragma omp task untied num_nodes = parTreeSearch( 0, root, root->numChildren ); } } } ; const unsigned long long full_program_end = current_time_ns(); printf("full_program %llu ns\n", full_program_end - full_program_start); bots_message(" completed!"); return num_nodes; } unsigned long long parTreeSearch(int depth, Node *parent, int numChildren) { Node *n = (Node *)malloc(numChildren * sizeof(Node)); Node *nodePtr; int i, j; unsigned long long subtreesize = 1; unsigned long long *partialCount = (unsigned long long *)malloc(numChildren * sizeof(unsigned long long)); // Recurse on the children for (i = 0; i < numChildren; i++) { nodePtr = &n[i]; nodePtr->height = parent->height + 1; // The following line is the work (one or more SHA-1 ops) for (j = 0; j < computeGranularity; j++) { rng_spawn(parent->state.state, 
nodePtr->state.state, i); } nodePtr->numChildren = uts_numChildren(nodePtr); #pragma omp task untied firstprivate(i, nodePtr) shared(partialCount) partialCount[i] = parTreeSearch(depth+1, nodePtr, nodePtr->numChildren); } #pragma omp taskwait ; for (i = 0; i < numChildren; i++) { subtreesize += partialCount[i]; } free(n); free(partialCount); return subtreesize; } void uts_read_file ( char *filename ) { FILE *fin; if ((fin = fopen(filename, "r")) == NULL) { bots_message("Could not open input file (%s)\n", filename); exit (-1); } fscanf(fin,"%lf %lf %d %d %d %llu %d %llu", &b_0, &nonLeafProb, &nonLeafBF, &rootId, &computeGranularity, &exp_tree_size, &exp_tree_depth, &exp_num_leaves ); fclose(fin); computeGranularity = max(1,computeGranularity); // Printing input data bots_message("\n"); bots_message("Root branching factor = %f\n", b_0); bots_message("Root seed (0 <= 2^31) = %d\n", rootId); bots_message("Probability of non-leaf node = %f\n", nonLeafProb); bots_message("Number of children for non-leaf node = %d\n", nonLeafBF); bots_message("E(n) = %f\n", (double) ( nonLeafProb * nonLeafBF ) ); bots_message("E(s) = %f\n", (double) ( 1.0 / (1.0 - nonLeafProb * nonLeafBF) ) ); bots_message("Compute granularity = %d\n", computeGranularity); bots_message("Random number generator = "); rng_showtype(); } void uts_show_stats( void ) { int nPes = atoi(bots_resources); int chunkSize = 0; bots_message("\n"); bots_message("Tree size = %llu\n", (unsigned long long) bots_number_of_tasks ); bots_message("Maximum tree depth = %d\n", maxTreeDepth ); bots_message("Chunk size = %d\n", chunkSize ); bots_message("Number of leaves = %llu (%.2f%%)\n", nLeaves, nLeaves/(float)bots_number_of_tasks*100.0 ); bots_message("Number of PE's = %.4d threads\n", nPes ); bots_message("Wallclock time = %.3f sec\n", bots_time_program ); bots_message("Overall performance = %.0f nodes/sec\n", (bots_number_of_tasks / bots_time_program) ); bots_message("Performance per PE = %.0f nodes/sec\n", 
(bots_number_of_tasks / bots_time_program / nPes) ); } int uts_check_result ( void ) { int answer = BOTS_RESULT_SUCCESSFUL; if ( bots_number_of_tasks != exp_tree_size ) { answer = BOTS_RESULT_UNSUCCESSFUL; bots_message("Incorrect tree size result (%llu instead of %llu).\n", bots_number_of_tasks, exp_tree_size); } return answer; }
sparseMatrix.c
/// \File /// Routines for creating and manipulating sparse matrices. #include "sparseMatrix.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <assert.h> #include <omp.h> #include "performance.h" #include "parallel.h" #include "constants.h" /// \details /// Adjust number of non-zeroes int nnzStart(int hsize, int msize) { int M = msize; if (M == 0) M = hsize; if ((M % 32) > 0) M += (32 - (M % 32)); if (M > hsize) M = hsize; if (printRank()) printf("Adjusted M = %d\n", M); return M; } /// \details /// Allocate space for sparse matrix SparseMatrix* initSparseMatrix(int hsize, int msize) { SparseMatrix* spmatrix = (SparseMatrix*)malloc(sizeof(SparseMatrix)); // hsize is number of rows and msize is the max number of non-zeroes per row spmatrix->hsize = hsize; spmatrix->msize = msize; // iia holds the number of non-zeroes in each row spmatrix->iia = (int*)malloc(hsize*sizeof(int)); #ifdef CONTIG_MATRIX spmatrix->jjcontig = (int*)malloc(hsize*msize*sizeof(int)); spmatrix->jja = (int**)malloc(hsize*sizeof(int*)); #pragma omp parallel for for (int i = 0; i < hsize; i++) { spmatrix->jja[i] = &(spmatrix->jjcontig[i*msize]); } // Zero counts of non-zeroes per row and indices memset(spmatrix->jjcontig, 0, hsize*msize*sizeof(int)); spmatrix->valcontig = (real_t*)malloc(hsize*msize*sizeof(real_t)); spmatrix->val = (real_t**)malloc(hsize*sizeof(real_t*)); #pragma omp parallel for for (int i = 0; i < hsize; i++) { spmatrix->val[i] = &(spmatrix->valcontig[i*msize]); } // Zero non-zero values memset(spmatrix->valcontig, ZERO, hsize*msize*sizeof(real_t)); #else // jja contains the column index for each non-zero value spmatrix->jja = (int**)malloc(hsize*sizeof(int*)); for (int i = 0; i < hsize; i++) { spmatrix->jja[i] = (int*)malloc(msize*sizeof(int)); } // val contains the non-zeroes spmatrix->val = (real_t**)malloc(hsize*sizeof(real_t*)); for (int i = 0; i < hsize; i++) { spmatrix->val[i] = (real_t*)malloc(msize*sizeof(real_t)); } #endif // Zero 
counts of non-zeroes per row memset(spmatrix->iia, 0, hsize*sizeof(int)); // Used for normalization spmatrix->maxEval = ZERO; spmatrix->minEval = ZERO; spmatrix->maxMinusMin = ZERO; // Matrix bandwidth spmatrix->bandwidth = 0; return spmatrix; } /// \details /// Deallocate space for sparse matrix void destroySparseMatrix(struct SparseMatrixSt* spmatrix) { int hsize = spmatrix->hsize; free(spmatrix->iia); #ifdef CONTIG_MATRIX free(spmatrix->jjcontig); free(spmatrix->jja); free(spmatrix->valcontig); free(spmatrix->val); #else for (int i = 0; i < hsize; i++) { //free(spmatrix->jja[i]); } free(spmatrix->jja); for (int i = 0; i < hsize; i++) { free(spmatrix->val[i]); } free(spmatrix->val); #endif spmatrix->hsize = 0; spmatrix->msize = 0; spmatrix->bandwidth = 0; spmatrix->minEval = ZERO; spmatrix->maxEval = ZERO; spmatrix->maxMinusMin = ZERO; } /// \details /// Calculate sparcity statistics for a sparse matrix void sparsity(struct SparseMatrixSt* spmatrix) { int hsize = spmatrix->hsize; int hValCount=0; int hDist[hsize]; memset(hDist, 0, hsize*sizeof(int)); for (int i = 0; i < hsize; i++) { hValCount += spmatrix->iia[i]; if (spmatrix->iia[i] > 0) hDist[spmatrix->iia[i]] += 1; } if (printRank()) { printf("\nSparsity:\nInitial sparsity = %d, fraction = %e, Avg per row = %f\n", hValCount, (real_t)hValCount/(real_t)(hsize*hsize), (real_t)hValCount/(real_t)hsize); int maxRowCount = 0; for (int i = 0; i < hsize; i++) { maxRowCount = MAX(maxRowCount, spmatrix->iia[i]); } printf("Max per row = %d\n", maxRowCount); for (int i = 0; i < hsize; i++) { if (hDist[i] > 0) printf("I = %d, count = %d, fraction = %f\n", i, hDist[i], (real_t)hDist[i]/(real_t)hsize); } } } /// \details /// Calculate gershgorin bounds for sparse matrix void gershgorin(struct SparseMatrixSt* spmatrix, struct DomainSt* domain) { int hsize = spmatrix->hsize; real_t eMin = 10000; real_t eMax = -10000; real_t sumP, sumM, maxMinusMin; #pragma omp parallel for private(sumM,sumP) reduction(max:eMax) 
reduction(min:eMin) for(int i = 0; i < hsize; i++) { sumM = 0.0; for(int j = 0; j < spmatrix->iia[i]; j++) { real_t hx = ABS(spmatrix->val[i][j]); sumM += hx; if (spmatrix->jja[i][j] == i) { sumP = spmatrix->val[i][j]; sumM -= hx; } } eMax = ((eMax < (sumP + sumM)) ? sumP + sumM : eMax); eMin = ((eMin > (sumP - sumM)) ? sumP - sumM : eMin); } // Determine eMax and eMin across ranks #ifdef DO_MPI if (getNRanks() > 1) { startTimer(reduceCommTimer); minRealReduce(&eMin); stopTimer(reduceCommTimer); collectCounter(reduceCounter, sizeof(real_t)); startTimer(reduceCommTimer); maxRealReduce(&eMax); stopTimer(reduceCommTimer); collectCounter(reduceCounter, sizeof(real_t)); } #endif maxMinusMin = eMax-eMin; if (printRank()) printf("\nGershgorin:\nNew eMax, eMin = %e, %e\n", eMax, eMin); // GERSGORIN BOUNDS; spmatrix->maxEval = eMax; spmatrix->minEval = eMin; spmatrix->maxMinusMin = maxMinusMin; } /// \details /// Normalize a matrix in sparse format using the gershgorin estimates void normalize(struct SparseMatrixSt* spmatrix) { int hsize = spmatrix->hsize; int sumIia = 0; int maxIia = 0; #pragma omp parallel for reduction(+:sumIia) reduction(max:maxIia) for(int i = 0; i < hsize; i++) { for(int j = 0; j < spmatrix->iia[i]; j++) { if (spmatrix->jja[i][j] == i) { spmatrix->val[i][j] = (spmatrix->maxEval - spmatrix->val[i][j])/spmatrix->maxMinusMin; } else { spmatrix->val[i][j] = -spmatrix->val[i][j]/spmatrix->maxMinusMin; } } sumIia += spmatrix->iia[i]; maxIia = MAX(maxIia, spmatrix->iia[i]); } // WE NOW HAVE X = (eMax*I-H)/(eMax-eMin) if (printRank() && debug == 1) printf("Initial sparsity normalized = %d, fraction = %e, avg = %g, max = %d\n", sumIia, (real_t)sumIia/(real_t)(hsize*hsize), (real_t)sumIia/(real_t)hsize, maxIia); } /// \details /// Calculate trace and trace^2 for a sparse matrix. 
void trace(struct SparseMatrixSt* spmatrix, struct DomainSt* domain, real_t* tr, real_t* tr2) { int hsize = spmatrix->hsize; real_t trace = ZERO; real_t trace2 = ZERO; #pragma omp parallel for reduction(+:trace, trace2) for(int i = domain->localRowMin; i < domain->localRowMax; i++) { #ifdef POS1 // Diagonal values are in first position trace += spmatrix->val[i][0]; trace2 += spmatrix->val[i][0] * spmatrix->val[i][0]; #else for(int j = 0; j < spmatrix->iia[i]; j++) { if (i == spmatrix->jja[i][j]) { trace += spmatrix->val[i][j]; trace2 += spmatrix->val[i][j] * spmatrix->val[i][j]; } } #endif } *tr = trace; *tr2 = trace2; }
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 32; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
gbdt.h
#ifndef LIGHTGBM_BOOSTING_GBDT_H_
#define LIGHTGBM_BOOSTING_GBDT_H_

#include <LightGBM/boosting.h>
#include <LightGBM/prediction_early_stop.h>
#include <LightGBM/tree.h>

#include <cstdio>
#include <vector>
#include <string>
#include <fstream>
#include <memory>
#include <mutex>
#include <map>

// NOTE(review): std::unordered_map is used in declarations below but
// <unordered_map> is not included here -- presumably pulled in via a
// LightGBM header; confirm.

namespace LightGBM {

/*!
* \brief GBDT algorithm implementation, including training, prediction and bagging.
*/
class GBDT : public GBDTBase {
 public:
  /*!
  * \brief Constructor
  */
  GBDT();

  /*!
  * \brief Destructor
  */
  ~GBDT();

  /*!
  * \brief Reset Boosting Config
  * \param gbdt_config Config for boosting
  */
  void ResetConfig(const Config* gbdt_config) override;

  /*!
  * \brief Get current iteration (total trees divided by trees-per-iteration)
  */
  int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; }

  /*!
  * \brief Can use early stopping for prediction or not
  * \return True if cannot use early stopping for prediction
  */
  bool NeedAccuratePrediction() const override {
    if (objective_function_ == nullptr) {
      // No objective loaded: be conservative and require accurate prediction.
      return true;
    } else {
      return objective_function_->NeedAccuratePrediction();
    }
  }

  /*!
  * \brief Get number of predictions for one data row
  * \param num_iteration number of used iterations (unused here: the output
  *        width is one value per class regardless of iteration count)
  * \param is_pred_leaf True if predicting leaf index
  * \param is_pred_contrib True if predicting feature contribution
  * \return number of predictions
  * \note Leaf-index and contribution modes are rejected with
  *       std::runtime_error by this implementation.
  */
  inline int NumPredictOneRow(int , bool is_pred_leaf, bool is_pred_contrib) const override {
    int num_preb_in_one_row = num_class_;
    if (is_pred_contrib || is_pred_leaf) {
      throw std::runtime_error("Invalid mode!");
    }
    return num_preb_in_one_row;
  }

  /*! \brief Predict raw (untransformed) scores for one dense feature row */
  void PredictRaw(const double* features, double* output,
                  const PredictionEarlyStopInstance* earlyStop) const override;

  /*! \brief Predict raw scores for one row given as feature-index -> value map */
  void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
                       const PredictionEarlyStopInstance* early_stop) const override;

  /*! \brief Predict scores for one dense feature row */
  void Predict(const double* features, double* output,
               const PredictionEarlyStopInstance* earlyStop) const override;

  /*! \brief Predict scores for one row given as feature-index -> value map */
  void PredictByMap(const std::unordered_map<int, double>& features, double* output,
                    const PredictionEarlyStopInstance* early_stop) const override;

  /*! \brief Predict the leaf index of each used tree for one dense feature row */
  void PredictLeafIndex(const double* features, double* output) const override;

  /*! \brief Predict the leaf index of each used tree for one map-encoded row */
  void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override;

  /*!
  * \brief Restore from a serialized buffer
  */
  bool LoadModelFromString(const char* buffer, size_t len) override;

  /*!
  * \brief Get max feature index of this model
  * \return Max feature index of this model
  */
  inline int MaxFeatureIdx() const override { return max_feature_idx_; }

  /*!
  * \brief Get feature names of this model
  * \return Feature names of this model (returned by value, i.e. copied)
  */
  inline std::vector<std::string> FeatureNames() const override { return feature_names_; }

  /*!
  * \brief Get index of label column
  * \return index of label column
  */
  inline int LabelIdx() const override { return label_idx_; }

  /*!
  * \brief Get number of weak sub-models
  * \return Number of weak sub-models
  */
  inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }

  /*!
  * \brief Get number of trees per iteration
  * \return number of trees per iteration
  */
  inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }

  /*!
  * \brief Get number of classes
  * \return Number of classes
  */
  inline int NumberOfClasses() const override { return num_class_; }

  /*!
  * \brief Set up the iteration count used by subsequent Predict* calls
  * \param num_iteration use at most this many iterations; values <= 0 mean
  *        "use all trained iterations"
  * \param is_pred_contrib True if feature contributions will be predicted;
  *        tree max-depths are recomputed up front in that case
  */
  inline void InitPredict(int num_iteration, bool is_pred_contrib) override {
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    if (num_iteration > 0) {
      num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_);
    }
    if (is_pred_contrib) {
      #pragma omp parallel for schedule(static)
      for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
        models_[i]->RecomputeMaxDepth();
      }
    }
  }

  /*! \brief Get the output value of one leaf (indices are bounds-checked) */
  inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    return models_[tree_idx]->LeafOutput(leaf_idx);
  }

  /*! \brief Overwrite the output value of one leaf (indices are bounds-checked) */
  inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    models_[tree_idx]->SetLeafOutput(leaf_idx, val);
  }

  /*!
  * \brief Get Type name of this boosting object
  */
  virtual const char* SubModelName() const override { return "tree"; }

 protected:
  /*! \brief current iteration */
  int iter_;
  /*! \brief Pointer to training data */
  const Dataset* train_data_;
  /*! \brief Config of gbdt */
  std::unique_ptr<Config> config_;
  /*! \brief Objective function */
  const ObjectiveFunction* objective_function_;
  /*! \brief Metrics for training data */
  std::vector<const Metric*> training_metrics_;
  /*! \brief Metric for validation data */
  std::vector<std::vector<const Metric*>> valid_metrics_;
  /*! \brief Number of rounds for early stopping */
  int early_stopping_round_;
  /*! \brief Best iteration(s) for early stopping */
  std::vector<std::vector<int>> best_iter_;
  /*! \brief Best score(s) for early stopping */
  std::vector<std::vector<double>> best_score_;
  /*! \brief output message of best iteration */
  std::vector<std::vector<std::string>> best_msg_;
  /*! \brief Trained models(trees) */
  std::vector<std::unique_ptr<Tree>> models_;
  /*! \brief Max feature index of training data */
  int max_feature_idx_;
  /*! \brief First order derivative of training data */
  std::vector<score_t> gradients_;
  /*! \brief Second order derivative of training data */
  std::vector<score_t> hessians_;
  /*! \brief Store the indices of in-bag data */
  std::vector<data_size_t> bag_data_indices_;
  /*! \brief Number of in-bag data */
  data_size_t bag_data_cnt_;
  /*! \brief Store the indices of in-bag data */
  std::vector<data_size_t> tmp_indices_;
  /*! \brief Number of training data */
  data_size_t num_data_;
  /*! \brief Number of trees per iterations */
  int num_tree_per_iteration_;
  /*! \brief Number of class */
  int num_class_;
  /*! \brief Index of label column */
  data_size_t label_idx_;
  /*! \brief number of used model */
  int num_iteration_for_pred_;
  /*! \brief Shrinkage rate for one iteration */
  double shrinkage_rate_;
  /*! \brief Number of loaded initial models */
  int num_init_iteration_;
  /*! \brief Feature names */
  std::vector<std::string> feature_names_;
  /*! \brief Per-feature info strings carried alongside feature_names_ */
  std::vector<std::string> feature_infos_;
  /*! \brief number of threads */
  int num_threads_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> offsets_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> left_cnts_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> right_cnts_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> left_write_pos_buf_;
  /*! \brief Buffer for multi-threading bagging */
  std::vector<data_size_t> right_write_pos_buf_;
  // NOTE(review): the members below had no documentation; descriptions are
  // inferred from their names -- confirm exact semantics against gbdt.cpp.
  /*! \brief Whether bagging operates on a materialized subset of the data */
  bool is_use_subset_;
  /*! \brief Per-class flag: whether this class needs training */
  std::vector<bool> class_need_train_;
  /*! \brief Default output for classes that are not trained */
  std::vector<double> class_default_output_;
  /*! \brief Whether the objective's Hessian is constant */
  bool is_constant_hessian_;
  /*! \brief Objective function restored by LoadModelFromString */
  std::unique_ptr<ObjectiveFunction> loaded_objective_;
  /*! \brief Whether the model output is averaged rather than summed */
  bool average_output_;
  /*! \brief Whether bagging must be redone before the next iteration */
  bool need_re_bagging_;
  /*! \brief Raw parameter string restored from a loaded model */
  std::string loaded_parameter_;
};

}  // namespace LightGBM
#endif  // LIGHTGBM_BOOSTING_GBDT_H_
ast-dump-openmp-begin-declare-variant_addr_1.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s // expected-no-diagnostics int also_before(void) { return 0; } #pragma omp begin declare variant match(implementation={vendor(llvm)}) int also_after(void) { return 1; } int also_before(void) { return 2; } #pragma omp end declare variant int also_after(void) { return 0; } int test(int (*fd)(void)) { return fd(); } int main() { // Should return 0. return test(also_after) + test(also_before) + test(&also_after) + test(&also_before); } // Make sure: // - we see the specialization in the AST // - we pick the right callees // CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 0 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:13:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:10:1, col:20> col:5 implicit used also_after 'int ({{.*}})' // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:12:1> line:10:1 also_after[implementation={vendor(llvm)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, line:12:1> // CHECK-NEXT: | `-ReturnStmt 
[[ADDR_12:0x[a-z0-9]*]] <line:11:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 1 // CHECK-NEXT: |-FunctionDecl [[ADDR_6]] <line:13:1, line:15:1> line:13:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:15:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:14:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 2 // CHECK-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:18:1, line:20:1> line:18:5 used also_after 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:20:1> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:19:3, col:10> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 0 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:10:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:22:1, line:24:1> line:22:5 used test 'int (int (*)({{.*}}))' // CHECK-NEXT: | |-ParmVarDecl [[ADDR_23:0x[a-z0-9]*]] <col:10, col:24> col:16 used fd 'int (*)({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_24:0x[a-z0-9]*]] <col:27, line:24:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_25:0x[a-z0-9]*]] <line:23:3, col:13> // CHECK-NEXT: | `-CallExpr [[ADDR_26:0x[a-z0-9]*]] <col:10, col:13> 'int' // CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_27:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <LValueToRValue> // CHECK-NEXT: | `-DeclRefExpr [[ADDR_28:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' {{.*}}ParmVar [[ADDR_23]] 'fd' 'int (*)({{.*}})' // CHECK-NEXT: `-FunctionDecl [[ADDR_29:0x[a-z0-9]*]] <line:25:1, line:31:1> line:25:5 main 'int ({{.*}})' // CHECK-NEXT: `-CompoundStmt [[ADDR_30:0x[a-z0-9]*]] <col:12, line:31:1> // 
CHECK-NEXT: `-ReturnStmt [[ADDR_31:0x[a-z0-9]*]] <line:27:3, line:30:27> // CHECK-NEXT: `-BinaryOperator [[ADDR_32:0x[a-z0-9]*]] <line:27:10, line:30:27> 'int' '+' // CHECK-NEXT: |-BinaryOperator [[ADDR_33:0x[a-z0-9]*]] <line:27:10, line:29:26> 'int' '+' // CHECK-NEXT: | |-BinaryOperator [[ADDR_34:0x[a-z0-9]*]] <line:27:10, line:28:26> 'int' '+' // CHECK-NEXT: | | |-CallExpr [[ADDR_35:0x[a-z0-9]*]] <line:27:10, col:25> 'int' // CHECK-NEXT: | | | |-ImplicitCastExpr [[ADDR_36:0x[a-z0-9]*]] <col:10> 'int (*)(int (*)({{.*}}))' <FunctionToPointerDecay> // CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_37:0x[a-z0-9]*]] <col:10> 'int (int (*)({{.*}}))' {{.*}}Function [[ADDR_22]] 'test' 'int (int (*)({{.*}}))' // CHECK-NEXT: | | | `-ImplicitCastExpr [[ADDR_38:0x[a-z0-9]*]] <col:15> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_39:0x[a-z0-9]*]] <col:15> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})' // CHECK-NEXT: | | `-CallExpr [[ADDR_40:0x[a-z0-9]*]] <line:28:10, col:26> 'int' // CHECK-NEXT: | | |-ImplicitCastExpr [[ADDR_41:0x[a-z0-9]*]] <col:10> 'int (*)(int (*)({{.*}}))' <FunctionToPointerDecay> // CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_42:0x[a-z0-9]*]] <col:10> 'int (int (*)({{.*}}))' {{.*}}Function [[ADDR_22]] 'test' 'int (int (*)({{.*}}))' // CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_43:0x[a-z0-9]*]] <col:15> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | | `-DeclRefExpr [[ADDR_44:0x[a-z0-9]*]] <col:15> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})' // CHECK-NEXT: | `-CallExpr [[ADDR_45:0x[a-z0-9]*]] <line:29:10, col:26> 'int' // CHECK-NEXT: | |-ImplicitCastExpr [[ADDR_46:0x[a-z0-9]*]] <col:10> 'int (*)(int (*)({{.*}}))' <FunctionToPointerDecay> // CHECK-NEXT: | | `-DeclRefExpr [[ADDR_47:0x[a-z0-9]*]] <col:10> 'int (int (*)({{.*}}))' {{.*}}Function [[ADDR_22]] 'test' 'int (int (*)({{.*}}))' // CHECK-NEXT: | `-UnaryOperator [[ADDR_48:0x[a-z0-9]*]] <col:15, col:16> 'int 
(*)({{.*}})' prefix '&' cannot overflow // CHECK-NEXT: | `-DeclRefExpr [[ADDR_49:0x[a-z0-9]*]] <col:16> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})' // CHECK-NEXT: `-CallExpr [[ADDR_50:0x[a-z0-9]*]] <line:30:10, col:27> 'int' // CHECK-NEXT: |-ImplicitCastExpr [[ADDR_51:0x[a-z0-9]*]] <col:10> 'int (*)(int (*)({{.*}}))' <FunctionToPointerDecay> // CHECK-NEXT: | `-DeclRefExpr [[ADDR_52:0x[a-z0-9]*]] <col:10> 'int (int (*)({{.*}}))' {{.*}}Function [[ADDR_22]] 'test' 'int (int (*)({{.*}}))' // CHECK-NEXT: `-UnaryOperator [[ADDR_53:0x[a-z0-9]*]] <col:15, col:16> 'int (*)({{.*}})' prefix '&' cannot overflow // CHECK-NEXT: `-DeclRefExpr [[ADDR_54:0x[a-z0-9]*]] <col:16> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
GB_binop__times_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__times_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__times_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__times_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__times_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__times_uint32) // A*D function (colscale): GB (_AxD__times_uint32) // D*A function (rowscale): GB (_DxB__times_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__times_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__times_uint32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_uint32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_uint32) // C=scalar+B GB (_bind1st__times_uint32) // C=scalar+B' GB (_bind1st_tran__times_uint32) // C=A+scalar GB (_bind2nd__times_uint32) // C=A'+scalar GB (_bind2nd_tran__times_uint32) // C type: uint32_t // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 
0 // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x * y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_UINT32 || GxB_NO_TIMES_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__times_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__times_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__times_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__times_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__times_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__times_uint32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__times_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint32_t alpha_scalar ; uint32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ; beta_scalar = (*((uint32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__times_uint32) ( 
GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__times_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__times_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__times_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__times_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__times_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x * aij) ; \ } GrB_Info GB (_bind1st_tran__times_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB (_bind2nd_tran__times_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unaryop__lnot_uint64_int8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint64_int8
// op(A') function:  GB_tran__lnot_uint64_int8

// C type:   uint64_t
// A type:   int8_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = !(aij != 0)

// A-matrix entry type
#define GB_ATYPE \
    int8_t

// C-matrix entry type
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical NOT of x != 0)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting (int8_t value to the uint64_t result type)
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = (uint64_t) !(Ax [p] != 0) for all p in [0, anz),
// parallelized across nthreads OpenMP threads.  Returns GrB_NO_VALUE when
// this kernel is compiled out via GB_DISABLE.
GrB_Info GB_unop__lnot_uint64_int8
(
    uint64_t *restrict Cx,        // output array, anz entries
    const int8_t *restrict Ax,    // input array, anz entries
    int64_t anz,                  // number of entries
    int nthreads                  // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose/typecast/apply loop itself lives in the shared template
// GB_unaryop_transpose.c, instantiated here with the macros defined above.
GrB_Info GB_tran__lnot_uint64_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
openmp_matrix.c
/**
 * Implement some parallel algorithms operating on matrixes with OpenMP
 */
#ifndef _GNU_SOURCE
# define _GNU_SOURCE /* for snprintf */
#endif

/* Define inline for OpenMP compatibility with "clang -ansi" */
#ifdef __clang__
# define inline __inline__
#endif

#include <errno.h>
#include <omp.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/**
 * Output a string to standard output without any buffering
 */
static void print_nobuf(const char *string)
{
    size_t len = strlen(string);

    while (len > 0) {
        ssize_t count = write(STDOUT_FILENO, string, len);

        /* Retry when interrupted by a signal.  write() reports failure with
         * -1 and a positive errno, so test against EINTR: the previous test
         * against -EINTR could never match, silently dropping output. */
        if (count == -1 && errno == EINTR) {
            continue;
        }
        if (count <= 0) {
            break;
        }
        string += count;
        len -= (size_t)count;
    }
}

/**
 * Malloc and exit if it failed
 */
static void *malloc_nofail(size_t size)
{
    void *ptr = malloc(size);

    if (!ptr) {
        fprintf(stderr, "malloc: failed to allocate %lu bytes\n",
                (unsigned long)size);
        exit(1);
    }
    return ptr;
}

/**
 * Fill a square matrix with a value
 */
static void fill_square_matrix(double *matrix, size_t size, double value)
{
    size_t i = 0, j;

    /* Use static schedule (default) */
#pragma omp parallel for private(i, j)
    for (i = 0; i < size; i++) {
        for (j = 0; j < size; j++) {
            matrix[i * size + j] = value;
        }
    }
}

/**
 * Initialize a list of vectors
 */
static void init_vector_list(double *vectors, size_t size, size_t dim)
{
    size_t i = 0, j;

#pragma omp parallel for private(i, j)
    for (i = 0; i < size; i++) {
        for (j = 0; j < dim; j++) {
            vectors[i * dim + j] = ((double)i) + ((double)j + 1) / 100.;
        }
    }
}

/**
 * Sum the values of a square matrix
 */
static double sum_square_matrix(const double *matrix, size_t size)
{
    size_t i_j = 0;
    double sum = 0;

#pragma omp parallel for private(i_j) reduction(+:sum)
    for (i_j = 0; i_j < size * size; i_j++) {
        sum += matrix[i_j];
    }
    return sum;
}

/**
 * Compute the squared euclidean distance of vectors in a square matrix
 * Here are several ways of implementing this:
 * 1. Compute separately each cell in matrix
 * 2. Compute by triangles
 */
static void sq_euclidean_distance1(
    double *matrix, const double *vectors, size_t size, size_t dim)
{
    size_t i_j = 0;

    /* One parallel iteration per matrix cell, including the diagonal. */
#pragma omp parallel for private(i_j)
    for (i_j = 0; i_j < size * size; i_j++) {
        size_t i = i_j / size;
        size_t j = i_j % size;
        size_t k;
        double dist = 0;

        for (k = 0; k < dim; k++) {
            double diff = vectors[i * dim + k] - vectors[j * dim + k];

            dist += diff * diff;
        }
        matrix[i_j] = dist;
    }
}

static void sq_euclidean_distance2(
    double *matrix, const double *vectors, size_t size, size_t dim)
{
    size_t i = 0;

    /* Exploit symmetry: compute the upper triangle only and mirror it. */
#pragma omp parallel for private(i)
    for (i = 0; i < size; i++) {
        size_t j;

        matrix[i * size + i] = 0;
        for (j = i + 1; j < size; j++) {
            size_t k;
            double dist = 0;

            for (k = 0; k < dim; k++) {
                double diff = vectors[i * dim + k] - vectors[j * dim + k];

                dist += diff * diff;
            }
            matrix[i * size + j] = dist;
            matrix[j * size + i] = dist;
        }
    }
}

int main(void)
{
    const size_t size = 5000, dim = 2;
    double *matrix;
    double *vectors;

    /* Use omp_get_max_threads() instead of omp_get_thread_num() outside an
     * OpenMP loop.
     */
    printf("OpenMP max threads: %d\n", omp_get_max_threads());
    fflush(stdout);

    /* Test that everything is fine */
    print_nobuf("OpenMP threads:");
#pragma omp parallel
    {
        int this_thread = omp_get_thread_num();
        int num_threads = omp_get_num_threads();
        char buffer[sizeof(" [/]") + 2 * 11];

        snprintf(buffer, sizeof(buffer), " [%d/%d]", this_thread, num_threads);
        print_nobuf(buffer);
    }
    print_nobuf("\n");

    /* Allocate a big matrix and 2 lists of vectors */
    matrix = malloc_nofail(size * size * sizeof(double));
    vectors = malloc_nofail(size * dim * sizeof(double));

    /* Initialization (pass dim instead of a literal 2 so the call stays in
     * sync with the declaration above) */
    fill_square_matrix(matrix, size, 0);
    init_vector_list(vectors, size, dim);

    /* Computations */
    sq_euclidean_distance1(matrix, vectors, size, dim);
    printf("1: sum(eucl_dist(vects)) = %f\n", sum_square_matrix(matrix, size));
    sq_euclidean_distance2(matrix, vectors, size, dim);
    printf("2: sum(eucl_dist(vects)) = %f\n", sum_square_matrix(matrix, size));

    /* Free the mallocs */
    free(matrix);
    free(vectors);
    return 0;
}
SF_test_offsets.c
// John D. McCalpin, mccalpin@tacc.utexas.edu static char const rcsid[] = "$Id: SF_test_offsets.c,v 1.4 2018/05/17 22:20:24 mccalpin Exp mccalpin $"; // include files #include <stdio.h> // printf, etc #include <stdint.h> // standard integer types, e.g., uint32_t #include <signal.h> // for signal handler #include <stdlib.h> // exit() and EXIT_FAILURE #include <string.h> // strerror() function converts errno to a text string for printing #include <fcntl.h> // for open() #include <errno.h> // errno support #include <assert.h> // assert() function #include <unistd.h> // sysconf() function, sleep() function #include <sys/mman.h> // support for mmap() function #include <linux/mman.h> // required for 1GiB page support in mmap() #include <math.h> // for pow() function used in RAPL computations #include <time.h> #include <sys/time.h> // for gettimeofday # define ARRAYSIZE 2147483648L #ifdef MYHUGEPAGE_1GB // 1 GiB pages #define MYPAGESIZE 1073741824UL #define NUMPAGES 32L #define PAGES_MAPPED 32L // this code is not working correctly for 1GiB pages, but I already know the answers.... 
#else #define MYPAGESIZE 2097152L #define NUMPAGES 1024L #define PAGES_MAPPED 14L #endif #define SPECIAL_VALUE (-1) // interfaces for va2pa_lib.c void print_pagemap_entry(unsigned long long pagemap_entry); unsigned long long get_pagemap_entry( void * va ); int dumpall; // when set to 1, will cause dump of lots of stuff for debugging int report; int nwraps; // track number of performance counter wraps double *array; // array pointer to mmap on 1GiB pages double *page_pointers[NUMPAGES]; // one pointer for each page allocated uint64_t pageframenumber[NUMPAGES]; // one PFN entry for each page allocated // constant value defines # define NUM_SOCKETS 2 // # define NUM_IMC_CHANNELS 6 // includes channels on all IMCs in a socket # define NUM_IMC_COUNTERS 5 // 0-3 are the 4 programmable counters, 4 is the fixed-function DCLK counter # define NUM_CHA_BOXES 28 # define NUM_CHA_USED 28 # define NUM_CHA_COUNTERS 4 long imc_counts[NUM_SOCKETS][NUM_IMC_CHANNELS][NUM_IMC_COUNTERS][2]; // including the fixed-function (DCLK) counter as the final entry long imc_pkg_sums[NUM_SOCKETS][NUM_IMC_COUNTERS]; // sum across channels for each chip char imc_event_name[NUM_SOCKETS][NUM_IMC_CHANNELS][NUM_IMC_COUNTERS][32]; // reserve 32 characters for the IMC event names for each socket, channel, counter uint32_t imc_perfevtsel[NUM_IMC_COUNTERS]; // expected control settings for the counters uint32_t imc_vid_did[3]; // PCIe configuration space vendor and device IDs for the IMC blocks long cha_counts[NUM_SOCKETS][NUM_CHA_BOXES][NUM_CHA_COUNTERS][2]; // 2 sockets, 28 tiles per socket, 4 counters per tile, 2 times (before and after) uint32_t cha_perfevtsel[NUM_CHA_COUNTERS]; long cha_pkg_sums[NUM_SOCKETS][NUM_CHA_COUNTERS]; #define MAXCORES 112 #define CORES_USED 24 // New feature -- core counters. 
// upgrade to include counters for all cores long core_counters[MAXCORES][4][2]; // 24 cores & 24 threads on one socket, 4 counters, before and after long fixed_counters[MAXCORES][4][2]; // 24 cores with 4 fixed-function core counters (Instr, CoreCyc, RefCyc, TSC) long core_pkg_sums[NUM_SOCKETS][4]; // four core counters long fixed_pkg_sums[NUM_SOCKETS][4]; // four fixed-function counters per core (Instr, CoreCyc, RefCyc, TSC) int8_t cha_by_page[PAGES_MAPPED][32768]; // L3 numbers for each of the 32,768 cache lines in each of the first PAGES_MAPPED 2MiB pages uint64_t paddr_by_page[PAGES_MAPPED]; // physical addresses of the base of each of the first PAGES_MAPPED 2MiB pages used long lines_by_cha[NUM_CHA_USED]; // bulk count of lines assigned to each CHA #ifdef DEBUG FILE *log_file; // log file for debugging -- should not be needed in production #endif unsigned int *mmconfig_ptr; // must be pointer to 32-bit int so compiler will generate 32-bit loads and stores struct timeval tp; // seconds and microseconds from gettimeofday struct timezone tzp; // required, but not used here. 
double ssum(double *a, long vl);    // vector-sum kernel -- defined elsewhere in this file

// Return the current wall-clock time in seconds (microsecond resolution)
// via gettimeofday().
// NOTE(review): the local tp/tzp shadow the file-scope globals of the same
// names, and the gettimeofday() return code stored in 'i' is never checked.
double mysecond() { struct timeval tp; struct timezone tzp; int i; i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); }

# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif

#include "low_overhead_timers.c"
#include "SKX_IMC_BusDeviceFunctionOffset.h"
#include "MSR_defs.h"

// ===========================================================================================================================================================================
// Convert PCI(bus:device.function,offset) to uint32_t array index
// The MMCONFIG byte address packs bus into bits 27:20, device into 19:15,
// function into 14:12, and the register offset into 11:0; dividing by 4
// turns the byte address into an index into the uint32_t mmconfig_ptr array.
// NOTE(review): the ">= 0" asserts are vacuous because the parameters are
// unsigned; the upper-bound asserts enforce the field widths.
uint32_t PCI_cfg_index(unsigned int Bus, unsigned int Device, unsigned int Function, unsigned int Offset)
{
    uint32_t byteaddress;
    uint32_t index;
    assert (Device >= 0);
    assert (Function >= 0);
    assert (Offset >= 0);
    assert (Device < (1<<5));       // 5-bit device field
    assert (Function < (1<<3));     // 3-bit function field
    assert (Offset < (1<<12));      // 4 KiB of config space per function
    byteaddress = (Bus<<20) | (Device<<15) | (Function<<12) | Offset;
    index = byteaddress / 4;        // mmconfig_ptr is indexed in 32-bit words
    return ( index );
}
// ===========================================================================================================================================================================

int main(int argc, char *argv[])
{
    // local declarations
    // int cpuid_return[4];
    int i;
    int retries;
    int zeros;
    int rc;
    int core_pmc_width, fixed_pmc_width;    // these will be looked up using CPUID to use in overflow/wraparound correction
    int uncore_pmc_width=48;    // all the uncore stuff is model-dependent, but most are 48 bits
    ssize_t rc64;
    char description[100];
    size_t len;
    long arraylen;
    long l2_contained_size, inner_repetitions;
    unsigned long pagemapentry;
    unsigned long paddr, basephysaddr;
    unsigned long pagenum, basepagenum;
    uint32_t bus, device, function, offset, ctl_offset, ctr_offset, value, index;
    uint32_t socket, imc, channel, counter, controller;
    long count,delta;
    long j,k,page_number,page_base_index,line_number;
    // per-thread loop bounds for the partitioned summation kernel
    long jstart[CORES_USED],
jend[CORES_USED], mycore, vl[CORES_USED]; uint32_t low_0, high_0, low_1, high_1; char filename[100]; int pkg, tile; int nr_cpus; uint64_t msr_val, msr_num; int mem_fd; int msr_fd[2]; // one for each socket int proc_in_pkg[2]; // one Logical Processor number for each socket uid_t my_uid; gid_t my_gid; double sum,expected; double t0, t1; double avg_cycles; unsigned long tsc_start, tsc_end; float TSC_GHz; double sf_evict_rate; double bandwidth; unsigned long mmconfig_base=0x80000000; // DOUBLE-CHECK THIS ON NEW SYSTEMS!!!!! grep MMCONFIG /proc/iomem | awk -F- '{print $1}' unsigned long mmconfig_size=0x10000000; double private_sum,partial_sums[CORES_USED]; long iters,iteration_counts[CORES_USED]; long BaseOffset; TSC_GHz = get_TSC_frequency()/1.0e9; core_pmc_width = get_core_counter_width(); fixed_pmc_width = get_fixed_counter_width(); BaseOffset = 0; #ifdef RANDOMOFFSETS if (argc != 2) { printf("Must Provide a Random Offset cache line offset value (an integer between 0 and 2^24-375000 (16,402,216))\n"); exit(1); } else { BaseOffset = atol(argv[1]); printf("Random Cache Line Offset is %ld\n",BaseOffset); BaseOffset = BaseOffset*8; printf("Starting index for summation is %ld\n",BaseOffset); } #endif retries = 0; zeros = 0; report = 1; dumpall = 0; nwraps = 0; l2_contained_size = 125000 * CORES_USED; // about 95% of the L2 space in the cores used // l2_contained_size = 87380 * CORES_USED; // with 24 cores, this gives almost exactly 16 MiB for (i=0; i<CORES_USED; i++) { iters = 0; jstart[i] = BaseOffset + i*l2_contained_size/CORES_USED; jend[i] = jstart[i] + l2_contained_size/CORES_USED; vl[i] = jend[i]-jstart[i]; printf("thread %d jstart %ld jend %ld vl %ld\n",i,jstart[i],jend[i],vl[i]); partial_sums[i] = 0.0; iteration_counts[i] = 0; for (counter=0; counter<4; counter++) { core_counters[i][counter][0] = SPECIAL_VALUE; core_counters[i][counter][1] = SPECIAL_VALUE; fixed_counters[i][counter][0] = SPECIAL_VALUE; fixed_counters[i][counter][1] = SPECIAL_VALUE; } } // 
initialize the array that will hold the L3 numbers for each cache line for each of the first PAGES_MAPPED 2MiB pages for (i=0; i<PAGES_MAPPED; i++) { for (line_number=0; line_number<32768; line_number++) { cha_by_page[i][line_number] = -1; // special value -- if set properly, all values should be in the range of 0..23 } } // allocate working array on a huge pages -- either 1GiB or 2MiB len = NUMPAGES * MYPAGESIZE; #ifdef MYHUGEPAGE_1GB array = (double*) mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 ); #else array = (double*) mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 ); #endif if (array == (void *)(-1)) { perror("ERROR: mmap of array a failed! "); exit(1); } // initialize working array arraylen = NUMPAGES * MYPAGESIZE/sizeof(double); #pragma omp parallel for for (j=0; j<arraylen; j++) { array[j] = 1.0; } // initialize page_pointers to point to the beginning of each page in the array // then get and print physical addresses for each #ifdef VERBOSE printf(" Page ArrayIndex VirtAddr PagemapEntry PFN PhysAddr\n"); #endif for (j=0; j<NUMPAGES; j++) { k = j*MYPAGESIZE/sizeof(double); page_pointers[j] = &array[k]; pagemapentry = get_pagemap_entry(&array[k]); pageframenumber[j] = (pagemapentry & (unsigned long) 0x007FFFFFFFFFFFFF); #ifdef VERBOSE printf(" %.5ld %.10ld %#18lx %#18lx %#18lx %#18lx\n",j,k,&array[k],pagemapentry,pageframenumber[j],(pageframenumber[j]<<12)); #endif } printf("PAGE_ADDRESSES "); for (j=0; j<PAGES_MAPPED; j++) { basephysaddr = pageframenumber[j] << 12; paddr_by_page[j] = basephysaddr; printf("0x%.12lx ",paddr_by_page[j]); } printf("\n"); // initialize arrays for counter data for (socket=0; socket<NUM_SOCKETS; socket++) { for (channel=0; channel<NUM_IMC_CHANNELS; channel++) { for (counter=0; counter<NUM_IMC_COUNTERS; counter++) { imc_counts[socket][channel][counter][0] = 0; imc_counts[socket][channel][counter][1] = 0; } } for (tile=0; 
tile<NUM_CHA_USED; tile++) { lines_by_cha[tile] = 0; for (counter=0; counter<4; counter++) { cha_counts[socket][tile][counter][0] = 0; cha_counts[socket][tile][counter][1] = 0; } } } // get the host name, assume that it is of the TACC standard form, and use this as part // of the log file name.... Standard form is "c263-109.stampede2.tacc.utexas.edu", so // truncating at the first "." is done by writing \0 to character #8. len = 100; rc = gethostname(description, len); if (rc != 0) { fprintf(stderr,"ERROR when trying to get hostname\n"); exit(-1); } description[8] = 0; // assume hostname of the form c263-109.stampede2.tacc.utexas.edu -- truncate after first period my_uid = getuid(); my_gid = getgid(); #ifdef DEBUG sprintf(filename,"log.%s.perf_counters",description); // sprintf(filename,"log.perf_counters"); log_file = fopen(filename,"w+"); if (log_file == 0) { fprintf(stderr,"ERROR %s when trying to open log file %s\n",strerror(errno),filename); exit(-1); } fprintf(log_file,"DEBUG: my uid is %d, my gid is %d\n",my_uid,my_gid); rc = chown(filename,my_uid,my_gid); if (rc == 0) { fprintf(log_file,"DEBUG: Successfully changed ownership of log file to %d %d\n",my_uid,my_gid); } else { fprintf(stderr,"ERROR: Attempt to change ownership of log file failed -- bailing out\n"); exit(-1); } #endif //======================================================================================================================== // initial checks // is this a supported core? (CPUID Family/Model) // Every processor that I am going to see will be Family 0x06 (no ExtFamily needed). 
// The DisplayModel field is (ExtModel<<4)+Model and should be 0x3F for all Xeon E5 v3 systems int leaf = 1; int subleaf = 0; uint32_t eax, ebx, ecx, edx; __asm__ __volatile__ ("cpuid" : \ "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (leaf), "c" (subleaf)); // Alternate form: // The compiler cpuid intrinsics are not documented by Intel -- they use the Microsoft format // described at https://msdn.microsoft.com/en-us/library/hskdteyh.aspx // __cpuid(array to hold eax,ebx,ecx,edx outputs, initial eax value) // __cpuidex(array to hold eax,ebx,ecx,edx outputs, initial eax value, initial ecx value) // CPUID function 0x01 returns the model info in eax. // 27:20 ExtFamily -- expect 0x00 // 19:16 ExtModel -- expect 0x3 for HSW, 0x5 for SKX // 11:8 Family -- expect 0x6 // 7:4 Model -- expect 0xf for HSW, 0x5 for SKX // __cpuid(&cpuid_return[0], 1); // uint32_t ModelInfo = cpuid_return[0] & 0x0fff0ff0; // mask out the reserved and "stepping" fields, leaving only the based and extended Family/Model fields uint32_t ModelInfo = eax & 0x0fff0ff0; // mask out the reserved and "stepping" fields, leaving only the based and extended Family/Model fields if (ModelInfo != 0x00050650) { // expected values for Skylake Xeon fprintf(stderr,"ERROR -- this does not appear to be the correct processor type!!!\n"); fprintf(stderr,"ERROR -- Expected CPUID(0x01) Family/Model bits = 0x%x, but found 0x%x\n",0x00050650,ModelInfo); exit(1); } #ifdef IMC_COUNTS // =================================================================================================================== // ------------------ REQUIRES ROOT PERMISSIONS ------------------ // open /dev/mem for PCI device access and mmap() a pointer to the beginning // of the 256 MiB PCI Configuration Space. // check VID/DID for uncore bus:device:function combinations // Note that using /dev/mem for PCI configuration space access is required for some devices on KNL. 
// It is not required on other systems, but it is not particularly inconvenient either. sprintf(filename,"/dev/mem"); #ifdef DEBUG fprintf(log_file,"opening %s\n",filename); #endif mem_fd = open(filename, O_RDWR); if (mem_fd == -1) { fprintf(stderr,"ERROR %s when trying to open %s\n",strerror(errno),filename); exit(-1); } int map_prot = PROT_READ | PROT_WRITE; mmconfig_ptr = mmap(NULL, mmconfig_size, map_prot, MAP_SHARED, mem_fd, mmconfig_base); if (mmconfig_ptr == MAP_FAILED) { fprintf(stderr,"cannot mmap base of PCI configuration space from /dev/mem: address %lx\n", mmconfig_base); exit(2); #ifdef DEBUG } else { fprintf(log_file,"Successful mmap of base of PCI configuration space from /dev/mem at address %lx\n", mmconfig_base); #endif } close(mem_fd); // OK to close file after mmap() -- the mapping persists until unmap() or program exit // New simple test that does not need to know the uncore bus numbers here... // Skylake bus 0, Function 5, offset 0 -- Sky Lake-E MM/Vt-d Configuration Registers // // simple test -- should return "20248086" on Skylake Xeon EP -- DID 0x2024, VID 0x8086 bus = 0x00; device = 0x5; function = 0x0; offset = 0x0; index = PCI_cfg_index(bus, device, function, offset); value = mmconfig_ptr[index]; if (value != 0x20248086) { fprintf(stderr,"ERROR: Bus %x device %x function %x offset %x expected %x, found %x\n",bus,device,function,offset,0x20248086,value); exit(3); #ifdef DEBUG } else { fprintf(log_file,"DEBUG: Well done! 
Bus %x device %x function %x offset %x returns expected value of %x\n",bus,device,function,offset,value); #endif } #endif #ifdef CHA_COUNTS // =================================================================================================================== // open the MSR driver using one core in socket 0 and one core in socket 1 nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); proc_in_pkg[0] = 0; // logical processor 0 is in socket 0 in all TACC systems proc_in_pkg[1] = nr_cpus-1; // logical processor N-1 is in socket 1 in all TACC 2-socket systems for (pkg=0; pkg<2; pkg++) { sprintf(filename,"/dev/cpu/%d/msr",proc_in_pkg[pkg]); msr_fd[pkg] = open(filename, O_RDWR); if (msr_fd[pkg] == -1) { fprintf(stderr,"ERROR %s when trying to open %s\n",strerror(errno),filename); exit(-1); } } for (pkg=0; pkg<2; pkg++) { pread(msr_fd[pkg],&msr_val,sizeof(msr_val),IA32_TIME_STAMP_COUNTER); fprintf(stdout,"DEBUG: TSC on core %d socket %d is %ld\n",proc_in_pkg[pkg],pkg,msr_val); } pread(msr_fd[0],&msr_val,sizeof(msr_val),0x186); printf("Core PerfEvtSel0 0x%lx\n",msr_val); pread(msr_fd[0],&msr_val,sizeof(msr_val),0x187); printf("Core PerfEvtSel1 0x%lx\n",msr_val); pread(msr_fd[0],&msr_val,sizeof(msr_val),0x188); printf("Core PerfEvtSel2 0x%lx\n",msr_val); pread(msr_fd[0],&msr_val,sizeof(msr_val),0x189); printf("Core PerfEvtSel3 0x%lx\n",msr_val); // Program the CHA mesh counters // Each CHA has a block of 16 MSRs reserved, of which 12 are used // The base for each CHA is 0xE00 + 0x10*CHA // Within each block: // Unit Control is at offset 0x00 // CTL0, 1, 2, 3 are at offsets 0x01, 0x02, 0x03, 0x04 // CTR0, 1, 2, 3 are at offsets 0x08, 0x09, 0x0a, 0x0b // For the moment I think I can ignore the filter registers at offsets 0x05 and 0x06 // and the status register at offset 0x07 // The control register needs bit 22 set to enabled, then bits 15:8 as Umask and 7:0 as EventSelect // Mesh Events: // HORZ_RING_BL_IN_USE = 0xab // LEFT_EVEN = 0x01 // LEFT_ODD = 0x02 // RIGHT_EVEN = 0x04 // 
RIGHT_ODD = 0x08 // VERT_RING_BL_IN_USE = 0xaa // UP_EVEN = 0x01 // UP_ODD = 0x02 // DN_EVEN = 0x04 // DN_ODD = 0x08 // For starters, I will combine even and odd and create 4 events // 0x004003ab HORZ_RING_BL_IN_USE.LEFT // 0x00400cab HORZ_RING_BL_IN_USE.RIGHT // 0x004003aa VERT_RING_BL_IN_USE.UP // 0x00400caa VERT_RING_BL_IN_USE.DN // first set to try.... cha_perfevtsel[0] = 0x004003ab; // HORZ_RING_BL_IN_USE.LEFT cha_perfevtsel[1] = 0x00400cab; // HORZ_RING_BL_IN_USE.RIGHT cha_perfevtsel[2] = 0x004003aa; // VERT_RING_BL_IN_USE.UP cha_perfevtsel[3] = 0x00400caa; // VERT_RING_BL_IN_USE.DN // second set to try.... // cha_perfevtsel[0] = 0x004001ab; // HORZ_RING_BL_IN_USE.LEFT_EVEN // cha_perfevtsel[1] = 0x004002ab; // HORZ_RING_BL_IN_USE.LEFT_ODD // cha_perfevtsel[2] = 0x004004ab; // HORZ_RING_BL_IN_USE.RIGHT_EVEN // cha_perfevtsel[3] = 0x004008ab; // HORZ_RING_BL_IN_USE.RIGHT_ODD // Snoop Filter Eviction counters cha_perfevtsel[0] = 0x0040073d; // SF_EVICTION S,E,M states cha_perfevtsel[1] = 0x00400334; // LLC_LOOKUP.DATA_READ <-- requires CHA_FILTER0[26:17] cha_perfevtsel[2] = 0x00400534; // LLC_LOOKUP.DATA_WRITE (WB from L2) <-- requires CHA_FILTER0[26:17] cha_perfevtsel[3] = 0x0040af37; // LLC_VICTIMS.TOTAL (MESF) (does not count clean victims) uint64_t cha_filter0 = 0x01e20000; // set bits 24,23,22,21,17 FMESI -- all LLC lookups, no SF lookups printf("CHA PerfEvtSel0 0x%lx\n",cha_perfevtsel[0]); printf("CHA PerfEvtSel1 0x%lx\n",cha_perfevtsel[1]); printf("CHA PerfEvtSel2 0x%lx\n",cha_perfevtsel[2]); printf("CHA PerfEvtSel3 0x%lx\n",cha_perfevtsel[3]); printf("CHA FILTER0 0x%lx\n",cha_filter0); #ifdef VERBOSE printf("VERBOSE: programming CHA counters\n"); #endif for (pkg=0; pkg<2; pkg++) { for (tile=0; tile<NUM_CHA_USED; tile++) { msr_num = 0xe00 + 0x10*tile; // box control register -- set enable bit msr_val = 0x00400000; pwrite(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num); msr_num = 0xe00 + 0x10*tile + 1; // ctl0 msr_val = cha_perfevtsel[0]; 
pwrite(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num); msr_num = 0xe00 + 0x10*tile + 2; // ctl1 msr_val = cha_perfevtsel[1]; pwrite(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num); msr_num = 0xe00 + 0x10*tile + 3; // ctl2 msr_val = cha_perfevtsel[2]; pwrite(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num); msr_num = 0xe00 + 0x10*tile + 4; // ctl3 msr_val = cha_perfevtsel[3]; pwrite(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num); msr_num = 0xe00 + 0x10*tile + 5; // filter0 msr_val = cha_filter0; // bits 24:21,17 FMESI -- all LLC lookups, not not SF lookups pwrite(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num); } } #ifdef VERBOSE printf("VERBOSE: finished programming CHA counters\n"); #endif #endif #ifdef IMC_COUNTS // =================================================================================================================== // Read the current programming of the IMC counters and look for the standard values (in this order) // CAS_COUNT.READS Event 0x04, Umask 0x03 // CAS_COUNT.WRITES Event 0x04, Umask 0x0C // ACT.ALL Event 0x01, Umask 0x0B // PRE_COUNT.MISS Event 0x02, Umask 0x01 // DCLK #ifdef VERBOSE printf("Preparing to program IMC counters\n"); #endif // expected values of IMC performance counter event select control registers imc_perfevtsel[0] = 0x00400304; // CAS_COUNT.READS imc_perfevtsel[1] = 0x00400C04; // CAS_COUNT.WRITES imc_perfevtsel[2] = 0x00400B01; // ACT_COUNT.ALL imc_perfevtsel[3] = 0x00400102; // PRE_COUNT.MISS imc_perfevtsel[4] = 0x00400000; // DCLK imc_vid_did[0] = 0x20428086; // all channel 0 devices are 2042 imc_vid_did[1] = 0x20468086; // all channel 1 devices are 2046 imc_vid_did[2] = 0x204a8086; // all channel 2 devices are 204a printf("IMC PerfEvtSel0 0x%lx\n",imc_perfevtsel[0]); printf("IMC PerfEvtSel1 0x%lx\n",imc_perfevtsel[1]); printf("IMC PerfEvtSel2 0x%lx\n",imc_perfevtsel[2]); printf("IMC PerfEvtSel3 0x%lx\n",imc_perfevtsel[3]); printf("IMC PerfEvtSel4 0x%lx\n",imc_perfevtsel[4]); // print the full wall-clock time in seconds and 
microseconds // assume both components of tp struct are longs. fprintf(stdout,"# %s\n", rcsid); i = gettimeofday(&tp,&tzp); fprintf(stdout,"%ld %ld\n", tp.tv_sec,tp.tv_usec); for (socket=0; socket<NUM_SOCKETS; socket++) { bus = IMC_BUS_Socket[socket]; #ifdef VERBOSE printf("VERBOSE: socket %d bus %d\n",socket,bus); #endif for (channel=0; channel<NUM_IMC_CHANNELS; channel++) { device = IMC_Device_Channel[channel]; function = IMC_Function_Channel[channel]; #ifdef VERBOSE printf("VERBOSE: channel %d device %d function %d\n",channel, device, function); #endif // check to make sure this is the correct device offset = 0x0; index = PCI_cfg_index(bus, device, function, offset); value = mmconfig_ptr[index]; if ( value != imc_vid_did[channel%3]) { fprintf(stderr,"WARNING!!!! socket %d, channel %d has vid_did %x but should be %x\n",socket,channel,value,imc_vid_did[channel%3]); } for (counter=0; counter<NUM_IMC_COUNTERS; counter++) { // check to see if this unit is programmed correctly and reprogram if needed offset = IMC_PmonCtl_Offset[counter]; index = PCI_cfg_index(bus, device, function, offset); value = mmconfig_ptr[index]; if ( value != imc_perfevtsel[counter]) { fprintf(stderr,"WARNING!!!! socket %d, channel %d has perfevtsel %x but should be %x -- reprogramming\n",socket,channel,value,imc_perfevtsel[counter]); mmconfig_ptr[index] = imc_perfevtsel[counter]; } } } } #endif // ========= END OF PERFORMANCE COUNTER SETUP ======================================================================== #ifdef MAP_L3 // ============== BEGIN L3 MAPPING TESTS ============================== // For each of the PAGES_MAPPED 2MiB pages: // 1. Use "access()" to see if the mapping file already exists. // If exists: // 2. Use "stat()" to make sure the file is the correct size // If right size: // 3. Read the contents into the 32768-element int8_t array of L3 numbers. // Else (wrong size): // 4. Abort and tell the user to fix it manually. // Else (not exists): // 4. 
Call the mapping function to re-compute the map // 5. Create mapping file // 6. Save data in mapping file // 7. Close output file FILE *ptr_mapping_file; int needs_mapping; int good, good_old, good_new, pass1, pass2, pass3, found, numtries; int min_count, max_count, sum_count, old_cha; double avg_count, goodness1, goodness2, goodness3; int globalsum = 0; long totaltries = 0; int NFLUSHES = 1000; for (page_number=0; page_number<PAGES_MAPPED; page_number++) { needs_mapping=0; sprintf(filename,"PADDR_0x%.12lx.map",paddr_by_page[page_number]); i = access(filename, F_OK); if (i == -1) { // file does not exist printf("DEBUG: Mapping file %s does not exist -- will create file after mapping cache lines\n",filename); needs_mapping = 1; } else { // file exists i = access(filename, R_OK); if (i == -1) { // file exists without read permissions printf("ERROR: Mapping file %s exists, but without read permission\n",filename); exit(1); } else { // file exists with read permissions ptr_mapping_file = fopen(filename,"r"); if (!ptr_mapping_file) { printf("ERROR: Failed to open Mapping File %s, should not happen\n",filename); exit(2); } k = fread(&cha_by_page[page_number][0],(size_t) 32768,(size_t) 1,ptr_mapping_file); if (k != 1) { // incorrect read length printf("ERROR: Read from Mapping File %s, returned the wrong record count %ld expected 1\n",filename,k); exit(3); } else { // correct read length printf("DEBUG: Mapping File read for %s succeeded -- skipping mapping for this page\n",filename); needs_mapping = 0; } } } if (needs_mapping == 1) { // code imported from SystemMirrors/Hikari/MemSuite/InterventionLatency/L3_mapping.c #ifdef VERBOSE printf("DEBUG: here I need to perform the mapping for paddr 0x%.12lx, and then save the file\n",paddr_by_page[page_number]); #endif page_base_index = page_number*262144; // index of element at beginning of current 2MiB page for (line_number=0; line_number<32768; line_number++) { good = 0; good_old = 0; good_new = 0; numtries = 0; #ifdef VERBOSE 
if (line_number%64 == 0) { pagemapentry = get_pagemap_entry(&array[page_base_index+line_number*8]); printf("DEBUG: page_base_index %ld line_number %ld index %ld pagemapentry 0x%lx\n",page_base_index,line_number,page_base_index+line_number*8,pagemapentry); } #endif do { // -------------- Inner Repeat Loop until results pass "goodness" tests -------------- numtries++; if (numtries > 100) { printf("ERROR: No good results for line %d after %d tries\n",line_number,numtries); exit(101); } totaltries++; // 1. read L3 counters before starting test for (tile=0; tile<NUM_CHA_USED; tile++) { msr_num = 0xe00 + 0x10*tile + 0x8 + 1; // counter 1 is the LLC_LOOKUPS.READ event pread(msr_fd[0],&msr_val,sizeof(msr_val),msr_num); cha_counts[0][tile][1][0] = msr_val; // use the array I have already declared for cha counts // printf("DEBUG: page %ld line %ld msr_num 0x%x msr_val %ld cha_counter1 %lu\n", // page_number,line_number,msr_num,msr_val,cha_counts[0][tile][1][0]); } // 2. Access the line many times sum = 0; for (i=0; i<NFLUSHES; i++) { sum += array[page_base_index+line_number*8]; _mm_mfence(); _mm_clflush(&array[page_base_index+line_number*8]); _mm_mfence(); } globalsum += sum; // 3. read L3 counters after loads are done for (tile=0; tile<NUM_CHA_USED; tile++) { msr_num = 0xe00 + 0x10*tile + 0x8 + 1; // counter 1 is the LLC_LOOKUPS.READ event pread(msr_fd[0],&msr_val,sizeof(msr_val),msr_num); cha_counts[0][tile][1][1] = msr_val; // use the array I have already declared for cha counts } #ifdef VERBOSE for (tile=0; tile<NUM_CHA_USED; tile++) { printf("DEBUG: page %ld line %ld cha_counter1_after %lu cha_counter1 before %lu delta %lu\n", page_number,line_number,cha_counts[0][tile][1][1],cha_counts[0][tile][1][0],cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0]); } #endif // CHA counter 1 set to LLC_LOOKUP.READ // // 4. Determine which L3 slice owns the cache line and // 5. 
Save the CHA number in the cha_by_page[page][line] array // first do a rough quantitative checks of the "goodness" of the data // goodness1 = max/NFLUSHES (pass if >95%) // goodness2 = min/NFLUSHES (pass if <20%) // goodness3 = avg/NFLUSHES (pass if <40%) max_count = 0; min_count = 1<<30; sum_count = 0; for (tile=0; tile<NUM_CHA_USED; tile++) { max_count = MAX(max_count, cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0]); min_count = MIN(min_count, cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0]); sum_count += cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0]; } avg_count = (double)(sum_count - max_count) / (double)(NUM_CHA_USED); goodness1 = (double) max_count / (double) NFLUSHES; goodness2 = (double) min_count / (double) NFLUSHES; goodness3 = avg_count / (double) NFLUSHES; // compare the goodness parameters with manually chosen limits & combine into a single pass (good=1) or fail (good=0) pass1 = 0; pass2 = 0; pass3 = 0; if ( goodness1 > 0.95 ) pass1 = 1; if ( goodness2 < 0.20 ) pass2 = 1; if ( goodness3 < 0.40 ) pass3 = 1; good_new = pass1 * pass2 * pass3; #ifdef VERBOSE printf("GOODNESS: line_number %ld max_count %d min_count %d sum_count %d avg_count %f goodness1 %f goodness2 %f goodness3 %f pass123 %d %d %d\n", line_number, max_count, min_count, sum_count, avg_count, goodness1, goodness2, goodness3, pass1, pass2, pass3); if (good_new == 0) printf("DEBUG: one or more of the sanity checks failed for line=%ld: %d %d %d goodness values %f %f %f\n", line_number,pass1,pass2,pass3,goodness1,goodness2,goodness3); #endif // test to see if more than one CHA reports > 0.95*NFLUSHES events found = 0; old_cha = -1; int min_counts = (NFLUSHES*19)/20; for (tile=0; tile<NUM_CHA_USED; tile++) { if (cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0] >= min_counts) { old_cha = cha_by_page[page_number][line_number]; cha_by_page[page_number][line_number] = tile; found++; #ifdef VERBOSE if (found > 1) { printf("WARNING: Multiple (%d) CHAs found using counter 1 for cache 
line %ld, index %ld: old_cha %d new_cha %d\n",found,line_number,page_base_index+line_number*8,old_cha,cha_by_page[page_number][line_number]); } #endif } } if (found == 0) { good_old = 0; #ifdef VERBOSE printf("WARNING: no CHA entry has been found for line %ld!\n",line_number); printf("DEBUG dump for no CHA found\n"); for (tile=0; tile<NUM_CHA_USED; tile++) { printf("CHA %d LLC_LOOKUP.READ delta %ld\n",tile,(cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0])); } #endif } else if (found == 1) { good_old = 1; } else { good_old = 0; #ifdef VERBOSE printf("DEBUG dump for multiple CHAs found\n"); for (tile=0; tile<NUM_CHA_USED; tile++) { printf("CHA %d LLC_LOOKUP.READ delta %ld\n",tile,(cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0])); } #endif } good = good_new * good_old; // trigger a repeat if either the old or new tests failed } while (good == 0); #if 0 // 6. save the cache line number in the appropriate the cbo_indices[cbo][#lines] array // 7. increment the corresponding cbo_num_lines[cbo] array entry this_cbo = cha_by_page[page_number][line_number]; if (this_cbo == -1) { printf("ERROR: cha_by_page[%ld][%ld] has not been set!\n",page_number,line_number); exit(80); } cbo_indices[this_cbo][cbo_num_lines[this_cbo]] = line_number; cbo_num_lines[this_cbo]++; #endif } // I have not overwritten the filename, but I will rebuild it here just in case I add something stupid in between.... 
sprintf(filename,"PADDR_0x%.12lx.map",paddr_by_page[page_number]); ptr_mapping_file = fopen(filename,"w"); if (!ptr_mapping_file) { printf("ERROR: Failed to open Mapping File %s for writing -- aborting\n",filename); exit(4); } // first try -- write one record of 32768 bytes rc64 = fwrite(&cha_by_page[page_number][0],(size_t) 32768, (size_t) 1, ptr_mapping_file); if (rc64 != 1) { printf("ERROR: failed to write one 32768 Byte record to %s -- return code %ld\n",filename,rc64); exit(5); } else { printf("SUCCESS: wrote mapping file %s\n",filename); } } } printf("DUMMY: globalsum %d\n",globalsum); printf("VERBOSE: L3 Mapping Complete in %ld tries for %d cache lines ratio %f\n",totaltries,32768*PAGES_MAPPED,(double)totaltries/(double)(32768*PAGES_MAPPED)); #ifndef MYHUGEPAGE_1GB // TODO!! Fix this so that it is not hard-coded for the 24p case!! // // now that the mapping is complete, I can add up the number of lines mapped to each CHA // be careful to count only the lines that are used, not the full 24MiB // 3 million elements is ~11.44 2MiB pages, so count all lines in each of the first 11 pages // If I did the arithmetic correctly, the 3 million elements uses 931328 Bytes of the 12th 2MiB page // which is 116416 elements or 14552 cache lines. 
// first accumulate the first 11 full pages for (page_number=0; page_number<11; page_number++) { for (line_number=0; line_number<32768; line_number++) { lines_by_cha[cha_by_page[page_number][line_number]]++; } } // then accumulate the partial 12th page for (line_number=0; line_number<14552; line_number++) { lines_by_cha[cha_by_page[11][line_number]]++; } // output long lines_accounted = 0; printf("LINES_BY_CHA"); for (i=0; i<NUM_CHA_USED; i++) { printf(" %ld",lines_by_cha[i]); lines_accounted += lines_by_cha[i]; } printf("\n"); printf("ACCCOUNTED FOR %ld lines expected %ld lines\n",lines_accounted,l2_contained_size/8); #endif // ============== END L3 MAPPING TESTS ============================== #endif // NEW LOOP STRUCTURE -- MCCALPIN // I want to run the test at various offsets within each of the 1GiB // pages allocated. // Start with repeating the test for the beginning of each 1GiB page. // I can simply add 134,217,728 to the jstart and jend values to // move to the next 1GiB page printf("DEBUG: jstart[0] = %ld\n",jstart[0]); long current_page; for (current_page=0; current_page < NUMPAGES; current_page++) { if (current_page > 0) { for (i=0; i<CORES_USED; i++) { jstart[i] += 134217728; jend[i] += 134217728; } printf("DEBUG: jstart[0] = %ld\n",jstart[0]); } // For the snoop filter tests, I want to repeatedly read // some number of arrays per core with an aggregate footprint // close to 1MiB per core // 24 cores = 24 MiB = 3 Mi elements, so // using an array length of 3 million should be just about right 95.3674% // l2_contained_size = arraylen; // only use if I want a large memory-contained version inner_repetitions = 1000; int stride = 2; // used in thread binding checks: use 2 for Dell nodes, 1 for Intel nodes // try to pre-load the working data into the L2 caches before the initial performance counter reads sum = 0.0; #pragma omp parallel for reduction(+:sum) for (j=jstart[0]; j<jstart[0]+l2_contained_size; j++) sum += array[j]; // While I am at it, I need to 
warm up the cores using AVX-512 code to get them to full frequency // This may take up to 100 microseconds, or maybe 400,000 AVX512 instructions per thread. // This is a pain because I can't trust the compiler to generate AVX512 code at any given time, // so I have to resort to inline assembly. tsc_start = rdtsc(); #pragma omp parallel for for (i=0; i<CORES_USED; i++) { for (j=0; j<10*1000*1000; j++) { __asm__ __volatile__ ( "vpaddq %%zmm0, %%zmm1, %%zmm2\n\t" "vpaddq %%zmm1, %%zmm2, %%zmm3\n\t" "vpaddq %%zmm2, %%zmm3, %%zmm0\n\t" "vpaddq %%zmm3, %%zmm0, %%zmm1" : : : "zmm0","zmm1","zmm2","zmm3"); } } tsc_end = rdtsc(); printf("DEBUG: WARMUP LOOP took %lu TSC cycles\n",tsc_end - tsc_start); // =================== BEGINNING OF PERFORMANCE COUNTER READS BEFORE KERNEL TESTING ============================== #ifdef IMC_COUNTS // read the initial values of the IMC counters for (socket=0; socket<NUM_SOCKETS; socket++) { bus = IMC_BUS_Socket[socket]; for (channel=0; channel<NUM_IMC_CHANNELS; channel++) { device = IMC_Device_Channel[channel]; function = IMC_Function_Channel[channel]; for (counter=0; counter<NUM_IMC_COUNTERS; counter++) { offset = IMC_PmonCtr_Offset[counter]; index = PCI_cfg_index(bus, device, function, offset); // read each counter twice to identify rare cases where the low-order bits // overflow and increment the high-order bits between the two reads. // Use the second set of values unless (( high_1 != high_0 ) && ( low_1 > low_0)) // (this indicates that the counter rolled between the 3rd and 4th reads). 
low_0 = mmconfig_ptr[index]; high_0 = mmconfig_ptr[index+1]; low_1 = mmconfig_ptr[index]; high_1 = mmconfig_ptr[index+1]; if ( (high_1 != high_0) && (low_1 > low_0) ) { count = ((uint64_t) high_0) << 32 | (uint64_t) low_0; } else { count = ((uint64_t) high_1) << 32 | (uint64_t) low_1; } imc_counts[socket][channel][counter][0] = count; } } } #if 0 // for debugging only: print initial values of IMC counts for (socket=0; socket<NUM_SOCKETS; socket++) { for (channel=0; channel<NUM_IMC_CHANNELS; channel++) { fprintf(stdout,"%d %d",socket,channel); for (counter=0; counter<NUM_IMC_COUNTERS; counter++) { fprintf(stdout," %ld",imc_counts[socket][channel][counter][0]); } fprintf(stdout,"\n"); } } #endif #endif #ifdef CHA_COUNTS // read the initial values of the CHA mesh counters for (pkg=0; pkg<2; pkg++) { for (tile=0; tile<NUM_CHA_USED; tile++) { for (counter=0; counter<4; counter++) { msr_num = 0xe00 + 0x10*tile + 0x8 + counter; pread(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num); cha_counts[pkg][tile][counter][0] = msr_val; } } } #if 0 // for debugging only: print initial values of CHA counters for (pkg=0; pkg<2; pkg++) { for (tile=0; tile<NUM_CHA_USED; tile++) { for (counter=0; counter<4; counter++) { printf("Package %d, tile %d, counter %d, value %lu\n",pkg,tile,counter,cha_counts[pkg][tile][counter][0]); } } } #endif #endif // ------ read programmable core counters before test loop ------ #pragma omp parallel for private(counter) for (i=0; i<CORES_USED; i++) { #ifdef CHECK_THREAD_LOCATION if (get_core_number() != stride*i) { printf("ERROR: thread %d is in the wrong place %d\n",i,get_core_number()); } #endif for (counter=0; counter<4; counter++) { core_counters[i][counter][0] = rdpmc(counter); } } tsc_start = rdtsc(); // ===================================== CODE TO TEST BEGINS HERE ======================================================================= #ifdef SIMPLE_OMP_LOOP for (k=0; k<inner_repetitions; k++) { #pragma omp parallel for, reduction(+:sum) for 
(j=jstart[0]; j<jstart[0]+l2_contained_size; j++) { sum += array[j]; } } #ifdef CHECK_START_STOP printf("CHECK_START_STOP: SIMPLE_OMP_LOOP: start %ld end %ld vl %ld\n",jstart[0],jstart[0]+l2_contained_size,l2_contained_size); #endif #else #pragma omp parallel for private(j,k,iters,private_sum) for (i=0; i<CORES_USED; i++) { iters = 0; partial_sums[i] = 0.0; fixed_counters[i][0][0] = rdpmc_instructions(); fixed_counters[i][1][0] = rdpmc_actual_cycles(); fixed_counters[i][2][0] = rdpmc_reference_cycles(); fixed_counters[i][3][0] = rdtsc(); for (k=0; k<inner_repetitions; k++) { private_sum = ssum(&array[jstart[i]],vl[i]); partial_sums[i] += private_sum; iters++; } fixed_counters[i][0][1] = rdpmc_instructions(); fixed_counters[i][1][1] = rdpmc_actual_cycles(); fixed_counters[i][2][1] = rdpmc_reference_cycles(); fixed_counters[i][3][1] = rdtsc(); iteration_counts[i] = iters; } #ifdef CHECK_START_STOP for (i=0; i<CORES_USED; i++) { printf("CHECK_START_STOP: PER-THREAD-INDICES: thread %d jstart %ld jstop %ld vl %ld\n",i,jstart[i],jend[i],vl[i]); } #endif #endif // ===================================== END OF CODE UNDER TEST ======================================================== tsc_end = rdtsc(); // use the partial sums so the optimizer does not remove the actual code under test for (i=0; i<CORES_USED; i++) { sum += partial_sums[i]; } #pragma omp parallel for private(counter) for (i=0; i<CORES_USED; i++) { #ifdef CHECK_THREAD_LOCATION if (get_core_number() != stride*i) { printf("ERROR: thread %d is in the wrong place %d\n",i,get_core_number()); } #endif for (counter=0; counter<4; counter++) { core_counters[i][counter][1] = rdpmc(counter); #ifdef CHECK_SPECIAL_VALUES if (core_counters[i][counter][1] == SPECIAL_VALUE) { printf("BADNESS: SPECIAL_VALUE value returned on thread %d counter %d\n",i,counter); } #endif #ifdef RETRIES // if the counter returns zero, read it one more time.... 
if (core_counters[i][counter][1] == SPECIAL_VALUE) { core_counters[i][counter][1] = rdpmc(counter); #pragma omp atomic update retries++; } #endif } } #ifdef CHECK_SPECIAL_VALUES for (i=0; i<CORES_USED; i++) { for (counter=0; counter<4; counter++) { if (core_counters[i][counter][0] == SPECIAL_VALUE) { printf("DEBUG: SPECIAL_VALUE found after loop in start count on thread %d counter %d\n",i,counter); zeros++; } if (core_counters[i][counter][1] == SPECIAL_VALUE) { printf("DEBUG: SPECIAL_VALUE found after loop in end count on thread %d counter %d\n",i,counter); zeros++; } } } #endif #ifdef CHA_COUNTS // read the final values of the CHA mesh counters for (pkg=0; pkg<2; pkg++) { for (tile=0; tile<NUM_CHA_USED; tile++) { for (counter=0; counter<4; counter++) { msr_num = 0xe00 + 0x10*tile + 0x8 + counter; pread(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num); cha_counts[pkg][tile][counter][1] = msr_val; } } } #endif #ifdef IMC_COUNTS for (socket=0; socket<NUM_SOCKETS; socket++) { bus = IMC_BUS_Socket[socket]; for (channel=0; channel<NUM_IMC_CHANNELS; channel++) { device = IMC_Device_Channel[channel]; function = IMC_Function_Channel[channel]; for (counter=0; counter<NUM_IMC_COUNTERS; counter++) { offset = IMC_PmonCtr_Offset[counter]; index = PCI_cfg_index(bus, device, function, offset); // read each counter twice to identify rare cases where the low-order bits // overflow and increment the high-order bits between the two reads. // Use the second set of values unless (( high_1 != high_0 ) && ( low_1 > low_0)) // (this indicates that the counter rolled between the 3rd and 4th reads). 
low_0 = mmconfig_ptr[index]; high_0 = mmconfig_ptr[index+1]; low_1 = mmconfig_ptr[index]; high_1 = mmconfig_ptr[index+1]; if ( (high_1 != high_0) && (low_1 > low_0) ) { count = ((uint64_t) high_0) << 32 | (uint64_t) low_0; } else { count = ((uint64_t) high_1) << 32 | (uint64_t) low_1; } imc_counts[socket][channel][counter][1] = count; } } } #endif // ================================== END OF PERFORMANCE COUNTER READS AFTER TEST ============================================== t0 = 0.0; t1 = (double) (tsc_end - tsc_start) / TSC_GHz / 1.0e9; printf("Instrumented code required %f seconds to execute\n",t1-t0); bandwidth = sizeof(double)*(double)l2_contained_size*(double)inner_repetitions / (t1-t0) / 1e9; printf("Bandwidth %f GB/s\n",bandwidth); printf("Bandwidth per core %f GB/s\n",bandwidth/(double)CORES_USED); printf("Approx Bytes/cycle per core %f\n",bandwidth/(double)CORES_USED/2.0); expected = (double)l2_contained_size * (double)(inner_repetitions) / (double)CORES_USED; avg_cycles = (double)(tsc_end - tsc_start) / expected; printf("Average TSC cycles per element %f\n",avg_cycles); // clear the arrays for the package-level sums for (pkg=0; pkg<2; pkg++) { for (counter=0; counter<4; counter++) { // no point in summing the cycle counts, so exclude counter 4 core_pkg_sums[pkg][counter] = 0; fixed_pkg_sums[pkg][counter] = 0; imc_pkg_sums[pkg][counter] = 0; cha_pkg_sums[pkg][counter] = 0; } } // compute core package sums and optional print for (i=0; i<CORES_USED; i++) { for (counter=0; counter<4; counter++) { delta = corrected_pmc_delta(fixed_counters[i][counter][1],fixed_counters[i][counter][0],fixed_pmc_width); fixed_pkg_sums[0][counter] += delta; } for (counter=0; counter<4; counter++) { #ifdef CHECK_SPECIAL_VALUES if (core_counters[i][counter][0] == SPECIAL_VALUE) { printf("DEBUG: SPECIAL_VALUE found in post-processing in start count on thread %d counter %d\n",i,counter); } if (core_counters[i][counter][1] == SPECIAL_VALUE) { printf("DEBUG: SPECIAL_VALUE found in 
post-processing in end count on thread %d counter %d\n",i,counter); } #endif delta = corrected_pmc_delta(core_counters[i][counter][1],core_counters[i][counter][0],core_pmc_width); #ifdef VERBOSE printf("CORE %d counter %d end %ld start %ld delta %ld\n",i,counter,core_counters[i][counter][1],core_counters[i][counter][0],delta); #endif core_pkg_sums[0][counter] += delta; } } if (dumpall == 1) { report = 0; for (i=0; i<CORES_USED; i++) { for (counter=0; counter<4; counter++) { delta = corrected_pmc_delta(core_counters[i][counter][1],core_counters[i][counter][0],core_pmc_width); printf("CORE %d counter %d end %ld start %ld delta %ld\n",i,counter,core_counters[i][counter][1],core_counters[i][counter][0],delta); } } } report = 1; dumpall = 0; #ifdef CHA_COUNTS // print out the differences and compute sums of differences for (pkg=0; pkg<2; pkg++) { for (tile=0; tile<NUM_CHA_USED; tile++) { for (counter=0; counter<4; counter++) { delta = corrected_pmc_delta(cha_counts[pkg][tile][counter][1],cha_counts[pkg][tile][counter][0],uncore_pmc_width); #ifdef VERBOSE printf("CHA pkg %d tile %d counter %d delta %ld\n",pkg,tile,counter,delta); #endif cha_pkg_sums[pkg][counter] += delta; } } } #endif #ifdef IMC_COUNTS for (pkg=0; pkg<2; pkg++) { for (channel=0; channel<NUM_IMC_CHANNELS; channel++) { for (counter=0; counter<NUM_IMC_COUNTERS; counter++) { delta = corrected_pmc_delta(imc_counts[pkg][channel][counter][1],imc_counts[pkg][channel][counter][0],uncore_pmc_width); #ifdef VERBOSE printf("IMC pkg %d channel %d counter %d delta %ld\n",pkg,channel,counter,delta); #endif imc_pkg_sums[pkg][counter] += delta; } } } #endif int max_display_pkg = 1; for (pkg=0; pkg<max_display_pkg; pkg++) { for (counter=0; counter<4; counter++) { printf("CORE_PKG_SUMS pkg %d counter %d sum_delta %ld\n",pkg,counter,core_pkg_sums[pkg][counter]); } } for (pkg=0; pkg<max_display_pkg; pkg++) { for (counter=0; counter<4; counter++) { printf("FIXED_PKG_SUMS pkg %d counter %d sum_delta 
%ld\n",pkg,counter,fixed_pkg_sums[pkg][counter]); } } // the fixed-function counters are measured inside the OpenMP loop, so they should not be contaminated by // spin-waiting.... // Compute per-core metrics here -- note that the fixed-function counter set is (Instr, CoreCyc, RefCyc, TSC) // Utilization = RefCyc/TSC (fixed2/fixed3) // AvgGHz_unhalted = CoreCyc/RefCyc * 2.1 (fixed1/fixed2 * 2.1) // AvgGHz_wall = CoreCyc/TSC * 2.1 (fixed1/fixed3 * 2.1) // IPC = Instr/CoreCyc (fixed0/fixed1) long delta_inst, delta_core, delta_ref, delta_tsc; double utilization, avg_ghz, ipc; printf("CORE_UTILIZATION "); for (i=0; i<CORES_USED; i++) { delta_ref = corrected_pmc_delta(fixed_counters[i][2][1],fixed_counters[i][2][0],fixed_pmc_width); delta_tsc = corrected_pmc_delta(fixed_counters[i][3][1],fixed_counters[i][3][0],fixed_pmc_width); utilization = (double)delta_ref / (double)delta_tsc; printf("%6.4f ",utilization); } printf("\n"); float TSC_GHz; TSC_GHz = get_TSC_frequency()/1.0e9; printf("CORE_GHZ "); for (i=0; i<CORES_USED; i++) { delta_core = corrected_pmc_delta(fixed_counters[i][1][1],fixed_counters[i][1][0],fixed_pmc_width); delta_ref = corrected_pmc_delta(fixed_counters[i][2][1],fixed_counters[i][2][0],fixed_pmc_width); avg_ghz = (double)delta_core / (double)delta_ref * TSC_GHz; printf("%6.4f ",avg_ghz); } printf("\n"); printf("CORE_IPC "); for (i=0; i<CORES_USED; i++) { delta_inst = corrected_pmc_delta(fixed_counters[i][0][1],fixed_counters[i][0][0],fixed_pmc_width); delta_core = corrected_pmc_delta(fixed_counters[i][1][1],fixed_counters[i][1][0],fixed_pmc_width); ipc = (double)delta_inst / (double)delta_core; printf("%6.4f ",ipc); } printf("\n"); printf("THREAD_EXECUTION_TIME "); for (i=0; i<CORES_USED; i++) { delta_tsc = corrected_pmc_delta(fixed_counters[i][3][1],fixed_counters[i][3][0],fixed_pmc_width); t0 = (double)delta_tsc / (TSC_GHz*1.0e9); printf("%f ",t0); } printf("\n"); #ifdef CHA_COUNTS for (pkg=0; pkg<max_display_pkg; pkg++) { for (counter=0; counter<4; 
counter++) { printf("CHA_PKG_SUMS pkg %d counter %d sum_delta %ld\n",pkg,counter,cha_pkg_sums[pkg][counter]); } } #endif #ifdef IMC_COUNTS for (pkg=0; pkg<max_display_pkg; pkg++) { for (counter=0; counter<4; counter++) { // no point in summing the cycle counts, so exclude counter 4 printf("IMC_PKG_SUMS pkg %d counter %d sum_delta %ld\n",pkg,counter,imc_pkg_sums[pkg][counter]); } } #endif // for the Snoop Filter set // expected = expected number of cache lines loaded from L2 // sf_evict_rate = #evictions / expected number of loads expected = 8.0/64.0* (double)l2_contained_size * (double) inner_repetitions; sf_evict_rate = (double) cha_pkg_sums[0][0] / expected; printf("SnoopFilterEvictionRate %f\n",sf_evict_rate); expected = (double)l2_contained_size * (double) (inner_repetitions+1); // adjusted for pre-load of data printf("Dummy Sum value is %f, expected value %f\n",sum,expected); expected = (double)l2_contained_size * (double) inner_repetitions; printf("Expected number of cache lines loaded from L2 %f\n",expected/8.0); printf("Number of performance counter wraprounds detected %d\n",nwraps); #ifdef RETRIES printf("Number of core performance counter reads retried %d\n",retries); #endif printf("Number of zero values found in the inner loop %d\n",zeros); // printf("Expected Number of Loads for AVX2 code %ld\n",arraylen/4); // printf("Expected Number of Cache Lines loaded %ld\n",arraylen/8); for (i=0; i<CORES_USED; i++) { if (iteration_counts[i] != inner_repetitions) { printf("ERROR: thread %d iteration_counts %ld expected %ld\n",i,iteration_counts[i],inner_repetitions); } } // per-core performance counter values for (counter=0; counter<4; counter++) { printf("CORE_counter %d ",counter); for (i=0; i<CORES_USED; i++) { delta = corrected_pmc_delta(core_counters[i][counter][1],core_counters[i][counter][0],core_pmc_width); printf("%ld ",delta); } printf("\n"); } // per-CHA performance counter values -- socket 0 only for (counter=0; counter<4; counter++) { 
printf("CHA_counter %d ",counter); for (i=0; i<NUM_CHA_USED; i++) { delta = corrected_pmc_delta(cha_counts[0][i][counter][1],cha_counts[0][i][counter][0],uncore_pmc_width); printf("%ld ",delta); } printf("\n"); } printf("Double-check physical address of first element used in array\n"); pagemapentry = get_pagemap_entry(&array[jstart[0]]); printf(" array[%ld] va 0x%.16lx pa 0x%.16lx\n",jstart[0],&array[jstart[0]],pagemapentry); } }
GB_binop__eq_bool.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this translation unit is machine-generated.  Any real change
// belongs in the Generator/ template it was produced from, not here; edits
// below are comments only.  The function bodies are #include'd template
// files, specialized for this operator via the GB_* macros defined below.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__eq_bool)
// A.*B function (eWiseMult):       GB (_AemultB_08__eq_bool)
// A.*B function (eWiseMult):       GB (_AemultB_02__eq_bool)
// A.*B function (eWiseMult):       GB (_AemultB_04__eq_bool)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__eq_bool)
// A*D function (colscale):         GB (_AxD__eq_bool)
// D*A function (rowscale):         GB (_DxB__eq_bool)
// C+=B function (dense accum):     GB (_Cdense_accumB__eq_bool)
// C+=b function (dense accum):     GB (_Cdense_accumb__eq_bool)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__eq_bool)
// C=scalar+B                       GB (_bind1st__eq_bool)
// C=scalar+B'                      GB (_bind1st_tran__eq_bool)
// C=A+scalar                       GB (_bind2nd__eq_bool)
// C=A'+scalar                      GB (_bind2nd_tran__eq_bool)

// C type:   bool
// A type:   bool
// B,b type: bool
// BinaryOp: cij = (aij == bij)

#define GB_ATYPE \
    bool

#define GB_BTYPE \
    bool

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// (GBX is declared in GB.h; presumably it reads entry pA, honoring the
// iso-valued flag — confirm against the GB.h definition)
#define GB_GETA(aij,Ax,pA,A_iso) \
    bool aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    bool bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (GxB_NO_* come from GB_control.h; any one of them compiles every kernel
// below down to an immediate GrB_NO_VALUE return)
#define GB_DISABLE \
    (GxB_NO_EQ || GxB_NO_BOOL || GxB_NO_EQ_BOOL)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (EQ is none of these, so this variant is compiled out for this operator.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__eq_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__eq_bool)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__eq_bool)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the template block above already returns;
    // harmless generated redundancy.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__eq_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__eq_bool)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_bool)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__eq_bool)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__eq_bool)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP

        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }

    #else

        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (GB_BINOP_FLIP is 0 above: EQ is commutative, so flipxy is ignored.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"

    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__eq_bool)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_bool)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__eq_bool)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b if B is bitmap
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool x = (*((bool *) x_input)) ;
    bool *Bx = (bool *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from a bitmap B (GBB is declared in GB.h)
        if (!GBB (Bb, p)) continue ;
        bool bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__eq_bool)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    bool *Ax = (bool *) Ax_input ;
    bool y = (*((bool *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        bool aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    bool aij = GBX (Ax, pA, false) ;            \
    Cx [pC] = (x == aij) ;                      \
}

GrB_Info GB (_bind1st_tran__eq_bool)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        bool
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool x = (*((const bool *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        bool
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    bool aij = GBX (Ax, pA, false) ;            \
    Cx [pC] = (aij == y) ;                      \
}

GrB_Info GB (_bind2nd_tran__eq_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool y = (*((const bool *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
bitmap.c
#include <assert.h> #include <stdatomic.h> #include <stdbool.h> #include <stdlib.h> #include "bitmap.h" #include "types.h" bitmap *bitmap_new(u32 capacity) { atomic_uint *map = calloc((capacity / 32 + 1), sizeof(atomic_uint)); bitmap *ret = malloc(sizeof(bitmap)); *ret = (bitmap){capacity, map}; return ret; } void bitmap_set(bitmap *b, u32 n) { u32 index = n / 32; u32 offset = n % 32; b->map[index] |= 1 << offset; } bool bitmap_test(bitmap *b, u32 n) { u32 index = n / 32; u32 offset = n % 32; return !!((b->map[index] >> offset) & 1); } bool bitmap_test_set(bitmap *b, u32 n) { u32 index = n / 32; u32 offset = n % 32; bool prev = !!((b->map[index] >> offset) & 1); // Guard to prevent unnecessary contention if (!prev) { prev = !!((atomic_fetch_or(&b->map[index], 1 << offset) >> offset) & 1); } return prev; } void bitmap_clear(bitmap *b) { #pragma omp parallel for for (u32 i = 0; i < b->capacity / 32 + 1; i += 1) { b->map[i] = 0; } } void bitmap_merge(bitmap *self, bitmap *other) { assert(self->capacity == other->capacity); #pragma omp parallel for for (u32 i = 0; i < self->capacity / 32 + 1; i += 1) { self->map[i] |= other->map[i]; } } void bitmap_free(bitmap *b) { free(b->map); free(b); } u32 bitmap2array(bitmap *b, u32 *array) { u32 n = 0; for (u32 i = 0; i < b->capacity; i += 1) { if (bitmap_test(b, i)) { array[n++] = i; } } return n; } void array2bitmap(u32 *array, bitmap *b, u32 n) { #pragma omp parallel for for (u32 i = 0; i < n; i += 1) { bitmap_set(b, array[i]); } }
ccv_bbf.c
#include "ccv.h" #include "ccv_internal.h" #include <sys/time.h> #ifdef HAVE_GSL #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #endif #ifdef USE_OPENMP #include <omp.h> #endif const ccv_bbf_param_t ccv_bbf_default_params = { .interval = 5, .min_neighbors = 2, .accurate = 1, .flags = 0, .size = { 24, 24, }, }; #define _ccv_width_padding(x) (((x) + 3) & -4) static inline int _ccv_run_bbf_feature(ccv_bbf_feature_t* feature, int* step, unsigned char** u8) { #define pf_at(i) (*(u8[feature->pz[i]] + feature->px[i] + feature->py[i] * step[feature->pz[i]])) #define nf_at(i) (*(u8[feature->nz[i]] + feature->nx[i] + feature->ny[i] * step[feature->nz[i]])) unsigned char pmin = pf_at(0), nmax = nf_at(0); /* check if every point in P > every point in N, and take a shortcut */ if (pmin <= nmax) return 0; int i; for (i = 1; i < feature->size; i++) { if (feature->pz[i] >= 0) { int p = pf_at(i); if (p < pmin) { if (p <= nmax) return 0; pmin = p; } } if (feature->nz[i] >= 0) { int n = nf_at(i); if (n > nmax) { if (pmin <= n) return 0; nmax = n; } } } #undef pf_at #undef nf_at return 1; } static int _ccv_read_bbf_stage_classifier(const char* file, ccv_bbf_stage_classifier_t* classifier) { FILE* r = fopen(file, "r"); if (r == 0) return -1; int stat = 0; stat |= fscanf(r, "%d", &classifier->count); union { float fl; int i; } fli; stat |= fscanf(r, "%d", &fli.i); classifier->threshold = fli.fl; classifier->feature = (ccv_bbf_feature_t*)ccmalloc(classifier->count * sizeof(ccv_bbf_feature_t)); classifier->alpha = (float*)ccmalloc(classifier->count * 2 * sizeof(float)); int i, j; for (i = 0; i < classifier->count; i++) { stat |= fscanf(r, "%d", &classifier->feature[i].size); for (j = 0; j < classifier->feature[i].size; j++) { stat |= fscanf(r, "%d %d %d", &classifier->feature[i].px[j], &classifier->feature[i].py[j], &classifier->feature[i].pz[j]); stat |= fscanf(r, "%d %d %d", &classifier->feature[i].nx[j], &classifier->feature[i].ny[j], &classifier->feature[i].nz[j]); } union { 
float fl; int i; } flia, flib;
		stat |= fscanf(r, "%d %d", &flia.i, &flib.i);
		/* alpha[2k] / alpha[2k+1] are the weak-classifier votes for feature k
		 * evaluating to 0 / 1 respectively */
		classifier->alpha[i * 2] = flia.fl;
		classifier->alpha[i * 2 + 1] = flib.fl;
	}
	fclose(r);
	return 0;
}

#ifdef HAVE_GSL

/* wall-clock timestamp in microseconds, used only for progress reporting */
static unsigned int _ccv_bbf_time_measure()
{
	struct timeval tv;
	gettimeofday(&tv, 0);
	return tv.tv_sec * 1000000 + tv.tv_usec;
}

#define less_than(a, b, aux) ((a) < (b))
CCV_IMPLEMENT_QSORT(_ccv_sort_32f, float, less_than)
#undef less_than

/* Run one stage classifier over packed positive/negative example buffers.
 * Each example is a contiguous 3-plane pyramid (full, half, quarter resolution,
 * rows padded to a multiple of 4 bytes). Writes the per-example weighted vote
 * sums into peval[] and neval[]. Callers may pass negnum == 0 / neval == 0 to
 * evaluate only positives (see _ccv_prune_positive_data). */
static void _ccv_bbf_eval_data(ccv_bbf_stage_classifier_t* classifier, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, float* peval, float* neval)
{
	int i, j;
	int steps[] = { _ccv_width_padding(size.width), _ccv_width_padding(size.width >> 1), _ccv_width_padding(size.width >> 2) };
	int isizs0 = steps[0] * size.height;                       /* byte size of plane 0 */
	int isizs01 = isizs0 + steps[1] * (size.height >> 1);      /* offset of plane 2 */
	for (i = 0; i < posnum; i++)
	{
		unsigned char* u8[] = { posdata[i], posdata[i] + isizs0, posdata[i] + isizs01 };
		float sum = 0;
		float* alpha = classifier->alpha;
		ccv_bbf_feature_t* feature = classifier->feature;
		/* feature result (0/1) selects which of the two alphas to add */
		for (j = 0; j < classifier->count; ++j, alpha += 2, ++feature)
			sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
		peval[i] = sum;
	}
	for (i = 0; i < negnum; i++)
	{
		unsigned char* u8[] = { negdata[i], negdata[i] + isizs0, negdata[i] + isizs01 };
		float sum = 0;
		float* alpha = classifier->alpha;
		ccv_bbf_feature_t* feature = classifier->feature;
		for (j = 0; j < classifier->count; ++j, alpha += 2, ++feature)
			sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
		neval[i] = sum;
	}
}

/* Drop positive examples already rejected by the existing cascade stages.
 * Survivors are compacted to the front of posdata[] (rejected buffers are
 * freed). Returns the number of remaining positives. */
static int _ccv_prune_positive_data(ccv_bbf_classifier_cascade_t* cascade, unsigned char** posdata, int posnum, ccv_size_t size)
{
	float* peval = (float*)ccmalloc(posnum * sizeof(float));
	int i, j, k, rpos = posnum;
	for (i = 0; i < cascade->count; i++)
	{
		_ccv_bbf_eval_data(cascade->stage_classifier + i, posdata, rpos, 0, 0, size, peval, 0);
		k = 0;
		for (j = 0; j < rpos; j++)
			if (peval[j] >= cascade->stage_classifier[i].threshold)
			{
				posdata[k] =
posdata[j]; ++k; } else { ccfree(posdata[j]); } rpos = k; } ccfree(peval); return rpos; } static int _ccv_prepare_background_data(ccv_bbf_classifier_cascade_t* cascade, char** bgfiles, int bgnum, unsigned char** negdata, int negnum) { int t, i, j, k, q; int negperbg; int negtotal = 0; int steps[] = { _ccv_width_padding(cascade->size.width), _ccv_width_padding(cascade->size.width >> 1), _ccv_width_padding(cascade->size.width >> 2) }; int isizs0 = steps[0] * cascade->size.height; int isizs1 = steps[1] * (cascade->size.height >> 1); int isizs2 = steps[2] * (cascade->size.height >> 2); int* idcheck = (int*)ccmalloc(negnum * sizeof(int)); gsl_rng_env_setup(); gsl_rng* rng = gsl_rng_alloc(gsl_rng_default); gsl_rng_set(rng, (unsigned long int)idcheck); ccv_size_t imgsz = cascade->size; int rneg = negtotal; for (t = 0; negtotal < negnum; t++) { PRINT(CCV_CLI_INFO, "preparing negative data ... 0%%"); for (i = 0; i < bgnum; i++) { negperbg = (t < 2) ? (negnum - negtotal) / (bgnum - i) + 1 : negnum - negtotal; ccv_dense_matrix_t* image = 0; ccv_read(bgfiles[i], &image, CCV_IO_GRAY | CCV_IO_ANY_FILE); assert((image->type & CCV_C1) && (image->type & CCV_8U)); if (image == 0) { PRINT(CCV_CLI_ERROR, "\n%s file corrupted\n", bgfiles[i]); continue; } if (t % 2 != 0) ccv_flip(image, 0, 0, CCV_FLIP_X); if (t % 4 >= 2) ccv_flip(image, 0, 0, CCV_FLIP_Y); ccv_bbf_param_t params = { .interval = 3, .min_neighbors = 0, .accurate = 1, .flags = 0, .size = cascade->size }; ccv_array_t* detected = ccv_bbf_detect_objects(image, &cascade, 1, params); memset(idcheck, 0, ccv_min(detected->rnum, negperbg) * sizeof(int)); for (j = 0; j < ccv_min(detected->rnum, negperbg); j++) { int r = gsl_rng_uniform_int(rng, detected->rnum); int flag = 1; ccv_rect_t* rect = (ccv_rect_t*)ccv_array_get(detected, r); while (flag) { flag = 0; for (k = 0; k < j; k++) if (r == idcheck[k]) { flag = 1; r = gsl_rng_uniform_int(rng, detected->rnum); break; } rect = (ccv_rect_t*)ccv_array_get(detected, r); if ((rect->x < 0) 
|| (rect->y < 0) || (rect->width + rect->x > image->cols) || (rect->height + rect->y > image->rows)) { flag = 1; r = gsl_rng_uniform_int(rng, detected->rnum); } } idcheck[j] = r; ccv_dense_matrix_t* temp = 0; ccv_dense_matrix_t* imgs0 = 0; ccv_dense_matrix_t* imgs1 = 0; ccv_dense_matrix_t* imgs2 = 0; ccv_slice(image, (ccv_matrix_t**)&temp, 0, rect->y, rect->x, rect->height, rect->width); ccv_resample(temp, &imgs0, 0, imgsz.height, imgsz.width, CCV_INTER_AREA); assert(imgs0->step == steps[0]); ccv_matrix_free(temp); ccv_sample_down(imgs0, &imgs1, 0, 0, 0); assert(imgs1->step == steps[1]); ccv_sample_down(imgs1, &imgs2, 0, 0, 0); assert(imgs2->step == steps[2]); negdata[negtotal] = (unsigned char*)ccmalloc(isizs0 + isizs1 + isizs2); unsigned char* u8s0 = negdata[negtotal]; unsigned char* u8s1 = negdata[negtotal] + isizs0; unsigned char* u8s2 = negdata[negtotal] + isizs0 + isizs1; unsigned char* u8[] = { u8s0, u8s1, u8s2 }; memcpy(u8s0, imgs0->data.u8, imgs0->rows * imgs0->step); ccv_matrix_free(imgs0); memcpy(u8s1, imgs1->data.u8, imgs1->rows * imgs1->step); ccv_matrix_free(imgs1); memcpy(u8s2, imgs2->data.u8, imgs2->rows * imgs2->step); ccv_matrix_free(imgs2); flag = 1; ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier; for (k = 0; k < cascade->count; ++k, ++classifier) { float sum = 0; float* alpha = classifier->alpha; ccv_bbf_feature_t* feature = classifier->feature; for (q = 0; q < classifier->count; ++q, alpha += 2, ++feature) sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)]; if (sum < classifier->threshold) { flag = 0; break; } } if (!flag) ccfree(negdata[negtotal]); else { ++negtotal; if (negtotal >= negnum) break; } } ccv_array_free(detected); ccv_matrix_free(image); ccv_drain_cache(); PRINT(CCV_CLI_INFO, "\rpreparing negative data ... 
%2d%%", 100 * negtotal / negnum); fflush(0); if (negtotal >= negnum) break; } if (rneg == negtotal) break; rneg = negtotal; PRINT(CCV_CLI_INFO, "\nentering additional round %d\n", t + 1); } gsl_rng_free(rng); ccfree(idcheck); ccv_drain_cache(); PRINT(CCV_CLI_INFO, "\n"); return negtotal; } static void _ccv_prepare_positive_data(ccv_dense_matrix_t** posimg, unsigned char** posdata, ccv_size_t size, int posnum) { PRINT(CCV_CLI_INFO, "preparing positive data ... 0%%"); int i; for (i = 0; i < posnum; i++) { ccv_dense_matrix_t* imgs0 = posimg[i]; ccv_dense_matrix_t* imgs1 = 0; ccv_dense_matrix_t* imgs2 = 0; assert((imgs0->type & CCV_C1) && (imgs0->type & CCV_8U) && imgs0->rows == size.height && imgs0->cols == size.width); ccv_sample_down(imgs0, &imgs1, 0, 0, 0); ccv_sample_down(imgs1, &imgs2, 0, 0, 0); int isizs0 = imgs0->rows * imgs0->step; int isizs1 = imgs1->rows * imgs1->step; int isizs2 = imgs2->rows * imgs2->step; posdata[i] = (unsigned char*)ccmalloc(isizs0 + isizs1 + isizs2); memcpy(posdata[i], imgs0->data.u8, isizs0); memcpy(posdata[i] + isizs0, imgs1->data.u8, isizs1); memcpy(posdata[i] + isizs0 + isizs1, imgs2->data.u8, isizs2); PRINT(CCV_CLI_INFO, "\rpreparing positive data ... 
%2d%%", 100 * (i + 1) / posnum);
		fflush(0);
		/* imgs0 is owned by the caller (posimg); only the derived planes are freed */
		ccv_matrix_free(imgs1);
		ccv_matrix_free(imgs2);
	}
	ccv_drain_cache();
	PRINT(CCV_CLI_INFO, "\n");
}

/* one candidate feature in the genetic / convex search, with bookkeeping:
 * pk/nk are the number of live positive/negative points, age counts rounds
 * survived without mutation, error is its weighted misclassification rate */
typedef struct {
	double fitness;
	int pk, nk;
	int age;
	double error;
	ccv_bbf_feature_t feature;
} ccv_bbf_gene_t;

/* fitness = accuracy, discounted exponentially by age and boosted slightly
 * (factor 1.015 per point) for features that use more points */
static inline void _ccv_bbf_genetic_fitness(ccv_bbf_gene_t* gene)
{
	gene->fitness = (1 - gene->error) * exp(-0.01 * gene->age) * exp((gene->pk + gene->nk) * log(1.015));
}

/* returns 1 if point (x, y) on pyramid level z already appears in the gene's
 * positive or negative point set (used to avoid duplicate points) */
static inline int _ccv_bbf_exist_gene_feature(ccv_bbf_gene_t* gene, int x, int y, int z)
{
	int i;
	for (i = 0; i < gene->pk; i++)
		if (z == gene->feature.pz[i] && x == gene->feature.px[i] && y == gene->feature.py[i])
			return 1;
	for (i = 0; i < gene->nk; i++)
		if (z == gene->feature.nz[i] && x == gene->feature.nx[i] && y == gene->feature.ny[i])
			return 1;
	return 0;
}

/* Fill a gene with a random feature: random point counts (subject to the
 * CCV_BBF_POINT_MIN total) and random distinct points on the 3 pyramid
 * levels; unused slots are marked with z == -1. */
static inline void _ccv_bbf_randomize_gene(gsl_rng* rng, ccv_bbf_gene_t* gene, int* rows, int* cols)
{
	int i;
	do {
		gene->pk = gsl_rng_uniform_int(rng, CCV_BBF_POINT_MAX - 1) + 1;
		gene->nk = gsl_rng_uniform_int(rng, CCV_BBF_POINT_MAX - 1) + 1;
	} while (gene->pk + gene->nk < CCV_BBF_POINT_MIN); /* a hard restriction of at least 3 points have to be examed */
	gene->feature.size = ccv_max(gene->pk, gene->nk);
	gene->age = 0;
	for (i = 0; i < CCV_BBF_POINT_MAX; i++)
	{
		gene->feature.pz[i] = -1;
		gene->feature.nz[i] = -1;
	}
	int x, y, z;
	for (i = 0; i < gene->pk; i++)
	{
		/* rejection-sample until the point is not already used */
		do {
			z = gsl_rng_uniform_int(rng, 3);
			x = gsl_rng_uniform_int(rng, cols[z]);
			y = gsl_rng_uniform_int(rng, rows[z]);
		} while (_ccv_bbf_exist_gene_feature(gene, x, y, z));
		gene->feature.pz[i] = z;
		gene->feature.px[i] = x;
		gene->feature.py[i] = y;
	}
	for (i = 0; i < gene->nk; i++)
	{
		do {
			z = gsl_rng_uniform_int(rng, 3);
			x = gsl_rng_uniform_int(rng, cols[z]);
			y = gsl_rng_uniform_int(rng, rows[z]);
		} while (_ccv_bbf_exist_gene_feature(gene, x, y, z));
		gene->feature.nz[i] = z;
		gene->feature.nx[i] = x;
		gene->feature.ny[i] = y;
	}
}

/* Weighted error of a single feature over the training set: sum of pw[] over
 * missed positives plus nw[] over accepted negatives. */
static inline double _ccv_bbf_error_rate(ccv_bbf_feature_t* feature, unsigned char** posdata, int posnum,
unsigned char** negdata, int negnum, ccv_size_t size, double* pw, double* nw)
{
	int i;
	int steps[] = { _ccv_width_padding(size.width), _ccv_width_padding(size.width >> 1), _ccv_width_padding(size.width >> 2) };
	int isizs0 = steps[0] * size.height;
	int isizs01 = isizs0 + steps[1] * (size.height >> 1);
	double error = 0;
	for (i = 0; i < posnum; i++)
	{
		unsigned char* u8[] = { posdata[i], posdata[i] + isizs0, posdata[i] + isizs01 };
		if (!_ccv_run_bbf_feature(feature, steps, u8))
			error += pw[i];
	}
	for (i = 0; i < negnum; i++)
	{
		unsigned char* u8[] = { negdata[i], negdata[i] + isizs0, negdata[i] + isizs01 };
		if ( _ccv_run_bbf_feature(feature, steps, u8))
			error += nw[i];
	}
	return error;
}

/* sort genes by descending fitness (note >=, so best come first) */
#define less_than(fit1, fit2, aux) ((fit1).fitness >= (fit2).fitness)
static CCV_IMPLEMENT_QSORT(_ccv_bbf_genetic_qsort, ccv_bbf_gene_t, less_than)
#undef less_than

/* Genetic search for the next weak feature.
 * Population of ftnum*100 genes evolves per round as: keep the ftnum fittest
 * (aging them), add ftnum*40 mutants of random survivors, ftnum*20 hybrids
 * (positive points from one parent, negative from another) and ftnum*39 fresh
 * random genes. Stops after 40 rounds without improving the best error; the
 * rng is seeded from pw[0] + nw[0] bit-reinterpreted as an integer. */
static ccv_bbf_feature_t _ccv_bbf_genetic_optimize(unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, int ftnum, ccv_size_t size, double* pw, double* nw)
{
	ccv_bbf_feature_t best;
	/* seed (random method) */
	gsl_rng_env_setup();
	gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
	union { unsigned long int li; double db; } dbli;
	dbli.db = pw[0] + nw[0];
	gsl_rng_set(rng, dbli.li);
	int i, j;
	int pnum = ftnum * 100;
	assert(pnum > 0);
	ccv_bbf_gene_t* gene = (ccv_bbf_gene_t*)ccmalloc(pnum * sizeof(ccv_bbf_gene_t));
	int rows[] = { size.height, size.height >> 1, size.height >> 2 };
	int cols[] = { size.width, size.width >> 1, size.width >> 2 };
	for (i = 0; i < pnum; i++)
		_ccv_bbf_randomize_gene(rng, &gene[i], rows, cols);
	unsigned int timer = _ccv_bbf_time_measure();
#ifdef USE_OPENMP
#pragma omp parallel for private(i) schedule(dynamic)
#endif
	for (i = 0; i < pnum; i++)
		gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw);
	timer = _ccv_bbf_time_measure() - timer;
	for (i = 0; i < pnum; i++)
		_ccv_bbf_genetic_fitness(&gene[i]);
	double best_err = 1;
	int rnum = ftnum * 39;
	/* number of randomize */
	int mnum = ftnum * 40; /* number of mutation */
	int hnum = ftnum * 20; /* number of hybrid */
	/* iteration stop crit : best no change in 40 iterations */
	int it = 0, t;
	for (t = 0 ; it < 40; ++it, ++t)
	{
		int min_id = 0;
		double min_err = gene[0].error;
		for (i = 1; i < pnum; i++)
			if (gene[i].error < min_err)
			{
				min_id = i;
				min_err = gene[i].error;
			}
		min_err = gene[min_id].error = _ccv_bbf_error_rate(&gene[min_id].feature, posdata, posnum, negdata, negnum, size, pw, nw);
		if (min_err < best_err)
		{
			best_err = min_err;
			memcpy(&best, &gene[min_id].feature, sizeof(best));
			PRINT(CCV_CLI_INFO, "best bbf feature with error %f\n|-size: %d\n|-positive point: ", best_err, best.size);
			for (i = 0; i < best.size; i++)
				PRINT(CCV_CLI_INFO, "(%d %d %d), ", best.px[i], best.py[i], best.pz[i]);
			PRINT(CCV_CLI_INFO, "\n|-negative point: ");
			for (i = 0; i < best.size; i++)
				PRINT(CCV_CLI_INFO, "(%d %d %d), ", best.nx[i], best.ny[i], best.nz[i]);
			PRINT(CCV_CLI_INFO, "\n");
			it = 0; /* reset the no-improvement counter */
		}
		PRINT(CCV_CLI_INFO, "minimum error achieved in round %d(%d) : %f with %d ms\n", t, it, min_err, timer / 1000);
		_ccv_bbf_genetic_qsort(gene, pnum, 0);
		/* survivors age; everything past the first ftnum slots is regenerated */
		for (i = 0; i < ftnum; i++)
			++gene[i].age;
		for (i = ftnum; i < ftnum + mnum; i++)
		{
			int parent = gsl_rng_uniform_int(rng, ftnum);
			memcpy(gene + i, gene + parent, sizeof(ccv_bbf_gene_t));
			/* three mutation strategy : 1. add, 2. remove, 3. refine */
			/* pn selects which point set to mutate: 0 = positive, 1 = negative */
			int pnm, pn = gsl_rng_uniform_int(rng, 2);
			int* pnk[] = { &gene[i].pk, &gene[i].nk };
			int* pnx[] = { gene[i].feature.px, gene[i].feature.nx };
			int* pny[] = { gene[i].feature.py, gene[i].feature.ny };
			int* pnz[] = { gene[i].feature.pz, gene[i].feature.nz };
			int x, y, z;
			int victim, decay = 1; /* decay stays 1 until a mutation actually lands */
			do {
				switch (gsl_rng_uniform_int(rng, 3))
				{
					case 0: /* add */
						if (gene[i].pk == CCV_BBF_POINT_MAX && gene[i].nk == CCV_BBF_POINT_MAX)
							break;
						while (*pnk[pn] + 1 > CCV_BBF_POINT_MAX)
							pn = gsl_rng_uniform_int(rng, 2);
						do {
							z = gsl_rng_uniform_int(rng, 3);
							x = gsl_rng_uniform_int(rng, cols[z]);
							y = gsl_rng_uniform_int(rng, rows[z]);
						} while (_ccv_bbf_exist_gene_feature(&gene[i], x, y, z));
						pnz[pn][*pnk[pn]] = z;
						pnx[pn][*pnk[pn]] = x;
						pny[pn][*pnk[pn]] = y;
						++(*pnk[pn]);
						gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk);
						decay = gene[i].age = 0;
						break;
					case 1: /* remove */
						if (gene[i].pk + gene[i].nk <= CCV_BBF_POINT_MIN) /* at least 3 points have to be examed */
							break;
						while (*pnk[pn] - 1 <= 0) // || *pnk[pn] + *pnk[!pn] - 1 < CCV_BBF_POINT_MIN)
							pn = gsl_rng_uniform_int(rng, 2);
						victim = gsl_rng_uniform_int(rng, *pnk[pn]);
						/* shift the tail down over the removed slot */
						for (j = victim; j < *pnk[pn] - 1; j++)
						{
							pnz[pn][j] = pnz[pn][j + 1];
							pnx[pn][j] = pnx[pn][j + 1];
							pny[pn][j] = pny[pn][j + 1];
						}
						pnz[pn][*pnk[pn] - 1] = -1;
						--(*pnk[pn]);
						gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk);
						decay = gene[i].age = 0;
						break;
					case 2: /* refine */
						pnm = gsl_rng_uniform_int(rng, *pnk[pn]);
						do {
							z = gsl_rng_uniform_int(rng, 3);
							x = gsl_rng_uniform_int(rng, cols[z]);
							y = gsl_rng_uniform_int(rng, rows[z]);
						} while (_ccv_bbf_exist_gene_feature(&gene[i], x, y, z));
						pnz[pn][pnm] = z;
						pnx[pn][pnm] = x;
						pny[pn][pnm] = y;
						decay = gene[i].age = 0;
						break;
				}
			} while (decay);
		}
		for (i = ftnum + mnum; i < ftnum + mnum + hnum; i++)
		{
			/* hybrid strategy: taking positive points from dad, negative points from mum */
			int dad, mum;
			do {
				dad = gsl_rng_uniform_int(rng, ftnum);
				mum = gsl_rng_uniform_int(rng, ftnum);
			} while (dad == mum || gene[dad].pk + gene[mum].nk < CCV_BBF_POINT_MIN); /* at least 3 points have to be examed */
			for (j = 0; j < CCV_BBF_POINT_MAX; j++)
			{
				gene[i].feature.pz[j] = -1;
				gene[i].feature.nz[j] = -1;
			}
			gene[i].pk = gene[dad].pk;
			for (j = 0; j < gene[i].pk; j++)
			{
				gene[i].feature.pz[j] = gene[dad].feature.pz[j];
				gene[i].feature.px[j] = gene[dad].feature.px[j];
				gene[i].feature.py[j] = gene[dad].feature.py[j];
			}
			gene[i].nk = gene[mum].nk;
			for (j = 0; j < gene[i].nk; j++)
			{
				gene[i].feature.nz[j] = gene[mum].feature.nz[j];
				gene[i].feature.nx[j] = gene[mum].feature.nx[j];
				gene[i].feature.ny[j] = gene[mum].feature.ny[j];
			}
			gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk);
			gene[i].age = 0;
		}
		for (i = ftnum + mnum + hnum; i < ftnum + mnum + hnum + rnum; i++)
			_ccv_bbf_randomize_gene(rng, &gene[i], rows, cols);
		timer = _ccv_bbf_time_measure();
#ifdef USE_OPENMP
#pragma omp parallel for private(i) schedule(dynamic)
#endif
		for (i = 0; i < pnum; i++)
			gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw);
		timer = _ccv_bbf_time_measure() - timer;
		for (i = 0; i < pnum; i++)
			_ccv_bbf_genetic_fitness(&gene[i]);
	}
	ccfree(gene);
	gsl_rng_free(rng);
	return best;
}

/* sort genes by ascending error (best first) */
#define less_than(fit1, fit2, aux) ((fit1).error < (fit2).error)
static CCV_IMPLEMENT_QSORT(_ccv_bbf_best_qsort, ccv_bbf_gene_t, less_than)
#undef less_than

/* Evaluate all pnum candidate genes, sort by error, and return the best one
 * whose total point count reaches point_min (falling back to gene[0] if none
 * qualifies). Logs the winner and the evaluation time. */
static ccv_bbf_gene_t _ccv_bbf_best_gene(ccv_bbf_gene_t* gene, int pnum, int point_min, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, double* pw, double* nw)
{
	int i;
	unsigned int timer = _ccv_bbf_time_measure();
#ifdef USE_OPENMP
#pragma omp parallel for private(i) schedule(dynamic)
#endif
	for (i = 0; i < pnum; i++)
		gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw);
	timer = _ccv_bbf_time_measure() - timer;
	_ccv_bbf_best_qsort(gene, pnum, 0);
	int min_id = 0;
	double min_err = gene[0].error;
	for (i = 0; i < pnum; i++)
		if (gene[i].nk +
gene[i].pk >= point_min)
		{
			min_id = i;
			min_err = gene[i].error;
			break;
		}
	PRINT(CCV_CLI_INFO, "local best bbf feature with error %f\n|-size: %d\n|-positive point: ", min_err, gene[min_id].feature.size);
	for (i = 0; i < gene[min_id].feature.size; i++)
		PRINT(CCV_CLI_INFO, "(%d %d %d), ", gene[min_id].feature.px[i], gene[min_id].feature.py[i], gene[min_id].feature.pz[i]);
	PRINT(CCV_CLI_INFO, "\n|-negative point: ");
	for (i = 0; i < gene[min_id].feature.size; i++)
		PRINT(CCV_CLI_INFO, "(%d %d %d), ", gene[min_id].feature.nx[i], gene[min_id].feature.ny[i], gene[min_id].feature.nz[i]);
	PRINT(CCV_CLI_INFO, "\nthe computation takes %d ms\n", timer / 1000);
	return gene[min_id];
}

/* Deterministic (exhaustive local) feature search: bootstrap a 2-point
 * feature if best_feature == 0, otherwise start from best_feature, then
 * hill-climb by enumerating every single-point add/remove/move permutation
 * each round and keeping the best candidate until the error stops improving. */
static ccv_bbf_feature_t _ccv_bbf_convex_optimize(unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_bbf_feature_t* best_feature, ccv_size_t size, double* pw, double* nw)
{
	ccv_bbf_gene_t best_gene;
	/* seed (random method) */
	gsl_rng_env_setup();
	gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
	union { unsigned long int li; double db; } dbli;
	dbli.db = pw[0] + nw[0];
	gsl_rng_set(rng, dbli.li);
	int i, j, k, q, p, g, t;
	int rows[] = { size.height, size.height >> 1, size.height >> 2 };
	int cols[] = { size.width, size.width >> 1, size.width >> 2 };
	/* total number of candidate pixel positions across the 3 pyramid levels */
	int pnum = rows[0] * cols[0] + rows[1] * cols[1] + rows[2] * cols[2];
	/* worst-case number of permutations generated in one round */
	ccv_bbf_gene_t* gene = (ccv_bbf_gene_t*)ccmalloc((pnum * (CCV_BBF_POINT_MAX * 2 + 1) * 2 + CCV_BBF_POINT_MAX * 2 + 1) * sizeof(ccv_bbf_gene_t));
	if (best_feature == 0)
	{
		/* bootstrapping the best feature, start from two pixels, one for positive, one for negative
		 * the bootstrapping process go like this: first, it will assign a random pixel as positive
		 * and enumerate every possible pixel as negative, and pick the best one. Then, enumerate every
		 * possible pixel as positive, and pick the best one, until it converges */
		memset(&best_gene, 0, sizeof(ccv_bbf_gene_t));
		/* NOTE(review): best_gene.error is left at 0 by the memset and is never
		 * seeded with a real error rate before the convergence test below
		 * compares against it — verify against upstream that an initial
		 * _ccv_bbf_error_rate call was not lost, since any local_gene.error >= 0
		 * would end the bootstrap loop on its first round. */
		for (i = 0; i < CCV_BBF_POINT_MAX; i++)
			best_gene.feature.pz[i] = best_gene.feature.nz[i] = -1;
		best_gene.pk = 1;
		best_gene.nk = 0;
		best_gene.feature.size = 1;
		/* random starting positive pixel */
		best_gene.feature.pz[0] = gsl_rng_uniform_int(rng, 3);
		best_gene.feature.px[0] = gsl_rng_uniform_int(rng, cols[best_gene.feature.pz[0]]);
		best_gene.feature.py[0] = gsl_rng_uniform_int(rng, rows[best_gene.feature.pz[0]]);
		for (t = 0; ; ++t)
		{
			g = 0;
			/* even rounds fix the positive point and enumerate negatives;
			 * odd rounds fix the negative point and enumerate positives */
			if (t % 2 == 0)
			{
				for (i = 0; i < 3; i++)
					for (j = 0; j < cols[i]; j++)
						for (k = 0; k < rows[i]; k++)
							if (i != best_gene.feature.pz[0] || j != best_gene.feature.px[0] || k != best_gene.feature.py[0])
							{
								gene[g] = best_gene;
								gene[g].pk = gene[g].nk = 1;
								gene[g].feature.nz[0] = i;
								gene[g].feature.nx[0] = j;
								gene[g].feature.ny[0] = k;
								g++;
							}
			} else {
				for (i = 0; i < 3; i++)
					for (j = 0; j < cols[i]; j++)
						for (k = 0; k < rows[i]; k++)
							if (i != best_gene.feature.nz[0] || j != best_gene.feature.nx[0] || k != best_gene.feature.ny[0])
							{
								gene[g] = best_gene;
								gene[g].pk = gene[g].nk = 1;
								gene[g].feature.pz[0] = i;
								gene[g].feature.px[0] = j;
								gene[g].feature.py[0] = k;
								g++;
							}
			}
			PRINT(CCV_CLI_INFO, "bootstrapping round : %d\n", t);
			ccv_bbf_gene_t local_gene = _ccv_bbf_best_gene(gene, g, 2, posdata, posnum, negdata, negnum, size, pw, nw);
			if (local_gene.error >= best_gene.error - 1e-10)
				break;
			best_gene = local_gene;
		}
	} else {
		/* resume from a given feature; recover pk/nk by scanning for the
		 * first unused (-1) slot in each point list */
		best_gene.feature = *best_feature;
		best_gene.pk = best_gene.nk = best_gene.feature.size;
		for (i = 0; i < CCV_BBF_POINT_MAX; i++)
			if (best_feature->pz[i] == -1)
			{
				best_gene.pk = i;
				break;
			}
		for (i = 0; i < CCV_BBF_POINT_MAX; i++)
			if (best_feature->nz[i] == -1)
			{
				best_gene.nk = i;
				break;
			}
	}
	/* after bootstrapping, the float search technique will do the following permutations:
	 * a). add a new point to positive or negative
	 * b). remove a point from positive or negative
	 * c). move an existing point in positive or negative to another position
	 * the three rules applied exhaustively, no heuristic used. */
	for (t = 0; ; ++t)
	{
		g = 0;
		for (i = 0; i < 3; i++)
			for (j = 0; j < cols[i]; j++)
				for (k = 0; k < rows[i]; k++)
					if (!_ccv_bbf_exist_gene_feature(&best_gene, j, k, i))
					{
						/* add positive point */
						if (best_gene.pk < CCV_BBF_POINT_MAX - 1)
						{
							gene[g] = best_gene;
							gene[g].feature.pz[gene[g].pk] = i;
							gene[g].feature.px[gene[g].pk] = j;
							gene[g].feature.py[gene[g].pk] = k;
							gene[g].pk++;
							gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
							g++;
						}
						/* add negative point */
						if (best_gene.nk < CCV_BBF_POINT_MAX - 1)
						{
							gene[g] = best_gene;
							gene[g].feature.nz[gene[g].nk] = i;
							gene[g].feature.nx[gene[g].nk] = j;
							gene[g].feature.ny[gene[g].nk] = k;
							gene[g].nk++;
							gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
							g++;
						}
						/* refine positive point */
						for (q = 0; q < best_gene.pk; q++)
						{
							gene[g] = best_gene;
							gene[g].feature.pz[q] = i;
							gene[g].feature.px[q] = j;
							gene[g].feature.py[q] = k;
							g++;
						}
						/* add positive point, remove negative point */
						if (best_gene.pk < CCV_BBF_POINT_MAX - 1 && best_gene.nk > 1)
						{
							for (q = 0; q < best_gene.nk; q++)
							{
								gene[g] = best_gene;
								gene[g].feature.pz[gene[g].pk] = i;
								gene[g].feature.px[gene[g].pk] = j;
								gene[g].feature.py[gene[g].pk] = k;
								gene[g].pk++;
								for (p = q; p < best_gene.nk - 1; p++)
								{
									gene[g].feature.nz[p] = gene[g].feature.nz[p + 1];
									gene[g].feature.nx[p] = gene[g].feature.nx[p + 1];
									gene[g].feature.ny[p] = gene[g].feature.ny[p + 1];
								}
								gene[g].feature.nz[gene[g].nk - 1] = -1;
								gene[g].nk--;
								gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
								g++;
							}
						}
						/* refine negative point */
						for (q = 0; q < best_gene.nk; q++)
						{
							gene[g] = best_gene;
							gene[g].feature.nz[q] = i;
							gene[g].feature.nx[q] = j;
							gene[g].feature.ny[q] = k;
							g++;
						}
						/* add negative point, remove positive point */
						if (best_gene.pk > 1 && best_gene.nk < CCV_BBF_POINT_MAX - 1)
						{
							for (q = 0; q < best_gene.pk; q++)
							{
								gene[g] = best_gene;
								gene[g].feature.nz[gene[g].nk] = i;
								gene[g].feature.nx[gene[g].nk] = j;
								gene[g].feature.ny[gene[g].nk] = k;
								gene[g].nk++;
								for (p = q; p < best_gene.pk - 1; p++)
								{
									gene[g].feature.pz[p] = gene[g].feature.pz[p + 1];
									gene[g].feature.px[p] = gene[g].feature.px[p + 1];
									gene[g].feature.py[p] = gene[g].feature.py[p + 1];
								}
								gene[g].feature.pz[gene[g].pk - 1] = -1;
								gene[g].pk--;
								gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
								g++;
							}
						}
					}
		/* pure removals (no replacement point involved) */
		if (best_gene.pk > 1)
			for (q = 0; q < best_gene.pk; q++)
			{
				gene[g] = best_gene;
				for (i = q; i < best_gene.pk - 1; i++)
				{
					gene[g].feature.pz[i] = gene[g].feature.pz[i + 1];
					gene[g].feature.px[i] = gene[g].feature.px[i + 1];
					gene[g].feature.py[i] = gene[g].feature.py[i + 1];
				}
				gene[g].feature.pz[gene[g].pk - 1] = -1;
				gene[g].pk--;
				gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
				g++;
			}
		if (best_gene.nk > 1)
			for (q = 0; q < best_gene.nk; q++)
			{
				gene[g] = best_gene;
				for (i = q; i < best_gene.nk - 1; i++)
				{
					gene[g].feature.nz[i] = gene[g].feature.nz[i + 1];
					gene[g].feature.nx[i] = gene[g].feature.nx[i + 1];
					gene[g].feature.ny[i] = gene[g].feature.ny[i + 1];
				}
				gene[g].feature.nz[gene[g].nk - 1] = -1;
				gene[g].nk--;
				gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
				g++;
			}
		/* the unmodified incumbent competes too */
		gene[g] = best_gene;
		g++;
		PRINT(CCV_CLI_INFO, "float search round : %d\n", t);
		ccv_bbf_gene_t local_gene = _ccv_bbf_best_gene(gene, g, CCV_BBF_POINT_MIN, posdata, posnum, negdata, negnum, size, pw, nw);
		if (local_gene.error >= best_gene.error - 1e-10)
			break;
		best_gene = local_gene;
	}
	ccfree(gene);
	gsl_rng_free(rng);
	return best_gene.feature;
}

/* Serialize one stage classifier to the text format consumed by
 * _ccv_read_bbf_stage_classifier; floats are written as their raw bit
 * pattern via an int union. Returns 0 on success, -1 on open failure. */
static int _ccv_write_bbf_stage_classifier(const char* file, ccv_bbf_stage_classifier_t* classifier)
{
	FILE* w = fopen(file, "wb");
	if (w == 0) return -1;
	fprintf(w, "%d\n", classifier->count);
	union { float fl; int i; } fli;
	fli.fl = classifier->threshold;
	fprintf(w, "%d\n", fli.i);
	int i, j;
	for (i = 0; i < classifier->count; i++)
	{
		fprintf(w, "%d\n", classifier->feature[i].size);
		for (j = 0; j <
classifier->feature[i].size; j++) { fprintf(w, "%d %d %d\n", classifier->feature[i].px[j], classifier->feature[i].py[j], classifier->feature[i].pz[j]); fprintf(w, "%d %d %d\n", classifier->feature[i].nx[j], classifier->feature[i].ny[j], classifier->feature[i].nz[j]); } union { float fl; int i; } flia, flib; flia.fl = classifier->alpha[i * 2]; flib.fl = classifier->alpha[i * 2 + 1]; fprintf(w, "%d %d\n", flia.i, flib.i); } fclose(w); return 0; } static int _ccv_read_background_data(const char* file, unsigned char** negdata, int* negnum, ccv_size_t size) { int stat = 0; FILE* r = fopen(file, "rb"); if (r == 0) return -1; stat |= fread(negnum, sizeof(int), 1, r); int i; int isizs012 = _ccv_width_padding(size.width) * size.height + _ccv_width_padding(size.width >> 1) * (size.height >> 1) + _ccv_width_padding(size.width >> 2) * (size.height >> 2); for (i = 0; i < *negnum; i++) { negdata[i] = (unsigned char*)ccmalloc(isizs012); stat |= fread(negdata[i], 1, isizs012, r); } fclose(r); return 0; } static int _ccv_write_background_data(const char* file, unsigned char** negdata, int negnum, ccv_size_t size) { FILE* w = fopen(file, "w"); if (w == 0) return -1; fwrite(&negnum, sizeof(int), 1, w); int i; int isizs012 = _ccv_width_padding(size.width) * size.height + _ccv_width_padding(size.width >> 1) * (size.height >> 1) + _ccv_width_padding(size.width >> 2) * (size.height >> 2); for (i = 0; i < negnum; i++) fwrite(negdata[i], 1, isizs012, w); fclose(w); return 0; } static int _ccv_resume_bbf_cascade_training_state(const char* file, int* i, int* k, int* bg, double* pw, double* nw, int posnum, int negnum) { int stat = 0; FILE* r = fopen(file, "r"); if (r == 0) return -1; stat |= fscanf(r, "%d %d %d", i, k, bg); int j; union { double db; int i[2]; } dbi; for (j = 0; j < posnum; j++) { stat |= fscanf(r, "%d %d", &dbi.i[0], &dbi.i[1]); pw[j] = dbi.db; } for (j = 0; j < negnum; j++) { stat |= fscanf(r, "%d %d", &dbi.i[0], &dbi.i[1]); nw[j] = dbi.db; } fclose(r); return 0; } static 
/* Checkpoint writer (counterpart of _ccv_resume_bbf_cascade_training_state): stage index, feature index, bg flag, then the boosting weights as raw double bit patterns. Note the "cacade" typo is kept -- it is part of this function's linkage name. It is followed by the main AdaBoost-style cascade training driver, ccv_bbf_classifier_cascade_new. */
int _ccv_save_bbf_cacade_training_state(const char* file, int i, int k, int bg, double* pw, double* nw, int posnum, int negnum) { FILE* w = fopen(file, "w"); if (w == 0) return -1; fprintf(w, "%d %d %d\n", i, k, bg); int j; union { double db; int i[2]; } dbi; for (j = 0; j < posnum; ++j) { dbi.db = pw[j]; fprintf(w, "%d %d ", dbi.i[0], dbi.i[1]); } fprintf(w, "\n"); for (j = 0; j < negnum; ++j) { dbi.db = nw[j]; fprintf(w, "%d %d ", dbi.i[0], dbi.i[1]); } fprintf(w, "\n"); fclose(w); return 0; } void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params) { int i, j, k; /* allocate memory for usage */ ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t)); cascade->count = 0; cascade->size = size; cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(sizeof(ccv_bbf_stage_classifier_t)); unsigned char** posdata = (unsigned char**)ccmalloc(posnum * sizeof(unsigned char*)); unsigned char** negdata = (unsigned char**)ccmalloc(negnum * sizeof(unsigned char*)); double* pw = (double*)ccmalloc(posnum * sizeof(double)); double* nw = (double*)ccmalloc(negnum * sizeof(double)); float* peval = (float*)ccmalloc(posnum * sizeof(float)); float* neval = (float*)ccmalloc(negnum * sizeof(float)); double inv_balance_k = 1. 
/* balance_k biases positive-sample weight against negative; both the factor and its inverse are additionally scaled by 0.01 below. The training state (stat.txt, stage-*.txt, negs.txt in `dir`) lets an interrupted run resume at the saved stage/feature. */
/ params.balance_k; /* balance factor k, and weighted with 0.01 */ params.balance_k *= 0.01; inv_balance_k *= 0.01; int steps[] = { _ccv_width_padding(cascade->size.width), _ccv_width_padding(cascade->size.width >> 1), _ccv_width_padding(cascade->size.width >> 2) }; int isizs0 = steps[0] * cascade->size.height; int isizs01 = isizs0 + steps[1] * (cascade->size.height >> 1); i = 0; k = 0; int bg = 0; int cacheK = 10; /* state resume code */ char buf[1024]; sprintf(buf, "%s/stat.txt", dir); _ccv_resume_bbf_cascade_training_state(buf, &i, &k, &bg, pw, nw, posnum, negnum); if (i > 0) { cascade->count = i; ccfree(cascade->stage_classifier); cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(i * sizeof(ccv_bbf_stage_classifier_t)); for (j = 0; j < i; j++) { sprintf(buf, "%s/stage-%d.txt", dir, j); _ccv_read_bbf_stage_classifier(buf, &cascade->stage_classifier[j]); } } if (k > 0) cacheK = k; int rpos, rneg = 0; if (bg) { sprintf(buf, "%s/negs.txt", dir); _ccv_read_background_data(buf, negdata, &rneg, cascade->size); } for (; i < params.layer; i++) { if (!bg) { rneg = _ccv_prepare_background_data(cascade, bgfiles, bgnum, negdata, negnum); /* save state of background data */ sprintf(buf, "%s/negs.txt", dir); _ccv_write_background_data(buf, negdata, rneg, cascade->size); bg = 1; } double totalw; /* save state of cascade : level, weight etc. 
*/ sprintf(buf, "%s/stat.txt", dir); _ccv_save_bbf_cacade_training_state(buf, i, k, bg, pw, nw, posnum, negnum); ccv_bbf_stage_classifier_t classifier; if (k > 0) { /* resume state of classifier */ sprintf( buf, "%s/stage-%d.txt", dir, i ); _ccv_read_bbf_stage_classifier(buf, &classifier); } else { /* initialize classifier */ for (j = 0; j < posnum; j++) pw[j] = params.balance_k; for (j = 0; j < rneg; j++) nw[j] = inv_balance_k; classifier.count = k; classifier.threshold = 0; classifier.feature = (ccv_bbf_feature_t*)ccmalloc(cacheK * sizeof(ccv_bbf_feature_t)); classifier.alpha = (float*)ccmalloc(cacheK * 2 * sizeof(float)); } _ccv_prepare_positive_data(posimg, posdata, cascade->size, posnum); rpos = _ccv_prune_positive_data(cascade, posdata, posnum, cascade->size); PRINT(CCV_CLI_INFO, "%d postivie data and %d negative data in training\n", rpos, rneg); /* reweight to 1.00 */ totalw = 0; for (j = 0; j < rpos; j++) totalw += pw[j]; for (j = 0; j < rneg; j++) totalw += nw[j]; for (j = 0; j < rpos; j++) pw[j] = pw[j] / totalw; for (j = 0; j < rneg; j++) nw[j] = nw[j] / totalw; for (; ; k++) { /* get overall true-positive, false-positive rate and threshold */ double tp = 0, fp = 0, etp = 0, efp = 0; _ccv_bbf_eval_data(&classifier, posdata, rpos, negdata, rneg, cascade->size, peval, neval); _ccv_sort_32f(peval, rpos, 0); classifier.threshold = peval[(int)((1. 
/* the stage threshold is placed so that a pos_crit fraction of positives pass; 1e-6 nudges it just below the boundary score. tp/fp are rates at threshold 0, etp/efp at the chosen threshold. */
- params.pos_crit) * rpos)] - 1e-6; for (j = 0; j < rpos; j++) { if (peval[j] >= 0) ++tp; if (peval[j] >= classifier.threshold) ++etp; } tp /= rpos; etp /= rpos; for (j = 0; j < rneg; j++) { if (neval[j] >= 0) ++fp; if (neval[j] >= classifier.threshold) ++efp; } fp /= rneg; efp /= rneg; PRINT(CCV_CLI_INFO, "stage classifier real TP rate : %f, FP rate : %f\n", tp, fp); PRINT(CCV_CLI_INFO, "stage classifier TP rate : %f, FP rate : %f at threshold : %f\n", etp, efp, classifier.threshold); if (k > 0) { /* save classifier state */ sprintf(buf, "%s/stage-%d.txt", dir, i); _ccv_write_bbf_stage_classifier(buf, &classifier); sprintf(buf, "%s/stat.txt", dir); _ccv_save_bbf_cacade_training_state(buf, i, k, bg, pw, nw, posnum, negnum); } if (etp > params.pos_crit && efp < params.neg_crit) break; /* TODO: more post-process is needed in here */ /* select the best feature in current distribution through genetic algorithm optimization */ ccv_bbf_feature_t best; if (params.optimizer == CCV_BBF_GENETIC_OPT) { best = _ccv_bbf_genetic_optimize(posdata, rpos, negdata, rneg, params.feature_number, cascade->size, pw, nw); } else if (params.optimizer == CCV_BBF_FLOAT_OPT) { best = _ccv_bbf_convex_optimize(posdata, rpos, negdata, rneg, 0, cascade->size, pw, nw); } else { best = _ccv_bbf_genetic_optimize(posdata, rpos, negdata, rneg, params.feature_number, cascade->size, pw, nw); best = _ccv_bbf_convex_optimize(posdata, rpos, negdata, rneg, &best, cascade->size, pw, nw); } double err = _ccv_bbf_error_rate(&best, posdata, rpos, negdata, rneg, cascade->size, pw, nw); double rw = (1 - err) / err; totalw = 0; /* reweight */ for (j = 0; j < rpos; j++) { unsigned char* u8[] = { posdata[j], posdata[j] + isizs0, posdata[j] + isizs01 }; if (!_ccv_run_bbf_feature(&best, steps, u8)) pw[j] *= rw; pw[j] *= params.balance_k; totalw += pw[j]; } for (j = 0; j < rneg; j++) { unsigned char* u8[] = { negdata[j], negdata[j] + isizs0, negdata[j] + isizs01 }; if (_ccv_run_bbf_feature(&best, steps, u8)) nw[j] *= 
/* misclassified samples are up-weighted by rw = (1 - err) / err (AdaBoost-style), rebalanced by balance_k, then all weights are renormalized to sum 1; the feature's vote coefficient is c = log(rw). */
rw; nw[j] *= inv_balance_k; totalw += nw[j]; } for (j = 0; j < rpos; j++) pw[j] = pw[j] / totalw; for (j = 0; j < rneg; j++) nw[j] = nw[j] / totalw; double c = log(rw); PRINT(CCV_CLI_INFO, "coefficient of feature %d: %f\n", k + 1, c); classifier.count = k + 1; /* resizing classifier */ if (k >= cacheK) { ccv_bbf_feature_t* feature = (ccv_bbf_feature_t*)ccmalloc(cacheK * 2 * sizeof(ccv_bbf_feature_t)); memcpy(feature, classifier.feature, cacheK * sizeof(ccv_bbf_feature_t)); ccfree(classifier.feature); float* alpha = (float*)ccmalloc(cacheK * 4 * sizeof(float)); memcpy(alpha, classifier.alpha, cacheK * 2 * sizeof(float)); ccfree(classifier.alpha); classifier.feature = feature; classifier.alpha = alpha; cacheK *= 2; } /* setup new feature */ classifier.feature[k] = best; classifier.alpha[k * 2] = -c; classifier.alpha[k * 2 + 1] = c; } cascade->count = i + 1; ccv_bbf_stage_classifier_t* stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t)); memcpy(stage_classifier, cascade->stage_classifier, i * sizeof(ccv_bbf_stage_classifier_t)); ccfree(cascade->stage_classifier); stage_classifier[i] = classifier; cascade->stage_classifier = stage_classifier; k = 0; bg = 0; for (j = 0; j < rpos; j++) ccfree(posdata[j]); for (j = 0; j < rneg; j++) ccfree(negdata[j]); } ccfree(neval); ccfree(peval); ccfree(nw); ccfree(pw); ccfree(negdata); ccfree(posdata); ccfree(cascade); } #else void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params) { fprintf(stderr, " ccv_bbf_classifier_cascade_new requires libgsl support, please compile ccv with libgsl.\n"); } #endif static int _ccv_is_equal(const void* _r1, const void* _r2, void* data) { const ccv_comp_t* r1 = (const ccv_comp_t*)_r1; const ccv_comp_t* r2 = (const ccv_comp_t*)_r2; int distance = (int)(r1->rect.width * 0.25 + 0.5); return r2->rect.x <= r1->rect.x + distance 
/* _ccv_is_equal / _ccv_is_equal_same_class: grouping predicates for raw detections -- rectangles match when centers are within 1/4 of r1's width and widths are within a 1.5x factor; the "_same_class" variant additionally requires equal classification.id. Then ccv_bbf_detect_objects runs the cascades over an image pyramid. */
&& r2->rect.x >= r1->rect.x - distance && r2->rect.y <= r1->rect.y + distance && r2->rect.y >= r1->rect.y - distance && r2->rect.width <= (int)(r1->rect.width * 1.5 + 0.5) && (int)(r2->rect.width * 1.5 + 0.5) >= r1->rect.width; } static int _ccv_is_equal_same_class(const void* _r1, const void* _r2, void* data) { const ccv_comp_t* r1 = (const ccv_comp_t*)_r1; const ccv_comp_t* r2 = (const ccv_comp_t*)_r2; int distance = (int)(r1->rect.width * 0.25 + 0.5); return r2->classification.id == r1->classification.id && r2->rect.x <= r1->rect.x + distance && r2->rect.x >= r1->rect.x - distance && r2->rect.y <= r1->rect.y + distance && r2->rect.y >= r1->rect.y - distance && r2->rect.width <= (int)(r1->rect.width * 1.5 + 0.5) && (int)(r2->rect.width * 1.5 + 0.5) >= r1->rect.width; } ccv_array_t* ccv_bbf_detect_objects(ccv_dense_matrix_t* a, ccv_bbf_classifier_cascade_t** _cascade, int count, ccv_bbf_param_t params) { int hr = a->rows / params.size.height; int wr = a->cols / params.size.width; double scale = pow(2., 1. 
/* scale = 2^(1/(interval+1)) is the per-level pyramid factor; pyr[] stores 4 slots per level (slot 0 plus, in accurate mode, 3 half-pixel-shifted variants produced by ccv_sample_down with dx/dy offsets). */
/ (params.interval + 1.)); int next = params.interval + 1; int scale_upto = (int)(log((double)ccv_min(hr, wr)) / log(scale)); ccv_dense_matrix_t** pyr = (ccv_dense_matrix_t**)alloca((scale_upto + next * 2) * 4 * sizeof(ccv_dense_matrix_t*)); memset(pyr, 0, (scale_upto + next * 2) * 4 * sizeof(ccv_dense_matrix_t*)); if (params.size.height != _cascade[0]->size.height || params.size.width != _cascade[0]->size.width) ccv_resample(a, &pyr[0], 0, a->rows * _cascade[0]->size.height / params.size.height, a->cols * _cascade[0]->size.width / params.size.width, CCV_INTER_AREA); else pyr[0] = a; int i, j, k, t, x, y, q; for (i = 1; i < ccv_min(params.interval + 1, scale_upto + next * 2); i++) ccv_resample(pyr[0], &pyr[i * 4], 0, (int)(pyr[0]->rows / pow(scale, i)), (int)(pyr[0]->cols / pow(scale, i)), CCV_INTER_AREA); for (i = next; i < scale_upto + next * 2; i++) ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4], 0, 0, 0); if (params.accurate) for (i = next * 2; i < scale_upto + next * 2; i++) { ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 1], 0, 1, 0); ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 2], 0, 0, 1); ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 3], 0, 1, 1); } ccv_array_t* idx_seq; ccv_array_t* seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0); ccv_array_t* seq2 = ccv_array_new(sizeof(ccv_comp_t), 64, 0); ccv_array_t* result_seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0); /* detect in multi scale */ for (t = 0; t < count; t++) { ccv_bbf_classifier_cascade_t* cascade = _cascade[t]; float scale_x = (float) params.size.width / (float) cascade->size.width; float scale_y = (float) params.size.height / (float) cascade->size.height; ccv_array_clear(seq); for (i = 0; i < scale_upto; i++) { int dx[] = {0, 1, 0, 1}; int dy[] = {0, 0, 1, 1}; int i_rows = pyr[i * 4 + next * 8]->rows - (cascade->size.height >> 2); int steps[] = { pyr[i * 4]->step, pyr[i * 4 + next * 4]->step, pyr[i * 4 + next * 8]->step }; int i_cols = pyr[i * 4 + next * 8]->cols - 
/* i_rows/i_cols: the sliding range measured on the quarter-resolution plane; u8[] holds the window's base pointer in each of the three pyramid planes, and a window passes only if every stage's alpha-weighted feature sum reaches that stage's threshold. */
(cascade->size.width >> 2); int paddings[] = { pyr[i * 4]->step * 4 - i_cols * 4, pyr[i * 4 + next * 4]->step * 2 - i_cols * 2, pyr[i * 4 + next * 8]->step - i_cols }; for (q = 0; q < (params.accurate ? 4 : 1); q++) { unsigned char* u8[] = { pyr[i * 4]->data.u8 + dx[q] * 2 + dy[q] * pyr[i * 4]->step * 2, pyr[i * 4 + next * 4]->data.u8 + dx[q] + dy[q] * pyr[i * 4 + next * 4]->step, pyr[i * 4 + next * 8 + q]->data.u8 }; for (y = 0; y < i_rows; y++) { for (x = 0; x < i_cols; x++) { float sum; int flag = 1; ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier; for (j = 0; j < cascade->count; ++j, ++classifier) { sum = 0; float* alpha = classifier->alpha; ccv_bbf_feature_t* feature = classifier->feature; for (k = 0; k < classifier->count; ++k, alpha += 2, ++feature) sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)]; if (sum < classifier->threshold) { flag = 0; break; } } if (flag) { ccv_comp_t comp; comp.rect = ccv_rect((int)((x * 4 + dx[q] * 2) * scale_x + 0.5), (int)((y * 4 + dy[q] * 2) * scale_y + 0.5), (int)(cascade->size.width * scale_x + 0.5), (int)(cascade->size.height * scale_y + 0.5)); comp.neighbors = 1; comp.classification.id = t; comp.classification.confidence = sum; ccv_array_push(seq, &comp); } u8[0] += 4; u8[1] += 2; u8[2] += 1; } u8[0] += paddings[0]; u8[1] += paddings[1]; u8[2] += paddings[2]; } } scale_x *= scale; scale_y *= scale; } /* the following code from OpenCV's haar feature implementation */ if(params.min_neighbors == 0) { for (i = 0; i < seq->rnum; i++) { ccv_comp_t* comp = (ccv_comp_t*)ccv_array_get(seq, i); ccv_array_push(result_seq, comp); } } else { idx_seq = 0; ccv_array_clear(seq2); // group retrieved rectangles in order to filter out noise int ncomp = ccv_array_group(seq, &idx_seq, _ccv_is_equal_same_class, 0); ccv_comp_t* comps = (ccv_comp_t*)ccmalloc((ncomp + 1) * sizeof(ccv_comp_t)); memset(comps, 0, (ncomp + 1) * sizeof(ccv_comp_t)); // count number of neighbors for(i = 0; i < seq->rnum; i++) { ccv_comp_t r1 = 
/* OpenCV-style grouping: rect coordinates are summed per group and averaged below ((sum*2+n)/(2n) rounds to nearest); a group survives only with >= min_neighbors members, and its confidence is the member maximum. */
*(ccv_comp_t*)ccv_array_get(seq, i); int idx = *(int*)ccv_array_get(idx_seq, i); if (comps[idx].neighbors == 0) comps[idx].classification.confidence = r1.classification.confidence; ++comps[idx].neighbors; comps[idx].rect.x += r1.rect.x; comps[idx].rect.y += r1.rect.y; comps[idx].rect.width += r1.rect.width; comps[idx].rect.height += r1.rect.height; comps[idx].classification.id = r1.classification.id; comps[idx].classification.confidence = ccv_max(comps[idx].classification.confidence, r1.classification.confidence); } // calculate average bounding box for(i = 0; i < ncomp; i++) { int n = comps[i].neighbors; if(n >= params.min_neighbors) { ccv_comp_t comp; comp.rect.x = (comps[i].rect.x * 2 + n) / (2 * n); comp.rect.y = (comps[i].rect.y * 2 + n) / (2 * n); comp.rect.width = (comps[i].rect.width * 2 + n) / (2 * n); comp.rect.height = (comps[i].rect.height * 2 + n) / (2 * n); comp.neighbors = comps[i].neighbors; comp.classification.id = comps[i].classification.id; comp.classification.confidence = comps[i].classification.confidence; ccv_array_push(seq2, &comp); } } // filter out small face rectangles inside large face rectangles for(i = 0; i < seq2->rnum; i++) { ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(seq2, i); int flag = 1; for(j = 0; j < seq2->rnum; j++) { ccv_comp_t r2 = *(ccv_comp_t*)ccv_array_get(seq2, j); int distance = (int)(r2.rect.width * 0.25 + 0.5); if(i != j && r1.classification.id == r2.classification.id && r1.rect.x >= r2.rect.x - distance && r1.rect.y >= r2.rect.y - distance && r1.rect.x + r1.rect.width <= r2.rect.x + r2.rect.width + distance && r1.rect.y + r1.rect.height <= r2.rect.y + r2.rect.height + distance && (r2.neighbors > ccv_max(3, r1.neighbors) || r1.neighbors < 3)) { flag = 0; break; } } if(flag) ccv_array_push(result_seq, &r1); } ccv_array_free(idx_seq); ccfree(comps); } } ccv_array_free(seq); ccv_array_free(seq2); ccv_array_t* result_seq2; /* the following code from OpenCV's haar feature implementation */ if (params.flags & 
/* With CCV_BBF_NO_NESTED set, overlapping groups are merged once more via _ccv_is_equal and only the highest-confidence rectangle of each group is kept; otherwise result_seq is returned as-is. The pyramid matrices allocated above are freed before returning. */
CCV_BBF_NO_NESTED) { result_seq2 = ccv_array_new(sizeof(ccv_comp_t), 64, 0); idx_seq = 0; // group retrieved rectangles in order to filter out noise int ncomp = ccv_array_group(result_seq, &idx_seq, _ccv_is_equal, 0); ccv_comp_t* comps = (ccv_comp_t*)ccmalloc((ncomp + 1) * sizeof(ccv_comp_t)); memset(comps, 0, (ncomp + 1) * sizeof(ccv_comp_t)); // count number of neighbors for(i = 0; i < result_seq->rnum; i++) { ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(result_seq, i); int idx = *(int*)ccv_array_get(idx_seq, i); if (comps[idx].neighbors == 0 || comps[idx].classification.confidence < r1.classification.confidence) { comps[idx].classification.confidence = r1.classification.confidence; comps[idx].neighbors = 1; comps[idx].rect = r1.rect; comps[idx].classification.id = r1.classification.id; } } // calculate average bounding box for(i = 0; i < ncomp; i++) if(comps[i].neighbors) ccv_array_push(result_seq2, &comps[i]); ccv_array_free(result_seq); ccfree(comps); } else { result_seq2 = result_seq; } for (i = 1; i < scale_upto + next * 2; i++) ccv_matrix_free(pyr[i * 4]); if (params.accurate) for (i = next * 2; i < scale_upto + next * 2; i++) { ccv_matrix_free(pyr[i * 4 + 1]); ccv_matrix_free(pyr[i * 4 + 2]); ccv_matrix_free(pyr[i * 4 + 3]); } if (params.size.height != _cascade[0]->size.height || params.size.width != _cascade[0]->size.width) ccv_matrix_free(pyr[0]); return result_seq2; } ccv_bbf_classifier_cascade_t* ccv_bbf_read_classifier_cascade(const char* directory) { char buf[1024]; sprintf(buf, "%s/cascade.txt", directory); int s, i; FILE* r = fopen(buf, "r"); if (r == 0) return 0; ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t)); s = fscanf(r, "%d %d %d", &cascade->count, &cascade->size.width, &cascade->size.height); assert(s > 0); cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t)); for (i = 0; i < cascade->count; i++) { 
/* Stages live beside cascade.txt as stage-<n>.txt; a missing stage file truncates cascade->count at the first failure. The *_read_binary / *_write_binary pair below (de)serializes the cascade via raw memcpy of counts, thresholds, feature arrays and alpha arrays. */
sprintf(buf, "%s/stage-%d.txt", directory, i); if (_ccv_read_bbf_stage_classifier(buf, &cascade->stage_classifier[i]) < 0) { cascade->count = i; break; } } fclose(r); return cascade; } ccv_bbf_classifier_cascade_t* ccv_bbf_classifier_cascade_read_binary(char* s) { int i; ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t)); memcpy(&cascade->count, s, sizeof(cascade->count)); s += sizeof(cascade->count); memcpy(&cascade->size.width, s, sizeof(cascade->size.width)); s += sizeof(cascade->size.width); memcpy(&cascade->size.height, s, sizeof(cascade->size.height)); s += sizeof(cascade->size.height); ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t)); for (i = 0; i < cascade->count; i++, classifier++) { memcpy(&classifier->count, s, sizeof(classifier->count)); s += sizeof(classifier->count); memcpy(&classifier->threshold, s, sizeof(classifier->threshold)); s += sizeof(classifier->threshold); classifier->feature = (ccv_bbf_feature_t*)ccmalloc(classifier->count * sizeof(ccv_bbf_feature_t)); classifier->alpha = (float*)ccmalloc(classifier->count * 2 * sizeof(float)); memcpy(classifier->feature, s, classifier->count * sizeof(ccv_bbf_feature_t)); s += classifier->count * sizeof(ccv_bbf_feature_t); memcpy(classifier->alpha, s, classifier->count * 2 * sizeof(float)); s += classifier->count * 2 * sizeof(float); } return cascade; } int ccv_bbf_classifier_cascade_write_binary(ccv_bbf_classifier_cascade_t* cascade, char* s, int slen) { int i; int len = sizeof(cascade->count) + sizeof(cascade->size.width) + sizeof(cascade->size.height); ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier; for (i = 0; i < cascade->count; i++, classifier++) len += sizeof(classifier->count) + sizeof(classifier->threshold) + classifier->count * sizeof(ccv_bbf_feature_t) + classifier->count * 2 * sizeof(float); if 
/* write_binary returns the required byte length in all cases; the buffer is only written when slen is large enough, so callers can probe with slen = 0 first. */
(slen >= len) { memcpy(s, &cascade->count, sizeof(cascade->count)); s += sizeof(cascade->count); memcpy(s, &cascade->size.width, sizeof(cascade->size.width)); s += sizeof(cascade->size.width); memcpy(s, &cascade->size.height, sizeof(cascade->size.height)); s += sizeof(cascade->size.height); classifier = cascade->stage_classifier; for (i = 0; i < cascade->count; i++, classifier++) { memcpy(s, &classifier->count, sizeof(classifier->count)); s += sizeof(classifier->count); memcpy(s, &classifier->threshold, sizeof(classifier->threshold)); s += sizeof(classifier->threshold); memcpy(s, classifier->feature, classifier->count * sizeof(ccv_bbf_feature_t)); s += classifier->count * sizeof(ccv_bbf_feature_t); memcpy(s, classifier->alpha, classifier->count * 2 * sizeof(float)); s += classifier->count * 2 * sizeof(float); } } return len; } void ccv_bbf_classifier_cascade_free(ccv_bbf_classifier_cascade_t* cascade) { int i; for (i = 0; i < cascade->count; ++i) { ccfree(cascade->stage_classifier[i].feature); ccfree(cascade->stage_classifier[i].alpha); } ccfree(cascade->stage_classifier); ccfree(cascade); }
utils.h
// NOTE(review): collapsed header preamble of MXNet common/utils.h -- Apache-2.0 license, include set,
// and current_process_id(): GetCurrentProcessId() on Windows, getpid() elsewhere.
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file utils.h * \brief Basic utilility functions. */ #ifndef MXNET_COMMON_UTILS_H_ #define MXNET_COMMON_UTILS_H_ #include <dmlc/logging.h> #include <dmlc/omp.h> #include <nnvm/graph.h> #include <nnvm/node.h> #include <mxnet/engine.h> #include <mxnet/ndarray.h> #include <mxnet/op_attr_types.h> #include <mxnet/graph_attr_types.h> #include <nnvm/graph_attr_types.h> #include <memory> #include <vector> #include <type_traits> #include <utility> #include <random> #include <string> #include <thread> #include <algorithm> #include <functional> #include <limits> #include "../operator/mxnet_op.h" #if MXNET_USE_MKLDNN == 1 #include "../operator/nn/mkldnn/mkldnn_base-inl.h" #endif #if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__) #include <windows.h> #else #include <unistd.h> #endif namespace mxnet { namespace common { #if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__) inline size_t current_process_id() { return ::GetCurrentProcessId(); } #else inline size_t current_process_id() { return getpid(); } #endif /*! * \brief IndPtr should be non-negative, in non-decreasing order, start with 0 * and end with value equal with size of indices. 
 * (The kernel functors below are Map()'d once per row/element and write an error code into *out when an invariant is violated.)
*/ struct csr_indptr_check { template<typename DType, typename IType> MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr, const nnvm::dim_t end, const nnvm::dim_t idx_size) { if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] || (i == 0 && indptr[i] != 0) || (i == end - 1 && indptr[end] != idx_size)) *out = kCSRIndPtrErr; } }; /*! * \brief Indices should be non-negative, less than the number of columns * and in ascending order per row. */ struct csr_idx_check { template<typename DType, typename IType, typename RType> MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx, const RType* indptr, const nnvm::dim_t ncols) { for (RType j = indptr[i]; j < indptr[i+1]; j++) { if (idx[j] >= ncols || idx[j] < 0 || (j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) { *out = kCSRIdxErr; break; } } } }; /*! * \brief Indices of RSPNDArray should be non-negative, * less than the size of first dimension and in ascending order */ struct rsp_idx_check { template<typename DType, typename IType> MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx, const nnvm::dim_t end, const nnvm::dim_t nrows) { if ((i < end && idx[i+1] <= idx[i]) || idx[i] < 0 || idx[i] >= nrows) *out = kRSPIdxErr; } }; template<typename xpu> void CheckFormatWrapper(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check); /*! * \brief Check the validity of CSRNDArray. * \param rctx Execution context. * \param input Input NDArray of CSRStorage. * \param err_cpu Error number on cpu. * \param full_check If true, rigorous check, O(N) operations, * otherwise basic check, O(1) operations. 
 * \note The basic shape check runs on CPU and writes kCSRShapeErr directly; the full check launches the kernels above on xpu and copies the error flag back into err_cpu.
*/ template<typename xpu> void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check) { using namespace op::mxnet_op; CHECK_EQ(input.storage_type(), kCSRStorage) << "CheckFormatCSRImpl is for CSRNDArray"; const mxnet::TShape shape = input.shape(); const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx); const mxnet::TShape indptr_shape = input.aux_shape(csr::kIndPtr); const mxnet::TShape storage_shape = input.storage_shape(); if ((shape.ndim() != 2) || (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) || (indptr_shape[0] != shape[0] + 1) || (idx_shape[0] != storage_shape[0])) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { DType* err = err_cpu.dptr<DType>(); *err = kCSRShapeErr; }); return; } if (full_check) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, { MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, { mshadow::Stream<xpu> *s = rctx.get_stream<xpu>(); NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_); TBlob val_xpu = ret_xpu.data(); Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>()); Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(), input.aux_data(csr::kIndPtr).dptr<RType>(), indptr_shape[0] - 1, idx_shape[0]); // no need to check indices if indices are empty if (idx_shape[0] != 0) { Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(), input.aux_data(csr::kIdx).dptr<IType>(), input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]); } mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s); }); }); }); } } /*! * \brief Check the validity of RowSparseNDArray. * \param rctx Execution context. * \param input Input NDArray of RowSparseStorage. * \param err_cpu Error number on cpu. 
 * \note Mirrors CheckFormatCSRImpl: O(1) aux-shape check first, then (optionally) the rsp_idx_check kernel on xpu. CheckFormatImpl below dispatches on storage type and is a no-op for dense storage.
* \param full_check If true, rigorous check, O(N) operations, * otherwise basic check, O(1) operations. */ template<typename xpu> void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check) { using namespace op::mxnet_op; CHECK_EQ(input.storage_type(), kRowSparseStorage) << "CheckFormatRSPImpl is for RSPNDArray"; const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx); if (idx_shape[0] != input.storage_shape()[0]) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { DType* err = err_cpu.dptr<DType>(); *err = kRSPShapeErr; }); return; } if (idx_shape[0] == 0) { return; } if (full_check) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, { mshadow::Stream<xpu> *s = rctx.get_stream<xpu>(); NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_); TBlob val_xpu = ret_xpu.data(); Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>()); Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0], val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(), idx_shape[0] - 1, input.shape()[0]); mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s); }); }); } } template<typename xpu> void CheckFormatImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check) { int stype = input.storage_type(); if (stype == kCSRStorage) { CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check); } else if (stype == kRowSparseStorage) { CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check); } else if (stype == kDefaultStorage) { // no-op for default storage } else { LOG(FATAL) << "Unknown storage type " << stype; } } /*! \brief Pick rows specified by user input index array from a row sparse ndarray * and save them in the output sparse ndarray. 
 * (Declaration only here; per-device implementations live elsewhere.)
*/ template<typename xpu> void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s, const NDArray& input_nd, const TBlob& idx_data, const OpReqType req, NDArray* output_nd); /* \brief Casts tensor storage type to the new type. */ template<typename xpu> void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output); /*! \brief returns true if all storage types in `vstorage` are the same as target `stype`. * false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype) { if (!vstorage.empty()) { for (const auto& i : vstorage) { if (i != stype) return false; } return true; } return false; } /*! \brief returns true if all storage types in `vstorage` are the same as target `stype1` * or `stype2'. Sets boolean if both found. * false is returned for empty inputs. 
 * \note has_both (when non-null) is reset to false up front and set to true only when both stype1 and stype2 actually occur.
*/ inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype1, const NDArrayStorageType stype2, bool *has_both) { if (has_both) { *has_both = false; } if (!vstorage.empty()) { uint8_t has = 0; for (const auto i : vstorage) { if (i == stype1) { has |= 1; } else if (i == stype2) { has |= 2; } else { return false; } } if (has_both) { *has_both = has == 3; } return true; } return false; } /*! \brief returns true if the storage types of arrays in `ndarrays` * are the same as target `stype`. false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype) { if (!ndarrays.empty()) { for (const auto& nd : ndarrays) { if (nd.storage_type() != stype) { return false; } } return true; } return false; } /*! \brief returns true if the storage types of arrays in `ndarrays` * are the same as targets `stype1` or `stype2`. false is returned for empty inputs. 
*/ inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype1, const NDArrayStorageType stype2, bool *has_both) { if (has_both) { *has_both = false; } if (!ndarrays.empty()) { uint8_t has = 0; for (const auto& nd : ndarrays) { const NDArrayStorageType stype = nd.storage_type(); if (stype == stype1) { has |= 1; } else if (stype == stype2) { has |= 2; } else { return false; } } if (has_both) { *has_both = has == 3; } return true; } return false; } /*! \brief returns true if storage type of any array in `ndarrays` * is the same as the target `stype`. false is returned for empty inputs. */ inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype) { if (!ndarrays.empty()) { for (const auto& nd : ndarrays) { if (nd.storage_type() == stype) { return true; } } } return false; } /*! \brief returns true if any storage type `ndstype` in `ndstypes` * is the same as the target `stype`. false is returned for empty inputs. */ inline bool ContainsStorageType(const std::vector<int>& ndstypes, const NDArrayStorageType stype) { if (!ndstypes.empty()) { for (const auto& ndstype : ndstypes) { if (ndstype == stype) { return true; } } } return false; } /*! \brief get string representation of dispatch_mode */ inline std::string dispatch_mode_string(const DispatchMode x) { switch (x) { case DispatchMode::kFCompute: return "fcompute"; case DispatchMode::kFComputeEx: return "fcompute_ex"; case DispatchMode::kFComputeFallback: return "fcompute_fallback"; case DispatchMode::kVariable: return "variable"; case DispatchMode::kUndefined: return "undefined"; } return "unknown"; } /*! \brief get string representation of storage_type */ inline std::string stype_string(const int x) { switch (x) { case kDefaultStorage: return "default"; case kCSRStorage: return "csr"; case kRowSparseStorage: return "row_sparse"; } return "unknown"; } /*! 
\brief get string representation of device type */ inline std::string dev_type_string(const int dev_type) { switch (dev_type) { case Context::kCPU: return "cpu"; case Context::kGPU: return "gpu"; case Context::kCPUPinned: return "cpu_pinned"; case Context::kCPUShared: return "cpu_shared"; } return "unknown"; } inline std::string attr_value_string(const nnvm::NodeAttrs& attrs, const std::string& attr_name, std::string default_val = "") { if (attrs.dict.find(attr_name) == attrs.dict.end()) { return default_val; } return attrs.dict.at(attr_name); } /*! \brief get string representation of the operator stypes */ inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs, const int dev_mask, const std::vector<int>& in_attrs, const std::vector<int>& out_attrs) { std::ostringstream os; os << "operator = " << attrs.op->name << "\ninput storage types = ["; for (const int attr : in_attrs) { os << stype_string(attr) << ", "; } os << "]\n" << "output storage types = ["; for (const int attr : out_attrs) { os << stype_string(attr) << ", "; } os << "]\n" << "params = {"; for (auto kv : attrs.dict) { os << "\"" << kv.first << "\" : " << kv.second << ", "; } os << "}\n" << "context.dev_mask = " << dev_type_string(dev_mask); return os.str(); } /*! 
\brief get string representation of the operator */ inline std::string operator_string(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs) { std::string result = ""; std::vector<int> in_stypes; std::vector<int> out_stypes; in_stypes.reserve(inputs.size()); out_stypes.reserve(outputs.size()); auto xform = [](const NDArray arr) -> int { return arr.storage_type(); }; std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform); std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform); result += operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes); return result; } /*! \brief log message once. Intended for storage fallback warning messages. */ inline void LogOnce(const std::string& message) { typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore; auto log_store = LogStore::Get(); if (log_store->find(message) == log_store->end()) { LOG(INFO) << message; log_store->insert(message); } } /*! \brief log storage fallback event */ inline void LogStorageFallback(const nnvm::NodeAttrs& attrs, const int dev_mask, const std::vector<int>* in_attrs, const std::vector<int>* out_attrs) { static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true); if (!log) return; const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs); std::ostringstream os; const char* warning = "\nThe operator with default storage type will be dispatched " "for execution. You're seeing this warning message because the operator above is unable " "to process the given ndarrays with specified storage types, context and parameter. " "Temporary dense ndarrays are generated in order to execute the operator. " "This does not affect the correctness of the programme. 
" "You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to " "0 to suppress this warning."; os << "\nStorage type fallback detected:\n" << op_str << warning; LogOnce(os.str()); #if MXNET_USE_MKLDNN == 1 if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. " "You can re-enable by setting MXNET_MKLDNN_ENABLED=1"); if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_MKLDNN_CACHE_NUM is set." "Should only be set if " "your model has variable input shapes, " "as cache size may grow unbounded"); #endif } // heuristic to dermine number of threads per GPU inline int GetNumThreadsPerGPU() { // This is resource efficient option. return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2); } // heuristic to get number of matching colors. // this decides how much parallelism we can get in each GPU. inline int GetExecNumMatchColor() { // This is resource efficient option. int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1); return std::min(num_match_color, GetNumThreadsPerGPU()); } template<typename T, typename V> V ParallelAccumulate(const T* a, const int n, V start) { V sum = start; #pragma omp parallel for reduction(+:sum) for (int i = 0; i < n; ++i) { sum += a[i]; } return sum; } /*! * \brief * Helper function for ParallelSort. * DO NOT call this function directly. * Use the interface ParallelSort instead. * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h */ template<typename RandomIt, typename Compare> void ParallelSortHelper(RandomIt first, size_t len, size_t grainsize, const Compare& comp) { if (len < grainsize) { std::sort(first, first+len, comp); } else { std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp); ParallelSortHelper(first+len/2, len - len/2, grainsize, comp); thr.join(); std::inplace_merge(first, first+len/2, first+len, comp); } } /*! * \brief * Sort the elements in the range [first, last) into the ascending order defined by * the comparator comp. 
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const auto num = std::distance(first, last);
  // Grain size keeps each task at least 16K elements so threading pays off.
  size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024*16));
  ParallelSortHelper(first, num, grainsize, comp);
}

/*!
 * \brief
 * Sort the elements in the range [first, last) into ascending order.
 * The elements are compared using the default < operator.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
  ParallelSort(first, last, num_threads,
               std::less<typename std::iterator_traits<RandomIt>::value_type>());
}

/*!
 * \brief Random Engine
 */
typedef std::mt19937 RANDOM_ENGINE;

/*!
 * \brief Helper functions.
 */
namespace helper {

/*!
 * \brief Helper for non-array type `T`.
 */
template <class T>
struct UniqueIf {
  /*!
   * \brief Type of `T`.
   */
  using SingleObject = std::unique_ptr<T>;
};

/*!
 * \brief Helper for an array of unknown bound `T`.
 */
template <class T>
struct UniqueIf<T[]> {
  /*!
   * \brief Type of `T`.
   */
  using UnknownBound = std::unique_ptr<T[]>;
};

/*!
 * \brief Helper for an array of known bound `T`.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
  /*!
   * \brief Type of `T`.
   */
  using KnownBound = void;
};

}  // namespace helper

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 * \return `std``::``unique_ptr` of an instance of type `T`.
 *
 * Constructs a non-array type `T`. The arguments `args` are passed to the
 * constructor of `T`. The function does not participate in the overload
 * resolution if `T` is an array type.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param n The size of the array to construct.
 * \return `std``::``unique_ptr` of an instance of type `T`.
 *
 * Constructs an array of unknown bound `T`. The function does not participate
 * in the overload resolution unless `T` is an array of unknown bound.
 */
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
  using U = typename std::remove_extent<T>::type;
  // `{}` value-initializes every element.
  return std::unique_ptr<T>(new U[n]{});
}

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 *
 * Constructs an arrays of known bound is disallowed.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;

/*!
 * \brief Look up the registered FCompute-style attribute `name` of `op`
 *        for the device of `ctx` (cpu or gpu); fatal for other devices.
 */
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
                      const Context& ctx) {
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
  if (ctx.dev_mask() == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  } else if (ctx.dev_mask() == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  } else {
    LOG(FATAL) << "Unknown device mask " << ctx.dev_mask();
    return nullptr;
  }
}

/*!
 * \brief Return the max integer value representable in the type `T` without loss of precision.
 */
template <typename T>
constexpr size_t MaxIntegerValue() {
  // For floating types this is 2^digits (digits = mantissa bits).
  return std::is_integral<T>::value ?
    std::numeric_limits<T>::max():
    size_t(2) << (std::numeric_limits<T>::digits - 1);
}

// half_t has a 10-bit mantissa -> 2^11 per the formula above.
template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
  return size_t(2) << 10;
}

// bf16_t: 2^15 by the same convention.
template <>
constexpr size_t MaxIntegerValue<mshadow::bfloat::bf16_t>() {
  return size_t(2) << 14;
}

// Number of bits needed to represent `a` (returns 1 for a==0 or a==1).
MSHADOW_XINLINE int ilog2ul(size_t a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}

// Same as ilog2ul but for unsigned int.
MSHADOW_XINLINE int ilog2ui(unsigned int a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}

/*!
 * \brief Return an NDArray of all zeros.
 */
inline NDArray InitZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                         const Context &ctx, const int dtype) {
  // NDArray with default storage
  if (stype == kDefaultStorage) {
    NDArray ret(shape, ctx, false, dtype);
    ret = 0;
    return ret;
  }
  // NDArray with non-default storage. Storage allocation is always delayed.
  return NDArray(stype, shape, ctx, true, dtype);
}

/*!
 * \brief Helper to add a NDArray of zeros to a std::vector.
 */
inline void EmplaceBackZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                             const Context &ctx, const int dtype,
                             std::vector<NDArray> *vec) {
  // NDArray with default storage
  if (stype == kDefaultStorage) {
    vec->emplace_back(shape, ctx, false, dtype);
    vec->back() = 0;
  } else {
    // NDArray with non-default storage. Storage allocation is always delayed.
    vec->emplace_back(stype, shape, ctx, true, dtype);
  }
}

/*!
 * \brief parallelize copy by OpenMP.
 */
template<typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
  // Threshold below which a plain memcpy beats spawning OpenMP threads.
  static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
  if (size >= copy_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] = src[i];
    }
  } else {
#pragma GCC diagnostic push
#if __GNUC__ >= 8
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
    std::memcpy(dst, src, sizeof(DType) * size);
#pragma GCC diagnostic pop
  }
}

/*!
* \breif parallelize add by OpenMP */ template<typename DType> inline void ParallelAdd(DType* dst, const DType* src, index_t size) { static index_t add_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000); if (size >= add_block_size) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t i = 0; i < size; ++i) { dst[i] += src[i]; } } else { for (index_t i = 0; i < size; ++i) { dst[i] += src[i]; } } } /*! * \brief If numpy compatibility is turned off (default), the shapes passed in * by users follow the legacy shape definition: * 1. 0 ndim means the shape is completely unknown. * 2. 0 dim size means the dim size is unknown. * We need to convert those shapes to use the numpy shape definition: * 1. 0 ndim means it's a scalar tensor. * 2. -1 ndim means the shape is unknown. * 3. 0 dim size means no elements in that dimension. * 4. -1 dim size means the dimension's size is unknown. * so that operator's infer shape function can work in backend. * \param shape to be converted. * Note: It is possible that the shape to be converted is already * numpy compatible. For example, when a subgraph operator's infer * shape function is called from the infer shape pass of the whole * graph, its input/output shapes have been converted to numpy * compatible shapes. */ inline void ConvertToNumpyShape(mxnet::TShape* shape) { if (shape->ndim() == 0) { // legacy shape ndim = 0 means unknown *shape = mxnet::TShape(); // unknown shape ndim = -1 } else { for (int j = 0; j < shape->ndim(); ++j) { if ((*shape)[j] == 0) { // legacy shape dim_size = 0 means unknown (*shape)[j] = -1; // unknown dim size = -1 } } } } inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) { for (size_t i = 0; i < shapes->size(); ++i) { ConvertToNumpyShape(&(shapes->at(i))); } } /*! * \brief This is function is used to convert shapes returned by * the infer shape functions/pass to the legacy shape definition. 
 */
inline void ConvertToLegacyShape(mxnet::TShape* shape) {
  if (!mxnet::ndim_is_known(*shape)) {
    *shape = mxnet::TShape(0, -1);  // unknown shape -> legacy ndim 0
  } else {
    for (int j = 0; j < shape->ndim(); ++j) {
      if (!mxnet::dim_size_is_known(*shape, j)) {
        (*shape)[j] = 0;  // unknown dim size -> legacy 0
      }
    }
  }
}

// Vector overload: converts every shape in place.
inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) {
  for (size_t i = 0; i < shapes->size(); ++i) {
    ConvertToLegacyShape(&(shapes->at(i)));
  }
}

// Invoke the monitor callback on the inputs of node `nid` (defined in .cc).
void ExecuteMonInputCallback(
    const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
    size_t nid, const std::function<void(const char *, const char *, void *)>
                    &monitor_callback);

// Invoke the monitor callback on the outputs of node `nid` (defined in .cc).
void ExecuteMonOutputCallback(
    const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
    size_t nid, const std::function<void(const char *, const char *, void *)>
                    &monitor_callback);

/*!
 * \brief This is function can return the output names of a NodeEntry.
 */
static inline std::string GetOutputName(const nnvm::NodeEntry& e) {
  // Wrap the entry in a temporary symbol to reuse symbol name listing.
  nnvm::Symbol sym;
  sym.outputs.push_back(e);
  return sym.ListOutputNames()[0];
}

// Converts negative axis values to the equivalent non-negative ones.
// NOTE(review): the bound used is src.ndim(), i.e. the length of the axes
// list itself — callers presumably pass a full permutation where the list
// length equals the tensor rank; verify before reusing for partial axes.
inline mxnet::TShape CanonicalizeAxes(const mxnet::TShape& src) {
  // convert negative axes to positive values
  const int ndim = src.ndim();
  mxnet::TShape axes = src;
  for (int i = 0; i < ndim; ++i) {
    if (axes[i] < 0) {
      axes[i] += ndim;
    }
    CHECK(axes[i] >= 0 && axes[i] < ndim) << "axes[" << i << "]="
      << axes[i] << " exceeds the range ["
      << 0 << ", " << ndim << ")";
  }
  return axes;
}

// True for the three classic float dtypes (bfloat16 is intentionally or
// accidentally excluded — confirm against callers before extending).
inline bool is_float(const int dtype) {
  return dtype == mshadow::kFloat32 || dtype == mshadow::kFloat64 || dtype == mshadow::kFloat16;
}

// Returns the "wider" of two dtypes: float beats int, wider beats narrower.
// Mixing Uint8 and Int8 is rejected by the CHECK below.
inline int get_more_precise_type(const int type1, const int type2) {
  if (type1 == type2) return type1;
  if (is_float(type1) && is_float(type2)) {
    if (type1 == mshadow::kFloat64 || type2 == mshadow::kFloat64) {
      return mshadow::kFloat64;
    }
    if (type1 == mshadow::kFloat32 || type2 == mshadow::kFloat32) {
      return mshadow::kFloat32;
    }
    return mshadow::kFloat16;
  } else if (is_float(type1) || is_float(type2)) {
    // float always wins over an integer type
    return is_float(type1) ? type1 : type2;
  }
  if (type1 == mshadow::kInt64 || type2 == mshadow::kInt64) {
    return mshadow::kInt64;
  }
  if (type1 == mshadow::kInt32 || type2 == mshadow::kInt32) {
    return mshadow::kInt32;
  }
  CHECK(!((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
          (type1 == mshadow::kInt8 && type2 == mshadow::kUint8)))
    << "1 is UInt8 and 1 is Int8 should not get here";
  if (type1 == mshadow::kUint8 || type2 == mshadow::kUint8) {
    return mshadow::kUint8;
  }
  return mshadow::kInt8;
}

// Output dtype for numpy-style binary ops: the Uint8/Int8 mix promotes to
// Int32 (numpy behavior); everything else follows get_more_precise_type.
inline int np_binary_out_infer_type(const int type1, const int type2) {
  if ((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
      (type1 == mshadow::kInt8 && type2 == mshadow::kUint8)) {
    return mshadow::kInt32;
  }
  return get_more_precise_type(type1, type2);
}

}  // namespace common
}  // namespace mxnet
#endif  // MXNET_COMMON_UTILS_H_
// triad.h
#ifndef TRIAD_H #define TRIAD_H namespace TSnap { ///////////////////////////////////////////////// // Triads and clustering coefficient /// Computes the average clustering coefficient as defined in Watts and Strogatz, Collective dynamics of 'small-world' networks. ##TSnap::GetClustCf template <class PGraph> double GetClustCf(const PGraph& Graph, int SampleNodes=-1); /// Computes the distribution of average clustering coefficient. ##TSnap::GetClustCf1 template <class PGraph> double GetClustCf(const PGraph& Graph, TFltPrV& DegToCCfV, int SampleNodes=-1); /// Computes the distribution of average clustering coefficient as well as the number of open and closed triads in the graph. ##TSnap::GetClustCf2 template <class PGraph> double GetClustCf(const PGraph& Graph, TFltPrV& DegToCCfV, int64& ClosedTriadsX, int64& OpenTriadsX, int SampleNodes=-1); /// Returns clustering coefficient of a particular node. ##TSnap::GetNodeClustCf template <class PGraph> double GetNodeClustCf(const PGraph& Graph, const int& NId); /// Computes clustering coefficient of each node of the Graph. ##TSnap::GetClustCf1 template <class PGraph> void GetNodeClustCf(const PGraph& Graph, TIntFltH& NIdCCfH); /// Returns the number of triangles in a graph. ##TSnap::GetTriads template <class PGraph> int64 GetTriads(const PGraph& Graph, int SampleNodes=-1); /// Computes the number of Closed and Open triads. ##TSnap::GetTriads1 template <class PGraph> int64 GetTriads(const PGraph& Graph, int64& ClosedTriadsX, int64& OpenTriadsX, int SampleNodes); /// Computes the number of open and close triads for every node of the network. ##TSnap::GetTriads2 template <class PGraph> void GetTriads(const PGraph& Graph, TIntTrV& NIdCOTriadV, int SampleNodes=-1); /// Counts the number of edges that participate in at least one triad. ##TSnap::GetTriadEdges template <class PGraph> int GetTriadEdges(const PGraph& Graph, int SampleEdges=-1); /// Returns the number of undirected triads a node \c NId participates in. 
##TSnap::GetNodeTriads template <class PGraph> int GetNodeTriads(const PGraph& Graph, const int& NId); /// Returns number of Open and Closed triads a node \c NId participates in. ##TSnap::GetNodeTriads1 template <class PGraph> int GetNodeTriads(const PGraph& Graph, const int& NId, int& ClosedNTriadsX, int& OpenNTriadsX); /// Returns the number of triads between a node \c NId and a subset of its neighbors \c GroupSet. ##TSnap::GetNodeTriads3 template <class PGraph> int GetNodeTriads(const PGraph& Graph, const int& NId, const TIntSet& GroupSet, int& InGroupEdgesX, int& InOutGroupEdgesX, int& OutGroupEdgesX); /// Triangle Participation Ratio: For each node counts how many triangles it participates in and then returns a set of pairs (number of triangles, number of such nodes). ##TSnap::GetTriadParticip template <class PGraph> void GetTriadParticip(const PGraph& Graph, TIntPrV& TriadCntV); /// Returns a number of shared neighbors between a pair of nodes NId1 and NId2. template<class PGraph> int GetCmnNbrs(const PGraph& Graph, const int& NId1, const int& NId2); /// Returns the shared neighbors between a pair of nodes NId1 and NId2. template<class PGraph> int GetCmnNbrs(const PGraph& Graph, const int& NId1, const int& NId2, TIntV& NbrV); /// Returns the number of length 2 directed paths between a pair of nodes NId1, NId2 (NId1 --> U --> NId2). template<class PGraph> int GetLen2Paths(const PGraph& Graph, const int& NId1, const int& NId2); /// Returns the 2 directed paths between a pair of nodes NId1, NId2 (NId1 --> U --> NId2). ##TSnap::GetLen2Paths template<class PGraph> int GetLen2Paths(const PGraph& Graph, const int& NId1, const int& NId2, TIntV& NbrV); /// Returns the number of triangles in graph \c Graph, original version template<class PGraph> int64 CountTriangles(const PGraph& Graph); /// Returns the number of triangles in graph \c Graph, newer version template<class PGraph> int64 GetTriangleCnt(const PGraph& Graph); /// Merges neighbors. 
template<class PGraph> void MergeNbrs(TIntV& NeighbourV, const typename PGraph::TObj::TNodeI& NI); void GetMergeSortedV(TIntV& NeighbourV, TNGraph::TNodeI NI); /// Returns sorted vector \c NbrV containing unique in or out neighbors of node \c NId in graph \c Graph template <class PGraph> void GetUniqueNbrV(const PGraph& Graph, const int& NId, TIntV& NbrV); /// Returns the number of common elements in two sorted TInt vectors int GetCommon(TIntV& A, TIntV& B); ///////////////////////////////////////////////// // Implementation template <class PGraph> double GetClustCf(const PGraph& Graph, int SampleNodes) { TIntTrV NIdCOTriadV; GetTriads(Graph, NIdCOTriadV, SampleNodes); if (NIdCOTriadV.Empty()) { return 0.0; } double SumCcf = 0.0; for (int i = 0; i < NIdCOTriadV.Len(); i++) { const double OpenCnt = NIdCOTriadV[i].Val2()+NIdCOTriadV[i].Val3(); if (OpenCnt > 0) { SumCcf += NIdCOTriadV[i].Val2() / OpenCnt; } } IAssert(SumCcf>=0); return SumCcf / double(NIdCOTriadV.Len()); } template <class PGraph> double GetClustCf(const PGraph& Graph, TFltPrV& DegToCCfV, int SampleNodes) { TIntTrV NIdCOTriadV; GetTriads(Graph, NIdCOTriadV, SampleNodes); THash<TInt, TFltPr> DegSumCnt; double SumCcf = 0.0; for (int i = 0; i < NIdCOTriadV.Len(); i++) { const int D = NIdCOTriadV[i].Val2()+NIdCOTriadV[i].Val3(); const double Ccf = D!=0 ? 
NIdCOTriadV[i].Val2() / double(D) : 0.0; TFltPr& SumCnt = DegSumCnt.AddDat(Graph->GetNI(NIdCOTriadV[i].Val1).GetDeg()); SumCnt.Val1 += Ccf; SumCnt.Val2 += 1; SumCcf += Ccf; } // get average clustering coefficient for each degree DegToCCfV.Gen(DegSumCnt.Len(), 0); for (int d = 0; d < DegSumCnt.Len(); d++) { DegToCCfV.Add(TFltPr(DegSumCnt.GetKey(d).Val, double(DegSumCnt[d].Val1()/DegSumCnt[d].Val2()))); } DegToCCfV.Sort(); return SumCcf / double(NIdCOTriadV.Len()); } template <class PGraph> double GetClustCf(const PGraph& Graph, TFltPrV& DegToCCfV, int64& ClosedTriads, int64& OpenTriads, int SampleNodes) { TIntTrV NIdCOTriadV; GetTriads(Graph, NIdCOTriadV, SampleNodes); THash<TInt, TFltPr> DegSumCnt; double SumCcf = 0.0; int64 closedTriads = 0; int64 openTriads = 0; for (int i = 0; i < NIdCOTriadV.Len(); i++) { const int D = NIdCOTriadV[i].Val2()+NIdCOTriadV[i].Val3(); const double Ccf = D!=0 ? NIdCOTriadV[i].Val2() / double(D) : 0.0; closedTriads += NIdCOTriadV[i].Val2; openTriads += NIdCOTriadV[i].Val3; TFltPr& SumCnt = DegSumCnt.AddDat(Graph->GetNI(NIdCOTriadV[i].Val1).GetDeg()); SumCnt.Val1 += Ccf; SumCnt.Val2 += 1; SumCcf += Ccf; } // get average clustering coefficient for each degree DegToCCfV.Gen(DegSumCnt.Len(), 0); for (int d = 0; d < DegSumCnt.Len(); d++) { DegToCCfV.Add(TFltPr(DegSumCnt.GetKey(d).Val, DegSumCnt[d].Val1()/DegSumCnt[d].Val2())); } //if(closedTriads/3 > (uint64) TInt::Mx) { WarnNotify(TStr::Fmt("[%s line %d] %g closed triads.\n", __FILE__, __LINE__, float(closedTriads/3)).CStr()); } //if(openTriads > (uint64) TInt::Mx) { WarnNotify(TStr::Fmt("[%s line %d] %g open triads.\n", __FILE__, __LINE__, float(openTriads/3)).CStr()); } ClosedTriads = closedTriads/int64(3); // each triad is counted 3 times OpenTriads = openTriads; DegToCCfV.Sort(); return SumCcf / double(NIdCOTriadV.Len()); } template <class PGraph> double GetNodeClustCf(const PGraph& Graph, const int& NId) { int Open, Closed; GetNodeTriads(Graph, NId, Open, Closed); //const double Deg 
= Graph->GetNI(NId).GetDeg(); return (Open+Closed)==0 ? 0 : double(Open)/double(Open+Closed); } template <class PGraph> void GetNodeClustCf(const PGraph& Graph, TIntFltH& NIdCCfH) { TIntTrV NIdCOTriadV; GetTriads(Graph, NIdCOTriadV); NIdCCfH.Clr(false); for (int i = 0; i < NIdCOTriadV.Len(); i++) { const int D = NIdCOTriadV[i].Val2()+NIdCOTriadV[i].Val3(); const double CCf = D!=0 ? NIdCOTriadV[i].Val2() / double(D) : 0.0; NIdCCfH.AddDat(NIdCOTriadV[i].Val1, CCf); } } template <class PGraph> int64 GetTriads(const PGraph& Graph, int SampleNodes) { int64 OpenTriads, ClosedTriads; return GetTriads(Graph, ClosedTriads, OpenTriads, SampleNodes); } template <class PGraph> int64 GetTriads(const PGraph& Graph, int64& ClosedTriads, int64& OpenTriads, int SampleNodes) { TIntTrV NIdCOTriadV; GetTriads(Graph, NIdCOTriadV, SampleNodes); uint64 closedTriads = 0; uint64 openTriads = 0; for (int i = 0; i < NIdCOTriadV.Len(); i++) { closedTriads += NIdCOTriadV[i].Val2; openTriads += NIdCOTriadV[i].Val3; } //IAssert(closedTriads/3 < (uint64) TInt::Mx); //IAssert(openTriads < (uint64) TInt::Mx); ClosedTriads = int64(closedTriads/3); // each triad is counted 3 times OpenTriads = int64(openTriads); return ClosedTriads; } // Function pretends that the graph is undirected (count unique connected triples of nodes) // This implementation is slower, it uses hash tables directly template <class PGraph> void GetTriads_v0(const PGraph& Graph, TIntTrV& NIdCOTriadV, int SampleNodes) { const bool IsDir = Graph->HasFlag(gfDirected); TIntSet NbrH; TIntV NIdV; TRnd Rnd(0); Graph->GetNIdV(NIdV); NIdV.Shuffle(Rnd); if (SampleNodes == -1) { SampleNodes = Graph->GetNodes(); } NIdCOTriadV.Clr(false); NIdCOTriadV.Reserve(SampleNodes); for (int node = 0; node < SampleNodes; node++) { typename PGraph::TObj::TNodeI NI = Graph->GetNI(NIdV[node]); if (NI.GetDeg() < 2) { NIdCOTriadV.Add(TIntTr(NI.GetId(), 0, 0)); // zero triangles continue; } // find neighborhood NbrH.Clr(false); for (int e = 0; e < 
NI.GetOutDeg(); e++) {
      if (NI.GetOutNId(e) != NI.GetId()) {
        NbrH.AddKey(NI.GetOutNId(e)); } }
    if (IsDir) {
      for (int e = 0; e < NI.GetInDeg(); e++) {
        if (NI.GetInNId(e) != NI.GetId()) {
          NbrH.AddKey(NI.GetInNId(e)); } } }
    // count connected neighbors
    int OpenCnt=0, CloseCnt=0;
    for (int srcNbr = 0; srcNbr < NbrH.Len(); srcNbr++) {
      const typename PGraph::TObj::TNodeI SrcNode = Graph->GetNI(NbrH.GetKey(srcNbr));
      for (int dstNbr = srcNbr+1; dstNbr < NbrH.Len(); dstNbr++) {
        const int dstNId = NbrH.GetKey(dstNbr);
        if (SrcNode.IsNbrNId(dstNId)) { CloseCnt++; }  // is edge
        else { OpenCnt++; } } }
    IAssert(2*(OpenCnt+CloseCnt) == NbrH.Len()*(NbrH.Len()-1));
    NIdCOTriadV.Add(TIntTr(NI.GetId(), CloseCnt, OpenCnt));
  }
}

// Function pretends that the graph is undirected (count unique connected triples of nodes)
// This implementation is faster, it converts hash tables to vectors
template <class PGraph>
void GetTriads(const PGraph& Graph, TIntTrV& NIdCOTriadV, int SampleNodes) {
  const bool IsDir = Graph->HasFlag(gfDirected);
  TIntSet NbrH;
  TIntV NIdV;
  //TRnd Rnd(0);
  TRnd Rnd(1);
  int NNodes;
  TIntV Nbrs;
  int NId;  // NOTE: shadowed by the loop-local NId in the setup loops below
  int64 hcount;  // sampled-node counter; written but never read
  hcount = 0;
  NNodes = Graph->GetNodes();
  Graph->GetNIdV(NIdV);
  NIdV.Shuffle(Rnd);
  if (SampleNodes == -1) {
    SampleNodes = NNodes;
  }
  // NbrV is indexed directly by node id, hence sized by the max id.
  int MxId = -1;
  for (int i = 0; i < NNodes; i++) {
    if (NIdV[i] > MxId) { MxId = NIdV[i]; }
  }
  TVec<TIntV> NbrV(MxId + 1);
  if (IsDir) {
    // get in and out neighbors
    for (int node = 0; node < NNodes; node++) {
      int NId = NIdV[node];
      NbrV[NId] = TIntV();
      GetUniqueNbrV(Graph, NId, NbrV[NId]);
    }
  } else {
    // get only out neighbors
    for (int node = 0; node < NNodes; node++) {
      int NId = NIdV[node];
      typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId);
      NbrV[NId] = TIntV();
      NbrV[NId].Reserve(NI.GetOutDeg());
      NbrV[NId].Reduce(0);
      for (int i = 0; i < NI.GetOutDeg(); i++) {
        NbrV[NId].Add(NI.GetOutNId(i));
      }
    }
  }
  NIdCOTriadV.Clr(false);
  NIdCOTriadV.Reserve(SampleNodes);
  for (int node = 0; node < SampleNodes; node++) {
    typename PGraph::TObj::TNodeI NI = Graph->GetNI(NIdV[node]);
    int NLen;
    NId = NI.GetId();
    hcount++;
    if (NI.GetDeg() < 2) {
      NIdCOTriadV.Add(TIntTr(NId, 0, 0));  // zero triangles
      continue;
    }
    // NOTE(review): this copies the whole neighbor list per node; a reference
    // would avoid it, provided GetCommon never mutates its arguments.
    Nbrs = NbrV[NId];
    NLen = Nbrs.Len();
    // count connected neighbors: each triangle edge is seen from both
    // endpoints, hence the /2 below.
    int OpenCnt1 = 0, CloseCnt1 = 0;
    for (int srcNbr = 0; srcNbr < NLen; srcNbr++) {
      int Count = GetCommon(NbrV[NbrV[NId][srcNbr]],Nbrs);
      CloseCnt1 += Count;
    }
    CloseCnt1 /= 2;
    OpenCnt1 = (NLen*(NLen-1))/2 - CloseCnt1;
    NIdCOTriadV.Add(TIntTr(NId, CloseCnt1, OpenCnt1));
  }
}

// Triangle counting via higher-degree neighbor lists; each triangle is
// counted exactly once at its lowest-ranked vertex.
// NOTE(review): uses TNGraph::TNodeI despite being a template — appears to
// assume a PNGraph-like graph; confirm before calling with other types.
template<class PGraph>
int64 CountTriangles(const PGraph& Graph) {
  THash<TInt, TInt> H;
  TIntV MapV;
  int ind = 0;
  for (TNGraph::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) {
    H.AddDat(NI.GetId(), ind);
    MapV.Add(NI.GetId());
    ind += 1;
  }
  TVec<TIntV> HigherDegNbrV(ind);
#ifdef USE_OPENMP
#pragma omp parallel for schedule(dynamic)
#endif
  for (int i = 0; i < ind; i++) {
    TNGraph::TNodeI NI = Graph->GetNI(MapV[i]);
    TIntV NbrV;
    GetMergeSortedV(NbrV, NI);
    TIntV V;
    // keep only neighbors with (Deg, Id) strictly greater than this node
    for (int j = 0; j < NbrV.Len(); j++) {
      TInt Vert = NbrV[j];
      TInt Deg = Graph->GetNI(Vert).GetDeg();
      if (Deg > NI.GetDeg() || (Deg == NI.GetDeg() && Vert > NI.GetId())) {
        V.Add(Vert);
      }
    }
    HigherDegNbrV[i] = V;
  }
  int64 cnt = 0;
#ifdef USE_OPENMP
#pragma omp parallel for schedule(dynamic) reduction(+:cnt)
#endif
  for (int i = 0; i < HigherDegNbrV.Len(); i++) {
    for (int j = 0; j < HigherDegNbrV[i].Len(); j++) {
      TInt NbrInd = H.GetDat(HigherDegNbrV[i][j]);
      int64 num = GetCommon(HigherDegNbrV[i], HigherDegNbrV[NbrInd]);
      cnt += num;
    }
  }
  return cnt;
}

// Newer triangle counter; instrumented with wall-clock and CPU timers that
// print per-phase timings to stdout.
template<class PGraph>
int64 GetTriangleCnt(const PGraph& Graph) {
  struct timeval start, end;
  struct timeval startall, endall;
  float delta;
  TTmProfiler Profiler;
  int TimerId = Profiler.AddTimer("Profiler");
  int TimerAll = Profiler.AddTimer("ProfilerAll");
  const int NNodes = Graph->GetNodes();
  TIntV MapV(NNodes);
  TVec<typename PGraph::TObj::TNodeI> NV(NNodes);
  NV.Reduce(0);
  Profiler.ResetTimer(TimerAll);
  Profiler.StartTimer(TimerAll);
  gettimeofday(&startall, NULL);
  Profiler.ResetTimer(TimerId);
  Profiler.StartTimer(TimerId);
  gettimeofday(&start, NULL);
  // Phase 1: build index<->id maps (IndV maps a node id back to its index).
  int MxId = -1;
  int ind = 0;
  for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) {
    NV.Add(NI);
    int Id = NI.GetId();
    if (Id > MxId) { MxId = Id; }
    MapV[ind] = Id;
    ind++;
  }
  TIntV IndV(MxId+1);
  for (int j = 0; j < NNodes; j++) {
    IndV[MapV[j]] = j;
  }
  gettimeofday(&end, NULL);
  Profiler.StopTimer(TimerId);
  delta = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6;
  printf("__nodemap__\ttime %7.3f\tcpu %8.3f\n", delta, Profiler.GetTimerSec(TimerId));
  Profiler.ResetTimer(TimerId);
  Profiler.StartTimer(TimerId);
  gettimeofday(&start, NULL);
  ind = MapV.Len();
  Profiler.ResetTimer(TimerId);
  Profiler.StartTimer(TimerId);
  gettimeofday(&start, NULL);
  // Phase 2: pre-allocate per-node neighbor vectors (serial, to keep the
  // allocations out of the parallel loop below).
  TVec<TIntV> HigherDegNbrV(ind);
  for (int i = 0; i < ind; i++) {
    HigherDegNbrV[i] = TVec<TInt>();
    HigherDegNbrV[i].Reserve(NV[i].GetDeg());
    HigherDegNbrV[i].Reduce(0);
  }
  gettimeofday(&end, NULL);
  Profiler.StopTimer(TimerId);
  delta = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6;
  printf("__valloc__\ttime %7.3f\tcpu %8.3f\n", delta, Profiler.GetTimerSec(TimerId));
  Profiler.ResetTimer(TimerId);
  Profiler.StartTimer(TimerId);
  gettimeofday(&start, NULL);
  // Phase 3: per node, merge in/out neighbors (sorted, deduplicated) and
  // compact in place to those with higher (Deg, Id) rank.
#ifdef USE_OPENMP
#pragma omp parallel for schedule(dynamic)
#endif
  for (int i = 0; i < ind; i++) {
    typename PGraph::TObj::TNodeI NI = NV[i];
    //HigherDegNbrV[i] = TVec<TInt>();
    //HigherDegNbrV[i].Reserve(NI.GetDeg());
    //HigherDegNbrV[i].Reduce(0);
    MergeNbrs<PGraph>(HigherDegNbrV[i], NI);
    int k = 0;
    for (int j = 0; j < HigherDegNbrV[i].Len(); j++) {
      TInt Vert = HigherDegNbrV[i][j];
      TInt Deg = NV[IndV[Vert]].GetDeg();
      if (Deg > NI.GetDeg() || (Deg == NI.GetDeg() && Vert > NI.GetId())) {
        HigherDegNbrV[i][k] = Vert;
        k++;
      }
    }
    HigherDegNbrV[i].Reduce(k);
  }
  gettimeofday(&end, NULL);
  Profiler.StopTimer(TimerId);
  delta = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6;
  printf("__sort__\ttime %7.3f\tcpu %8.3f\n", delta, Profiler.GetTimerSec(TimerId));
  Profiler.ResetTimer(TimerId);
  Profiler.StartTimer(TimerId);
  gettimeofday(&start, NULL);
  // Phase 4: count; each triangle contributes once at its lowest-rank vertex.
  int64 cnt = 0;
#ifdef USE_OPENMP
#pragma omp parallel for schedule(dynamic) reduction(+:cnt)
#endif
  for (int i = 0; i < HigherDegNbrV.Len(); i++) {
    for (int j = 0; j < HigherDegNbrV[i].Len(); j++) {
      //TInt NbrInd = H.GetDat(HigherDegNbrV[i][j]);
      TInt NbrInd = IndV[HigherDegNbrV[i][j]];
      int64 num = GetCommon(HigherDegNbrV[i], HigherDegNbrV[NbrInd]);
      cnt += num;
    }
  }
  gettimeofday(&end, NULL);
  Profiler.StopTimer(TimerId);
  delta = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6;
  printf("__count__\ttime %7.3f\tcpu %8.3f\n", delta, Profiler.GetTimerSec(TimerId));
  gettimeofday(&endall, NULL);
  Profiler.StopTimer(TimerAll);
  delta = ((endall.tv_sec - startall.tv_sec) * 1000000u + endall.tv_usec - startall.tv_usec) / 1.e6;
  printf("__all__   \ttime %7.3f\tcpu %8.3f\n", delta, Profiler.GetTimerSec(TimerAll));
  return cnt;
}

// Merge the (sorted) in- and out-neighbor lists of NI into NeighbourV,
// dropping duplicates (classic two-pointer merge; `prev` suppresses repeats).
template<class PGraph>
void MergeNbrs(TIntV& NeighbourV, const typename PGraph::TObj::TNodeI& NI) {
  int j = 0;
  int k = 0;
  int prev = -1;
  int indeg = NI.GetInDeg();
  int outdeg = NI.GetOutDeg();
  //while (j < NI.GetInDeg() && k < NI.GetOutDeg()) {
  if (indeg > 0  &&  outdeg > 0) {
    int v1 = NI.GetInNId(j);
    int v2 = NI.GetOutNId(k);
    while (1) {
      if (v1 <= v2) {
        if (prev != v1) {
          NeighbourV.Add(v1);
          prev = v1;
        }
        j += 1;
        if (j >= indeg) {
          break;
        }
        v1 = NI.GetInNId(j);
      } else {
        if (prev != v2) {
          NeighbourV.Add(v2);
          prev = v2;
        }
        k += 1;
        if (k >= outdeg) {
          break;
        }
        v2 = NI.GetOutNId(k);
      }
    }
  }
  // Drain whichever list was not exhausted in the merge above.
  while (j < indeg) {
    int v = NI.GetInNId(j);
    if (prev != v) {
      NeighbourV.Add(v);
      prev = v;
    }
    j += 1;
  }
  while (k < outdeg) {
    int v = NI.GetOutNId(k);
    if (prev != v) {
      NeighbourV.Add(v);
      prev = v;
    }
    k += 1;
  }
}

// Count the number of edges that participate in at least one triad
template <class PGraph>
int GetTriadEdges(const PGraph& Graph, int SampleEdges) {
  const bool IsDir = Graph->HasFlag(gfDirected);
  TIntSet
NbrH;
  int TriadEdges = 0;
  for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) {
    // Hash the neighborhood of NI (in+out for directed graphs).
    NbrH.Clr(false);
    for (int e = 0; e < NI.GetOutDeg(); e++) {
      if (NI.GetOutNId(e) != NI.GetId()) {
        NbrH.AddKey(NI.GetOutNId(e)); } }
    if (IsDir) {
      for (int e = 0; e < NI.GetInDeg(); e++) {
        if (NI.GetInNId(e) != NI.GetId()) {
          NbrH.AddKey(NI.GetInNId(e)); } } }
    for (int e = 0; e < NI.GetOutDeg(); e++) {
      if (!IsDir && NI.GetId()<NI.GetOutNId(e)) { continue; }  // for undirected graphs count each edge only once
      const typename PGraph::TObj::TNodeI SrcNode = Graph->GetNI(NI.GetOutNId(e));
      bool Triad=false;
      for (int e1 = 0; e1 < SrcNode.GetOutDeg(); e1++) {
        if (NbrH.IsKey(SrcNode.GetOutNId(e1))) { Triad=true; break; } }
      if (IsDir && ! Triad) {
        for (int e1 = 0; e1 < SrcNode.GetInDeg(); e1++) {
          if (NbrH.IsKey(SrcNode.GetInNId(e1))) { Triad=true; break; } } }
      if (Triad) { TriadEdges++; }
    }
  }
  return TriadEdges;
}

// Returns number of undirected triads a node participates in
template <class PGraph>
int GetNodeTriads(const PGraph& Graph, const int& NId) {
  int ClosedTriads=0, OpenTriads=0;
  return GetNodeTriads(Graph, NId, ClosedTriads, OpenTriads);
}

// Return number of undirected triads a node participates in
template <class PGraph>
int GetNodeTriads(const PGraph& Graph, const int& NId, int& ClosedTriads, int& OpenTriads) {
  const typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId);
  ClosedTriads=0;  OpenTriads=0;
  if (NI.GetDeg() < 2) { return 0; }  // with <2 neighbors no triad is possible
  // find neighborhood
  TIntSet NbrSet(NI.GetDeg());
  for (int e = 0; e < NI.GetOutDeg(); e++) {
    if (NI.GetOutNId(e) != NI.GetId()) {  // exclude self edges
      NbrSet.AddKey(NI.GetOutNId(e)); } }
  if (Graph->HasFlag(gfDirected)) {
    for (int e = 0; e < NI.GetInDeg(); e++) {
      if (NI.GetInNId(e) != NI.GetId()) {  // exclude self edges
        NbrSet.AddKey(NI.GetInNId(e)); } } }
  // count connected neighbors: every unordered neighbor pair is either a
  // closed triad (pair connected) or an open one.
  for (int srcNbr = 0; srcNbr < NbrSet.Len(); srcNbr++) {
    const typename PGraph::TObj::TNodeI SrcNode = Graph->GetNI(NbrSet.GetKey(srcNbr));
    for (int dstNbr = srcNbr+1; dstNbr < NbrSet.Len(); dstNbr++) {
      const int dstNId = NbrSet.GetKey(dstNbr);
      if (SrcNode.IsNbrNId(dstNId)) { ClosedTriads++; }
      else { OpenTriads++; }
    }
  }
  return ClosedTriads;
}

// Node NId and a subset of its neighbors GroupSet
// InGroupEdges ... triads (NId, g1, g2), where g1 and g2 are in GroupSet
// InOutGroupEdges ... triads (NId, g1, o1), where g1 in GroupSet and o1 not in GroupSet
// OutGroupEdges ... triads (NId, o1, o2), where o1 and o2 are not in GroupSet
template <class PGraph>
int GetNodeTriads(const PGraph& Graph, const int& NId, const TIntSet& GroupSet, int& InGroupEdges, int& InOutGroupEdges, int& OutGroupEdges) {
  const typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId);
  const bool IsDir = Graph->HasFlag(gfDirected);
  InGroupEdges=0;  InOutGroupEdges=0;  OutGroupEdges=0;
  if (NI.GetDeg() < 2) { return 0; }
  // find neighborhood
  TIntSet NbrSet(NI.GetDeg());
  for (int e = 0; e < NI.GetOutDeg(); e++) {
    if (NI.GetOutNId(e) != NI.GetId()) {  // exclude self edges
      NbrSet.AddKey(NI.GetOutNId(e)); } }
  if (IsDir) {
    for (int e = 0; e < NI.GetInDeg(); e++) {
      if (NI.GetInNId(e) != NI.GetId()) {
        NbrSet.AddKey(NI.GetInNId(e)); } } }
  // count connected neighbors, classified by GroupSet membership of the pair
  for (int srcNbr = 0; srcNbr < NbrSet.Len(); srcNbr++) {
    const int NbrId = NbrSet.GetKey(srcNbr);
    const bool NbrIn = GroupSet.IsKey(NbrId);
    const typename PGraph::TObj::TNodeI SrcNode = Graph->GetNI(NbrId);
    for (int dstNbr = srcNbr+1; dstNbr < NbrSet.Len(); dstNbr++) {
      const int DstNId = NbrSet.GetKey(dstNbr);
      if (SrcNode.IsNbrNId(DstNId)) {  // triad (NId, NbrId, DstNid)
        bool DstIn = GroupSet.IsKey(DstNId);
        if (NbrIn && DstIn) { InGroupEdges++; }
        else if (NbrIn || DstIn) { InOutGroupEdges++; }
        else { OutGroupEdges++; }
      }
    }
  }
  return InGroupEdges;
}

// For each node count how many triangles it participates in
template <class PGraph>
void GetTriadParticip(const PGraph& Graph, TIntPrV& TriadCntV) {
  TIntH TriadCntH;  // triangle count -> number of nodes with that count
  for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) {
    const int Triads = GetNodeTriads(Graph, NI.GetId());
    TriadCntH.AddDat(Triads) += 1;
  }
  TriadCntH.GetKeyDatPrV(TriadCntV);
  TriadCntV.Sort();
}

template<class PGraph>
int GetCmnNbrs(const PGraph& Graph, const int& NId1, const int& NId2) {
  TIntV NbrV;
  return GetCmnNbrs(Graph, NId1, NId2, NbrV);
}

// Get common neighbors between a pair of nodes (undirected)
template<class PGraph>
int GetCmnNbrs(const PGraph& Graph, const int& NId1, const int& NId2, TIntV& NbrV) {
  if (! Graph->IsNode(NId1) || ! Graph->IsNode(NId2)) { NbrV.Clr(false); return 0; }
  typename PGraph::TObj::TNodeI NI1 = Graph->GetNI(NId1);
  typename PGraph::TObj::TNodeI NI2 = Graph->GetNI(NId2);
  NbrV.Clr(false);
  NbrV.Reserve(TMath::Mn(NI1.GetDeg(), NI2.GetDeg()));
  TIntSet NSet1(NI1.GetDeg()), NSet2(NI2.GetDeg());
  // NSet1 = neighbors of NId1 (excluding the endpoints themselves)
  for (int i = 0; i < NI1.GetDeg(); i++) {
    const int nid = NI1.GetNbrNId(i);
    if (nid!=NId1 && nid!=NId2) {
      NSet1.AddKey(nid); }
  }
  // NSet2 = neighbors of NId2 that are also in NSet1
  for (int i = 0; i < NI2.GetDeg(); i++) {
    const int nid = NI2.GetNbrNId(i);
    if (NSet1.IsKey(nid)) {
      NSet2.AddKey(nid); }
  }
  NSet2.GetKeyV(NbrV);
  return NbrV.Len();
}

// PUNGraph specialization: neighbor lists are sorted, so a linear merge
// replaces the hash sets of the generic version.
template<>
inline int GetCmnNbrs<PUNGraph>(const PUNGraph& Graph, const int& NId1, const int& NId2, TIntV& NbrV) {
  if (! Graph->IsNode(NId1) || !
Graph->IsNode(NId2)) { NbrV.Clr(false); return 0; } const TUNGraph::TNodeI NI1 = Graph->GetNI(NId1); const TUNGraph::TNodeI NI2 = Graph->GetNI(NId2); int i=0, j=0; NbrV.Clr(false); NbrV.Reserve(TMath::Mn(NI1.GetDeg(), NI2.GetDeg())); while (i < NI1.GetDeg() && j < NI2.GetDeg()) { const int nid = NI1.GetNbrNId(i); while (j < NI2.GetDeg() && NI2.GetNbrNId(j) < nid) { j++; } if (j < NI2.GetDeg() && nid==NI2.GetNbrNId(j) && nid!=NId1 && nid!=NId2) { IAssert(NbrV.Empty() || NbrV.Last() < nid); NbrV.Add(nid); j++; } i++; } return NbrV.Len(); } // get number of length 2 directed paths between a pair of nodes // for a pair of nodes (i,j): |{u: (i,u) and (u,j) }| template<class PGraph> int GetLen2Paths(const PGraph& Graph, const int& NId1, const int& NId2) { TIntV NbrV; return GetLen2Paths(Graph, NId1, NId2, NbrV); } // get number of length 2 directed paths between a pair of nodes // for a pair of nodes (i,j): {u: (i,u) and (u,j) } template<class PGraph> int GetLen2Paths(const PGraph& Graph, const int& NId1, const int& NId2, TIntV& NbrV) { const typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId1); NbrV.Clr(false); NbrV.Reserve(NI.GetOutDeg()); for (int e = 0; e < NI.GetOutDeg(); e++) { const typename PGraph::TObj::TNodeI MidNI = Graph->GetNI(NI.GetOutNId(e)); if (MidNI.IsOutNId(NId2)) { NbrV.Add(MidNI.GetId()); } } return NbrV.Len(); } template <class PGraph> void GetUniqueNbrV(const PGraph& Graph, const int& NId, TIntV& NbrV) { typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); NbrV.Reserve(NI.GetDeg()); NbrV.Reduce(0); int j = 0; int k = 0; int Prev = -1; int InDeg = NI.GetInDeg(); int OutDeg = NI.GetOutDeg(); if (InDeg > 0 && OutDeg > 0) { int v1 = NI.GetInNId(j); int v2 = NI.GetOutNId(k); while (1) { if (v1 <= v2) { if (Prev != v1) { if (v1 != NId) { NbrV.Add(v1); Prev = v1; } } j += 1; if (j >= InDeg) { break; } v1 = NI.GetInNId(j); } else { if (Prev != v2) { if (v2 != NId) { NbrV.Add(v2); } Prev = v2; } k += 1; if (k >= OutDeg) { break; } v2 = NI.GetOutNId(k); } 
} }
  // drain remaining in-neighbors (sorted), skipping duplicates and NId itself
  while (j < InDeg) {
    int v = NI.GetInNId(j);
    if (Prev != v) {
      if (v != NId) { NbrV.Add(v); }
      Prev = v;
    }
    j += 1;
  }
  // drain remaining out-neighbors
  while (k < OutDeg) {
    int v = NI.GetOutNId(k);
    if (Prev != v) {
      if (v != NId) { NbrV.Add(v); }
      Prev = v;
    }
    k += 1;
  }
}

}; // namespace TSnap

/////////////////////////////////////////////////
// Node and Edge Network Constraint (by Ron Burt)
// works for directed and undirected graphs (but not for multigraphs)
template <class PGraph>
class TNetConstraint {
public:
  PGraph Graph;
  THash<TIntPr, TFlt> NodePrCH; // pairs of nodes that have non-zero network constraint
public:
  // GraphPt: graph to analyze; CalcaAll: compute all constraints on construction
  TNetConstraint(const PGraph& GraphPt, const bool& CalcaAll=true);
  // number of stored (node, node) constraint pairs
  int Len() const { return NodePrCH.Len(); }
  // constraint value by index into the hash table
  double GetC(const int& ConstraintN) const { return NodePrCH[ConstraintN]; }
  // node pair by index into the hash table
  TIntPr GetNodePr(const int& ConstraintN) const { return NodePrCH.GetKey(ConstraintN); }
  double GetEdgeC(const int& NId1, const int& NId2) const;
  double GetNodeC(const int& NId) const;
  void AddConstraint(const int& NId1, const int& NId2);
  void CalcConstraints();
  void CalcConstraints(const int& NId);
  void Dump() const;
  static void Test();
};

// Constructor: stores the graph and (by default) computes all constraints immediately.
template <class PGraph>
TNetConstraint<PGraph>::TNetConstraint(const PGraph& GraphPt, const bool& CalcaAll) : Graph(GraphPt) {
  CAssert(!
HasGraphFlag(typename PGraph::TObj, gfMultiGraph)); // must not be multigraph
  if (CalcaAll) {
    CalcConstraints(); }
}

// Constraint of the (ordered) pair NId1 -> NId2; 0.0 if the pair was never stored.
template <class PGraph>
double TNetConstraint<PGraph>::GetEdgeC(const int& NId1, const int& NId2) const {
  if (NodePrCH.IsKey(TIntPr(NId1, NId2))) {
    return NodePrCH.GetDat(TIntPr(NId1, NId2)); }
  else { return 0.0; }
}

// Total constraint of node NId: sum of stored constraints over all pairs (NId, *).
// NOTE(review): the scan over adjacent hash-table slots assumes NodePrCH is
// sorted by key (CalcConstraints() ends with SortByKey()) so that all pairs with
// Val1()==NId are contiguous -- confirm before calling after ad-hoc AddConstraint calls.
template <class PGraph>
double TNetConstraint<PGraph>::GetNodeC(const int& NId) const {
  typename PGraph::TObj::TNodeI NI1 = Graph->GetNI(NId);
  if (NI1.GetOutDeg() == 0) { return 0.0; }
  int KeyId = -1;
  // find any stored pair (NId, out-neighbor) to anchor the scan
  for (int k = 0; k<NI1.GetOutDeg(); k++) {
    KeyId = NodePrCH.GetKeyId(TIntPr(NI1.GetId(), NI1.GetOutNId(k)));
    if (KeyId > -1) { break; }
  }
  if (KeyId < 0) { return 0.0; }
  double Constraint = NodePrCH[KeyId];
  // sum neighboring entries with the same first node (keys sorted by pair)
  for (int i = KeyId-1; i >-1 && NodePrCH.GetKey(i).Val1()==NId; i--) {
    Constraint += NodePrCH[i]; }
  for (int i = KeyId+1; i < NodePrCH.Len() && NodePrCH.GetKey(i).Val1()==NId; i++) {
    Constraint += NodePrCH[i]; }
  return Constraint;
}

// Computes and stores the constraint of the ordered pair (NId1, NId2):
// the squared sum of the direct proportion 1/outdeg(NId1) (if the edge exists)
// and the indirect proportions through shared out-neighbors.
template <class PGraph>
void TNetConstraint<PGraph>::AddConstraint(const int& NId1, const int& NId2) {
  // self-pairs and already-stored pairs are skipped
  if (NId1==NId2 || NodePrCH.IsKey(TIntPr(NId1, NId2))) { return; }
  typename PGraph::TObj::TNodeI NI1 = Graph->GetNI(NId1);
  double Constraint = 0.0;
  if (NI1.IsOutNId(NId2)) { // is direct edge
    Constraint += 1.0/(double) NI1.GetOutDeg();
  }
  const double SrcC = 1.0/(double) NI1.GetOutDeg();
  for (int e = 0; e < NI1.GetOutDeg(); e++) {
    const int MidNId = NI1.GetOutNId(e);
    if (MidNId == NId1 || MidNId == NId2) { continue; }
    const typename PGraph::TObj::TNodeI MidNI = Graph->GetNI(MidNId);
    if (MidNI.IsOutNId(NId2)) { // indirect path NId1 -> MidNId -> NId2
      Constraint += SrcC * (1.0/(double)MidNI.GetOutDeg());
    }
  }
  if (Constraint==0) { return; } // only non-zero constraints are stored
  Constraint = TMath::Sqr(Constraint);
  NodePrCH.AddDat(TIntPr(NId1, NId2), Constraint);
}

// Computes constraints for all node pairs joined by an edge or an open triad.
template <class PGraph>
void TNetConstraint<PGraph>::CalcConstraints() {
  // add edges
  for (typename PGraph::TObj::TEdgeI EI = Graph->BegEI(); EI < Graph->EndEI(); EI++) {
    AddConstraint(EI.GetSrcNId(), EI.GetDstNId());
AddConstraint(EI.GetDstNId(), EI.GetSrcNId()); // both orientations of the edge
  }
  // add open triads
  for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) {
    for (int i = 0; i < NI.GetDeg(); i++) {
      const int NId1 = NI.GetNbrNId(i);
      for (int j = 0; j < NI.GetDeg(); j++) {
        const int NId2 = NI.GetNbrNId(j);
        AddConstraint(NId1, NId2);
      }
    }
  }
  NodePrCH.SortByKey(); // GetNodeC relies on the keys being sorted
}

// calculate constraints around a node id
// Visits NId's out-neighbors and their out-neighbors; SeenSet avoids
// re-adding the same (NId, EndNId) pair.
template <class PGraph>
void TNetConstraint<PGraph>::CalcConstraints(const int& NId) {
  typename PGraph::TObj::TNodeI StartNI = Graph->GetNI(NId);
  TIntSet SeenSet;
  for (int e = 0; e < StartNI.GetOutDeg(); e++) {
    typename PGraph::TObj::TNodeI MidNI = Graph->GetNI(StartNI.GetOutNId(e));
    AddConstraint(NId, MidNI.GetId());
    for (int i = 0; i < MidNI.GetOutDeg(); i++) {
      const int EndNId = MidNI.GetOutNId(i);
      if (! SeenSet.IsKey(EndNId)) {
        AddConstraint(NId, EndNId);
        SeenSet.AddKey(EndNId);
      }
    }
  }
}

// Dumps all stored (node, node) constraint values to stdout.
template <class PGraph>
void TNetConstraint<PGraph>::Dump() const {
  printf("Edge network constraint: (%d, %d)\n", Graph->GetNodes(), Graph->GetEdges());
  for (int e = 0; e < NodePrCH.Len(); e++) {
    printf(" %4d %4d : %f\n", NodePrCH.GetKey(e).Val1(), NodePrCH.GetKey(e).Val2(), NodePrCH[e].Val);
  }
  printf("\n");
}

// example from page 56 of Structural Holes by Ronald S. Burt
// (http://www.amazon.com/Structural-Holes-Social-Structure-Competition/dp/0674843711)
template <class PGraph>
void TNetConstraint<PGraph>::Test() {
  PUNGraph G = TUNGraph::New();
  G->AddNode(0); G->AddNode(1); G->AddNode(2); G->AddNode(3);
  G->AddNode(4); G->AddNode(5); G->AddNode(6);
  G->AddEdge(0,1); G->AddEdge(0,2); G->AddEdge(0,3);
  G->AddEdge(0,4); G->AddEdge(0,5); G->AddEdge(0,6);
  G->AddEdge(1,2); G->AddEdge(1,5); G->AddEdge(1,6);
  G->AddEdge(2,4);
  TNetConstraint<PUNGraph> NetConstraint(G, true);
  // NetConstraint.CalcConstraints(0);
  NetConstraint.Dump();
  printf("middle node network constraint: %f\n", NetConstraint.GetNodeC(0));
}

#endif // TRIAD_H
residualbased_newton_raphson_mpc_contact_strategy.h
// KRATOS  ___|  |                   |                   |
//       \___ \  __|  __| |   |  __| __| |   |  __| _` | |
//             | |   |    |   | (    |   |   | |   (   | |
//       _____/ \__|_|    \__,_|\___|\__|\__,_|_|  \__,_|_| MECHANICS
//
//  License:         BSD License
//                   license: StructuralMechanicsApplication/license.txt
//
//  Main authors:    Vicente Mataix Ferrandiz
//

#if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY)
#define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY

/* System Includes */

/* External Includes */

/* Project includes */
#include "contact_structural_mechanics_application_variables.h"
#include "includes/kratos_parameters.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/variables.h"

// Strategies
#include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h"

// Contact criteria
#include "custom_strategies/custom_convergencecriterias/mpc_contact_criteria.h"

// Utilities
#include "utilities/variable_utils.h"
#include "utilities/color_utilities.h"
#include "utilities/math_utils.h"

// // Processes
// #include "processes/fast_transfer_between_model_parts_process.h"

namespace Kratos {

///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name  Enum's
///@{

///@}
///@name  Functions
///@{

///@}
///@name Kratos Classes
///@{

/**
 * @class ResidualBasedNewtonRaphsonMPCContactStrategy
 * @ingroup ContactStructuralMechanicsApplication
 * @brief Contact Newton Raphson class
 * @details This class is a specialization of the Newton Raphson strategy with some custom modifications for contact problems
 * @author Vicente Mataix Ferrandiz
 */
template<class TSparseSpace,
         class TDenseSpace, // = DenseSpace<double>,
         class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
         >
class ResidualBasedNewtonRaphsonMPCContactStrategy :
    public ResidualBasedNewtonRaphsonStrategy< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
    ///@name Type Definitions
    ///@{

    /** Counted pointer of ClassName */
    KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedNewtonRaphsonMPCContactStrategy );

    typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType;

    typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;

    typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType;

    typedef MPCContactCriteria<TSparseSpace, TDenseSpace> TMPCContactCriteriaType;

    typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;

    typedef typename BaseType::TDataType TDataType;

    typedef TSparseSpace SparseSpaceType;

    typedef typename BaseType::TSchemeType TSchemeType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

    typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;

    typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;

    typedef ModelPart::NodesContainerType NodesArrayType;

    typedef ModelPart::ElementsContainerType ElementsArrayType;

    typedef ModelPart::ConditionsContainerType ConditionsArrayType;

    typedef ModelPart::MasterSlaveConstraintContainerType ConstraintArrayType;

    typedef std::size_t IndexType;

    typedef std::size_t SizeType;

    /**
     * @brief Default constructor
     * @param rModelPart The model part of the problem
     * @param pScheme The integration scheme
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param MaxIterations The maximum number of iterations
     * @param CalculateReactions The flag for the reaction calculation
     * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag The flag that allows to move the mesh
     */
    ResidualBasedNewtonRaphsonMPCContactStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
        IndexType MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false,
        Parameters ThisParameters = Parameters(R"({})")
        )
        : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ),
        mThisParameters(ThisParameters)
    {
        KRATOS_TRY;

        // We create the contact criteria
        mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>();

        Parameters default_parameters = GetDefaultParameters();
        mThisParameters.ValidateAndAssignDefaults(default_parameters);

        KRATOS_CATCH("");
    }

    /**
     * @brief Default constructor
     * @param rModelPart The model part of the problem
     * @param pScheme The integration scheme
     * @param pNewLinearSolver The linear solver employed
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param MaxIterations The maximum number of iterations
     * @param CalculateReactions The flag for the reaction calculation
     * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag The flag that allows to move the mesh
     */
    ResidualBasedNewtonRaphsonMPCContactStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TLinearSolver::Pointer pNewLinearSolver,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        IndexType MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false,
        Parameters ThisParameters = Parameters(R"({})")
        )
        : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag),
        mThisParameters(ThisParameters)
    {
        KRATOS_TRY;

        // We create the contact criteria
        mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>();

        Parameters default_parameters = GetDefaultParameters();
        mThisParameters.ValidateAndAssignDefaults(default_parameters);

        KRATOS_CATCH("");
    }

    /**
     * @brief Default constructor
     * @param rModelPart The model part of the problem
     * @param pScheme The integration scheme
     * @param pNewLinearSolver The linear solver employed
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param MaxIterations The maximum number of iterations
     * @param CalculateReactions The flag for the reaction calculation
     * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag The flag that allows to move the mesh
     */
    ResidualBasedNewtonRaphsonMPCContactStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TLinearSolver::Pointer pNewLinearSolver,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
        IndexType MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false,
        Parameters ThisParameters = Parameters(R"({})")
        )
        : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ),
        mThisParameters(ThisParameters)
    {
        KRATOS_TRY;

        // We create the contact criteria
        mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>();

        Parameters default_parameters = GetDefaultParameters();
        mThisParameters.ValidateAndAssignDefaults(default_parameters);

        KRATOS_CATCH("");
    }

    /**
     * Destructor.
     */
    ~ResidualBasedNewtonRaphsonMPCContactStrategy() override = default;

    //******************** OPERATIONS ACCESSIBLE FROM THE INPUT: ************************//
    //***********************************************************************************//

    /**
     * @brief Operation to predict the solution ... if it is not called a trivial predictor is used in which the
     * values of the solution step of interest are assumed equal to the old values
     */
    void Predict() override
    {
        KRATOS_TRY

        BaseType::Predict();

        // Getting model part
        ModelPart& r_model_part = StrategyBaseType::GetModelPart();

        // We get the system
        TSystemMatrixType& rA = *BaseType::mpA;
        TSystemVectorType& rDx = *BaseType::mpDx;
        TSystemVectorType& rb = *BaseType::mpb;

        // We solve the system in order to check the active set once
        TSparseSpace::SetToZero(rA);
        TSparseSpace::SetToZero(rDx);
        TSparseSpace::SetToZero(rb);
        typename TSchemeType::Pointer p_scheme = BaseType::GetScheme();
        typename TBuilderAndSolverType::Pointer p_builder_and_solver = BaseType::GetBuilderAndSolver();
        p_builder_and_solver->BuildAndSolve(p_scheme, BaseType::GetModelPart(), rA, rDx, rb);

        // Check active set (echo silenced so the check produces no convergence output)
        const SizeType echo_level_convergence_criteria = BaseType::mpConvergenceCriteria->GetEchoLevel();
        BaseType::mpConvergenceCriteria->SetEchoLevel(0);
        mpMPCContactCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb);
        BaseType::mpConvergenceCriteria->SetEchoLevel(echo_level_convergence_criteria);

        KRATOS_CATCH("")
    }

    /**
     * @brief Initialization of member variables and prior operations
     */
    void Initialize() override
    {
        KRATOS_TRY;

        // Computing nodal weights
        ComputeNodalWeights();

        BaseType::Initialize();

        KRATOS_CATCH("");
    }

    /**
     * @brief The problem of interest is solved.
     * @details This function calls sequentially: Initialize(), InitializeSolutionStep(), Predict(),
     * SolveSolutionStep() and FinalizeSolutionStep().
     * All those functions can otherwise be called separately.
     */
    double Solve() override
    {
        this->Initialize();
        this->InitializeSolutionStep();
        this->Predict();
        this->SolveSolutionStep();
        this->FinalizeSolutionStep(); // TODO: Comment for proper work of interaction

        return 0.0;
    }

    /**
     * @brief Performs all the required operations that should be done (for each step)
     * before solving the solution step.
     * @details A member variable should be used as a flag to make sure this function is called only once per step.
     */
    void InitializeSolutionStep() override
    {
        // Computing nodal weights
        ComputeNodalWeights();

        BaseType::InitializeSolutionStep();

        // // If enforcing NTN
        // const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool();
        // if (enforce_ntn) {
        //     EnforcingNTN();
        // }
    }

    /**
     * @brief Performs all the required operations that should be done (for each step)
     * after solving the solution step.
     */
    void FinalizeSolutionStep() override
    {
        KRATOS_TRY;

        BaseType::FinalizeSolutionStep();

        KRATOS_CATCH("");
    }

    /**
     * @brief Solves the current step.
     * @details This function returns true if a solution has been found, false otherwise.
     */
    bool SolveSolutionStep() override
    {
        KRATOS_TRY;

        bool is_converged = false;

        // Getting model part
        ModelPart& r_model_part = StrategyBaseType::GetModelPart();

        // We get the process info
        ProcessInfo& r_process_info = r_model_part.GetProcessInfo();

        if (r_process_info.Is(INTERACTION)) {
            // We get the system
            TSystemMatrixType& rA = *BaseType::mpA;
            TSystemVectorType& rDx = *BaseType::mpDx;
            TSystemVectorType& rb = *BaseType::mpb;

            // Outer (active-set) loop around the Newton-Raphson cycle
            int inner_iteration = 0;
            const SizeType echo_level_convergence_criteria = BaseType::mpConvergenceCriteria->GetEchoLevel();
            while (!is_converged && inner_iteration < mThisParameters["inner_loop_iterations"].GetInt()) {
                ++inner_iteration;

                if (echo_level_convergence_criteria > 0 && r_model_part.GetCommunicator().MyPID() == 0 ) {
                    KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("INNER ITERATION: ") << inner_iteration << std::endl;
                }

                // We solve one loop
                r_process_info[NL_ITERATION_NUMBER] = 1;
                is_converged = AuxiliarSolveSolutionStep();

                // We check the convergence
                if (r_process_info[NL_ITERATION_NUMBER] == 1) r_process_info[NL_ITERATION_NUMBER] = 2; // Trigger check
                is_converged = mpMPCContactCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb);

                if (echo_level_convergence_criteria > 0 && r_model_part.GetCommunicator().MyPID() == 0 ) {
                    if (is_converged) KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FGRN("CONVERGED")) << std::endl;
                    else KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("INNER ITERATION: ") << BOLDFONT(FRED("NOT CONVERGED")) << std::endl;
                }
            }
        } else {
            is_converged = AuxiliarSolveSolutionStep();
        }

        return is_converged;

        KRATOS_CATCH("");
    }

    /**
     * @brief Solves the current step. This function returns true if a solution has been found, false otherwise. (auxiliar method)
     */
    bool AuxiliarSolveSolutionStep()
    {
        // Getting flag INTERACTION
        ModelPart& r_model_part = StrategyBaseType::GetModelPart();
        const bool update_each_nl_iteration = mThisParameters["update_each_nl_iteration"].GetBool();
        VariableUtils().SetFlag(INTERACTION, update_each_nl_iteration, r_model_part.GetSubModelPart("ComputingContact").Conditions());

        // Pointers needed in the solution
        typename TSchemeType::Pointer p_scheme = this->GetScheme();
        typename TBuilderAndSolverType::Pointer p_builder_and_solver = this->GetBuilderAndSolver();
        auto& r_dof_set = p_builder_and_solver->GetDofSet();

        TSystemMatrixType& rA = *BaseType::mpA;
        TSystemVectorType& rDx = *BaseType::mpDx;
        TSystemVectorType& rb = *BaseType::mpb;

        // Initializing the parameters of the Newton-Raphson cycle
        unsigned int iteration_number = 1;
        r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number;
        bool is_converged = false;
        bool residual_is_updated = false;

        // Computing nodal weights
        ComputeNodalWeights();

        p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
        BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
        is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);

        // // If enforcing NTN
        // const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool();
        // if (enforce_ntn) {
        //     EnforcingNTN();
        // }

        // Function to perform the building and the solving phase.
        if (StrategyBaseType::mRebuildLevel > 0 || StrategyBaseType::mStiffnessMatrixIsBuilt == false) {
            TSparseSpace::SetToZero(rA);
            TSparseSpace::SetToZero(rDx);
            TSparseSpace::SetToZero(rb);
            p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
        } else {
            TSparseSpace::SetToZero(rDx); //Dx=0.00;
            TSparseSpace::SetToZero(rb);
            p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
        }

        // Debugging info
        BaseType::EchoInfo(iteration_number);

        // Updating the results stored in the database
        BaseType::UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag());

        p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
        BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);

        // Calculate reactions if required
        if (BaseType::mCalculateReactionsFlag)
            p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);

        if (is_converged) {
            // Residual may need to be rebuilt before the post-check
            if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
                TSparseSpace::SetToZero(rb);
                p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
            }
            is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);
        }

        // Iteration Cycle... performed only for NonLinearProblems
        while (!is_converged && iteration_number++ < BaseType::mMaxIterationNumber) {
            // Setting the number of iteration
            r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number;

            // Computing nodal weights
            ComputeNodalWeights();

            // Calling InitializeNonLinIteration
            p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
            BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);

            // Shaping correctly the system
            if (update_each_nl_iteration) {
                p_builder_and_solver->SetUpDofSet(p_scheme, r_model_part);
                p_builder_and_solver->SetUpSystem(r_model_part);
                p_builder_and_solver->ResizeAndInitializeVectors(p_scheme, BaseType::mpA, BaseType::mpDx, BaseType::mpb, r_model_part);
            }

            is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);

            // Call the linear system solver to find the correction mDx for the it is not called if there is no system to solve
            if (SparseSpaceType::Size(rDx) != 0) {
                if (StrategyBaseType::mRebuildLevel > 1 || !StrategyBaseType::mStiffnessMatrixIsBuilt) {
                    if (!BaseType::GetKeepSystemConstantDuringIterations()) {
                        //A = 0.00;
                        TSparseSpace::SetToZero(rA);
                        TSparseSpace::SetToZero(rDx);
                        TSparseSpace::SetToZero(rb);
                        p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
                    } else {
                        TSparseSpace::SetToZero(rDx);
                        TSparseSpace::SetToZero(rb);
                        p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
                    }
                } else {
                    TSparseSpace::SetToZero(rDx);
                    TSparseSpace::SetToZero(rb);
                    p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
                }
            } else {
                KRATOS_WARNING("NO DOFS") << "ATTENTION: no free DOFs!! " << std::endl;
            }

            // Debugging info
            BaseType::EchoInfo(iteration_number);

            // Updating the results stored in the database
            BaseType::UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag());

            p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
            BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);

            residual_is_updated = false;

            // Calculate reactions if required
            if (BaseType::mCalculateReactionsFlag)
                p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);

            if (is_converged) {
                if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
                    TSparseSpace::SetToZero(rb);
                    p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
                    residual_is_updated = true;
                }
                is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);
            }
        }

        // Plots a warning if the maximum number of iterations is exceeded
        if (iteration_number >= BaseType::mMaxIterationNumber) {
            BaseType::MaxIterationsExceeded();
        } else {
            KRATOS_INFO_IF("NR-Strategy", this->GetEchoLevel() > 0) << "Convergence achieved after " << iteration_number << " / " << BaseType::mMaxIterationNumber << " iterations" << std::endl;
        }

        // Recalculate residual if needed (note that some convergence criteria need it to be recalculated)
        if (!residual_is_updated) {
            // NOTE:
            // The following part will be commented because it is time consuming
            // and there is no obvious reason to be here. If someone need this
            // part please notify the community via mailing list before uncommenting it.
            // Pooyan.

            //    TSparseSpace::SetToZero(mb);
            //    p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb);
        }

        // Calculate reactions if required
        if (BaseType::mCalculateReactionsFlag)
            p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);

        return is_converged;
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    ///@}
    ///@name Friends
    ///@{

protected:

    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    Parameters mThisParameters;                                      /// The configuration parameters
    typename TConvergenceCriteriaType::Pointer mpMPCContactCriteria; /// The contact criteria

    ///@}
    ///@name Protected Operators
    ///@{

    /**
     * @brief This method returns the default parameters in order to avoid code duplication
     * @return Returns the default parameters
     */
    Parameters GetDefaultParameters() const override
    {
        Parameters default_parameters = Parameters(R"( { "inner_loop_iterations" : 5, "update_each_nl_iteration" : false, "enforce_ntn" : false })" );

        return default_parameters;
    }

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{
    ///@{

    /**
     * Copy constructor.
     */
    ResidualBasedNewtonRaphsonMPCContactStrategy(const ResidualBasedNewtonRaphsonMPCContactStrategy& Other)
    {
    };

private:

    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    // /**
    //  * @brief This enforces NTN formulation
    //  */
    // void EnforcingNTN()
    // {
    //     // List of enforced nodes to not repeat
    //     std::unordered_set<IndexType> enforced_nodes;
    //
    //     // Getting contact model part
    //     ModelPart& r_root_model_part = StrategyBaseType::GetModelPart().GetRootModelPart();
    //     ModelPart& r_computing_contact_model_part = StrategyBaseType::GetModelPart().GetSubModelPart("ComputingContact");
    //
    //     // The process info
    //     const auto& r_process_info = r_root_model_part.GetProcessInfo();
    //
    //     // Reset the pointers of the conditions
    //     for (auto& r_cond : r_computing_contact_model_part.Conditions()) {
    //         if (r_cond.Has(CONSTRAINT_POINTER)) {
    //             r_cond.SetValue(CONSTRAINT_POINTER, nullptr);
    //         }
    //     }
    //
    //     // Iterate over the constraints
    //     IndexType counter = 1;
    //     for (auto& r_const : r_root_model_part.MasterSlaveConstraints()) {
    //         r_const.SetId(counter);
    //         ++counter;
    //     }
    //
    //     // Auxiliar classes
    //     Matrix original_relation_matrix, relation_matrix;
    //     Vector original_constant_vector, constant_vector;
    //     ModelPart::DofsVectorType original_master_dofs, master_dofs, original_slave_dofs, slave_dofs;
    //
    //     // Iterate over the constraints
    //     for (auto& r_const : r_computing_contact_model_part.MasterSlaveConstraints()) {
    //         // Getting original system
    //         r_const.GetLocalSystem(original_relation_matrix, original_constant_vector, r_process_info);
    //         r_const.GetDofList(original_slave_dofs, original_master_dofs, r_process_info);
    //
    //         // TODO: Finish rebuild
    //
    //         // Creating new constraint
    //         r_root_model_part.CreateNewMasterSlaveConstraint("LinearMasterSlaveConstraint", counter, master_dofs, slave_dofs, relation_matrix, constant_vector);
    //
    //         // Setting to remove the old constraints
    //         r_const.Set(TO_ERASE, true);
    //
    //         ++counter;
    //     }
    //
    //     // Remove old constraints
    //     r_root_model_part.RemoveMasterSlaveConstraintsFromAllLevels(TO_ERASE);
    //
    //     // Transfer constraints from the root to the computing model part
    //     FastTransferBetweenModelPartsProcess(r_computing_contact_model_part, r_root_model_part, FastTransferBetweenModelPartsProcess::EntityTransfered::CONSTRAINTS).Execute();
    //
    //     // Reorder ids
    //     counter = 1;
    //     for (auto& r_const : r_root_model_part.MasterSlaveConstraints()) {
    //         r_const.SetId(counter);
    //         ++counter;
    //     }
    // }

    /**
     * @brief This computes the nodal weights
     */
    void ComputeNodalWeights()
    {
        // Getting contact model part
        ModelPart& r_contact_model_part = StrategyBaseType::GetModelPart().GetSubModelPart("Contact");

        // Reset the NODAL_PAUX and NODAL_MAUX
        auto& r_nodes_array = r_contact_model_part.Nodes();
        VariableUtils().SetNonHistoricalVariableToZero(NODAL_PAUX, r_nodes_array);
        VariableUtils().SetNonHistoricalVariableToZero(NODAL_MAUX, r_nodes_array);

        // We set the constraints active and inactive in function of the active set
        auto& r_conditions_array = r_contact_model_part.Conditions();
        auto it_cond_begin = r_conditions_array.begin();

        // If enforcing NTN
        const bool enforce_ntn = false;
        // const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool();
        // if (enforce_ntn) {
        //     VariableUtils().SetNonHistoricalVariable(NODAL_PAUX, 1.0, r_nodes_array);
        // }

        #pragma omp parallel for
        for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) {
            auto it_cond = it_cond_begin + i;

            // Only slave conditions
            if (it_cond->Is(SLAVE)) {
                auto& r_geometry = it_cond->GetGeometry();
                Vector lumping_factor;
                lumping_factor = r_geometry.LumpingFactors(lumping_factor);
                const double domain_size = r_geometry.DomainSize();
                for (IndexType i_node = 0; i_node < r_geometry.size(); ++i_node) {
                    auto& r_node = r_geometry[i_node];
                    // nodes may be shared by several slave conditions: atomics guard the sums
                    if (!enforce_ntn) {
                        #pragma omp atomic
                        r_node.GetValue(NODAL_PAUX) += 1.0;
                    }
                    #pragma omp atomic
                    r_node.GetValue(NODAL_MAUX) += lumping_factor[i_node] * domain_size;
                }
            }
        }
    }

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Serialization
    ///@{

    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}

}; /* Class ResidualBasedNewtonRaphsonMPCContactStrategy */

///@}

///@name Type Definitions
///@{

///@}
///@name Input and output
///@{

///@}

}  // namespace Kratos

#endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY */
GB_unop__identity_fp32_uint64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fp32_uint64)
// op(A') function:  GB (_unop_tran__identity_fp32_uint64)

// C type:   float
// A type:   uint64_t
// cast:     float cij = (float) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    float z = (float) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint16_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    uint32_t z = (uint32_t) aij ;   \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity op with a uint64 -> float typecast over all anz entries.
// Handles both the full/sparse case (Ab == NULL) and the bitmap case.
GrB_Info GB (_unop_apply__identity_fp32_uint64)
(
    float *Cx,                  // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every entry 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint64_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop lives in the shared template GB_unop_transpose.c, which is
// specialized by the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_fp32_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__lnot_int16_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int16_uint64
// op(A') function:  GB_tran__lnot_int16_uint64

// C type:   int16_t
// A type:   uint64_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (cast (aij))
// NOTE(review): the typecast to int16_t is applied BEFORE the logical-not, so
// a nonzero uint64 whose low 16 bits are all zero (e.g. 65536) truncates to 0
// and produces cij = 1 even though aij != 0.  Confirm this cast-then-op order
// matches the intended semantics of the kernel generator before changing it.
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies LNOT with a uint64 -> int16 typecast entry-wise over all anz entries.
GrB_Info GB_unop__lnot_int16_uint64
(
    int16_t *Cx,       // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop lives in the shared template GB_unaryop_transpose.c,
// specialized by the GB_* macros defined above.
GrB_Info GB_tran__lnot_int16_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
smallscaling.c
// run with OPENBLAS_NUM_THREADS=1 and OMP_NUM_THREADS=n #include <math.h> #include <stdlib.h> #include <stdio.h> #include <time.h> #include <cblas.h> #include <omp.h> #define MIN_SIZE 5 #define MAX_SIZE 60 #define NB_SIZE 10 // number of loop for a 1x1 matrix. Lower it if the test is // too slow on you computer. #define NLOOP 2e7 typedef struct { int matrix_size; int n_loop; void (* bench_func)(); void (* blas_func)(); void * (* create_matrix)(int size); } BenchParam; void * s_create_matrix(int size) { float * r = malloc(size * sizeof(double)); int i; for(i = 0; i < size; i++) r[i] = 1e3 * i / size; return r; } void * c_create_matrix(int size) { float * r = malloc(size * 2 * sizeof(double)); int i; for(i = 0; i < 2 * size; i++) r[i] = 1e3 * i / size; return r; } void * z_create_matrix(int size) { double * r = malloc(size * 2 * sizeof(double)); int i; for(i = 0; i < 2 * size; i++) r[i] = 1e3 * i / size; return r; } void * d_create_matrix(int size) { double * r = malloc(size * sizeof(double)); int i; for(i = 0; i < size; i++) r[i] = 1e3 * i / size; return r; } void trmv_bench(BenchParam * param) { int i, n; int size = param->matrix_size; n = param->n_loop / size; int one = 1; void * A = param->create_matrix(size * size); void * y = param->create_matrix(size); for(i = 0; i < n; i++) { param->blas_func("U", "N", "N", &size, A, &size, y, &one); } free(A); free(y); } void gemv_bench(BenchParam * param) { int i, n; int size = param->matrix_size; n = param->n_loop / size; double v = 1.01; int one = 1; void * A = param->create_matrix(size * size); void * y = param->create_matrix(size); for(i = 0; i < n; i++) { param->blas_func("N", &size, &size, &v, A, &size, y, &one, &v, y, &one); } free(A); free(y); } void ger_bench(BenchParam * param) { int i, n; int size = param->matrix_size; n = param->n_loop / size; double v = 1.01; int one = 1; void * A = param->create_matrix(size * size); void * y = param->create_matrix(size); for(i = 0; i < n; i++) { param->blas_func(&size, &size, 
&v, y, &one, y, &one, A, &size); } free(A); free(y); } #ifndef _WIN32 void * pthread_func_wrapper(void * param) { ((BenchParam *)param)->bench_func(param); pthread_exit(NULL); } #endif #define NB_TESTS 5 void * TESTS[4 * NB_TESTS] = { trmv_bench, ztrmv_, z_create_matrix, "ztrmv", gemv_bench, dgemv_, d_create_matrix, "dgemv", gemv_bench, zgemv_, z_create_matrix, "zgemv", ger_bench, dger_, d_create_matrix, "dger", ger_bench, zgerc_, z_create_matrix, "zgerc", }; inline static double delta_time(struct timespec tick) { struct timespec tock; clock_gettime(CLOCK_MONOTONIC, &tock); return (tock.tv_sec - tick.tv_sec) + (tock.tv_nsec - tick.tv_nsec) / 1e9; } double pthread_bench(BenchParam * param, int nb_threads) { #ifdef _WIN32 return 0; #else BenchParam threaded_param = *param; pthread_t threads[nb_threads]; int t, rc; struct timespec tick; threaded_param.n_loop /= nb_threads; clock_gettime(CLOCK_MONOTONIC, &tick); for(t=0; t<nb_threads; t++){ rc = pthread_create(&threads[t], NULL, pthread_func_wrapper, &threaded_param); if (rc){ printf("ERROR; return code from pthread_create() is %d\n", rc); exit(-1); } } for(t=0; t<nb_threads; t++){ pthread_join(threads[t], NULL); } return delta_time(tick); #endif } double seq_bench(BenchParam * param) { struct timespec tick; clock_gettime(CLOCK_MONOTONIC, &tick); param->bench_func(param); return delta_time(tick); } double omp_bench(BenchParam * param) { BenchParam threaded_param = *param; struct timespec tick; int t; int nb_threads = omp_get_max_threads(); threaded_param.n_loop /= nb_threads; clock_gettime(CLOCK_MONOTONIC, &tick); #pragma omp parallel for for(t = 0; t < nb_threads; t ++){ param->bench_func(&threaded_param); } return delta_time(tick); } int main(int argc, char * argv[]) { double inc_factor = exp(log((double)MAX_SIZE / MIN_SIZE) / NB_SIZE); BenchParam param; int test_id; printf ("Running on %d threads\n", omp_get_max_threads()); for(test_id = 0; test_id < NB_TESTS; test_id ++) { double size = MIN_SIZE; param.bench_func = 
TESTS[test_id * 4]; param.blas_func = TESTS[test_id * 4 + 1]; param.create_matrix = TESTS[test_id * 4 + 2]; printf("\nBenchmark of %s\n", (char*)TESTS[test_id * 4 + 3]); param.n_loop = NLOOP; while(size <= MAX_SIZE) { param.matrix_size = (int)(size + 0.5); double seq_time = seq_bench(&param); double omp_time = omp_bench(&param); double pthread_time = pthread_bench(&param, omp_get_max_threads()); printf("matrix size %d, sequential %gs, openmp %gs, speedup %g, " "pthread %gs, speedup %g\n", param.matrix_size, seq_time, omp_time, seq_time / omp_time, pthread_time, seq_time / pthread_time); size *= inc_factor; } } return(0); }
morn_image_shape.c
/* Copyright (C) 2019-2020 JingWeiZhangHuai <jingweizhanghuai@163.com> Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include "morn_image.h" struct HoughLineInfo { MImagePoint ps; MImagePoint pe; MImagePoint point1; MImagePoint point2; float k; float b; float a; float r; int count; }; struct HandleImageHoughLine { float sn[362]; float cs[362]; MTable *tab; }; void endImageHoughLine(void *info) { struct HandleImageHoughLine *handle = (struct HandleImageHoughLine *)info; if(handle->tab!=NULL) mTableRelease(handle->tab); } #define HASH_ImageHoughLine 0x3d5cf2dd void mImageHoughLine(MImage *src,MList *list,int thresh1,int thresh2,int thresh) { int j; mException(INVALID_IMAGE(src),EXIT,"invalid input source"); mException((thresh<=0),EXIT,"invalid input threshold"); if(thresh1<=0) thresh1=thresh; if(thresh2<=0) thresh2=0; int height = src->height; int width = src->width; float cx = (float)width/2.0f; float cy = (float)height/2.0f; unsigned char **data = src->data[0]; int range = (int)(sqrt((double)(cx*cx+cy*cy)))+1; // printf("range is %d\n",range); MHandle *hdl=mHandle(src,ImageHoughLine); struct HandleImageHoughLine *handle = (struct HandleImageHoughLine *)(hdl->handle); if(hdl->valid==0) { if(handle->tab==NULL) handle->tab = mTableCreate(range*2,362,sizeof(unsigned short),NULL); else mTableRedefine(handle->tab,range*2,362,sizeof(unsigned short),NULL); for(int i=0;i<362;i++) { handle->sn[i] = mSin(((float)i-1.0f)*0.5f); handle->cs[i] = 
mCos(((float)i-1.0f)*0.5f); // printf("sn[n] is %f,cs[n] is %f\n",sn[n],cs[n]); } hdl->valid = 1; } unsigned short **tab_data = handle->tab->dataU16; for(j=0;j<range*2;j++) memset(tab_data[j],0,handle->tab->col*sizeof(unsigned short)); float *sn = handle->sn; float *cs = handle->cs; // mTimerBegin(); #pragma omp parallel for for(j=0;j<height;j++) for(int i=0;i<width;i++) { if(data[j][i] == 255) { for(int n=0;n<362;n++) { int l = (int)(((float)i-cx)*cs[n]+((float)j-cy)*sn[n]+(float)range+0.5); tab_data[l][n] += 1; } } } // mTimerEnd(); // printf("tab_data is %d,thresh is %d\n",tab_data[1169][125],(thresh<<2)); // printf("tab_data[1169][124] is %d,tab_data[1169][126] is %d\n",tab_data[1169][124],tab_data[1169][126]); // printf("tab_data[1168][125] is %d,tab_data[1170][125] is %d\n",tab_data[1168][125],tab_data[1170][125]); list->num = 0; struct HoughLineInfo line; thresh = thresh >>2; thresh1 = thresh1>>2; thresh2 = thresh2>>2; // mTimerBegin(); for(j=0;j<range*2;j++) { for(int i=1;i<361;i++) { // if((i==125)&&(j==1169)) // { // printf("tab_data is %d,thresh is %d\n",tab_data[j][i],(thresh<<2)); // printf("tab_data[j][i-1] is %d,tab_data[j][i+1] is %d\n",tab_data[j][i-1],tab_data[j][i+1]); // } if(tab_data[j][i]<(thresh<<2)) continue; if((tab_data[j][i]< tab_data[j][i-1])||(tab_data[j][i]< tab_data[j][i+1])) continue; tab_data[j][i] +=1; line.a = ((float)i-1.0f)*0.5f; line.r = ((float)(j-range)); if(sn[i]==0.0f) { sn[i] = 0.0001f; cs[i] = (float)sqrt(1.0-sn[i]*sn[i]); // printf("sn is %f,cs is %f\t",sn,cs); } line.k = 0.0f-cs[i]/sn[i]; line.b = line.r/sn[i]+cx*cs[i]/sn[i]+cy; // printf("k is %f,b is %f,a is %f,r is %f,tab_data is %d\n",k[m],b[m],a[m],r[m],tab_data[j][i]); line.point1.x = 0.0f;line.point1.y = line.b; if((line.point1.y<0)||(line.point1.y>=src->height)) { line.point1.y = 0.0f;line.point1.x = (0.0f-line.b)/line.k; if((line.point1.x<0)||(line.point1.x>=src->width)) {line.point1.y = src->height-1;line.point1.x = (line.point1.y-line.b)/line.k;} 
if((line.point1.x<0)||(line.point1.x>=src->width)) continue; } line.point2.x = src->width;line.point2.y = line.k*line.point2.x + line.b; if((line.point2.y<0)||(line.point2.y>=src->height)) { line.point2.y = src->height-1;line.point2.x = (line.point2.y-line.b)/line.k; if((line.point2.x<0)||(line.point2.x>=src->width)) {line.point2.y = 0.0f;line.point2.x = (0.0f-line.b)/line.k;} if((line.point2.x<0)||(line.point2.x>=src->width)) continue; } // if((i==125)&&(j==1169)) // printf("line.point1 is %f,%f,line.point2 is %f,%f\n",line.point1.x,line.point1.y,line.point2.x,line.point2.y); int count1 = 0; int count2 = 0; int count = 0; line.count = 0; int state = 1; #define HOUGH_POINT_CHECK(U,V) {\ int u=(int)(((float)U)+0.5);\ int v=(int)(((float)V)+0.5);\ if(state == 0)\ {\ if(data[u ][v ] == 255) count1 += 1;\ else if(data[u-1][v ]+data[u ][v-1]+data[u ][v+1]+data[u+1][v ] != 0) ;\ else\ {\ if(count1 > thresh1)\ {\ line.count += count1;\ line.pe.x = V;\ line.pe.y = U;\ count2 = 1;\ }\ count += count1;\ count1 = 0;\ if((tab_data[j][i]/4 - count) < thresh1)\ break;\ state = 1;\ }\ }\ else\ {\ if(data[u][v] != 255) count2 += 1;\ else\ {\ if(count2 > thresh2)\ {\ if(line.count > thresh)\ {\ mListWrite(list,DFLT,&line,sizeof(struct HoughLineInfo));\ }\ line.count = 0;\ }\ if(line.count==0) {line.ps.x = V; line.ps.y = U;}\ count1 = 1;\ state = 0;\ }\ }\ } int m,n;float l; if(ABS(line.point1.x-line.point2.x)>ABS(line.point1.y-line.point2.y)) { float step = 4.0*line.k; if((line.point1.x)<(line.point2.x)) { for(n = line.point1.x,l = line.point1.y;n<line.point2.x;n=n+4,l = l+step) {HOUGH_POINT_CHECK(l,n);} if(state == 0) {if(count1 > thresh1) {line.count += count1;line.pe.x = n;line.pe.y = l;}} } else { for(n = line.point2.x,l = line.point2.y;n<line.point1.x;n=n+4,l = l+step) {HOUGH_POINT_CHECK(l,n);} if(state == 0) {if(count1 > thresh1) {line.count += count1;line.pe.x = n;line.pe.y = l;}} } } else { float step = 4.0/line.k; if((line.point1.y)<(line.point2.y)) { for(m = 
line.point1.y,l = line.point1.x;m<line.point2.y;m=m+4,l = l+step) {HOUGH_POINT_CHECK(m,l);} if(state == 0) {if(count1 > thresh1) {line.count += count1;line.pe.x = l;line.pe.y = m;}} } else { for(m = line.point2.y,l = line.point2.x;m<line.point1.y;m=m+4,l = l+step) {HOUGH_POINT_CHECK(m,l);} if(state == 0) {if(count1 > thresh1) {line.count += count1;line.pe.x = l;line.pe.y = m;}} } } if(line.count > thresh) { // mLog(INFO,"%d:\t",list->num); // printf("tb_data[%d][%d] is %d,line.count is %d\n",j,i,tab_data[j][i],line.count*4); // printf("line.a is %f\t",line.a); // mLog(INFO,"line.ps is %f,%f\t",line.ps.x,line.ps.y); // mLog(INFO,"line.pe is %f,%f\n",line.pe.x,line.pe.y); mListWrite(list,DFLT,&line,sizeof(struct HoughLineInfo)); } } } // mTimerEnd(); // printf("time use is %f\n",mTimerUse()); } void ImageHoughLineDrawImage(MImage *src,MList *list,char *filename) { MImage *dst = mImageCreate(src->channel,src->height,src->width,NULL); mImageCopy(src,dst); unsigned char color[3] = {128,255,0}; for(int i=0;i<list->num;i++) { MImagePoint *point = (MImagePoint *)(list->data[i]); mImageDrawLine(dst,NULL,&(point[0]),&(point[1]),color,4); } mBMPSave(dst,filename); mImageRelease(dst); }
linked_notasks.c
#include <stdlib.h>
#include <stdio.h>
#include "omp.h"

#define N 15
#define FS 30
#define NMAX 20

struct node {
   int data;
   int fibdata;
   struct node* next;
};

/* Naive doubly-recursive Fibonacci: O(phi^n) work, used here as CPU load. */
int fib(int n) {
   int x, y;
   if (n < 2) {
      return (n);
   } else {
      x = fib(n - 1);
      y = fib(n - 2);
      return (x + y);
   }
}

/* The per-node workload: fibdata = fib(data). */
void processwork(struct node* p)
{
   int n;
   n = p->data;
   p->fibdata = fib(n);
}

/* Build a list of N+1 nodes with data = FS, FS+1, ..., FS+N; returns the head.
 * fix: malloc results are now checked before use (dereferencing a failed
 * allocation was undefined behavior). */
struct node* init_list(struct node* p) {
   int i;
   struct node* head = NULL;
   struct node* temp = NULL;

   head = malloc(sizeof(struct node));
   if (head == NULL) {
      fprintf(stderr, "out of memory\n");
      exit(EXIT_FAILURE);
   }
   p = head;
   p->data = FS;
   p->fibdata = 0;
   for (i=0; i< N; i++) {
      temp = malloc(sizeof(struct node));
      if (temp == NULL) {
         fprintf(stderr, "out of memory\n");
         exit(EXIT_FAILURE);
      }
      p->next = temp;
      p = temp;
      p->data = FS + i + 1;
      p->fibdata = i+1;
   }
   p->next = NULL;
   return head;
}

int main(int argc, char *argv[]) {
   double start, end;
   struct node *p=NULL;
   struct node *temp=NULL;
   struct node *head=NULL;
   struct node *parr[NMAX];
   int i, count=0;

   printf("Process linked list\n");
   printf("  Each linked list node will be processed by function 'processwork()'\n");
   printf("  Each ll node will compute %d fibonacci numbers beginning with %d\n",N,FS);

   p = init_list(p);
   head = p;

   /* serial baseline */
   start = omp_get_wtime();
   {
      while (p != NULL) {
         processwork(p);
         p = p->next;
      }
   }
   end = omp_get_wtime();
   printf("serial Compute Time: %f seconds\n", end - start);

   p = head;
   start = omp_get_wtime();
   {
      // count number of items in the list.  Strictly speaking this isn't
      // needed since we know there are N elements in the list.  But in
      // most cases you don't know this and need to count nodes.
      while (p != NULL) {
         p = p->next;
         count++;
      }
      // fix: clamp to the array capacity so parr cannot overflow if the
      // list ever grows beyond NMAX entries (N+1 > NMAX).
      if (count > NMAX) count = NMAX;

      // traverse the list and collect pointers into an array.
      p = head;
      for(i=0; i<count; i++) {
         parr[i] = p;
         p = p->next;
      }

      // do the work in parallel
      #pragma omp parallel
      {
         #pragma omp single
         printf(" %d threads \n",omp_get_num_threads());

         #pragma omp for schedule(dynamic,1)
         for(i=0; i<count; i++)
            processwork(parr[i]);
      }
   }
   end = omp_get_wtime();

   /* print results and free the list (fix: dropped the redundant trailing
    * free(p) — p is always NULL when the loop exits) */
   p = head;
   while (p != NULL) {
      printf("%d : %d\n",p->data, p->fibdata);
      temp = p->next;
      free (p);
      p = temp;
   }

   printf("Compute Time: %f seconds\n", end - start);

   return 0;
}
GB_binop__ne_fp64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__ne_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_08__ne_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__ne_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_04__ne_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__ne_fp64)
// A*D function (colscale):         GB (_AxD__ne_fp64)
// D*A function (rowscale):         GB (_DxB__ne_fp64)
// C+=B function (dense accum):     GB (_Cdense_accumB__ne_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__ne_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__ne_fp64)
// C=scalar+B                       GB (_bind1st__ne_fp64)
// C=scalar+B'                      GB (_bind1st_tran__ne_fp64)
// C=A+scalar                       GB (_bind2nd__ne_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__ne_fp64)

// C type:   bool
// A type:   double
// A pattern? 0
// B type:   double
// B pattern? 0

// BinaryOp: cij = (aij != bij)

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    double bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x != y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_NE || GxB_NO_FP64 || GxB_NO_NE_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// not generated for NE (no suitable accumulator); kept disabled
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__ne_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// stub for NE: the template is compiled out (#if 0), so this is a no-op
GrB_Info GB (_Cdense_accumB__ne_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// stub for NE: the template is compiled out (#if 0), so this is a no-op
GrB_Info GB (_Cdense_accumb__ne_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__ne_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__ne_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__ne_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only used by eWiseUnion (values for missing entries)
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar  = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__ne_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__ne_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__ne_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__ne_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__ne_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__ne_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = (x != aij) ;                      \
}

GrB_Info GB (_bind1st_tran__ne_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (same type here since A and x are both double)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = (aij != y) ;                      \
}

GrB_Info GB (_bind2nd_tran__ne_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
fc3.c
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdlib.h> #include "fc3.h" static void rotate_delta_fc2s(double (*rot_delta_fc2s)[3][3], const long i_atom, const long j_atom, const double (*delta_fc2s)[3][3], const double (*site_sym_cart)[3][3], const long *rot_map_sym, const long num_atom, const long num_site_sym, const long num_disp); static void tensor2_rotation(double rot_tensor[3][3], const double tensor[3][3], const double r[3][3]); static void tensor3_rotation(double *rot_tensor, const double *tensor, const double *rot_cartesian); static double tensor3_rotation_elem(const double *tensor, const double *r, const long pos); static void copy_permutation_symmetry_fc3_elem(double *fc3, const double fc3_elem[27], const long a, const long b, const long c, const long num_atom); static void set_permutation_symmetry_fc3_elem(double *fc3_elem, const double *fc3, const long a, const long b, const long c, const long num_atom); static void set_permutation_symmetry_compact_fc3(double * fc3, const long p2s[], const long s2pp[], const long nsym_list[], const long perms[], const long n_satom, const long n_patom); static void transpose_compact_fc3_type01(double * fc3, const long p2s[], const long s2pp[], const long nsym_list[], const long perms[], const long n_satom, const long n_patom, const long t_type); static void transpose_compact_fc3_type2(double * fc3, const long p2s[], const long s2pp[], const long nsym_list[], const long perms[], const long n_satom, const long n_patom); void fc3_distribute_fc3(double *fc3, const long target, const long source, const long *atom_mapping, const long num_atom, const double *rot_cart) { long i, j, adrs_out, adrs_in; for (i = 0; i < num_atom; i++) { for (j = 0; j < num_atom; j++) { adrs_out = (num_atom * num_atom * target + num_atom * i + j) * 27; adrs_in = (num_atom * num_atom * source + num_atom * atom_mapping[i] + atom_mapping[j]) * 27; tensor3_rotation(fc3 + adrs_out, fc3 + adrs_in, rot_cart); } } } void fc3_rotate_delta_fc2(double (*fc3)[3][3][3], const double 
(*delta_fc2s)[3][3], const double *inv_U, const double (*site_sym_cart)[3][3], const long *rot_map_syms, const long num_atom, const long num_site_sym, const long num_disp) { long i_atoms, i, j, k, l, m, n; double (*rot_delta_fc2s)[3][3]; rot_delta_fc2s = (double(*)[3][3]) malloc(sizeof(double[3][3]) * num_site_sym * num_disp); for (i_atoms = 0; i_atoms < num_atom * num_atom; i_atoms++) { i = i_atoms / num_atom; j = i_atoms % num_atom; rotate_delta_fc2s(rot_delta_fc2s, i, j, delta_fc2s, site_sym_cart, rot_map_syms, num_atom, num_site_sym, num_disp); for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { for (m = 0; m < 3; m++) { fc3[i_atoms][k][l][m] = 0; for (n = 0; n < num_site_sym * num_disp; n++) { fc3[i_atoms][k][l][m] += inv_U[k * num_site_sym * num_disp + n] * rot_delta_fc2s[n][l][m]; } } } } } free(rot_delta_fc2s); rot_delta_fc2s = NULL; } void fc3_set_permutation_symmetry_fc3(double *fc3, const long num_atom) { double fc3_elem[27]; long i, j, k; #pragma omp parallel for private(j, k, fc3_elem) for (i = 0; i < num_atom; i++) { for (j = i; j < num_atom; j++) { for (k = j; k < num_atom; k++) { set_permutation_symmetry_fc3_elem(fc3_elem, fc3, i, j, k, num_atom); copy_permutation_symmetry_fc3_elem(fc3, fc3_elem, i, j, k, num_atom); } } } } void fc3_set_permutation_symmetry_compact_fc3(double * fc3, const long p2s[], const long s2pp[], const long nsym_list[], const long perms[], const long n_satom, const long n_patom) { set_permutation_symmetry_compact_fc3(fc3, p2s, s2pp, nsym_list, perms, n_satom, n_patom); } void fc3_transpose_compact_fc3(double * fc3, const long p2s[], const long s2pp[], const long nsym_list[], const long perms[], const long n_satom, const long n_patom, const long t_type) { /* Three types of index permutations */ /* t_type=0: dim[0] <-> dim[1] */ /* t_type=1: dim[0] <-> dim[2] */ /* t_type=2: dim[1] <-> dim[2] */ if (t_type == 0 || t_type == 1) { transpose_compact_fc3_type01(fc3, p2s, s2pp, nsym_list, perms, n_satom, n_patom, t_type); } else { if 
(t_type == 2) { transpose_compact_fc3_type2(fc3, p2s, s2pp, nsym_list, perms, n_satom, n_patom); } } } static void rotate_delta_fc2s(double (*rot_delta_fc2s)[3][3], const long i_atom, const long j_atom, const double (*delta_fc2s)[3][3], const double (*site_sym_cart)[3][3], const long *rot_map_sym, const long num_atom, const long num_site_sym, const long num_disp) { long i, j; for (i = 0; i < num_disp; i++) { for (j = 0; j < num_site_sym; j++) { tensor2_rotation(rot_delta_fc2s[i * num_site_sym + j], delta_fc2s[i * num_atom * num_atom + rot_map_sym[j * num_atom + i_atom] * num_atom + rot_map_sym[j * num_atom + j_atom]], site_sym_cart[j]); } } } static void tensor2_rotation(double rot_tensor[3][3], const double tensor[3][3], const double r[3][3]) { long i, j, k, l; for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { rot_tensor[i][j] = 0; } } for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { rot_tensor[i][j] += r[i][k] * r[j][l] * tensor[k][l]; } } } } } static void tensor3_rotation(double *rot_tensor, const double *tensor, const double *rot_cartesian) { long l; for (l = 0; l < 27; l++) { rot_tensor[l] = tensor3_rotation_elem(tensor, rot_cartesian, l); } } static double tensor3_rotation_elem(const double *tensor, const double *r, const long pos) { long i, j, k, l, m, n; double sum; l = pos / 9; m = (pos % 9) / 3; n = pos % 3; sum = 0.0; for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { for (k = 0; k < 3; k++) { sum += r[l * 3 + i] * r[m * 3 + j] * r[n * 3 + k] * tensor[i * 9 + j * 3 + k]; } } } return sum; } static void copy_permutation_symmetry_fc3_elem(double *fc3, const double fc3_elem[27], const long a, const long b, const long c, const long num_atom) { long i, j, k; for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { for (k = 0; k < 3; k++) { fc3[a * num_atom * num_atom * 27 + b * num_atom * 27 + c * 27 + i * 9 + j * 3 + k] = fc3_elem[i * 9 + j * 3 + k]; fc3[a * num_atom * num_atom * 27 + c * num_atom * 27 + 
b * 27 + i * 9 + k * 3 + j] = fc3_elem[i * 9 + j * 3 + k]; fc3[b * num_atom * num_atom * 27 + a * num_atom * 27 + c * 27 + j * 9 + i * 3 + k] = fc3_elem[i * 9 + j * 3 + k]; fc3[b * num_atom * num_atom * 27 + c * num_atom * 27 + a * 27 + j * 9 + k * 3 + i] = fc3_elem[i * 9 + j * 3 + k]; fc3[c * num_atom * num_atom * 27 + a * num_atom * 27 + b * 27 + k * 9 + i * 3 + j] = fc3_elem[i * 9 + j * 3 + k]; fc3[c * num_atom * num_atom * 27 + b * num_atom * 27 + a * 27 + k * 9 + j * 3 + i] = fc3_elem[i * 9 + j * 3 + k]; } } } } static void set_permutation_symmetry_fc3_elem(double *fc3_elem, const double *fc3, const long a, const long b, const long c, const long num_atom) { long i, j, k; for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { for (k = 0; k < 3; k++) { fc3_elem[i * 9 + j * 3 + k] = (fc3[a * num_atom * num_atom * 27 + b * num_atom * 27 + c * 27 + i * 9 + j * 3 + k] + fc3[a * num_atom * num_atom * 27 + c * num_atom * 27 + b * 27 + i * 9 + k * 3 + j] + fc3[b * num_atom * num_atom * 27 + a * num_atom * 27 + c * 27 + j * 9 + i * 3 + k] + fc3[b * num_atom * num_atom * 27 + c * num_atom * 27 + a * 27 + j * 9 + k * 3 + i] + fc3[c * num_atom * num_atom * 27 + a * num_atom * 27 + b * 27 + k * 9 + i * 3 + j] + fc3[c * num_atom * num_atom * 27 + b * num_atom * 27 + a * 27 + k * 9 + j * 3 + i]) / 6; } } } } static void set_permutation_symmetry_compact_fc3(double * fc3, const long p2s[], const long s2pp[], const long nsym_list[], const long perms[], const long n_satom, const long n_patom) { /* fc3 shape=(n_patom, n_satom, n_satom, 3, 3, 3) */ /* 1D indexing: */ /* i * n_satom * n_satom * 27 + j * n_satom * 27 + */ /* k * 27 + l * 9 + m * 3 + n */ long i, j, k, l, m, n, i_p, j_p, k_p; long done_any; long i_trans_j, k_trans_j, i_trans_k, j_trans_k; long adrs[6]; double fc3_elem[3][3][3]; char *done; done = NULL; done = (char*)malloc(sizeof(char) * n_patom * n_satom * n_satom); for (i = 0; i < n_patom * n_satom * n_satom; i++) { done[i] = 0; } for (i_p = 0; i_p < n_patom; i_p++) { 
i = p2s[i_p]; for (j = 0; j < n_satom; j++) { j_p = s2pp[j]; i_trans_j = perms[nsym_list[j] * n_satom + i]; for (k = 0; k < n_satom; k++) { k_p = s2pp[k]; k_trans_j = perms[nsym_list[j] * n_satom + k]; i_trans_k = perms[nsym_list[k] * n_satom + i]; j_trans_k = perms[nsym_list[k] * n_satom + j]; /* ijk, ikj, jik, jki, kij, kji */ adrs[0] = i_p * n_satom * n_satom + j * n_satom + k; adrs[1] = i_p * n_satom * n_satom + k * n_satom + j; adrs[2] = j_p * n_satom * n_satom + i_trans_j * n_satom + k_trans_j; adrs[3] = j_p * n_satom * n_satom + k_trans_j * n_satom + i_trans_j; adrs[4] = k_p * n_satom * n_satom + i_trans_k * n_satom + j_trans_k; adrs[5] = k_p * n_satom * n_satom + j_trans_k * n_satom + i_trans_k; done_any = 0; for (l = 0; l < 6; l++) { if (done[adrs[l]]) { done_any = 1; break; } } if (done_any) { continue; } for (l = 0; l < 6; l++) { done[adrs[l]] = 1; adrs[l] *= 27; } for (l = 0; l < 3; l++) { for (m = 0; m < 3; m++) { for (n = 0; n < 3; n++) { fc3_elem[l][m][n] = fc3[adrs[0] + l * 9 + m * 3 + n]; fc3_elem[l][m][n] += fc3[adrs[1] + l * 9 + n * 3 + m]; fc3_elem[l][m][n] += fc3[adrs[2] + m * 9 + l * 3 + n]; fc3_elem[l][m][n] += fc3[adrs[3] + m * 9 + n * 3 + l]; fc3_elem[l][m][n] += fc3[adrs[4] + n * 9 + l * 3 + m]; fc3_elem[l][m][n] += fc3[adrs[5] + n * 9 + m * 3 + l]; fc3_elem[l][m][n] /= 6; } } } for (l = 0; l < 3; l++) { for (m = 0; m < 3; m++) { for (n = 0; n < 3; n++) { fc3[adrs[0] + l * 9 + m * 3 + n] = fc3_elem[l][m][n]; fc3[adrs[1] + l * 9 + n * 3 + m] = fc3_elem[l][m][n]; fc3[adrs[2] + m * 9 + l * 3 + n] = fc3_elem[l][m][n]; fc3[adrs[3] + m * 9 + n * 3 + l] = fc3_elem[l][m][n]; fc3[adrs[4] + n * 9 + l * 3 + m] = fc3_elem[l][m][n]; fc3[adrs[5] + n * 9 + m * 3 + l] = fc3_elem[l][m][n]; } } } } } } free(done); done = NULL; } static void transpose_compact_fc3_type01(double * fc3, const long p2s[], const long s2pp[], const long nsym_list[], const long perms[], const long n_satom, const long n_patom, const long t_type) { /* Three types of index 
permutations */ /* t_type=0: dim[0] <-> dim[1] */ /* t_type=1: dim[0] <-> dim[2] */ /* t_type=2: dim[1] <-> dim[2] */ long i, j, k, l, m, n, i_p, j_p, i_trans, k_trans; long adrs, adrs_t; double fc3_elem[3][3][3]; char *done; done = NULL; done = (char*)malloc(sizeof(char) * n_satom * n_patom); for (i = 0; i < n_satom * n_patom; i++) { done[i] = 0; } for (i_p = 0; i_p < n_patom; i_p++) { i = p2s[i_p]; for (j = 0; j < n_satom; j++) { j_p = s2pp[j]; if (!done[i_p * n_satom + j]) { /* (j, i) -- nsym_list[j] --> (j', i') */ /* nsym_list[j] translates j to j' where j' is in */ /* primitive cell. The same translation sends i to i' */ /* where i' is not necessarily to be in primitive cell. */ /* Thus, i' = perms[nsym_list[j] * n_satom + i] */ i_trans = perms[nsym_list[j] * n_satom + i]; done[i_p * n_satom + j] = 1; done[j_p * n_satom + i_trans] = 1; for (k = 0; k < n_satom; k++) { k_trans = perms[nsym_list[j] * n_satom + k]; switch (t_type) { case 0: adrs = (i_p * n_satom * n_satom + j * n_satom + k) * 27; adrs_t = (j_p * n_satom * n_satom + i_trans * n_satom + k_trans) * 27; for (l = 0; l < 3; l++) { for (m = 0; m < 3; m++) { for (n = 0; n < 3; n++) { fc3_elem[l][m][n] = fc3[adrs + l * 9 + m * 3 + n]; } } } if (adrs != adrs_t) { for (l = 0; l < 3; l++) { for (m = 0; m < 3; m++) { for (n = 0; n < 3; n++) { fc3[adrs + l * 9 + m * 3 + n] = fc3[adrs_t + m * 9 + l * 3 + n]; } } } } for (l = 0; l < 3; l++) { for (m = 0; m < 3; m++) { for (n = 0; n < 3; n++) { fc3[adrs_t + m * 9 + l * 3 + n] = fc3_elem[l][m][n]; } } } break; case 1: adrs = (i_p * n_satom * n_satom + k * n_satom + j) * 27; adrs_t = (j_p * n_satom * n_satom + k_trans * n_satom + i_trans) * 27; for (l = 0; l < 3; l++) { for (m = 0; m < 3; m++) { for (n = 0; n < 3; n++) { fc3_elem[l][m][n] = fc3[adrs + l * 9 + m * 3 + n]; } } } if (adrs != adrs_t) { for (l = 0; l < 3; l++) { for (m = 0; m < 3; m++) { for (n = 0; n < 3; n++) { fc3[adrs + l * 9 + m * 3 + n] = fc3[adrs_t + n * 9 + m * 3 + l]; } } } } for (l = 0; l < 3; 
l++) { for (m = 0; m < 3; m++) { for (n = 0; n < 3; n++) { fc3[adrs_t + n * 9 + m * 3 + l] = fc3_elem[l][m][n]; } } } break; } /* end switch */ } } } } free(done); done = NULL; } static void transpose_compact_fc3_type2(double * fc3, const long p2s[], const long s2pp[], const long nsym_list[], const long perms[], const long n_satom, const long n_patom) { long j, k, l, m, n, i_p; long adrs, adrs_t; double fc3_elem[3][3][3]; for (i_p = 0; i_p < n_patom; i_p++) { for (j = 0; j < n_satom; j++) { for (k = j; k < n_satom; k++) { /* k >= j */ adrs = (i_p * n_satom * n_satom + j * n_satom + k) * 27; adrs_t = (i_p * n_satom * n_satom + k * n_satom + j) * 27; for (l = 0; l < 3; l++) { for (m = 0; m < 3; m++) { for (n = 0; n < 3; n++) { fc3_elem[l][m][n] = fc3[adrs + l * 9 + m * 3 + n]; } } } if (k != j) { for (l = 0; l < 3; l++) { for (m = 0; m < 3; m++) { for (n = 0; n < 3; n++) { fc3[adrs + l * 9 + m * 3 + n] = fc3[adrs_t + l * 9 + n * 3 + m]; } } } } for (l = 0; l < 3; l++) { for (m = 0; m < 3; m++) { for (n = 0; n < 3; n++) { fc3[adrs_t + l * 9 + n * 3 + m] = fc3_elem[l][m][n]; } } } } } } }
schedule.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Run an ordered OpenMP target loop over [lb, ub) with the given stride,
 * recording one timestamp per iteration, then verify the timestamps are
 * non-decreasing (i.e. the iterations really executed in logical order).
 *
 * Returns 0 on success, 1 on ordering failure or allocation failure. */
int ordered_example(int lb, int ub, int stride)
{
  int i;
  /* Number of loop iterations: ceil((ub - lb) / stride).  The previous
   * plain integer division under-counted whenever (ub - lb) was not a
   * multiple of stride, making the final iteration write past the end
   * of `output`. */
  int size = (ub - lb + stride - 1) / stride;
  double *output = (double*)malloc(size * sizeof(double));
  if (output == NULL) {
    printf("allocation failure\n");
    return 1;
  }
#pragma omp target teams map(from:output[0:size])
#pragma omp parallel for ordered schedule(dynamic)
  for (i = lb; i < ub; i += stride) {
#pragma omp ordered
    {
      ///////////////////////////////////////
      // Make sure device printf is available, otherwise freezing.
      // With (lb, ub, stride) = (0, 100, 5) this prints
      //   0
      //   5
      //   10
      //   ...
      //   95
      // in ascending order.
      ///////////////////////////////////////
      printf(" %d\n", i);
      output[(i - lb) / stride] = omp_get_wtime();
    }
  }
  /* Verification: the sequence must be non-decreasing.  Checking each
   * adjacent pair is equivalent to checking every pair (a sequence is
   * sorted iff all adjacent pairs are ordered) and is O(n) instead of
   * the previous O(n^2). */
  for (int j = 0; j + 1 < size; j++) {
    if (output[j] > output[j + 1]) {
      printf("Fail to schedule in order.\n");
      free(output);
      return 1;
    }
  }
  free(output);
  printf("test OK\n");
  return 0;
}

int main()
{
  return ordered_example(0, 100, 5);
}
GB_unaryop__ainv_uint8_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint8_int16
// op(A') function:  GB_tran__ainv_uint8_int16

// C type:   uint8_t
// A type:   int16_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = -aij

// type of the A matrix entries
#define GB_ATYPE \
    int16_t

// type of the C matrix entries
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (AINV: additive inverse)
#define GB_OP(z, x) \
    z = -x ;

// casting: note that the int16_t input is cast to uint8_t first, and the
// negation is then performed in uint8_t arithmetic (see GB_CAST_OP below)
#define GB_CASTING(z, aij) \
    uint8_t z = (uint8_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;\
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;\
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint8_int16
(
    uint8_t *Cx,            // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // apply the operator to each of the anz entries, in parallel
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint8_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel is provided by the shared template below, which
    // uses the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
matrix.h
/****************************************************************************************************************/ /* */ /* OpenNN: Open Neural Networks Library */ /* www.opennn.net */ /* */ /* M A T R I X C O N T A I N E R */ /* */ /* Roberto Lopez */ /* Artelnics - Making intelligent use of data */ /* robertolopez@artelnics.com */ /* */ /****************************************************************************************************************/ #ifndef __MATRIX_H__ #define __MATRIX_H__ // System includes #include <cmath> #include <cstdlib> #include <fstream> #include <iomanip> #include <iostream> #include <sstream> #include <stdexcept> // OpenNN includes #include "vector.h" namespace OpenNN { /// This template class defines a matrix for general purpose use. /// This matrix also implements some mathematical methods which can be useful. template <class T> class Matrix : public std::vector<T> { public: // CONSTRUCTORS explicit Matrix(void); explicit Matrix(const size_t&, const size_t&); explicit Matrix(const size_t&, const size_t&, const T&); explicit Matrix(const std::string&); Matrix(const Matrix&); // DESTRUCTOR virtual ~Matrix(void); // ASSIGNMENT OPERATORS inline Matrix<T>& operator = (const Matrix<T>&); // REFERENCE OPERATORS inline T& operator () (const size_t&, const size_t&); inline const T& operator () (const size_t&, const size_t&) const; bool operator == (const Matrix<T>&) const; bool operator == (const T&) const; bool operator != (const Matrix<T>&) const; bool operator != (const T& value) const; bool operator > (const Matrix<T>&) const; bool operator > (const T& value) const; bool operator < (const Matrix<T>&) const; bool operator < (const T& value) const; bool operator >= (const Matrix<T>&) const; bool operator >= (const T&) const; bool operator <= (const Matrix<T>&) const; bool operator <= (const T&) const; // METHODS // Get methods const size_t& get_rows_number(void) const; const size_t& get_columns_number(void) const; // Set methods void 
set(void); void set(const size_t&, const size_t&); void set(const size_t&, const size_t&, const T&); void set(const Matrix<T>&); void set(const std::string&); void set_identity(const size_t&); void set_rows_number(const size_t&); void set_columns_number(const size_t&); void tuck_in(const size_t&, const size_t&, const Matrix<T>&); size_t count_diagonal_elements(void) const; size_t count_off_diagonal_elements(void) const; Matrix<T> arrange_submatrix(const Vector<size_t>&, const Vector<size_t>&) const; Matrix<T> arrange_submatrix_rows(const Vector<size_t>&) const; Matrix<T> arrange_submatrix_columns(const Vector<size_t>&) const; Vector<T> arrange_row(const size_t&) const; Vector<T> arrange_row(const size_t&, const Vector<size_t>&) const; Vector<T> arrange_column(const size_t&) const; Vector<T> arrange_column(const size_t&, const Vector<size_t>&) const; Vector<T> get_diagonal(void) const; void set_row(const size_t&, const Vector<T>&); void set_row(const size_t&, const T&); void set_column(const size_t&, const Vector<T>&); void set_column(const size_t&, const T&); void set_diagonal(const T&); void set_diagonal(const Vector<T>&); void initialize_diagonal(const size_t&, const T&); void initialize_diagonal(const size_t&, const Vector<T>&); Matrix<T> sum_diagonal(const T&) const; Matrix<T> sum_diagonal(const Vector<T>&) const; void append_row(const Vector<T>&); void append_column(const Vector<T>&) ; void insert_row(const size_t&, const Vector<T>&); void insert_column(const size_t&, const Vector<T>&); void subtract_row(const size_t&); void subtract_column(const size_t&); Matrix<T> assemble_rows(const Matrix<T>&) const; Matrix<T> assemble_columns(const Matrix<T>&) const; Matrix<T> sort_less_rows(const size_t&) const; Matrix<T> sort_greater_rows(const size_t&) const; void initialize(const T&); void randomize_uniform(const double& = -1.0, const double& = 1.0); void randomize_uniform(const Vector<double>&, const Vector<double>&); void randomize_uniform(const Matrix<double>&, 
const Matrix<double>&); void randomize_normal(const double& = 0.0, const double& = 1.0); void randomize_normal(const Vector<double>&, const Vector<double>&); void randomize_normal(const Matrix<double>&, const Matrix<double>&); void initialize_identity(void); void initialize_diagonal(const T&); // Mathematical methods T calculate_sum(void) const; Vector<T> calculate_rows_sum(void) const; void sum_row(const size_t&, const Vector<T>&); double calculate_trace(void) const; Vector<double> calculate_mean(void) const; double calculate_mean(const size_t&) const; Vector<double> calculate_mean(const Vector<size_t>&) const; Vector<double> calculate_mean(const Vector<size_t>&, const Vector<size_t>&) const; Vector<double> calculate_mean_missing_values(const Vector< Vector<size_t> >&) const; Vector<double> calculate_mean_missing_values(const Vector<size_t>&, const Vector<size_t>&, const Vector< Vector<size_t> >&) const; Vector< Vector<double> > calculate_mean_standard_deviation(void) const; Vector< Vector<double> > calculate_mean_standard_deviation(const Vector<size_t>&) const; Vector< Vector<double> > calculate_mean_standard_deviation(const Vector<size_t>&, const Vector<size_t>&) const; T calculate_minimum(void) const; T calculate_maximum(void) const; Vector< Vector<T> > calculate_minimum_maximum(void) const; Vector< Vector<T> > calculate_minimum_maximum(const Vector<size_t>&) const; Vector< Vector<T> > calculate_minimum_maximum(const Vector<size_t>&, const Vector<size_t>&) const; Vector< Statistics<T> > calculate_statistics(void) const; Vector< Statistics<T> > calculate_statistics_missing_values(const Vector< Vector<size_t> >&) const; Vector< Statistics<T> > calculate_statistics(const Vector<size_t>&, const Vector<size_t>&) const; Vector< Statistics<T> > calculate_rows_statistics(const Vector<size_t>&) const; Vector< Statistics<T> > calculate_rows_statistics_missing_values(const Vector<size_t>&, const Vector< Vector<size_t> >&) const; Vector< Statistics<T> > 
calculate_columns_statistics(const Vector<size_t>&) const; Vector< Statistics<T> > calculate_columns_statistics_missing_values(const Vector<size_t>&, const Vector< Vector<size_t> >) const; Vector< Vector<double> > calculate_shape_parameters(void) const; Vector< Vector<double> > calculate_shape_parameters_missing_values(const Vector<Vector<size_t> > &) const; Vector< Vector<double> > calculate_shape_parameters(const Vector<size_t>&, const Vector<size_t>&) const; Vector< Vector<double> > calculate_rows_shape_parameters(const Vector<size_t>&) const; Vector< Vector<double> > calculate_rows_shape_parameters_missing_values(const Vector<size_t>&, const Vector< Vector<size_t> >&) const; Vector< Vector<double> > calculate_columns_shape_parameters(const Vector<size_t>&) const; Vector< Vector<double> > calculate_columns_shape_parameters_missing_values(const Vector<size_t>&, const Vector< Vector<size_t> >&) const; Matrix<double> calculate_covariance_matrix(void) const; Vector< Histogram<T> > calculate_histograms(const size_t& = 10) const; Vector< Histogram<T> > calculate_histograms_missing_values(const Vector< Vector<size_t> >&, const size_t& = 10) const; Matrix<size_t> calculate_less_than_indices(const T&) const; Matrix<size_t> calculate_greater_than_indices(const T&) const; void scale_mean_standard_deviation(const Vector< Statistics<T> >&); Vector< Statistics<T> > scale_mean_standard_deviation(void); void scale_rows_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&); void scale_columns_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&); void scale_minimum_maximum(const Vector< Statistics<T> >&); Vector< Statistics<T> > scale_minimum_maximum(void); void scale_rows_minimum_maximum(const Vector< Statistics<T> >&, const Vector<size_t>&); void scale_columns_minimum_maximum(const Vector< Statistics<T> >&, const Vector<size_t>&); void unscale_mean_standard_deviation(const Vector< Statistics<T> >&); void 
unscale_rows_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&); void unscale_columns_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&); void unscale_minimum_maximum(const Vector< Statistics<T> >&); void unscale_rows_minimum_maximum(const Vector< Statistics<T> >&, const Vector<size_t>&); void unscale_columns_minimum_maximum(const Vector< Statistics<T> >&, const Vector<size_t>&); Vector<size_t> calculate_minimal_indices(void) const; Vector<size_t> calculate_maximal_indices(void) const; Vector< Vector<size_t> > calculate_minimal_maximal_indices(void) const; double calculate_sum_squared_error(const Matrix<double>&) const; double calculate_sum_squared_error(const Vector<double>&) const; Vector<double> calculate_rows_norm(void) const; Matrix<T> calculate_absolute_value(void) const; Matrix<T> calculate_transpose(void) const; T calculate_determinant(void) const; Matrix<T> calculate_cofactor(void) const; Matrix<T> calculate_inverse(void) const; Matrix<T> calculate_LU_inverse(void) const; Vector<T> solve_LDLT(const Vector<double>&) const; double calculate_distance(const size_t&, const size_t&) const; Matrix<T> operator + (const T&) const; Matrix<T> operator + (const Vector<T>&) const; Matrix<T> operator + (const Matrix<T>&) const; Matrix<T> operator - (const T& scalar) const; Matrix<T> operator - (const Vector<T>&) const; Matrix<T> operator - (const Matrix<T>&) const; Matrix<T> operator * (const T&) const; Matrix<T> operator * (const Vector<T>&) const; Matrix<T> operator * (const Matrix<T>&) const; Matrix<T> operator / (const T&) const; Matrix<T> operator / (const Vector<T>&) const; Matrix<T> operator / (const Matrix<T>&) const; void operator += (const T& value); void operator += (const Matrix<T>& other_matrix); void operator -= (const T&); void operator -= (const Matrix<T>&); void operator *= (const T&); void operator *= (const Matrix<T>&); void operator /= (const T&); void operator /= (const Matrix<T>&); // void 
sum_diagonal(const T&); Vector<double> dot(const Vector<double>&) const; Matrix<double> dot(const Matrix<double>&) const; Matrix<double> calculate_eigenvalues(void) const; Matrix<double> calculate_eigenvectors(void) const; Matrix<T> direct(const Matrix<T>&) const; bool empty(void) const; bool is_square(void) const; bool is_symmetric(void) const; bool is_antisymmetric(void) const; bool is_diagonal(void) const; bool is_scalar(void) const; bool is_identity(void) const; bool is_binary(void) const; bool is_column_binary(const size_t&) const; Matrix<T> filter(const size_t&, const T&, const T&) const; void convert_time_series(const size_t&); void convert_association(void); void convert_angular_variables_degrees(const size_t&); void convert_angular_variables_radians(const size_t&); // Serialization methods void print(void) const; void load(const std::string&); void load_binary(const std::string&); void save(const std::string&) const; void save_binary(const std::string&) const; void save_csv(const std::string&, const Vector<std::string>& = Vector<std::string>()) const; void parse(const std::string&); std::string to_string(const std::string& = " ") const; Matrix<std::string> write_string_matrix(const size_t& = 3) const; std::vector<T> to_std_vector(void) const; Vector<T> to_vector(void) const; void print_preview(void) const; private: /// Number of rows in the matrix. size_t rows_number; /// Number of columns in the matrix. size_t columns_number; }; // CONSTRUCTORS /// Default constructor. It creates a matrix with zero rows and zero columns. template <class T> Matrix<T>::Matrix(void) : std::vector<T>() { rows_number = 0; columns_number = 0; } /// Constructor. It creates a matrix with n rows and m columns, containing n*m copies of the default value for Type. /// @param new_rows_number Number of rows in matrix. /// @param new_columns_number Number of columns in matrix. 
template <class T>
Matrix<T>::Matrix(const size_t& new_rows_number, const size_t& new_columns_number) : std::vector<T>(new_rows_number*new_columns_number)
{
   // Empty matrix is the only case where a zero dimension is allowed.
   rows_number = 0;
   columns_number = 0;

   if(new_rows_number == 0 && new_columns_number == 0)
   {
      return;
   }

   // A single zero dimension is an error: guard clauses instead of an
   // if/else-if chain.
   if(new_rows_number == 0)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "Constructor Matrix(const size_t&, const size_t&).\n"
             << "Number of rows must be greater than zero.\n";

      throw std::logic_error(buffer.str());
   }

   if(new_columns_number == 0)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "Constructor Matrix(const size_t&, const size_t&).\n"
             << "Number of columns must be greater than zero.\n";

      throw std::logic_error(buffer.str());
   }

   rows_number = new_rows_number;
   columns_number = new_columns_number;
}


/// Constructor. It creates a matrix with n rows and m columns, containing n*m copies of the type value of Type.
/// @param new_rows_number Number of rows in matrix.
/// @param new_columns_number Number of columns in matrix.
/// @param value Value of Type.
template <class T>
Matrix<T>::Matrix(const size_t& new_rows_number, const size_t& new_columns_number, const T& value) : std::vector<T>(new_rows_number*new_columns_number)
{
    // An empty matrix (0x0) is valid; a matrix with exactly one zero dimension is not.
    if(new_rows_number == 0 && new_columns_number == 0)
    {
        rows_number = 0;
        columns_number = 0;
        return;
    }

    if(new_rows_number == 0)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "Constructor Matrix(const size_t&, const size_t&, const T&).\n"
                << "Number of rows must be greater than zero.\n";

        throw std::logic_error(message.str());
    }

    if(new_columns_number == 0)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "Constructor Matrix(const size_t&, const size_t&, const T&).\n"
                << "Number of columns must be greater than zero.\n";

        throw std::logic_error(message.str());
    }

    rows_number = new_rows_number;
    columns_number = new_columns_number;

    this->initialize(value);
}


/// File constructor. It creates a matrix whose members are loaded from a data file.
/// @param file_name Name of matrix data file.

template <class T>
Matrix<T>::Matrix(const std::string& file_name) : std::vector<T>()
{
    rows_number = 0;
    columns_number = 0;

    load(file_name);
}


/// Copy constructor. It creates a copy of an existing matrix.
/// @param other_matrix Matrix to be copied.

template <class T>
Matrix<T>::Matrix(const Matrix& other_matrix) : std::vector<T>(other_matrix.begin(), other_matrix.end())
{
    rows_number = other_matrix.rows_number;
    columns_number = other_matrix.columns_number;
}


// DESTRUCTOR

/// Destructor. Nothing to release: storage is owned by the std::vector base.

template <class T>
Matrix<T>::~Matrix(void)
{
}


// ASSIGNMENT OPERATORS

/// Assignment operator. It assigns to self a copy of an existing matrix.
/// @param other_matrix Matrix to be assigned.
template <class T> Matrix<T>& Matrix<T>::operator = (const Matrix<T>& other_matrix) { if(other_matrix.rows_number != rows_number || other_matrix.columns_number != columns_number) { rows_number = other_matrix.rows_number; columns_number = other_matrix.columns_number; this->clear(); this->resize(rows_number*columns_number); } std::copy(other_matrix.begin(), other_matrix.end(), (*this).begin()); return(*this); } // REFERENCE OPERATORS /// Reference operator. /// Returns the element (i,j) of the matrix. /// @param row Index of row. /// @param column Index of column. template <class T> inline T& Matrix<T>::operator () (const size_t& row, const size_t& column) { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(row >= rows_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "operator () (const size_t&, const size_t&).\n" << "Row index (" << row << ") must be less than number of rows (" << rows_number << ").\n"; throw std::logic_error(buffer.str()); } else if(column >= columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "operator () (const size_t&, const size_t&).\n" << "Column index (" << column << ") must be less than number of columns (" << columns_number << ").\n"; throw std::logic_error(buffer.str()); } #endif // Return matrix element return((*this)[rows_number*column+row]); } /// Reference operator. /// Returns the element (i,j) of the matrix. /// @param row Index of row. /// @param column Index of column. 
template <class T>
inline const T& Matrix<T>::operator () (const size_t& row, const size_t& column) const
{
    // Control sentence (if debug)

    #ifdef __OPENNN_DEBUG__

    if(row >= rows_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "operator () (const size_t&, const size_t&).\n"
                << "Row index (" << row << ") must be less than number of rows (" << rows_number << ").\n";

        throw std::logic_error(message.str());
    }
    else if(column >= columns_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "operator () (const size_t&, const size_t&).\n"
                << "Column index (" << column << ") must be less than number of columns (" << columns_number << ").\n";

        throw std::logic_error(message.str());
    }

    #endif

    // Column-major addressing.

    return (*this)[rows_number*column+row];
}


// bool operator == (const Matrix<T>&) const

/// Equivalent relational operator between this matrix and other matrix.
/// It produces true if all the elements of the two matrices are equal, and false otherwise.
/// @param other_matrix Matrix to be compared with.

template <class T>
bool Matrix<T>::operator == (const Matrix<T>& other_matrix) const
{
    // Shape mismatch means not equal; no element comparison needed.

    if(other_matrix.get_rows_number() != rows_number)
    {
        return false;
    }

    if(other_matrix.get_columns_number() != columns_number)
    {
        return false;
    }

    const size_t elements_number = this->size();

    for(size_t i = 0; i < elements_number; i++)
    {
        if((*this)[i] != other_matrix[i])
        {
            return false;
        }
    }

    return true;
}


// bool operator == (const T&)

/// Equivalent relational operator between this matrix and a Type value.
/// It produces true if all the elements of this matrix are equal to the Type value, and false otherwise.
/// @param value Type value to be compared with.
template <class T>
bool Matrix<T>::operator == (const T& value) const
{
    const size_t elements_number = this->size();

    // All elements must match the scalar.

    for(size_t i = 0; i < elements_number; i++)
    {
        if((*this)[i] != value)
        {
            return false;
        }
    }

    return true;
}


// bool operator != (const Matrix<T>&)

/// Not equivalent relational operator between this matrix and other matrix.
/// It produces true if the two matrices have any not equal element, and false otherwise.
/// Both matrices must have the same dimensions (checked in debug builds).
/// @param other_matrix Matrix to be compared with.

template <class T>
bool Matrix<T>::operator != (const Matrix<T>& other_matrix) const
{
    // Control sentence (if debug)

    #ifdef __OPENNN_DEBUG__

    if(other_matrix.get_rows_number() != rows_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "bool operator != (const Matrix<T>&) const.\n"
                << "Both numbers of rows must be the same.\n";

        throw std::logic_error(message.str());
    }
    else if(other_matrix.get_columns_number() != columns_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "bool operator != (const Matrix<T>&) const.\n"
                << "Both numbers of columns must be the same.\n";

        throw std::logic_error(message.str());
    }

    #endif

    const size_t elements_number = this->size();

    for(size_t i = 0; i < elements_number; i++)
    {
        if((*this)[i] != other_matrix[i])
        {
            return true;
        }
    }

    return false;
}


// bool operator != (const T&) const

/// Not equivalent relational operator between this matrix and a Type value.
/// It produces true if some element of this matrix is not equal to the Type value, and false otherwise.
/// @param value Type value to be compared with.

template <class T>
bool Matrix<T>::operator != (const T& value) const
{
    const size_t elements_number = this->size();

    for(size_t i = 0; i < elements_number; i++)
    {
        if((*this)[i] != value)
        {
            return true;
        }
    }

    return false;
}


// bool operator > (const Matrix<T>&) const

/// Greater than relational operator between this matrix and other matrix.
/// It produces true if all the elements of this matrix are greater than the corresponding elements of the other matrix, /// and false otherwise. /// @param other_matrix matrix to be compared with. template <class T> bool Matrix<T>::operator > (const Matrix<T>& other_matrix) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ const size_t other_rows_number = other_matrix.get_rows_number(); const size_t other_columns_number = other_matrix.get_columns_number(); if(other_rows_number != rows_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "bool operator > (const Matrix<T>&) const.\n" << "Both numbers of rows must be the same.\n"; throw std::logic_error(buffer.str()); } else if(other_columns_number != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "bool operator > (const Matrix<T>&) const.\n" << "Both numbers of columns must be the same.\n"; throw std::logic_error(buffer.str()); } #endif for(size_t i = 0; i < this->size(); i++) { if((*this)[i] <= other_matrix[i]) { return(false); } } return(true); } // bool operator > (const T&) const /// Greater than relational operator between this matrix and a Type value. /// It produces true if all the elements of this matrix are greater than the Type value, and false otherwise. /// @param value Type value to be compared with. template <class T> bool Matrix<T>::operator > (const T& value) const { for(size_t i = 0; i < this->size(); i++) { if((*this)[i] <= value) { return(false); } } return(true); } // bool operator < (const Matrix<T>&) const /// Less than relational operator between this matrix and other matrix. /// It produces true if all the elements of this matrix are less than the corresponding elements of the other matrix, /// and false otherwise. /// @param other_matrix Matrix to be compared with. 
template <class T>
bool Matrix<T>::operator < (const Matrix<T>& other_matrix) const
{
    // Control sentence (if debug)

    #ifdef __OPENNN_DEBUG__

    if(other_matrix.get_rows_number() != rows_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "bool operator < (const Matrix<T>&) const.\n"
                << "Both numbers of rows must be the same.\n";

        throw std::logic_error(message.str());
    }
    else if(other_matrix.get_columns_number() != columns_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "bool operator < (const Matrix<T>&) const.\n"
                << "Both numbers of columns must be the same.\n";

        throw std::logic_error(message.str());
    }

    #endif

    const size_t elements_number = this->size();

    // One counterexample is enough to reject.

    for(size_t i = 0; i < elements_number; i++)
    {
        if((*this)[i] >= other_matrix[i])
        {
            return false;
        }
    }

    return true;
}


// bool operator < (const T&) const

/// Less than relational operator between this matrix and a Type value.
/// It produces true if all the elements of this matrix are less than the Type value, and false otherwise.
/// @param value Type value to be compared with.

template <class T>
bool Matrix<T>::operator < (const T& value) const
{
    const size_t elements_number = this->size();

    for(size_t i = 0; i < elements_number; i++)
    {
        if((*this)[i] >= value)
        {
            return false;
        }
    }

    return true;
}


// bool operator >= (const Matrix<T>&) const

/// Greater than or equal to relational operator between this matrix and other matrix.
/// It produces true if all the elements of this matrix are greater than or equal to the corresponding elements of the
/// other matrix, and false otherwise.
/// @param other_matrix Matrix to be compared with.
template <class T>
bool Matrix<T>::operator >= (const Matrix<T>& other_matrix) const
{
    // Control sentence (if debug)

    #ifdef __OPENNN_DEBUG__

    if(other_matrix.get_rows_number() != rows_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "bool operator >= (const Matrix<T>&) const.\n"
                << "Both numbers of rows must be the same.\n";

        throw std::logic_error(message.str());
    }
    else if(other_matrix.get_columns_number() != columns_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "bool operator >= (const Matrix<T>&) const.\n"
                << "Both numbers of columns must be the same.\n";

        throw std::logic_error(message.str());
    }

    #endif

    const size_t elements_number = this->size();

    // One counterexample is enough to reject.

    for(size_t i = 0; i < elements_number; i++)
    {
        if((*this)[i] < other_matrix[i])
        {
            return false;
        }
    }

    return true;
}


// bool operator >= (const T&) const

/// Greater than or equal to than relational operator between this matrix and a Type value.
/// It produces true if all the elements of this matrix are greater than or equal to the Type value, and false otherwise.
/// @param value Type value to be compared with.

template <class T>
bool Matrix<T>::operator >= (const T& value) const
{
    const size_t elements_number = this->size();

    for(size_t i = 0; i < elements_number; i++)
    {
        if((*this)[i] < value)
        {
            return false;
        }
    }

    return true;
}


// bool operator <= (const Matrix<T>&) const

/// Less than or equal to relational operator between this matrix and other matrix.
/// It produces true if all the elements of this matrix are less than or equal to the corresponding elements of the
/// other matrix, and false otherwise.
/// @param other_matrix Matrix to be compared with.
template <class T>
bool Matrix<T>::operator <= (const Matrix<T>& other_matrix) const
{
    // Control sentence (if debug)

    #ifdef __OPENNN_DEBUG__

    const size_t other_rows_number = other_matrix.get_rows_number();
    const size_t other_columns_number = other_matrix.get_columns_number();

    // Fixed copy-paste bug: these messages previously named "operator >=".

    if(other_rows_number != rows_number)
    {
        std::ostringstream buffer;

        buffer << "OpenNN Exception: Matrix Template.\n"
               << "bool operator <= (const Matrix<T>&) const.\n"
               << "Both numbers of rows must be the same.\n";

        throw std::logic_error(buffer.str());
    }
    else if(other_columns_number != columns_number)
    {
        std::ostringstream buffer;

        buffer << "OpenNN Exception: Matrix Template.\n"
               << "bool operator <= (const Matrix<T>&) const.\n"
               << "Both numbers of columns must be the same.\n";

        throw std::logic_error(buffer.str());
    }

    #endif

    // Reject on the first element that violates the relation.

    for(size_t i = 0; i < this->size(); i++)
    {
        if((*this)[i] > other_matrix[i])
        {
            return(false);
        }
    }

    return(true);
}


// bool operator <= (const T&) const

/// Less than or equal to than relational operator between this matrix and a Type value.
/// It produces true if all the elements of this matrix are less than or equal to the Type value, and false otherwise.
/// @param value Type value to be compared with.

template <class T>
bool Matrix<T>::operator <= (const T& value) const
{
    for(size_t i = 0; i < this->size(); i++)
    {
        if((*this)[i] > value)
        {
            return(false);
        }
    }

    return(true);
}


// METHODS

// size_t get_rows_number(void) const method

/// Returns the number of rows in the matrix.

template <class T>
const size_t& Matrix<T>::get_rows_number(void) const
{
    return(rows_number);
}


// size_t get_columns_number(void) const method

/// Returns the number of columns in the matrix.

template <class T>
const size_t& Matrix<T>::get_columns_number(void) const
{
    return(columns_number);
}


// void set(void) method

/// This method sets the numbers of rows and columns of the matrix to zero and clears all elements.
template <class T>
void Matrix<T>::set(void)
{
    rows_number = 0;
    columns_number = 0;

    this->clear();
}


// void set(const size_t&, const size_t&) method

/// This method sets new numbers of rows and columns in the matrix and resizes the storage accordingly.
/// Setting both sizes to zero empties the matrix; a single zero dimension is rejected.
/// @param new_rows_number Number of rows.
/// @param new_columns_number Number of columns.

template <class T>
void Matrix<T>::set(const size_t& new_rows_number, const size_t& new_columns_number)
{
    // Already the requested shape: nothing to do.

    if(new_rows_number == rows_number && new_columns_number == columns_number)
    {
        return;
    }

    if(new_rows_number == 0 && new_columns_number == 0)
    {
        set();
        return;
    }

    if(new_rows_number == 0)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "void set(const size_t&, const size_t&) method.\n"
                << "Number of rows must be greater than zero.\n";

        throw std::logic_error(message.str());
    }

    if(new_columns_number == 0)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "void set(const size_t&, const size_t&) method.\n"
                << "Number of columns must be greater than zero.\n";

        throw std::logic_error(message.str());
    }

    rows_number = new_rows_number;
    columns_number = new_columns_number;

    this->resize(rows_number*columns_number);
}


// void set(const size_t&, const size_t&, const T&) method

/// This method sets new numbers of rows and columns in the matrix.
/// It also initializes all the matrix elements to a given value.
/// @param new_rows_number Number of rows.
/// @param new_columns_number Number of columns.
/// @param value Initialization value.
template <class T>
void Matrix<T>::set(const size_t& new_rows_number, const size_t& new_columns_number, const T& value)
{
    if(new_rows_number == 0 && new_columns_number == 0)
    {
        set();
        return;
    }

    if(new_rows_number == 0)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "void set(const size_t&, const size_t&, const T&) method.\n"
                << "Number of rows must be greater than zero.\n";

        throw std::logic_error(message.str());
    }

    if(new_columns_number == 0)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "void set(const size_t&, const size_t&, const T&) method.\n"
                << "Number of columns must be greater than zero.\n";

        throw std::logic_error(message.str());
    }

    // Resize first, then fill every element with the given value.

    set(new_rows_number, new_columns_number);

    initialize(value);
}


// void set(const Matrix&) method

/// Sets all the members of the matrix to those of another matrix.
/// @param other_matrix Setting matrix.

template <class T>
void Matrix<T>::set(const Matrix<T>& other_matrix)
{
    rows_number = other_matrix.rows_number;
    columns_number = other_matrix.columns_number;

    const size_t elements_number = rows_number*columns_number;

    this->resize(elements_number);

    for(size_t i = 0; i < elements_number; i++)
    {
        (*this)[i] = other_matrix[i];
    }
}


// void set(const std::string&) method

/// Sets the members of this object by loading them from a data file.
/// @param file_name Name of data file.

template <class T>
void Matrix<T>::set(const std::string& file_name)
{
    load(file_name);
}


// void set_identity(const size_t&) method

/// Sets the matrix to be squared, with elements equal one in the diagonal and zero outside the diagonal.
/// @param new_size New number of rows and columns in this matrix.

template <class T>
void Matrix<T>::set_identity(const size_t& new_size)
{
    set(new_size, new_size);
    initialize_identity();
}


// void set_rows_number(const size_t&) method

/// Sets a new number of rows in the matrix.
/// @param new_rows_number Number of matrix rows.
template <class T>
void Matrix<T>::set_rows_number(const size_t& new_rows_number)
{
    // NOTE(review): resizing reallocates the flat storage, so existing element
    // positions are not preserved in the new shape.

    if(new_rows_number != rows_number)
    {
        set(new_rows_number, columns_number);
    }
}


// void set_columns_number(const size_t&) method

/// Sets a new number of columns in the matrix.
/// @param new_columns_number Number of matrix columns.

template <class T>
void Matrix<T>::set_columns_number(const size_t& new_columns_number)
{
    if(new_columns_number != columns_number)
    {
        set(rows_number, new_columns_number);
    }
}


// void tuck_in(const size_t&, const size_t&, const Matrix<T>&) const method

/// Copies another matrix into this one starting at a given position.
/// The inserted matrix must fit entirely inside this matrix (checked in debug builds).
/// @param row_position Insertion row position.
/// @param column_position Insertion column position.
/// @param other_matrix Matrix to be inserted.

template <class T>
void Matrix<T>::tuck_in(const size_t& row_position, const size_t& column_position, const Matrix<T>& other_matrix)
{
    const size_t other_rows_number = other_matrix.get_rows_number();
    const size_t other_columns_number = other_matrix.get_columns_number();

    // Control sentence (if debug)

    #ifdef __OPENNN_DEBUG__

    if(row_position + other_rows_number > rows_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "void tuck_in(const size_t&, const size_t&, const Matrix<T>&) const method.\n"
                << "Cannot tuck in matrix.\n";

        throw std::logic_error(message.str());
    }

    if(column_position + other_columns_number > columns_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "void tuck_in(const size_t&, const size_t&, const Matrix<T>&) const method.\n"
                << "Cannot tuck in matrix.\n";

        throw std::logic_error(message.str());
    }

    #endif

    for(size_t row = 0; row < other_rows_number; row++)
    {
        for(size_t column = 0; column < other_columns_number; column++)
        {
            (*this)(row_position+row, column_position+column) = other_matrix(row, column);
        }
    }
}


// size_t count_diagonal_elements(void) const method

/// Returns the number of elements in the diagonal which are not zero.
/// This method is only defined for square matrices. template <class T> size_t Matrix<T>::count_diagonal_elements(void) const { #ifdef __OPENNN_DEBUG__ if(!is_square()) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "size_t count_diagonal_elements(void) const method.\n" << "The matrix is not square.\n"; throw std::logic_error(buffer.str()); } #endif const size_t rows_number = get_rows_number(); size_t count = 0; for(size_t i = 0; i < rows_number; i++) { if((*this)(i,i) != 0) { count++; } } return(count); } // size_t count_off_diagonal_elements(void) const method /// Returns the number of elements outside the diagonal which are not zero. /// This method is only defined for square matrices. template <class T> size_t Matrix<T>::count_off_diagonal_elements(void) const { #ifdef __OPENNN_DEBUG__ if(!is_square()) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "size_t count_off_diagonal_elements(void) const method.\n" << "The matrix is not square.\n"; throw std::logic_error(buffer.str()); } #endif const size_t rows_number = get_rows_number(); const size_t columns_number = get_columns_number(); size_t count = 0; for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { if(i != j && (*this)(i,j) != 0) { count++; } } } return(count); } // Matrix<T> arrange_submatrix(const Vector<size_t>&, const Vector<size_t>&) const method /// Returns a matrix with the values of given rows and columns from this matrix. /// @param row_indices Indices of matrix rows. /// @param column_indices Indices of matrix columns. 
template <class T>
Matrix<T> Matrix<T>::arrange_submatrix(const Vector<size_t>& row_indices, const Vector<size_t>& column_indices) const
{
    const size_t row_indices_size = row_indices.size();
    const size_t column_indices_size = column_indices.size();

    Matrix<T> sub_matrix(row_indices_size, column_indices_size);

    // Gather the selected rows and columns, in the order given by the index vectors.

    for(size_t i = 0; i < row_indices_size; i++)
    {
        const size_t row_index = row_indices[i];

        for(size_t j = 0; j < column_indices_size; j++)
        {
            sub_matrix(i,j) = (*this)(row_index, column_indices[j]);
        }
    }

    return sub_matrix;
}


// Matrix<T> arrange_submatrix_rows(const Vector<size_t>&) const method

/// Returns a submatrix with the values of given rows from this matrix.
/// @param row_indices Indices of matrix rows.

template <class T>
Matrix<T> Matrix<T>::arrange_submatrix_rows(const Vector<size_t>& row_indices) const
{
    const size_t row_indices_size = row_indices.size();

    Matrix<T> sub_matrix(row_indices_size, columns_number);

    for(size_t i = 0; i < row_indices_size; i++)
    {
        const size_t row_index = row_indices[i];

        for(size_t j = 0; j < columns_number; j++)
        {
            sub_matrix(i,j) = (*this)(row_index, j);
        }
    }

    return sub_matrix;
}


// Matrix<T> arrange_submatrix_columns(const Vector<size_t>&) const method

/// Returns a submatrix with the values of given columns from this matrix.
/// @param column_indices Indices of matrix columns.

template <class T>
Matrix<T> Matrix<T>::arrange_submatrix_columns(const Vector<size_t>& column_indices) const
{
    const size_t column_indices_size = column_indices.size();

    Matrix<T> sub_matrix(rows_number, column_indices_size);

    for(size_t i = 0; i < rows_number; i++)
    {
        for(size_t j = 0; j < column_indices_size; j++)
        {
            sub_matrix(i,j) = (*this)(i, column_indices[j]);
        }
    }

    return sub_matrix;
}


// Vector<T> arrange_row(const size_t&) const method

/// Returns the row i of the matrix.
/// @param i Index of row.
template <class T>
Vector<T> Matrix<T>::arrange_row(const size_t& i) const
{
    // Control sentence (if debug)

    #ifdef __OPENNN_DEBUG__

    if(i >= rows_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "Vector<T> arrange_row(const size_t&) const method.\n"
                << "Row index (" << i << ") must be less than number of rows (" << rows_number << ").\n";

        throw std::logic_error(message.str());
    }

    #endif

    Vector<T> row(columns_number);

    for(size_t j = 0; j < columns_number; j++)
    {
        row[j] = (*this)(i,j);
    }

    return row;
}


// Vector<T> arrange_row(const size_t&, const Vector<size_t>&) const method

/// Returns the row i of the matrix, but only the elements specified by given indices.
/// @param row_index Index of row.
/// @param column_indices Column indices of row.

template <class T>
Vector<T> Matrix<T>::arrange_row(const size_t& row_index, const Vector<size_t>& column_indices) const
{
    // Control sentence (if debug)

    #ifdef __OPENNN_DEBUG__

    if(row_index >= rows_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "Vector<T> arrange_row(const size_t&, const Vector<size_t>&) const method.\n"
                << "Row index (" << row_index << ") must be less than number of rows (" << rows_number << ").\n";

        throw std::logic_error(message.str());
    }

    #endif

    const size_t column_indices_size = column_indices.size();

    Vector<T> row(column_indices_size);

    for(size_t i = 0; i < column_indices_size; i++)
    {
        row[i] = (*this)(row_index, column_indices[i]);
    }

    return row;
}


// Vector<T> arrange_column(const size_t&) const method

/// Returns the column j of the matrix.
/// @param j Index of column.
template <class T>
Vector<T> Matrix<T>::arrange_column(const size_t& j) const
{
    // Control sentence (if debug)

    #ifdef __OPENNN_DEBUG__

    if(j >= columns_number)
    {
        std::ostringstream buffer;

        buffer << "OpenNN Exception: Matrix Template.\n"
               << "Vector<T> arrange_column(const size_t&) const method.\n"
               << "Column index (" << j << ") must be less than number of columns (" << columns_number << ").\n";

        throw std::logic_error(buffer.str());
    }

    #endif

    Vector<T> column(rows_number);

    for(size_t i = 0; i < rows_number; i++)
    {
        column[i] = (*this)(i,j);
    }

    return(column);
}


// Vector<T> arrange_column(const size_t&, const Vector<size_t>&) const method

/// Returns the column j of the matrix, but only those elements specified by given indices.
/// @param column_index Index of column.
/// @param row_indices Row indices of column.

template <class T>
Vector<T> Matrix<T>::arrange_column(const size_t& column_index, const Vector<size_t>& row_indices) const
{
    // Control sentence (if debug)

    #ifdef __OPENNN_DEBUG__

    if(column_index >= columns_number)
    {
        std::ostringstream buffer;

        // Fixed message: previously named the wrong overload and said
        // "number of rows" while printing columns_number.

        buffer << "OpenNN Exception: Matrix Template.\n"
               << "Vector<T> arrange_column(const size_t&, const Vector<size_t>&) const method.\n"
               << "Column index (" << column_index << ") must be less than number of columns (" << columns_number << ").\n";

        throw std::logic_error(buffer.str());
    }

    #endif

    const size_t size = row_indices.size();

    Vector<T> column(size);

    for(size_t i = 0; i < size; i++)
    {
        column[i] = (*this)(row_indices[i], column_index);
    }

    return(column);
}


// Vector<T> get_diagonal(void) const method

/// Returns the diagonal of the matrix.
template <class T> Vector<T> Matrix<T>::get_diagonal(void) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(rows_number != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "Vector<T> get_diagonal(void) const method.\n" << "Matrix must be squared.\n"; throw std::logic_error(buffer.str()); } #endif Vector<T> diagonal(rows_number); for(size_t i = 0; i < rows_number; i++) { diagonal[i] = (*this)(i,i); } return(diagonal); } // void set_row(const size_t&, const Vector<T>&) const method /// Sets new values of a single row in the matrix. /// @param row_index Index of row. /// @param new_row New values of single row. template <class T> void Matrix<T>::set_row(const size_t& row_index, const Vector<T>& new_row) { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(row_index >= rows_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "set_row(const size_t&, const Vector<T>&) method.\n" << "Index must be less than number of rows.\n"; throw std::logic_error(buffer.str()); } const size_t size = new_row.size(); if(size != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "set_row(const size_t&, const Vector<T>&) method.\n" << "Size (" << size << ") must be equal to number of columns (" << columns_number << ").\n"; throw std::logic_error(buffer.str()); } #endif // Set new row for(size_t i = 0; i < columns_number; i++) { (*this)(row_index,i) = new_row[i]; } } // void set_row(const size_t&, const T&) method /// Sets a new value of a single row in the matrix. /// @param row_index Index of row. /// @param value New value of single row. 
template <class T>
void Matrix<T>::set_row(const size_t& row_index, const T& value)
{
    // Control sentence (if debug)

    #ifdef __OPENNN_DEBUG__

    if(row_index >= rows_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "set_row(const size_t&, const T&) method.\n"
                << "Index must be less than number of rows.\n";

        throw std::logic_error(message.str());
    }

    #endif

    // Fill the whole row with the scalar.

    for(size_t j = 0; j < columns_number; j++)
    {
        (*this)(row_index, j) = value;
    }
}


// void set_column(const size_t&, const Vector<T>&) method

/// Sets new values of a single column in the matrix.
/// The vector size must match the number of rows (checked in debug builds).
/// @param column_index Index of column.
/// @param new_column New values of single column.

template <class T>
void Matrix<T>::set_column(const size_t& column_index, const Vector<T>& new_column)
{
    // Control sentence (if debug)

    #ifdef __OPENNN_DEBUG__

    if(column_index >= columns_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "set_column(const size_t&, const Vector<T>&).\n"
                << "Index (" << column_index << ") must be less than number of columns (" << columns_number << ").\n";

        throw std::logic_error(message.str());
    }

    const size_t size = new_column.size();

    if(size != rows_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "set_column(const size_t&, const Vector<T>&).\n"
                << "Size must be equal to number of rows.\n";

        throw std::logic_error(message.str());
    }

    #endif

    // Set new column

    for(size_t i = 0; i < rows_number; i++)
    {
        (*this)(i, column_index) = new_column[i];
    }
}


// void set_column(const size_t&, const T&) method

/// Sets a new values of a single column in the matrix.
/// @param column_index Index of column.
/// @param value New value of single column.
template <class T>
void Matrix<T>::set_column(const size_t& column_index, const T& value)
{
    // Control sentence (if debug)

    #ifdef __OPENNN_DEBUG__

    if(column_index >= columns_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "set_column(const size_t&, const T&).\n"
                << "Index must be less than number of columns.\n";

        throw std::logic_error(message.str());
    }

    #endif

    // Fill the whole column with the scalar.

    for(size_t i = 0; i < rows_number; i++)
    {
        (*this)(i, column_index) = value;
    }
}


// void set_diagonal(const T&) method

/// Sets a new value for the diagonal elements in the matrix.
/// The matrix must be square (checked in debug builds).
/// @param new_diagonal New value of diagonal.

template <class T>
void Matrix<T>::set_diagonal(const T& new_diagonal)
{
    // Control sentence (if debug)

    #ifdef __OPENNN_DEBUG__

    if(rows_number != columns_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "set_diagonal(const T&).\n"
                << "Matrix must be square.\n";

        throw std::logic_error(message.str());
    }

    #endif

    for(size_t i = 0; i < rows_number; i++)
    {
        (*this)(i,i) = new_diagonal;
    }
}


// void set_diagonal(const Vector<T>&) method

/// Sets new values of the diagonal in the matrix.
/// The matrix must be square.
/// @param new_diagonal New values of diagonal.
template <class T>
void Matrix<T>::set_diagonal(const Vector<T>& new_diagonal)
{
    // Control sentence (if debug)

    #ifdef __OPENNN_DEBUG__

    if(rows_number != columns_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "set_diagonal(const Vector<T>&) const.\n"
                << "Matrix is not square.\n";

        throw std::logic_error(message.str());
    }

    const size_t size = new_diagonal.size();

    if(size != rows_number)
    {
        std::ostringstream message;

        message << "OpenNN Exception: Matrix Template.\n"
                << "set_diagonal(const Vector<T>&) const.\n"
                << "Size of diagonal (" << size << ") is not equal to size of matrix (" << rows_number << ").\n";

        throw std::logic_error(message.str());
    }

    #endif

    for(size_t i = 0; i < rows_number; i++)
    {
        (*this)(i,i) = new_diagonal[i];
    }
}


// void initialize_diagonal(const size_t&, const T&) method

/// Sets this matrix to be diagonal.
/// A diagonal matrix is a square matrix in which the entries outside the main diagonal are all zero.
/// It also initializes the elements on the main diagonal to a unique given value.
/// @param new_size Number of rows and colums in the matrix.
/// @param new_value Value of all the elements in the main diagonal.

template <class T>
void Matrix<T>::initialize_diagonal(const size_t& new_size, const T& new_value)
{
    // Zero everything first, then write the diagonal.

    set(new_size, new_size, 0.0);
    set_diagonal(new_value);
}


// void initialize_diagonal(const size_t&, const Vector<T>&) method

/// Sets this matrix to be diagonal.
/// A diagonal matrix is a square matrix in which the entries outside the main diagonal are all zero.
/// It also initializes the elements on the main diagonal to given values.
/// @param new_size Number of rows and colums in the matrix.
/// @param new_values Values of the elements in the main diagonal.
template <class T> void Matrix<T>::initialize_diagonal(const size_t& new_size, const Vector<T>& new_values) { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ const size_t new_values_size = new_values.size(); if(new_values_size != new_size) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "initialize_diagonal(const size_t&, const size_t&) const.\n" << "Size of new values is not equal to size of square matrix.\n"; throw std::logic_error(buffer.str()); } #endif set(new_size, new_size, 0.0); set_diagonal(new_values); } // Matrix<T> sum_diagonal(const T&) const method /// This method sums a new value to the diagonal elements in the matrix. /// The matrix must be square. /// @param value New summing value. template <class T> Matrix<T> Matrix<T>::sum_diagonal(const T& value) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(rows_number != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "sum_diagonal(const T&) const.\n" << "Matrix must be square.\n"; throw std::logic_error(buffer.str()); } #endif Matrix<T> sum(*this); for(size_t i = 0; i < rows_number; i++) { sum(i,i) += value; } return(sum); } // Matrix<T> sum_diagonal(const Vector<T>&) const method /// This method sums new values to the diagonal in the matrix. /// The matrix must be square. /// @param new_summing_values Vector of summing values. 
template <class T> Matrix<T> Matrix<T>::sum_diagonal(const Vector<T>& new_summing_values) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(rows_number != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "sum_diagonal(const Vector<T>&) const.\n" << "Matrix must be square.\n"; throw std::logic_error(buffer.str()); } const size_t size = new_summing_values.size(); if(size != rows_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "sum_diagonal(const Vector<T>&) const.\n" << "Size must be equal to number of rows.\n"; throw std::logic_error(buffer.str()); } #endif Matrix<T> sum(*this); for(size_t i = 0; i < rows_number; i++) { sum(i,i) += new_summing_values[i]; } return(sum); } // void append_row(const Vector<T>&) const method /// This method appends a new row to the matrix. /// The size of the row vector must be equal to the number of columns of the matrix. /// Note that resizing is necessary here and therefore this method can be very inefficient. /// @param new_row Row to be appended. template <class T> void Matrix<T>::append_row(const Vector<T>& new_row) { #ifdef __OPENNN_DEBUG__ const size_t size = new_row.size(); if(size != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "append_row(const Vector<T>&) const.\n" << "Size (" << size << ") must be equal to number of columns (" << columns_number << ").\n"; throw std::logic_error(buffer.str()); } #endif Matrix<T> copy(*this); set(rows_number+1, columns_number); for(size_t i = 0; i < copy.get_rows_number(); i++) { for(size_t j = 0; j < copy.get_columns_number(); j++) { (*this)(i,j) = copy(i,j); } } set_row(rows_number-1, new_row); } // void append_column(const Vector<T>&) const method /// This method appends a new column to the matrix. /// The size of the column vector must be equal to the number of rows of the matrix. 
/// Note that resizing is necessary here and therefore this method can be very inefficient. /// @param new_column Column to be appended. template <class T> void Matrix<T>::append_column(const Vector<T>& new_column) { #ifdef __OPENNN_DEBUG__ const size_t size = new_column.size(); if(size != rows_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "append_column(const Vector<T>&) const.\n" << "Size (" << size << ") must be equal to number of rows (" << rows_number << ").\n"; throw std::logic_error(buffer.str()); } #endif set(rows_number, columns_number+1); set_column(columns_number-1, new_column); } // void insert_row(const size_t&, const Vector<T>&) const method /// Inserts a new row in a given position. /// Note that this method resizes the matrix, which can be computationally expensive. /// @param position Index of new row. /// @param new_row Vector with the row contents. template <class T> void Matrix<T>::insert_row(const size_t& position, const Vector<T>& new_row) { #ifdef __OPENNN_DEBUG__ if(position > rows_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "insert_row(const size_t&, const Vector<T>&) const.\n" << "Position must be less or equal than number of rows.\n"; throw std::logic_error(buffer.str()); } const size_t size = new_row.size(); if(size != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "insert_row(const size_t&, const Vector<T>&) const.\n" << "Size must be equal to number of columns.\n"; throw std::logic_error(buffer.str()); } #endif const size_t new_rows_number = rows_number + 1; Matrix<T> new_matrix(new_rows_number, columns_number); for(size_t i = 0; i < position; i++) { for(size_t j = 0; j < columns_number; j++) { new_matrix(i,j) = (*this)(i,j); } } for(size_t j = 0; j < columns_number; j++) { new_matrix(position,j) = new_row[j]; } for(size_t i = position+1; i < new_rows_number; i++) { for(size_t j = 0; j < 
columns_number; j++) { new_matrix(i,j) = (*this)(i-1,j); } } set(new_matrix); } // void insert_column(const size_t&, const Vector<T>&) const method /// Inserts a new column in a given position. /// Note that this method resizes the matrix, which can be computationally expensive. /// @param position Index of new column. /// @param new_column Vector with the column contents. template <class T> void Matrix<T>::insert_column(const size_t& position, const Vector<T>& new_column) { #ifdef __OPENNN_DEBUG__ if(position > columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "insert_column(const size_t&, const Vector<T>&) const.\n" << "Position must be less or equal than number of columns.\n"; throw std::logic_error(buffer.str()); } const size_t size = (size_t)new_column.size(); if(size != rows_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "insert_column(const size_t, const Vector<T>&) const.\n" << "Size must be equal to number of rows.\n"; throw std::logic_error(buffer.str()); } #endif const size_t new_columns_number = columns_number + 1; Matrix<T> new_matrix(rows_number, new_columns_number); for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < position; j++) { new_matrix(i,j) = (*this)(i,j); } new_matrix(i,position) = new_column[i]; for(size_t j = position+1; j < new_columns_number; j++) { new_matrix(i,j) = (*this)(i,j-1); } } set(new_matrix); } // void subtract_row(const size_t&) const method /// This method removes the row with given index. /// Note that resizing is here necessary and this method can be very inefficient. /// @param row_index Index of row to be removed. 
template <class T>
void Matrix<T>::subtract_row(const size_t& row_index)
{
#ifdef __OPENNN_DEBUG__

   if(row_index >= rows_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "subtract_row(const size_t&) const.\n"
             << "Index of row must be less than number of rows.\n";

      throw std::logic_error(buffer.str());
   }
   else if(rows_number < 2)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "subtract_row(const size_t&) const.\n"
             << "Number of rows must be equal or greater than two.\n";

      throw std::logic_error(buffer.str());
   }

#endif

   // Build the reduced matrix, skipping the removed row.

   Matrix<T> trimmed(rows_number-1, columns_number);

   // Rows before the removed one keep their position.

   for(size_t row = 0; row < row_index; row++)
   {
      for(size_t col = 0; col < columns_number; col++)
      {
         trimmed(row, col) = (*this)(row, col);
      }
   }

   // Rows after the removed one shift up by one.

   for(size_t row = row_index+1; row < rows_number; row++)
   {
      for(size_t col = 0; col < columns_number; col++)
      {
         trimmed(row-1, col) = (*this)(row, col);
      }
   }

   *this = trimmed;
}


// void subtract_column(const size_t&) method

/// This method removes the column with given index.
/// Note that resizing is here necessary and this method can be very inefficient.
/// @param column_index Index of column to be removed.
template <class T>
void Matrix<T>::subtract_column(const size_t& column_index)
{
#ifdef __OPENNN_DEBUG__

   if(column_index >= columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "subtract_column(const size_t&) const.\n"
             << "Index of column must be less than number of columns.\n";

      throw std::logic_error(buffer.str());
   }
   else if(columns_number < 2)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "subtract_column(const size_t&) const.\n"
             << "Number of columns must be equal or greater than two.\n";

      throw std::logic_error(buffer.str());
   }

#endif

   // Build the reduced matrix, skipping the removed column.

   Matrix<T> trimmed(rows_number, columns_number-1);

   // Columns before the removed one keep their position.

   for(size_t row = 0; row < rows_number; row++)
   {
      for(size_t col = 0; col < column_index; col++)
      {
         trimmed(row, col) = (*this)(row, col);
      }
   }

   // Columns after the removed one shift left by one.

   for(size_t row = 0; row < rows_number; row++)
   {
      for(size_t col = column_index+1; col < columns_number; col++)
      {
         trimmed(row, col-1) = (*this)(row, col);
      }
   }

   *this = trimmed;
}


// Matrix<T> assemble_rows(const Matrix<T>&) const method

/// Assemble two matrices.
/// @param other_matrix matrix to be get_assembled to this matrix.
template <class T>
Matrix<T> Matrix<T>::assemble_rows(const Matrix<T>& other_matrix) const
{
#ifdef __OPENNN_DEBUG__

   const size_t other_columns_number = other_matrix.get_columns_number();

   if(other_columns_number != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "Matrix<T> assemble_rows(const Matrix<T>&) const method.\n"
             << "Number of columns of other matrix (" << other_columns_number << ") must be equal to number of columns of this matrix (" << columns_number << ").\n";

      throw std::logic_error(buffer.str());
   }

#endif

   const size_t other_rows_number = other_matrix.get_rows_number();

   // Stack the two matrices vertically: this matrix on top, the other below.

   Matrix<T> assembly(rows_number + other_rows_number, columns_number);

   for(size_t row = 0; row < rows_number; row++)
   {
      for(size_t col = 0; col < columns_number; col++)
      {
         assembly(row, col) = (*this)(row, col);
      }
   }

   for(size_t row = 0; row < other_rows_number; row++)
   {
      for(size_t col = 0; col < columns_number; col++)
      {
         assembly(rows_number+row, col) = other_matrix(row, col);
      }
   }

   return(assembly);
}


// Matrix<T> sort_less_rows(const size_t&) method

/// Sorts the rows of the matrix in ascending order attending to the values of the column with given index.
/// It returns a new sorted matrix, it does not change the original one.
/// @param column_index Index of column to sort.
template <class T>
Matrix<T> Matrix<T>::sort_less_rows(const size_t& column_index) const
{
   // The matrix itself is left untouched; rows are permuted into a copy.

   Matrix<T> sorted(rows_number, columns_number);

   const Vector<T> column = arrange_column(column_index);

   // Indices that would sort the key column in ascending order.

   const Vector<size_t> indices = column.sort_less_indices();

   size_t index;

   for(size_t i = 0; i < rows_number; i++)
   {
      index = indices[i];

      // Copy the whole source row into its sorted position.

      for(size_t j = 0; j < columns_number; j++)
      {
         sorted(i,j) = (*this)(index, j);
      }
   }

   return(sorted);
}


// bool compare(size_t, size_t, const Vector<T>&)

/// Comparison helper: returns true when the element of data at position a
/// is less than the element at position b.

template <class T>
bool compare(size_t a, size_t b, const Vector<T>& data)
{
   return data[a]<data[b];
}


// Matrix<T> sort_greater_rows(const size_t&) method

/// Sorts the rows of the matrix in descending order attending to the values of the column with given index.
/// It returns a new sorted matrix, it does not change the original one.
/// @param column_index Index of column to sort.

template <class T>
Matrix<T> Matrix<T>::sort_greater_rows(const size_t& column_index) const
{
   Matrix<T> sorted(rows_number, columns_number);

   const Vector<T> column = arrange_column(column_index);

   // Indices that would sort the key column in descending order.

   const Vector<size_t> indices = column.sort_greater_indices();

   size_t index;

   for(size_t i = 0; i < rows_number; i++)
   {
      index = indices[i];

      // Copy the whole source row into its sorted position.

      for(size_t j = 0; j < columns_number; j++)
      {
         sorted(i,j) = (*this)(index, j);
      }
   }

   return(sorted);
}


// Matrix<T> assemble_columns(const Matrix<T>&) const method

/// Assemble two matrices.
/// @param other_matrix matrix to be assembled to this matrix.
template <class T> Matrix<T> Matrix<T>::assemble_columns(const Matrix<T>& other_matrix) const { #ifdef __OPENNN_DEBUG__ const size_t other_rows_number = other_matrix.get_rows_number(); if(other_rows_number != rows_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "Matrix<T> assemble_columns(const Matrix<T>&) const method.\n" << "Number of rows of other matrix (" << other_rows_number << ") must be equal to number of rows of this matrix (" << rows_number << ").\n"; throw std::logic_error(buffer.str()); } #endif const size_t other_columns_number = other_matrix.get_columns_number(); Matrix<T> assembly(rows_number, columns_number + other_columns_number); for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { assembly(i,j) = (*this)(i,j); } for(size_t j = 0; j < other_columns_number; j++) { assembly(i,columns_number+j) = other_matrix(i,j); } } return(assembly); } // void initialize(const T&) method /// Initializes all the elements of the matrix with a given value. /// @param value Type value. template <class T> void Matrix<T>::initialize(const T& value) { std::fill((*this).begin(), (*this).end(), value); } // void randomize_uniform(const double&, const double&) method /// Initializes all the elements in the matrix with random values comprised between a minimum and a maximum /// values. /// @param minimum Minimum possible value. /// @param maximum Maximum possible value. 
template <class T>
void Matrix<T>::randomize_uniform(const double& minimum, const double& maximum)
{
   // Control sentence (if debug)

#ifdef __OPENNN_DEBUG__

   if(minimum > maximum)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "void randomize_uniform(const double&, const double&) const method.\n"
             << "Minimum value must be less or equal than maximum value.\n";

      throw std::logic_error(buffer.str());
   }

#endif

   // Draw one independent uniform sample per element, cast to the element type.

   for(size_t i = 0; i < this->size(); i++)
   {
      (*this)[i] = (T)calculate_random_uniform(minimum, maximum);
   }
}


// void randomize_uniform(const Vector<double>&, const Vector<double>&) const method

/// Initializes all the elements in the matrix with random values comprised between a minimum and a maximum
/// values for each element.
/// @param minimums Minimum possible values.
/// @param maximums Maximum possible values.

template <class T>
void Matrix<T>::randomize_uniform(const Vector<double>& minimums, const Vector<double>& maximums)
{
   // Control sentence (if debug)

#ifdef __OPENNN_DEBUG__

   if(minimums.size() != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "void randomize_uniform(const Vector<double>&, const Vector<double>&) const method.\n"
             << "Size of minimums must be equal to number of columns.\n";

      throw std::logic_error(buffer.str());
   }

   if(maximums.size() != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "void randomize_uniform(const Vector<double>&, const Vector<double>&) const method.\n"
             << "Size of maximums must be equal to number of columns.\n";

      throw std::logic_error(buffer.str());
   }

   if(minimums > maximums)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "void randomize_uniform(const Vector<double>&, const Vector<double>&) const method.\n"
             << "Minimums must be less or equal than maximums.\n";

      throw std::logic_error(buffer.str());
   }

#endif

   // The bounds are per column: randomize a column at a time and write it back.

   Vector<double> column(rows_number);

   for(size_t i = 0; i < columns_number; i++)
   {
      column.randomize_uniform(minimums[i], maximums[i]);

      set_column(i, column);
   }
}


// void randomize_uniform(const Matrix<double>&, const Matrix<double>&) const method

/// Initializes all the elements in the matrix with random values comprised between a minimum and a maximum
/// values for each element.
/// @param minimum Minimum possible values.
/// @param maximum Maximum possible values.

template <class T>
void Matrix<T>::randomize_uniform(const Matrix<double>& minimum, const Matrix<double>& maximum)
{
   // Control sentence (if debug)

#ifdef __OPENNN_DEBUG__

   if(minimum > maximum)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "void randomize_uniform(const Matrix<double>&, const Matrix<double>&) const method.\n"
             << "Minimum values must be less or equal than their respective maximum values.\n";

      throw std::logic_error(buffer.str());
   }

#endif

   // Element-wise bounds: one uniform sample per element.
   // NOTE(review): unlike the scalar overload above, the result is not cast
   // to (T) here — presumably T is always double for this overload; confirm.

   for(size_t i = 0; i < this->size(); i++)
   {
      (*this)[i] = calculate_random_uniform(minimum[i], maximum[i]);
   }
}


// void randomize_normal(const double&, const double&) method

/// Assigns random values to each element in the matrix, taken from a normal distribution with
/// a given mean and a given standard deviation.
/// @param mean Mean value of uniform distribution.
/// @param standard_deviation Standard deviation value of uniform distribution.
template <class T>
void Matrix<T>::randomize_normal(const double& mean, const double& standard_deviation)
{
   // Control sentence (if debug)

#ifdef __OPENNN_DEBUG__

   if(standard_deviation < 0.0)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "void randomize_normal(const double&, const double&) method.\n"
             << "Standard deviation must be equal or greater than zero.\n";

      throw std::logic_error(buffer.str());
   }

#endif

   // Draw one independent normal sample per element.

   for(size_t i = 0; i < this->size(); i++)
   {
      (*this)[i] = calculate_random_normal(mean, standard_deviation);
   }
}


// void randomize_normal(const Vector<double>&, const Vector<double>&) const method

/// Assigns random values to each element in the matrix, taken from a normal distribution with
/// a given mean and a given standard deviation.
/// @param means Means values of uniform distribution.
/// @param standard_deviations Standard deviations values of uniform distribution.

template <class T>
void Matrix<T>::randomize_normal(const Vector<double>& means, const Vector<double>& standard_deviations)
{
   // Control sentence (if debug)

#ifdef __OPENNN_DEBUG__

   if(means.size() != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "void randomize_normal(const Vector<double>&, const Vector<double>&) const method.\n"
             << "Size of means must be equal to number of columns.\n";

      throw std::logic_error(buffer.str());
   }

   if(standard_deviations.size() != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "void randomize_normal(const Vector<double>&, const Vector<double>&) const method.\n"
             << "Size of standard deviations must be equal to number of columns.\n";

      throw std::logic_error(buffer.str());
   }

   // NOTE(review): this check rejects negative means, yet the message says
   // "less or equal than zero" and a normal distribution allows any mean;
   // the check looks wrong — kept for behavioral compatibility, confirm intent.

   if(means < 0.0)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "void randomize_normal(const Vector<double>&, const Vector<double>&) const method.\n"
             << "Means must be less or equal than zero.\n";

      throw std::logic_error(buffer.str());
   }

#endif

   // The parameters are per column: randomize a column at a time and write it back.

   Vector<double> column(rows_number);

   for(size_t i = 0; i < columns_number; i++)
   {
      column.randomize_normal(means[i], standard_deviations[i]);

      set_column(i, column);
   }
}


// void randomize_normal(const Matrix<double>&, const Matrix<double>&) const method

/// Assigns random values to each element in the vector, taken from normal distributions with
/// given means and standard deviations for each element.
/// @param mean Mean values of uniform distributions.
/// @param standard_deviation Standard deviation values of uniform distributions.

template <class T>
void Matrix<T>::randomize_normal(const Matrix<double>& mean, const Matrix<double>& standard_deviation)
{
   // Control sentence (if debug)

#ifdef __OPENNN_DEBUG__

   if(standard_deviation < 0.0)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "void randomize_normal(const Matrix<double>&, const Matrix<double>&) const method.\n"
             << "Standard deviations must be equal or greater than zero.\n";

      throw std::logic_error(buffer.str());
   }

#endif

   // Bug fix: this overload called calculate_random_uniform, so values were
   // uniformly (not normally) distributed. Use the normal generator, matching
   // the other randomize_normal overloads.

   for(size_t i = 0; i < this->size(); i++)
   {
      (*this)[i] = calculate_random_normal(mean[i], standard_deviation[i]);
   }
}


// void initialize_identity(void) const method

/// Sets the diagonal elements in the matrix with ones and the rest elements with zeros. The matrix
/// must be square.

template <class T>
void Matrix<T>::initialize_identity(void)
{
   // Control sentence (if debug)

#ifdef __OPENNN_DEBUG__

   if(rows_number != columns_number)
   {
      std::ostringstream buffer;

      // Bug fix: the message was streamed into std::cout instead of the
      // buffer, so the thrown exception carried an empty message.

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "initialize_identity(void) const method.\n"
             << "Matrix must be square.\n";

      throw std::logic_error(buffer.str());
   }

#endif

   (*this).initialize(0);

   for(size_t i = 0; i < rows_number; i++)
   {
      (*this)(i,i) = 1;
   }
}


// void initialize_diagonal(const T&) method

/// Sets the diagonal elements in the matrix with a given value and the rest elements with zeros.
/// The matrix must be square.
template <class T> void Matrix<T>::initialize_diagonal(const T& value) { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(rows_number != columns_number) { std::ostringstream buffer; std::cout << "OpenNN Exception: Matrix Template.\n" << "initialize_diagonal(const T&) const method.\n" << "Matrix must be square.\n"; throw std::logic_error(buffer.str()); } #endif for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { if(i==j) { (*this)(i,j) = value; } else { (*this)(i,j) = 0; } } } } // T calculate_sum(void) const method /// Returns the sum of all the elements in the matrix. template <class T> T Matrix<T>::calculate_sum(void) const { T sum = 0; for(size_t i = 0; i < this->size(); i++) { sum += (*this)[i]; } return(sum); } // Vector<T> calculate_rows_sum(void) const method /// Returns the sum of all the rows in the matrix. template <class T> Vector<T> Matrix<T>::calculate_rows_sum(void) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(this->empty()) { std::ostringstream buffer; std::cout << "OpenNN Exception: Matrix Template.\n" << "Vector<T> calculate_rows_sum(void) const method.\n" << "Matrix is empty.\n"; throw std::logic_error(buffer.str()); } #endif Vector<T> rows_sum(columns_number, 0); for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { rows_sum[j] += (*this)(i,j); } } return(rows_sum); } // void sum_row(const size_t&, const Vector<T>&) method /// Sums the values of a given row with the values of a given vector. /// The size of the vector must be equal to the number of columns. 
template <class T>
void Matrix<T>::sum_row(const size_t& row_index, const Vector<T>& vector)
{
   // Control sentence (if debug)

#ifdef __OPENNN_DEBUG__

   if(vector.size() != columns_number)
   {
      std::ostringstream buffer;

      // Bug fix: the message was streamed into std::cout instead of the
      // buffer, so the thrown exception carried an empty message.

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "void sum_row(const size_t&, const Vector<T>&) method.\n"
             << "Size of vector must be equal to number of columns.\n";

      throw std::logic_error(buffer.str());
   }

#endif

   // In-place element-wise addition onto the selected row.

   for(size_t j = 0; j < columns_number; j++)
   {
      (*this)(row_index,j) += vector[j];
   }
}


// double calculate_trace(void) const method

/// Returns the trace of the matrix, which is defined to be the sum of the main diagonal elements.
/// The matrix must be square.

template <class T>
double Matrix<T>::calculate_trace(void) const
{
   // Control sentence (if debug)

#ifdef __OPENNN_DEBUG__

   if(!is_square())
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix template.\n"
             << "double calculate_trace(void) const method.\n"
             << "Matrix is not square.\n";

      throw std::logic_error(buffer.str());
   }

#endif

   double trace = 0.0;

   for(size_t i = 0; i < rows_number; i++)
   {
      trace += (*this)(i,i);
   }

   return(trace);
}


// Vector<double> calculate_mean(void) const method

/// Returns a vector with the mean values of all the matrix columns.
/// The size is equal to the number of columns in the matrix.
template <class T> Vector<double> Matrix<T>::calculate_mean(void) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(rows_number == 0) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "Vector<double> calculate_mean(void) const method.\n" << "Number of rows must be greater than one.\n"; throw std::logic_error(buffer.str()); } #endif // Mean Vector<double> mean(columns_number, 0.0); for(size_t j = 0; j < columns_number; j++) { for(size_t i = 0; i < rows_number; i++) { mean[j] += (*this)(i,j); } mean[j] /= (double)rows_number; } return(mean); } // double calculate_mean(const size_t&) const method /// Returns a vector with the mean values of all the matrix columns. /// The size is equal to the number of columns in the matrix. template <class T> double Matrix<T>::calculate_mean(const size_t& column_index) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(rows_number == 0) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "double calculate_mean(const size_t&) const method.\n" << "Number of rows must be greater than one.\n"; throw std::logic_error(buffer.str()); } if(column_index >= columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "double calculate_mean(const size_t&) const method.\n" << "Index of column must be less than number of columns.\n"; throw std::logic_error(buffer.str()); } #endif // Mean double mean = 0.0; for(size_t i = 0; i < rows_number; i++) { mean += (*this)(i,column_index); } mean /= (double)rows_number; return(mean); } // Vector<double> calculate_mean(const Vector<size_t>&) const method /// Returns a vector with the mean values of given columns. /// The size of the vector is equal to the size of the column indices vector. /// @param column_indices Indices of columns. 
template <class T> Vector<double> Matrix<T>::calculate_mean(const Vector<size_t>& column_indices) const { const size_t column_indices_size = column_indices.size(); size_t column_index; // Mean Vector<double> mean(column_indices_size, 0.0); for(size_t j = 0; j < column_indices_size; j++) { column_index = column_indices[j]; for(size_t i = 0; i < rows_number; i++) { mean[j] += (*this)(i,column_index); } mean[j] /= (double)rows_number; } return(mean); } // Vector<double> calculate_mean(const Vector<size_t>&, const Vector<size_t>&) const method /// Returns a vector with the mean values of given columns for given rows. /// The size of the vector is equal to the size of the column indices vector. /// @param row_indices Indices of rows. /// @param column_indices Indices of columns. template <class T> Vector<double> Matrix<T>::calculate_mean(const Vector<size_t>& row_indices, const Vector<size_t>& column_indices) const { const size_t row_indices_size = row_indices.size(); const size_t column_indices_size = column_indices.size(); // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ // Rows check if(row_indices_size > rows_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "Vector<double> calculate_mean(const Vector<size_t>&, const Vector<size_t>&) const method.\n" << "Size of row indices (" << row_indices_size << ") is greater than number of rows (" << rows_number << ").\n"; throw std::logic_error(buffer.str()); } for(size_t i = 0; i < row_indices_size; i++) { if(row_indices[i] >= rows_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "Vector<double> calculate_mean(const Vector<size_t>&, const Vector<size_t>&) const method.\n" << "Row index " << i << " must be less than rows number.\n"; throw std::logic_error(buffer.str()); } } if(row_indices_size == 0) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "Vector<double> calculate_mean(const Vector<size_t>&, const 
Vector<size_t>&) const method.\n" << "Size of row indices must be greater than zero.\n"; throw std::logic_error(buffer.str()); } // Columns check if(column_indices_size > columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "Vector<double> calculate_mean(const Vector<size_t>&, const Vector<size_t>&) const method.\n" << "Column indices size must be equal or less than columns number.\n"; throw std::logic_error(buffer.str()); } for(size_t i = 0; i < column_indices_size; i++) { if(column_indices[i] >= columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "Vector<double> calculate_mean(const Vector<size_t>&, const Vector<size_t>&) const method.\n" << "Column index " << i << " must be less than columns number.\n"; throw std::logic_error(buffer.str()); } } #endif size_t row_index; size_t column_index; // Mean Vector<double> mean(column_indices_size, 0.0); for(size_t j = 0; j < column_indices_size; j++) { column_index = column_indices[j]; for(size_t i = 0; i < row_indices_size; i++) { row_index = row_indices[i]; mean[j] += (*this)(row_index,column_index); } mean[j] /= (double)rows_number; } return(mean); } // Vector<double> calculate_mean_missing_values(const Vector< Vector<size_t> >&) const method /// Returns a vector with the mean values of given columns for given rows when the matrix has missing values. /// The size of the vector is equal to the size of the column indices vector. /// @param missing_indices Vector of vectors with the indices of the missing values. 
template <class T>
Vector<double> Matrix<T>::calculate_mean_missing_values(const Vector< Vector<size_t> >& missing_indices) const
{
   // Convenience overload: use every row and every column.
   // (Vector(first, step, last) builds the index ranges 0..rows_number-1 and 0..columns_number-1.)

   Vector<size_t> row_indices(0, 1, rows_number-1);
   Vector<size_t> column_indices(0, 1, columns_number-1);

   return(calculate_mean_missing_values(row_indices, column_indices, missing_indices));
}


// Vector<double> calculate_mean_missing_values(const Vector<size_t>&, const Vector<size_t>&, const Vector< Vector<size_t> >&) const method

/// Returns a vector with the mean values of given columns for given rows when the matrix has missing values.
/// The size of the vector is equal to the size of the column indices vector.
/// @param row_indices Indices of rows.
/// @param column_indices Indices of columns.
/// @param missing_indices Vector of vectors with the indices of the missing values.

template <class T>
Vector<double> Matrix<T>::calculate_mean_missing_values(const Vector<size_t>& row_indices, const Vector<size_t>& column_indices, const Vector< Vector<size_t> >& missing_indices) const
{
   const size_t row_indices_size = row_indices.size();
   const size_t column_indices_size = column_indices.size();

   // Control sentence (if debug)

#ifdef __OPENNN_DEBUG__

   // Rows check

   if(row_indices_size > rows_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix template.\n"
             << "Vector<double> calculate_mean_missing_values(const Vector<size_t>&, const Vector<size_t>&, const Vector< Vector<size_t> >&) const method.\n"
             << "Size of row indices (" << row_indices_size << ") is greater than number of rows (" << rows_number << ").\n";

      throw std::logic_error(buffer.str());
   }

   for(size_t i = 0; i < row_indices_size; i++)
   {
      if(row_indices[i] >= rows_number)
      {
         std::ostringstream buffer;

         buffer << "OpenNN Exception: Matrix template.\n"
                << "Vector<double> calculate_mean_missing_values(const Vector<size_t>&, const Vector<size_t>&, Vector< Vector<size_t> >&) const method.\n"
                << "Row index " << i << " must be less than rows number.\n";

         throw std::logic_error(buffer.str());
      }
   }

   if(row_indices_size == 0)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix template.\n"
             << "Vector<double> calculate_mean_missing_values(const Vector<size_t>&, const Vector<size_t>&, const Vector< Vector<size_t> >&) const method.\n"
             << "Size of row indices must be greater than zero.\n";

      throw std::logic_error(buffer.str());
   }

   // Columns check

   if(column_indices_size > columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix template.\n"
             << "Vector<double> calculate_mean_missing_values(const Vector<size_t>&, const Vector<size_t>&, const Vector< Vector<size_t> >&) const method.\n"
             << "Column indices size must be equal or less than columns number.\n";

      throw std::logic_error(buffer.str());
   }

   for(size_t i = 0; i < column_indices_size; i++)
   {
      if(column_indices[i] >= columns_number)
      {
         std::ostringstream buffer;

         buffer << "OpenNN Exception: Matrix template.\n"
                << "Vector<double> calculate_mean_missing_values(const Vector<size_t>&, const Vector<size_t>&, const Vector< Vector<size_t> >&) const method.\n"
                << "Column index " << i << " must be less than columns number.\n";

         throw std::logic_error(buffer.str());
      }
   }

#endif

   size_t row_index;
   size_t column_index;

   // Mean

   Vector<double> mean(column_indices_size, 0.0);
   Vector<size_t> count(column_indices_size, 0);

   for(size_t j = 0; j < column_indices_size; j++)
   {
      column_index = column_indices[j];

      for(size_t i = 0; i < row_indices_size; i++)
      {
         row_index = row_indices[i];

         // NOTE(review): missing_indices is indexed by the position j in
         // column_indices, not by column_index itself — presumably the caller
         // passes one entry per requested column, in the same order; confirm.

         if(!missing_indices[j].contains(row_index))
         {
            mean[j] += (*this)(row_index,column_index);

            count[j]++;
         }
      }

      // Average only over the rows actually present; a column with no
      // non-missing rows keeps its mean at 0.0.

      if(count[j] != 0)
      {
         mean[j] /= (double)count[j];
      }
   }

   return(mean);
}


// Vector<double> calculate_mean_standard_deviation(void) const method

/// Returns a vector of vectors with the mean and standard deviation values of all the matrix columns.
/// The size of the vector is two.
/// The size of each element is equal to the number of columns in the matrix.
template <class T> Vector< Vector<double> > Matrix<T>::calculate_mean_standard_deviation(void) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(rows_number == 0) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "Vector<double> calculate_mean_standard_deviation(void) const method.\n" << "Number of rows must be greater than one.\n"; throw std::logic_error(buffer.str()); } #endif // Mean Vector<double> mean(columns_number, 0.0); Vector<double> standard_deviation(columns_number, 0.0); for(size_t i = 0; i < columns_number; i++) { mean[i] = arrange_column(i).calculate_mean(); standard_deviation[i] = arrange_column(i).calculate_standard_deviation(); } // Mean and standard deviation of data Vector< Vector<double> > mean_standard_deviation(2); mean_standard_deviation[0] = mean; mean_standard_deviation[1] = standard_deviation; return(mean_standard_deviation); } // Vector<double> calculate_mean_standard_deviation(const Vector<size_t>&) const method /// Returns a vector of vectors with the mean and standard deviation values of given columns. /// The size of the vector is two. /// The size of each element is equal to the size of the column indices vector. /// @param column_indices Indices of columns. 
template <class T> Vector< Vector<double> > Matrix<T>::calculate_mean_standard_deviation(const Vector<size_t>& column_indices) const { const size_t column_indices_size = column_indices.size(); Vector<double> mean(column_indices_size); Vector<double> standard_deviation(column_indices_size); size_t column_index; Vector<double> column(rows_number); for(size_t i = 0; i < column_indices_size; i++) { column_index = column_indices[i]; column = arrange_column(column_index); mean[i] = column.calculate_mean(); standard_deviation[i] = column.calculate_standard_deviation(); } // Mean and standard deviation Vector< Vector<double> > mean_standard_deviation(2); mean_standard_deviation[0] = mean; mean_standard_deviation[1] = standard_deviation; return(mean_standard_deviation); } // Vector<double> calculate_mean_standard_deviation(const Vector<size_t>&, const Vector<size_t>&) const method /// Returns a vector of vectors with the mean and standard deviation values of given columns for given rows. /// The size of the vector is two. /// The size of each element is equal to the size of the column indices vector. /// @param row_indices Indices of rows. /// @param column_indices Indices of columns. 
template <class T> Vector< Vector<double> > Matrix<T>::calculate_mean_standard_deviation(const Vector<size_t>& row_indices, const Vector<size_t>& column_indices) const { const size_t row_indices_size = row_indices.size(); const size_t column_indices_size = column_indices.size(); // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ // Rows check if(row_indices_size > rows_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "Vector<double> calculate_mean_standard_deviation(const Vector<size_t>&, const Vector<size_t>&) const method.\n" << "Row indices size must be equal or less than rows number.\n"; throw std::logic_error(buffer.str()); } for(size_t i = 0; i < row_indices_size; i++) { if(row_indices[i] >= rows_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "Vector<double> calculate_mean_standard_deviation(const Vector<size_t>&, const Vector<size_t>&) const method.\n" << "Row index " << i << " must be less than rows number.\n"; throw std::logic_error(buffer.str()); } } if(row_indices_size == 0) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "Vector<double> calculate_mean_standard_deviation(const Vector<size_t>&, const Vector<size_t>&) const method.\n" << "Size of row indices must be greater than zero.\n"; throw std::logic_error(buffer.str()); } // Columns check if(column_indices_size > columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "Vector<double> calculate_mean_standard_deviation(const Vector<size_t>&, const Vector<size_t>&) const method.\n" << "Column indices size must be equal or less than columns number.\n"; throw std::logic_error(buffer.str()); } for(size_t i = 0; i < column_indices_size; i++) { if(column_indices[i] >= columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "Vector<double> calculate_mean_standard_deviation(const Vector<size_t>&, const 
Vector<size_t>&) const method.\n" << "Column index " << i << " must be less than columns number.\n"; throw std::logic_error(buffer.str()); } } #endif size_t row_index; size_t column_index; // Mean Vector<double> mean(column_indices_size, 0.0); for(size_t j = 0; j < column_indices_size; j++) { column_index = column_indices[j]; mean[j] = 0.0; for(size_t i = 0; i < row_indices_size; i++) { row_index = row_indices[i]; mean[j] += (*this)(row_index,column_index); } mean[j] /= (double)rows_number; } // Standard deviation Vector<double> standard_deviation(column_indices_size, 0.0); for(size_t j = 0; j < column_indices_size; j++) { column_index = column_indices[j]; standard_deviation[j] = 0.0; for(size_t i = 0; i < row_indices_size; i++) { row_index = row_indices[i]; standard_deviation[j] += ((*this)(row_index,column_index) - mean[j])*((*this)(row_index,column_index) - mean[j]); } standard_deviation[j] = sqrt(standard_deviation[j]/(rows_number-1.0)); } // Mean and standard deviation Vector< Vector<double> > mean_standard_deviation(2); mean_standard_deviation[0] = mean; mean_standard_deviation[1] = standard_deviation; return(mean_standard_deviation); } // Type calculate_minimum(void) const method /// Returns the minimum value from all elements in the matrix. template <class T> T Matrix<T>::calculate_minimum(void) const { T minimum = (T)1.0e99; for(size_t i = 0; i < this->size(); i++) { if((*this)[i] < minimum) { minimum = (*this)[i]; } } return(minimum); } // Type calculate_maximum(void) const method /// Returns the maximum value from all elements in the matrix. template <class T> T Matrix<T>::calculate_maximum(void) const { T maximum = (T)-1.0e99; for(size_t i = 0; i < this->size(); i++) { if((*this)[i] > maximum) { maximum = (*this)[i]; } } return(maximum); } // Vector< Vector<T> > calculate_minimum_maximum(void) const method /// Returns a vector of vectors with the minimum and maximum values of all the matrix columns. /// The size of the vector is two. 
/// The size of each element is equal to the number of columns in the matrix.

template <class T>
Vector< Vector<T> > Matrix<T>::calculate_minimum_maximum(void) const
{
    Vector< Vector<T> > minimum_maximum(2);

    // Sentinel start values; assume matrix entries lie inside (-1.0e99, 1.0e99).
    Vector<T> minimum(columns_number, (T)1.0e99);
    Vector<T> maximum(columns_number, (T)-1.0e99);

    for(size_t j = 0; j < columns_number; j++)
    {
        for(size_t i = 0; i < rows_number; i++)
        {
            if((*this)(i,j) < minimum[j])
            {
                minimum[j] = (*this)(i,j);
            }

            if((*this)(i,j) > maximum[j])
            {
                maximum[j] = (*this)(i,j);
            }
        }
    }

    // Minimum and maximum

    minimum_maximum[0] = minimum;
    minimum_maximum[1] = maximum;

    return(minimum_maximum);
}


// Vector<double> calculate_minimum_maximum(const Vector<size_t>&) const method

/// Returns a vector of vectors with the minimum and maximum values of given columns.
/// The size of the vector is two.
/// The size of each element is equal to the size of the column indices vector.
/// @param column_indices Indices of columns.

template <class T>
Vector< Vector<T> > Matrix<T>::calculate_minimum_maximum(const Vector<size_t>& column_indices) const
{
    const size_t column_indices_size = column_indices.size();

    #ifdef __OPENNN_DEBUG__

    for(size_t i = 0; i < column_indices_size; i++)
    {
        if(column_indices[i] >= columns_number)
        {
            std::ostringstream buffer;

            buffer << "OpenNN Exception: Matrix template."
                   << "Vector<T> calculate_minimum_maximum(const Vector<size_t>&) const method.\n"
                   << "Index of column must be less than number of columns.\n";

            throw std::logic_error(buffer.str());
        }
    }

    #endif

    size_t column_index;

    // Sentinel start values, one pair per requested column.
    Vector<T> minimum(column_indices_size, (T)1.0e99);
    Vector<T> maximum(column_indices_size, (T)-1.0e99);

    for(size_t j = 0; j < column_indices_size; j++)
    {
        column_index = column_indices[j];

        for(size_t i = 0; i < rows_number; i++)
        {
            if((*this)(i,column_index) < minimum[j])
            {
                minimum[j] = (*this)(i,column_index);
            }

            if((*this)(i,column_index) > maximum[j])
            {
                maximum[j] = (*this)(i,column_index);
            }
        }
    }

    // Minimum and maximum

    Vector< Vector<T> > minimum_maximum(2);

    minimum_maximum[0] = minimum;
    minimum_maximum[1] = maximum;

    return(minimum_maximum);
}


// Vector<double> calculate_minimum_maximum(const Vector<size_t>&, const Vector<size_t>&) const method

/// Returns a vector of vectors with the minimum and maximum values of given columns for given rows.
/// The size of the vector is two.
/// The size of each element is equal to the size of the column indices vector.
/// @param row_indices Indices of rows.
/// @param column_indices Indices of columns.
template <class T> Vector< Vector<T> > Matrix<T>::calculate_minimum_maximum(const Vector<size_t>& row_indices, const Vector<size_t>& column_indices) const { const size_t row_indices_size = row_indices.size(); const size_t column_indices_size = column_indices.size(); Vector<T> minimum(column_indices_size, (T) 1.0e99); Vector<T> maximum(column_indices_size, (T)-1.0e99); size_t row_index; size_t column_index; for(size_t j = 0; j < column_indices_size; j++) { column_index = column_indices[j]; for(size_t i = 0; i < row_indices_size; i++) { row_index = row_indices[i]; if((*this)(row_index,column_index) < minimum[j]) { minimum[j] = (*this)(row_index,column_index); } if((*this)(row_index,column_index) > maximum[j]) { maximum[j] = (*this)(row_index,column_index); } } } // Minimum and maximum Vector< Vector<T> > minimum_maximum(2); minimum_maximum[0] = minimum; minimum_maximum[1] = maximum; return(minimum_maximum); } // Vector< Statistics<T> > calculate_statistics(void) const method /// Returns the basic statistics of the columns. /// The format is a vector of statistics structures. /// The size of that vector is equal to the number of columns in this matrix. 
template <class T> Vector< Statistics<T> > Matrix<T>::calculate_statistics(void) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(rows_number == 0) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "Vector< Statistics<double> > calculate_statistics(void) const method.\n" << "Number of rows must be greater than one.\n"; throw std::logic_error(buffer.str()); } #endif Vector< Statistics<T> > statistics(columns_number); Vector<T> column(rows_number); for(size_t i = 0; i < columns_number; i++) { column = arrange_column(i); statistics[i] = column.calculate_statistics(); } return(statistics); } // Vector< Statistics<T> > calculate_statistics_missing_values(const Vector< Vector<size_t> >&) const method /// Returns the basic statistics of the columns when the matrix has missing values. /// The format is a vector of statistics structures. /// The size of that vector is equal to the number of columns in this matrix. /// @param missing_indices Vector of vectors with the indices of the missing values. 
template <class T> Vector< Statistics<T> > Matrix<T>::calculate_statistics_missing_values(const Vector< Vector<size_t> >& missing_indices) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(rows_number == 0) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "Vector< Statistics<double> > calculate_statistics_missing_values(const Vector< Vector<size_t> >&) const method.\n" << "Number of rows must be greater than one.\n"; throw std::logic_error(buffer.str()); } if(missing_indices.size() != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "Vector< Statistics<double> > calculate_statistics_missing_values(const Vector< Vector<size_t> >&) const method.\n" << "Size of missing indices (" << missing_indices.size() << ") must be equal to to number of columns (" << columns_number << ").\n"; throw std::logic_error(buffer.str()); } #endif Vector< Statistics<T> > statistics(columns_number); Vector<T> column(rows_number); for(size_t i = 0; i < columns_number; i++) { column = arrange_column(i); statistics[i] = column.calculate_statistics_missing_values(missing_indices[i]); } return(statistics); } // Vector< Statistics<T> > calculate_statistics(const Vector<size_t>&, const Vector<size_t>&) const method /// Returns the basic statistics of given columns for given rows. /// The format is a vector of statistics structures. /// The size of that vector is equal to the number of given columns. /// @param row_indices Indices of the rows for which the statistics are to be computed. /// @param column_indices Indices of the columns for which the statistics are to be computed. 
template <class T> Vector< Statistics<T> > Matrix<T>::calculate_statistics(const Vector<size_t>& row_indices, const Vector<size_t>& column_indices) const { const size_t row_indices_size = row_indices.size(); const size_t column_indices_size = column_indices.size(); Vector< Statistics<T> > statistics(column_indices_size); size_t index; Vector<T> column(row_indices_size); for(size_t i = 0; i < column_indices_size; i++) { index = column_indices[i]; column = arrange_column(index, row_indices); statistics[i] = column.calculate_statistics(); } return statistics; } // Vector< Statistics<T> > calculate_rows_statistics(const Vector<size_t>&) const method /// Returns the basic statistics of all the columns for given rows. /// The format is a vector of statistics structures. /// The size of that vector is equal to the number of columns in this matrix. /// @param row_indices Indices of the rows for which the statistics are to be computed. template <class T> Vector< Statistics<T> > Matrix<T>::calculate_rows_statistics(const Vector<size_t>& row_indices) const { const size_t row_indices_size = row_indices.size(); Vector< Statistics<T> > statistics(columns_number); Vector<T> column(row_indices_size); for(size_t i = 0; i < columns_number; i++) { column = arrange_column(i, row_indices); statistics[i] = column.calculate_statistics(); } return statistics; } // Vector< Statistics<T> > calculate_rows_statistics_missing_values(const Vector<size_t>&, const Vector< Vector<size_t> >&) const method /// Returns the basic statistics of all the columns for given rows when the matrix has missing values. /// The format is a vector of statistics structures. /// The size of that vector is equal to the number of columns in this matrix. /// @param row_indices Indices of the rows for which the statistics are to be computed. /// @param missing_indices Vector of vectors with the indices of the missing values. 
template <class T> Vector< Statistics<T> > Matrix<T>::calculate_rows_statistics_missing_values(const Vector<size_t>& row_indices, const Vector< Vector<size_t> >& missing_indices) const { const size_t row_indices_size = row_indices.size(); Vector< Statistics<T> > statistics(columns_number); Vector<T> column(row_indices_size); for(size_t i = 0; i < columns_number; i++) { column = arrange_column(i, row_indices); statistics[i] = column.calculate_statistics_missing_values(missing_indices[i]); } return statistics; } // Vector< Statistics<T> > calculate_columns_statistics(const Vector<size_t>&) const method /// Returns the basic statistics of given columns. /// The format is a vector of statistics structures. /// The size of that vector is equal to the number of given columns. /// @param column_indices Indices of the columns for which the statistics are to be computed. template <class T> Vector< Statistics<T> > Matrix<T>::calculate_columns_statistics(const Vector<size_t>& column_indices) const { const size_t column_indices_size = column_indices.size(); Vector< Statistics<T> > statistics(column_indices_size); size_t index; Vector<T> column(rows_number); for(size_t i = 0; i < column_indices_size; i++) { index = column_indices[i]; column = arrange_column(index); statistics[i] = column.calculate_statistics(); } return statistics; } // Vector< Statistics<T> > calculate_columns_statistics_missing_values(const Vector<size_t>&, const Vector<size_t>&) const method /// Returns the basic statistics of given columns when the matrix has missing values. /// The format is a vector of statistics structures. /// The size of that vector is equal to the number of given columns. /// @param column_indices Indices of the columns for which the statistics are to be computed. /// @param missing_indices Vector of vectors with the indices of the missing values. 
template <class T> Vector< Statistics<T> > Matrix<T>::calculate_columns_statistics_missing_values(const Vector<size_t>& column_indices, const Vector< Vector<size_t> > missing_indices) const { const size_t column_indices_size = column_indices.size(); Vector< Statistics<T> > statistics(column_indices_size); size_t index; Vector<T> column(rows_number); #pragma omp parallel for private(index, column) for(int i = 0; i < column_indices_size; i++) { index = column_indices[i]; column = arrange_column(index); statistics[i] = column.calculate_statistics_missing_values(missing_indices[index]); } return statistics; } // Vector < Vector <double> > calculate_shape_parameters(void) const method /// Returns the asymmetry and the kurtosis of the columns. /// The format is a vector of subvectors. /// The size of that vector is equal to the number of columns in this matrix. template <class T> Vector< Vector<double> > Matrix<T>::calculate_shape_parameters(void) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(rows_number == 0) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "Vector< Vector<double> > calculate_shape_parameters(void) const method.\n" << "Number of rows must be greater than one.\n"; throw std::logic_error(buffer.str()); } #endif Vector< Vector<double> > shape_parameters(columns_number); Vector<T> column(rows_number); for(size_t i = 0; i < columns_number; i++) { column = arrange_column(i); shape_parameters[i] = column.calculate_shape_parameters(); } return(shape_parameters); } // Vector< Vector<double> > calculate_shape_parameters_missing_values(const Vector<size_t>&) const /// Returns the asymmetry and the kurtosis of the columns when the matrix has missing values. /// The format is a vector of subvectors. /// The size of that vector is equal to the number of columns in this matrix. /// @param missing_indices Vector of vectors with the indices of the missing values. 
template <class T> Vector< Vector<double> > Matrix<T>::calculate_shape_parameters_missing_values(const Vector< Vector<size_t> >& missing_indices) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(rows_number == 0) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "Vector< Vector<double> > calculate_shape_parameters_missing_values(const Vector< Vector<size_t> >&) const method.\n" << "Number of rows must be greater than one.\n"; throw std::logic_error(buffer.str()); } if(missing_indices.size() != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "Vector< Vector<double> > calculate_shape_parameters_missing_values(const Vector< Vector<size_t> >&) const method.\n" << "Size of missing indices (" << missing_indices.size() << ") must be equal to to number of columns (" << columns_number << ").\n"; throw std::logic_error(buffer.str()); } #endif Vector< Vector<double> > shape_parameters(columns_number); Vector<T> column(rows_number); for(size_t i = 0; i < columns_number; i++) { column = arrange_column(i); shape_parameters[i] = column.calculate_shape_parameters_missing_values(missing_indices[i]); } return(shape_parameters); } // Vector< Vector<double> > calculate_shape_parameters(const Vector<size_t>&, const Vector<size_t>&) const method /// Returns the asymmetry and the kurtosis of given columns for given rows. /// The format is a vector of subvectors. /// The size of that vector is equal to the number of given columns. /// @param row_indices Indices of the rows for which the statistics are to be computed. /// @param column_indices Indices of the columns for which the statistics are to be computed. 
template <class T> Vector< Vector<double> > Matrix<T>::calculate_shape_parameters(const Vector<size_t>& row_indices, const Vector<size_t>& column_indices) const { const size_t row_indices_size = row_indices.size(); const size_t column_indices_size = column_indices.size(); Vector< Vector<double> > shape_parameters(column_indices_size); size_t index; Vector<T> column(row_indices_size); for(size_t i = 0; i < column_indices_size; i++) { index = column_indices[i]; column = arrange_column(index, row_indices); shape_parameters[i] = column.calculate_shape_parameters(); } return shape_parameters; } // Vector< Vector<double> > calculate_rows_statistics(const Vector<size_t>&) const method /// Returns the asymmetry and the kurtosis of all the columns for given rows. /// The format is a vector of subvectors. /// The size of that vector is equal to the number of columns in this matrix. /// @param row_indices Indices of the rows for which the statistics are to be computed. template <class T> Vector< Vector<double> > Matrix<T>::calculate_rows_shape_parameters(const Vector<size_t>& row_indices) const { const size_t row_indices_size = row_indices.size(); Vector< Vector<double> > shape_parameters(columns_number); Vector<T> column(row_indices_size); for(size_t i = 0; i < columns_number; i++) { column = arrange_column(i, row_indices); shape_parameters[i] = column.calculate_shape_parameters(); } return shape_parameters; } // Vector< Vector<double> > calculate_rows_shape_parameters_missing_values(const Vector<size_t>&, const Vector< Vector<size_t> >&) const method /// Returns the asymmetry and the kurtosis of all the columns for given rows when the matrix has missing values. /// The format is a vector of subvectors. /// The size of that vector is equal to the number of columns in this matrix. /// @param row_indices Indices of the rows for which the statistics are to be computed. /// @param missing_indices Vector of vectors with the indices of the missing values. 
template <class T> Vector< Vector<double> > Matrix<T>::calculate_rows_shape_parameters_missing_values(const Vector<size_t>& row_indices, const Vector< Vector<size_t> >& missing_indices) const { const size_t row_indices_size = row_indices.size(); Vector< Vector<double> > shape_parameters(columns_number); Vector<T> column(row_indices_size); for(size_t i = 0; i < columns_number; i++) { column = arrange_column(i, row_indices); shape_parameters[i] = column.calculate_shape_parameters_missing_values(missing_indices[i]); } return shape_parameters; } // Vector< Vector<double> > calculate_columns_shape_parameters(const Vector<size_t>&) const method /// Returns the asymmetry and the kurtosis of given columns. /// The format is a vector of subvectors. /// The size of that vector is equal to the number of given columns. /// @param column_indices Indices of the columns for which the statistics are to be computed. template <class T> Vector< Vector<double> > Matrix<T>::calculate_columns_shape_parameters(const Vector<size_t>& column_indices) const { const size_t column_indices_size = column_indices.size(); Vector< Vector<double> > shape_parameters(column_indices_size); size_t index; Vector<T> column(rows_number); for(size_t i = 0; i < column_indices_size; i++) { index = column_indices[i]; column = arrange_column(index); shape_parameters[i] = column.calculate_shape_parameters(); } return shape_parameters; } // Vector< Vector<double> > calculate_columns_shape_parameters_missing_values(const Vector<size_t>&, const Vector< Vector<size_t> >&) const method /// Returns the asymmetry and the kurtosis of given columns when the matrix has missing values. /// The format is a vector of subvectors. /// The size of that vector is equal to the number of given columns. /// @param column_indices Indices of the columns for which the statistics are to be computed. /// @param missing_indices Vector of vectors with the indices of the missing values. 
template <class T> Vector< Vector<double> > Matrix<T>::calculate_columns_shape_parameters_missing_values(const Vector<size_t>& column_indices, const Vector< Vector<size_t> >& missing_indices) const { const size_t column_indices_size = column_indices.size(); Vector< Vector<double> > shape_parameters(column_indices_size); size_t index; Vector<T> column(rows_number); for(size_t i = 0; i < column_indices_size; i++) { index = column_indices[i]; column = arrange_column(index); shape_parameters[i] = column.calculate_shape_parameters_missing_values(missing_indices[index]); } return shape_parameters; } // Matrix<double> calculate_covariance_matrix(void) const method /// Retruns the covariance matrix of this matrix. /// The number of columns and rows of the matrix is equal to the number of columns of this matrix. template <class T> Matrix<double> Matrix<T>::calculate_covariance_matrix(void) const { const size_t size = (*this).get_columns_number(); #ifdef __OPENNN_DEBUG__ if(size == 0) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template." << "void calculate_covariance_matrix(void) const method.\n" << "Number of columns must be greater than zero.\n"; throw std::logic_error(buffer.str()); } #endif Matrix<double> covariance_matrix(size, size, 0.0); Vector<double> first_column; Vector<double> second_column; for(size_t i = 0; i < size; i++) { first_column = (*this).arrange_column(i); for(size_t j = i; j < size; j++) { second_column = (*this).arrange_column(j); covariance_matrix(i,j) = first_column.calculate_covariance(second_column); covariance_matrix(j,i) = covariance_matrix(i,j); } } return covariance_matrix; } // Vector<Histogram<T> > calculate_histograms(const size_t&) const method /// Calculates a histogram for each column, each having a given number of bins. /// It returns a vector of vectors of vectors. /// The size of the main vector is the number of columns. /// Each subvector contains the frequencies and centers of that colums. 
/// @param bins_number Number of bins for each histogram. template <class T> Vector< Histogram<T> > Matrix<T>::calculate_histograms(const size_t& bins_number) const { Vector< Histogram<T> > histograms(columns_number); Vector<T> column(rows_number); for(size_t i = 0; i < columns_number; i++) { column = arrange_column(i); if (column.is_binary()) { histograms[i] = column.calculate_histogram_binary(); } else { histograms[i] = column.calculate_histogram(bins_number); } } return(histograms); } // Vector<Histogram<T> > calculate_histograms_missing_values(const Vector<size_t>&, const size_t&) const method /// Calculates a histogram for each column, each having a given number of bins, when the data has missing values. /// It returns a vector of vectors of vectors. /// The size of the main vector is the number of columns. /// Each subvector contains the frequencies and centers of that colums. /// @param bins_number Number of bins for each histogram. /// @param missing_indices Vector of vectors with the indices of the missing values. template <class T> Vector< Histogram<T> > Matrix<T>::calculate_histograms_missing_values(const Vector< Vector<size_t> >& missing_indices, const size_t& bins_number) const { Vector< Histogram<T> > histograms(columns_number); Vector<T> column(rows_number); for(size_t i = 0; i < columns_number; i++) { column = arrange_column(i); histograms[i] = column.calculate_histogram_missing_values(missing_indices[i], bins_number); } return(histograms); } // Matrix<size_t> calculate_less_than_indices(const T&) const method /// Returns the matrix indices at which the elements are less than some given value. /// @param value Value. 
template <class T>
Matrix<size_t> Matrix<T>::calculate_less_than_indices(const T& value) const
{
    Matrix<size_t> indices;

    // Each matching element contributes one (row, column) pair.
    Vector<size_t> row(2);

    for(size_t i = 0; i < rows_number; i++)
    {
        for(size_t j = 0; j < columns_number; j++)
        {
            // First match: size the (so far empty) result matrix to 1x2 before writing.
            if((*this)(i,j) < value && indices.empty())
            {
                indices.set(1, 2);

                row[0] = i;
                row[1] = j;

                indices.set_row(0, row);
            }
            else if((*this)(i,j) < value)
            {
                row[0] = i;
                row[1] = j;

                indices.append_row(row);
            }
        }
    }

    return(indices);
}


// Matrix<size_t> calculate_greater_than_indices(const T&) const method

/// Returns the matrix indices at which the elements are greater than some given value.
/// @param value Value.

template <class T>
Matrix<size_t> Matrix<T>::calculate_greater_than_indices(const T& value) const
{
    Matrix<size_t> indices;

    Vector<size_t> row(2);

    for(size_t i = 0; i < rows_number; i++)
    {
        for(size_t j = 0; j < columns_number; j++)
        {
            // First match: size the (so far empty) result matrix to 1x2 before writing.
            if((*this)(i,j) > value && indices.empty())
            {
                indices.set(1, 2);

                row[0] = i;
                row[1] = j;

                indices.set_row(0, row);
            }
            else if((*this)(i,j) > value)
            {
                row[0] = i;
                row[1] = j;

                indices.append_row(row);
            }
        }
    }

    return(indices);
}


// void scale_mean_standard_deviation(const Vector< Statistics<T> >&) method

/// Scales the matrix elements with the mean and standard deviation method.
/// It updates the data in the matrix.
/// @param statistics Vector of statistics structures containing the mean and standard deviation values for the scaling.
/// The size of that vector must be equal to the number of columns in this matrix.

template <class T>
void Matrix<T>::scale_mean_standard_deviation(const Vector< Statistics<T> >& statistics)
{
    #ifdef __OPENNN_DEBUG__

    const size_t size = statistics.size();

    if(size != columns_number)
    {
        std::ostringstream buffer;

        buffer << "OpenNN Exception: Matrix template."
               << "void scale_mean_standard_deviation(const Vector< Statistics<T> >&) const method.\n"
               << "Size of statistics vector must be equal to number of columns.\n";

        throw std::logic_error(buffer.str());
    }

    #endif

    // Rescale data

    for(size_t j = 0; j < columns_number; j++)
    {
        // Near-zero deviation means a constant column: leave it unscaled to
        // avoid dividing by (almost) zero.
        if(statistics[j].standard_deviation < 1e-99)
        {
            // Do nothing
        }
        else
        {
            for(size_t i = 0; i < rows_number; i++)
            {
                (*this)(i,j) = ((*this)(i,j) - statistics[j].mean)/statistics[j].standard_deviation;
            }
        }
    }
}


// Vector< Statistics<T> > scale_mean_standard_deviation(void) method

/// Scales the data using the mean and standard deviation method and
/// the mean and standard deviation values calculated from the matrix.
/// It also returns the statistics of all the columns.

template <class T>
Vector< Statistics<T> > Matrix<T>::scale_mean_standard_deviation(void)
{
    const Vector< Statistics<T> > statistics = calculate_statistics();

    scale_mean_standard_deviation(statistics);

    return(statistics);
}


// void scale_rows_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&) const

/// Scales given rows from the matrix using the mean and standard deviation method.
/// @param statistics Vector of statistics for all the columns.
/// @param row_indices Indices of rows to be scaled.
template <class T> void Matrix<T>::scale_rows_mean_standard_deviation(const Vector< Statistics<T> >& statistics, const Vector<size_t>& row_indices) { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ const size_t statistics_size = statistics.size(); if(statistics_size != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Vector template.\n" << "void scale_rows_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&) method.\n" << "Size of statistics must be equal to number of columns.\n"; throw std::logic_error(buffer.str()); } #endif size_t row_index; // Scale columns for(size_t j = 0; j < columns_number; j++) { if(statistics[j].standard_deviation < 1e-99) { // Do nothing } else { for(size_t i = 0; i < row_indices.size(); i++) { row_index = row_indices[i]; (*this)(row_index,j) = ((*this)(row_index,j) - statistics[j].mean)/statistics[j].standard_deviation; } } } } // void scale_columns_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&) method /// Scales given columns of this matrix with the mean and standard deviation method. /// @param statistics Vector of statistics structure containing the mean and standard deviation values for the scaling. /// The size of that vector must be equal to the number of columns to be scaled. /// @param columns_indices Vector of indices with the columns to be scaled. /// The size of that vector must be equal to the number of columns to be scaled. 
template <class T>
void Matrix<T>::scale_columns_mean_standard_deviation(const Vector< Statistics<T> >& statistics, const Vector<size_t>& columns_indices)
{
   const size_t columns_indices_size = columns_indices.size();

   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__

   const size_t statistics_size = statistics.size();

   // Note: statistics is indexed in lockstep with columns_indices,
   // so statistics[j] describes column columns_indices[j].

   if(statistics_size != columns_indices_size)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Vector template.\n"
             << "void scale_columns_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&) method.\n"
             << "Size of statistics must be equal to size of columns indices.\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   size_t column_index;

   // Scale columns

   for(size_t j = 0; j < columns_indices_size; j++)
   {
      // Skip near-constant columns to avoid dividing by (almost) zero.

      if(statistics[j].standard_deviation < 1e-99)
      {
         // Do nothing
      }
      else
      {
         column_index = columns_indices[j];

         for(size_t i = 0; i < rows_number; i++)
         {
            (*this)(i,column_index) = ((*this)(i,column_index) - statistics[j].mean)/statistics[j].standard_deviation;
         }
      }
   }
}


// void scale_minimum_maximum(const Vector< Statistics<T> >&) method

/// Scales the matrix columns with the minimum and maximum method.
/// It updates the data in the matrix.
/// @param statistics Vector of statistics structures containing the minimum and maximum values for the scaling.
/// The size of that vector must be equal to the number of columns in this matrix.

template <class T>
void Matrix<T>::scale_minimum_maximum(const Vector< Statistics<T> >& statistics)
{
   #ifdef __OPENNN_DEBUG__

   const size_t size = statistics.size();

   if(size != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix template."
             << "void scale_minimum_maximum(const Vector< Statistics<T> >&) method.\n"
             << "Size of statistics vector must be equal to number of columns.\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   // Rescale data: each column is mapped linearly from [minimum, maximum]
   // to [-1, 1].

   for(size_t j = 0; j < columns_number; j++)
   {
      // Skip columns whose range is (near) zero to avoid dividing by zero.

      if(statistics[j].maximum - statistics[j].minimum < 1e-99)
      {
         // Do nothing
      }
      else
      {
         for(size_t i = 0; i < rows_number; i++)
         {
            (*this)(i,j) = 2.0*((*this)(i,j) - statistics[j].minimum)/(statistics[j].maximum-statistics[j].minimum)-1.0;
         }
      }
   }
}


// Vector< Statistics<T> > scale_minimum_maximum(void) method

/// Scales the data using the minimum and maximum method and
/// the minimum and maximum values calculated from the matrix.
/// It also returns the statistics of all the columns.

template <class T>
Vector< Statistics<T> > Matrix<T>::scale_minimum_maximum(void)
{
    // Compute per-column statistics, scale in place, and return the
    // statistics so the caller can later unscale.

    const Vector< Statistics<T> > statistics = calculate_statistics();

    scale_minimum_maximum(statistics);

    return(statistics);
}


// void scale_rows_minimum_maximum(const Vector< Statistics<T> >&, const Vector<size_t>&)

/// Scales given rows from the matrix using the minimum and maximum method.
/// @param statistics Vector of statistics for all the columns.
/// @param row_indices Indices of rows to be scaled.
template <class T>
void Matrix<T>::scale_rows_minimum_maximum(const Vector< Statistics<T> >& statistics, const Vector<size_t>& row_indices)
{
   const size_t row_indices_size = row_indices.size();

   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__

   if(statistics.size() != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Vector template.\n"
             << "void scale_rows_minimum_maximum(const Vector< Statistics<T> >&, const Vector<size_t>&) method.\n"
             << "Size of statistics must be equal to number of columns.\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   // Map the selected rows column-wise from [minimum, maximum] to [-1, 1].

   for(size_t column = 0; column < columns_number; column++)
   {
      const double range = statistics[column].maximum - statistics[column].minimum;

      // Skip columns with (near) zero range to avoid dividing by zero.

      if(range < 1e-99)
      {
         continue;
      }

      for(size_t k = 0; k < row_indices_size; k++)
      {
         const size_t row = row_indices[k];

         (*this)(row,column) = 2.0*((*this)(row,column) - statistics[column].minimum)/(statistics[column].maximum-statistics[column].minimum) - 1.0;
      }
   }
}


// void scale_columns_minimum_maximum(const Vector< Statistics<T> >&, const Vector<size_t>&) method

/// Scales given columns of this matrix with the minimum and maximum method.
/// @param statistics Vector of statistics structure containing the minimum and maximum values for the scaling.
/// The size of that vector must be equal to the number of columns to be scaled.
/// @param column_indices Vector of indices with the columns to be scaled.
/// The size of that vector must be equal to the number of columns to be scaled.
template <class T>
void Matrix<T>::scale_columns_minimum_maximum(const Vector< Statistics<T> >& statistics, const Vector<size_t>& column_indices)
{
   // Control sentence (if debug)

   const size_t column_indices_size = column_indices.size();

   #ifdef __OPENNN_DEBUG__

   const size_t statistics_size = statistics.size();

   // statistics is indexed in lockstep with column_indices:
   // statistics[j] describes column column_indices[j].

   if(statistics_size != column_indices_size)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Vector template.\n"
             << "void scale_columns_minimum_maximum(const Vector< Statistics<T> >&, const Vector<size_t>&) method.\n"
             << "Size of statistics must be equal to size of columns indices.\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   size_t column_index;

   // Rescale targets data: map each selected column from
   // [minimum, maximum] to [-1, 1].

   for(size_t j = 0; j < column_indices_size; j++)
   {
      column_index = column_indices[j];

      // Skip columns with (near) zero range to avoid dividing by zero.

      if(statistics[j].maximum - statistics[j].minimum < 1e-99)
      {
         // Do nothing
      }
      else
      {
         for(size_t i = 0; i < rows_number; i++)
         {
            (*this)(i,column_index) = 2.0*((*this)(i,column_index) - statistics[j].minimum)/(statistics[j].maximum-statistics[j].minimum) - 1.0;
         }
      }
   }
}


// void unscale_mean_standard_deviation(const Vector< Statistics<T> >&) method

/// Unscales the matrix columns with the mean and standard deviation method.
/// It updates the matrix elements.
/// @param statistics Vector of statistics structures containing the mean and standard deviations for the unscaling.
/// The size of that vector must be equal to the number of columns in this matrix.

template <class T>
void Matrix<T>::unscale_mean_standard_deviation(const Vector< Statistics<T> >& statistics)
{
   #ifdef __OPENNN_DEBUG__

   const size_t size = statistics.size();

   if(size != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix template."
             << "void unscale_mean_standard_deviation(const Vector< Statistics<T> >&) const method.\n"
             << "Size of statistics vector must be equal to number of columns.\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   // Invert the mean/standard-deviation scaling: x = x_scaled*sd + mean.

   for(size_t j = 0; j < columns_number; j++)
   {
      // Columns that were never scaled (sd ~ 0) are left untouched.

      if(statistics[j].standard_deviation < 1e-99)
      {
         // Do nothing
      }
      else
      {
         for(size_t i = 0; i < rows_number; i++)
         {
            (*this)(i,j) = (*this)(i,j)*statistics[j].standard_deviation + statistics[j].mean;
         }
      }
   }
}


// void unscale_rows_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&) method

/// Unscales given rows using the mean and standard deviation method.
/// @param statistics Vector of statistics structures for all the columns.
/// The size of this vector must be equal to the number of columns.
/// @param row_indices Indices of rows to be unscaled.

template <class T>
void Matrix<T>::unscale_rows_mean_standard_deviation(const Vector< Statistics<T> >& statistics, const Vector<size_t>& row_indices)
{
   const size_t row_indices_size = row_indices.size();

   size_t row_index;

   // Unscale columns

   for(size_t j = 0; j < columns_number; j++)
   {
      if(statistics[j].standard_deviation < 1e-99)
      {
         // Do nothing
      }
      else
      {
         // Bug fix: the loop must run over the selected row indices, not over
         // all rows; iterating to rows_number read past the end of row_indices
         // whenever fewer rows than rows_number were selected.

         for(size_t i = 0; i < row_indices_size; i++)
         {
            row_index = row_indices[i];

            (*this)(row_index,j) = (*this)(row_index,j)*statistics[j].standard_deviation + statistics[j].mean;
         }
      }
   }
}


// void unscale_columns_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&) method

/// Unscales given columns of this matrix with the mean and standard deviation method.
/// @param statistics Vector of statistics structure containing the mean and standard deviation values for the scaling.
/// The size of that vector must be equal to the number of columns in the matrix.
/// @param column_indices Vector of indices with the columns to be unscaled.
/// The size of that vector must be equal to the number of columns to be scaled.
template <class T>
void Matrix<T>::unscale_columns_mean_standard_deviation(const Vector< Statistics<T> >& statistics, const Vector<size_t>& column_indices)
{
   #ifdef __OPENNN_DEBUG__

   if(statistics.size() != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix template.\n"
             << "void unscale_columns_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&) const method.\n"
             << "Size of statistics vector must be equal to number of columns.\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   size_t column_index;

   // Unscale columns: here statistics is indexed by the absolute column
   // index (statistics covers all columns), unlike the scale_columns_*
   // methods, where it is indexed in lockstep with the indices vector.

   for(size_t j = 0; j < column_indices.size(); j++)
   {
      column_index = column_indices[j];

      // Columns that were never scaled (sd ~ 0) are left untouched.

      if(statistics[column_index].standard_deviation < 1e-99)
      {
         // Do nothing
      }
      else
      {
         for(size_t i = 0; i < rows_number; i++)
         {
            (*this)(i,column_index) = (*this)(i,column_index)*statistics[column_index].standard_deviation + statistics[column_index].mean;
         }
      }
   }
}


// void unscale_minimum_maximum(const Vector< Statistics<T> >&) method

/// Unscales the matrix columns with the minimum and maximum method.
/// @param statistics Vector of statistics which contains the minimum and maximum scaling values.
/// The size of that vector must be equal to the number of columns in this matrix.

template <class T>
void Matrix<T>::unscale_minimum_maximum(const Vector< Statistics<T> >& statistics)
{
   #ifdef __OPENNN_DEBUG__

   const size_t size = statistics.size();

   if(size != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix template."
             << "void unscale_minimum_maximum(const Vector< Statistics<T> >&) method.\n"
             << "Size of minimum vector must be equal to number of columns.\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   // Invert the min/max scaling: x = 0.5*(x_scaled + 1)*(max - min) + min.

   for(size_t j = 0; j < columns_number; j++)
   {
      if(statistics[j].maximum - statistics[j].minimum < 1e-99)
      {
         // Warn (but do not throw): constant columns were never scaled,
         // so they are skipped here as well.

         std::cout << "OpenNN Warning: Matrix template.\n"
                   << "void unscale_minimum_maximum(const Vector< Statistics<T> >&) const method.\n"
                   << "Minimum and maximum values of column " << j << " are equal.\n"
                   << "Those columns won't be unscaled.\n";

         // Do nothing
      }
      else
      {
         for(size_t i = 0; i < rows_number; i++)
         {
            (*this)(i,j) = 0.5*((*this)(i,j) + 1.0)*(statistics[j].maximum-statistics[j].minimum) + statistics[j].minimum;
         }
      }
   }
}


// void unscale_rows_minimum_maximum(const Vector< Statistics<T> >&, const Vector<size_t>&) method

/// Unscales given rows using the minimum and maximum method.
/// @param statistics Vector of statistics structures for all the columns.
/// The size of this vector must be equal to the number of columns.
/// @param row_indices Indices of rows to be unscaled.

template <class T>
void Matrix<T>::unscale_rows_minimum_maximum(const Vector< Statistics<T> >& statistics, const Vector<size_t>& row_indices)
{
   const size_t row_indices_size = row_indices.size();

   size_t row_index;

   // Unscale rows

   for(size_t j = 0; j < columns_number; j++)
   {
      if(statistics[j].maximum - statistics[j].minimum < 1e-99)
      {
         // Do nothing
      }
      else
      {
         // Bug fix: the loop must run over the selected row indices, not over
         // all rows; iterating to rows_number read past the end of row_indices
         // whenever fewer rows than rows_number were selected.

         for(size_t i = 0; i < row_indices_size; i++)
         {
            row_index = row_indices[i];

            (*this)(row_index,j) = 0.5*((*this)(row_index,j) + 1.0)*(statistics[j].maximum-statistics[j].minimum) + statistics[j].minimum;
         }
      }
   }
}


// void unscale_columns_minimum_maximum(const Vector< Statistics<T> >&, const Vector<size_t>&) method

/// Unscales given columns in the matrix with the minimum and maximum method.
/// @param statistics Vector of statistics structures containing the minimum and maximum values for the unscaling.
/// The size of that vector must be equal to the number of columns in the matrix.
/// @param column_indices Vector of indices of the columns to be unscaled. /// The size of that vector must be equal to the number of columns to be unscaled. template <class T> void Matrix<T>::unscale_columns_minimum_maximum(const Vector< Statistics<T> >& statistics, const Vector<size_t>& column_indices) { #ifdef __OPENNN_DEBUG__ if(statistics.size() != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "void unscale_columns_minimum_maximum_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&) const method.\n" << "Size of statistics vector must be equal to number of columns.\n"; throw std::logic_error(buffer.str()); } #endif size_t column_index; // Unscale columns for(size_t j = 0; j < column_indices.size(); j++) { column_index = column_indices[j]; if(statistics[column_index].maximum - statistics[column_index].minimum < 1e-99) { // Do nothing } else { for(size_t i = 0; i < rows_number; i++) { (*this)(i,column_index) = 0.5*((*this)(i,column_index) + 1.0)*(statistics[column_index].maximum-statistics[column_index].minimum) + statistics[column_index].minimum; } } } } // Vector<size_t> calculate_minimal_indices(void) const method /// Returns the row and column indices corresponding to the entry with minimum value. template <class T> Vector<size_t> Matrix<T>::calculate_minimal_indices(void) const { T minimum = (*this)(0,0); Vector<size_t> minimal_indices(2, 0); for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { if((*this)(i,j) < minimum) { minimum = (*this)(i,j); minimal_indices[0] = i; minimal_indices[1] = j; } } } return(minimal_indices); } // Vector<size_t> calculate_maximal_indices(void) const method /// Returns the row and column indices corresponding to the entry with maximum value. 
template <class T>
Vector<size_t> Matrix<T>::calculate_maximal_indices(void) const
{
   // Scan in row-major order; a strictly greater value updates the result,
   // so ties keep the earliest position.

   T best = (*this)(0,0);

   Vector<size_t> maximal_indices(2, 0);

   for(size_t row = 0; row < rows_number; row++)
   {
      for(size_t column = 0; column < columns_number; column++)
      {
         const T value = (*this)(row,column);

         if(value > best)
         {
            best = value;

            maximal_indices[0] = row;
            maximal_indices[1] = column;
         }
      }
   }

   return(maximal_indices);
}


// Vector< Vector<size_t> > calculate_minimal_maximal_indices(void) const method

/// Returns the row and column indices corresponding to the entries with minimum and maximum values.
/// The format is a vector of two vectors.
/// Each subvector also has two elements.
/// The first vector contains the minimal indices, and the second vector contains the maximal indices.

template <class T>
Vector< Vector<size_t> > Matrix<T>::calculate_minimal_maximal_indices(void) const
{
   // Single pass locating both extrema; ties keep the earliest position
   // in row-major order.

   T smallest = (*this)(0,0);
   T largest  = (*this)(0,0);

   Vector<size_t> minimal_indices(2, 0);
   Vector<size_t> maximal_indices(2, 0);

   for(size_t row = 0; row < rows_number; row++)
   {
      for(size_t column = 0; column < columns_number; column++)
      {
         const T value = (*this)(row,column);

         if(value < smallest)
         {
            smallest = value;

            minimal_indices[0] = row;
            minimal_indices[1] = column;
         }

         if(value > largest)
         {
            largest = value;

            maximal_indices[0] = row;
            maximal_indices[1] = column;
         }
      }
   }

   Vector< Vector<size_t> > minimal_maximal_indices(2);

   minimal_maximal_indices[0] = minimal_indices;
   minimal_maximal_indices[1] = maximal_indices;

   return(minimal_maximal_indices);
}


// double calculate_sum_squared_error(const Matrix<double>&) const method

/// Returns the sum squared error between the elements of this matrix and the elements of another matrix.
/// @param other_matrix Other matrix.
template <class T>
double Matrix<T>::calculate_sum_squared_error(const Matrix<double>& other_matrix) const
{
   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__

   const size_t other_rows_number = other_matrix.get_rows_number();

   if(other_rows_number != rows_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "double calculate_sum_squared_error(const Matrix<double>&) const method.\n"
             << "Other number of rows must be equal to this number of rows.\n";

      throw std::logic_error(buffer.str());
   }

   const size_t other_columns_number = other_matrix.get_columns_number();

   if(other_columns_number != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "double calculate_sum_squared_error(const Matrix<double>&) const method.\n"
             << "Other number of columns must be equal to this number of columns.\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   double sum_squared_error = 0.0;

   // Bug fix: the elements are accessed with the flat operator[], so the loop
   // must cover all rows_number*columns_number elements; the previous bound of
   // rows_number silently ignored everything past the first rows_number
   // elements of the data.

   for(size_t i = 0; i < this->size(); i++)
   {
      sum_squared_error += ((*this)[i] - other_matrix[i])*((*this)[i] - other_matrix[i]);
   }

   return(sum_squared_error);
}


// double calculate_sum_squared_error(const Vector<double>&) const method

/// This method retuns the sum squared error between the elements of this matrix and the elements of a vector, by columns.
/// The size of the vector must be equal to the number of columns of this matrix.
/// @param vector Vector to be compared to this matrix.
template <class T>
double Matrix<T>::calculate_sum_squared_error(const Vector<double>& vector) const
{
   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__

   const size_t size = vector.size();

   if(size != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "double calculate_sum_squared_error(const Vector<double>&) const method.\n"
             << "Size must be equal to number of columns.\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   double sum_squared_error = 0.0;

   // The vector is compared against every row: element j of the vector is
   // matched with column j of each row.

   for(size_t i = 0; i < rows_number; i++)
   {
      for(size_t j = 0; j < columns_number; j++)
      {
         sum_squared_error += ((*this)(i,j) - vector[j])*((*this)(i,j) - vector[j]);
      }
   }

   return(sum_squared_error);
}


// Vector<double> calculate_rows_norm(void) const method

/// Returns a vector with the norm of each row.
/// The size of that vector is the number of rows.

template <class T>
Vector<double> Matrix<T>::calculate_rows_norm(void) const
{
   Vector<double> rows_norm(rows_number, 0.0);

   // Euclidean (L2) norm per row: sqrt of the sum of squared entries.

   for(size_t i = 0; i < rows_number; i++)
   {
      for(size_t j = 0; j < columns_number; j++)
      {
         rows_norm[i] += (*this)(i,j)*(*this)(i,j);
      }

      rows_norm[i] = sqrt(rows_norm[i]);
   }

   return(rows_norm);
}


// Matrix<T> calculate_absolute_value(void) const method

/// Returns a matrix with the absolute values of this matrix.

template <class T>
Matrix<T> Matrix<T>::calculate_absolute_value(void) const
{
   Matrix<T> absolute_value(rows_number, columns_number);

   // Flat element-wise pass over the underlying storage.

   for(size_t i = 0; i < this->size(); i++)
   {
      if((*this)[i] > 0)
      {
         absolute_value[i] = (*this)[i];
      }
      else
      {
         absolute_value[i] = -(*this)[i];
      }
   }

   return(absolute_value);
}


// Matrix<T> calculate_transpose(void) const method

/// Returns the transpose of the matrix.
template <class T>
Matrix<T> Matrix<T>::calculate_transpose(void) const
{
   Matrix<T> transpose(columns_number, rows_number);

   // transpose(i,j) = (*this)(j,i): rows become columns and vice versa.

   for(size_t i = 0; i < columns_number; i++)
   {
      for(size_t j = 0; j < rows_number; j++)
      {
         transpose(i,j) = (*this)(j,i);
      }
   }

   return(transpose);
}


// Type calculate_determinant(void) const method

/// Returns the determinant of a square matrix.
/// Computed by recursive cofactor (Laplace) expansion along the first row,
/// which is O(n!) — only practical for small matrices.

template <class T>
T Matrix<T>::calculate_determinant(void) const
{
   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__

   if(empty())
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "calculate_determinant(void) const method.\n"
             << "Matrix is empty.\n";

      throw std::logic_error(buffer.str());
   }

   if(rows_number != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "calculate_determinant(void) const method.\n"
             << "Matrix must be square.\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   T determinant = 0;

   if(rows_number == 1)
   {
      // 1x1 base case.

      determinant = (*this)(0,0);
   }
   else if(rows_number == 2)
   {
      // 2x2 base case: ad - bc.

      determinant = (*this)(0,0)*(*this)(1,1) - (*this)(1,0)*(*this)(0,1);
   }
   else
   {
      int sign;

      // Despite its name, row_index iterates over the COLUMNS of the first
      // row: this is a cofactor expansion along row 0.

      for(size_t row_index = 0; row_index < rows_number; row_index++)
      {
         // Calculate sub data: the minor obtained by deleting row 0 and
         // column row_index.

         Matrix<T> sub_matrix(rows_number-1, columns_number-1);

         for(size_t i = 1; i < rows_number; i++)
         {
            size_t j2 = 0;

            for(size_t j = 0; j < columns_number; j++)
            {
               if(j == row_index)
               {
                  continue;
               }

               sub_matrix(i-1,j2) = (*this)(i,j);

               j2++;
            }
         }

         //sign = (size_t)(pow(-1.0, row_index+2.0));

         // Alternating sign (+,-,+,...) of the cofactor, computed without
         // floating-point pow.

         sign = static_cast<int>( (((row_index + 2) % 2) == 0) ? 1 : -1 );

         determinant += sign*(*this)(0,row_index)*sub_matrix.calculate_determinant();
      }
   }

   return(determinant);
}


// Matrix<T> calculate_cofactor(void) const method

/// Returns the cofactor matrix.
template <class T>
Matrix<T> Matrix<T>::calculate_cofactor(void) const
{
   // Assumes a square matrix (only rows_number is used for both dimensions).

   Matrix<T> cofactor(rows_number, columns_number);

   Matrix<T> c(rows_number-1, columns_number-1);

   for(size_t j = 0; j < rows_number; j++)
   {
      for(size_t i = 0; i < rows_number; i++)
      {
         // Form the adjoint a(i,j): copy all entries except row i and
         // column j into the minor c.

         size_t i1 = 0;

         for(size_t ii = 0; ii < rows_number; ii++)
         {
            if(ii == i)
            {
               continue;
            }

            size_t j1 = 0;

            for(size_t jj = 0; jj < rows_number; jj++)
            {
               if(jj == j)
               {
                  continue;
               }

               c(i1,j1) = (*this)(ii,jj);

               j1++;
            }

            i1++;
         }

         const double determinant = c.calculate_determinant();

         // Cofactor = (-1)^(i+j) * minor determinant; the sign is computed
         // with integer parity instead of floating-point pow.

         cofactor(i,j) = static_cast<T>((((i + j) % 2) == 0) ? 1 : -1)*determinant;

         //cofactor(i,j) = pow(-1.0, i+j+2.0)*determinant;
      }
   }

   return(cofactor);
}


// Matrix<T> calculate_inverse(void) const method

/// Returns the inverse of a square matrix via the adjugate formula
/// (adjoint divided by determinant) — O(n!) through the recursive
/// determinant, so only practical for small matrices.
/// An error message is printed if the matrix is singular.

template <class T>
Matrix<T> Matrix<T>::calculate_inverse(void) const
{
   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__

   if(empty())
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "calculate_inverse(void) const method.\n"
             << "Matrix is empty.\n";

      throw std::logic_error(buffer.str());
   }

   if(rows_number != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "calculate_inverse(void) const method.\n"
             << "Matrix must be square.\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   const double determinant = calculate_determinant();

   // Exact-zero determinant check; near-singular matrices will still
   // pass and produce large values.

   if(determinant == 0.0)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "calculate_inverse(void) const method.\n"
             << "Matrix is singular.\n";

      throw std::logic_error(buffer.str());
   }

   if(rows_number == 1)
   {
      // 1x1 shortcut: inverse is the reciprocal.

      Matrix<T> inverse(1, 1, 1.0/determinant);

      return(inverse);
   }

   // Calculate cofactor matrix

   const Matrix<T> cofactor = calculate_cofactor();

   // Adjoint matrix is the transpose of cofactor matrix

   const Matrix<T> adjoint = cofactor.calculate_transpose();

   // Inverse matrix is adjoint matrix divided by matrix determinant

   const Matrix<T> inverse = adjoint/determinant;

   return(inverse);
}


// Matrix<T> calculate_LU_inverse(void) const method

/// Returns the inverse of a square matrix using the LU decomposition method.
/// The given matrix must be invertible.

template <class T>
Matrix<T> Matrix<T>::calculate_LU_inverse(void) const
{
   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__

   if(empty())
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "calculate_LU_inverse(void) const method.\n"
             << "Matrix is empty.\n";

      throw std::logic_error(buffer.str());
   }

   if(rows_number != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "calculate_LU_inverse(void) const method.\n"
             << "Matrix must be square.\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   Matrix<T> inverse(rows_number, columns_number);

   // Wrap this matrix and the result as Eigen maps (no copies) and let
   // Eigen perform the inversion. The const_cast-style (double*) cast
   // assumes T is double — presumably only instantiated for double;
   // TODO(review) confirm.

   const Eigen::Map<Eigen::MatrixXd> this_eigen((double*)this->data(), rows_number, columns_number);
   Eigen::Map<Eigen::MatrixXd> inverse_eigen(inverse.data(), rows_number, columns_number);

   inverse_eigen = this_eigen.inverse();

   return(inverse);
}


// Vector<T> solve_LDLT(const Vector<double>&) const method

/// Solves a system of the form Ax = b, using the Cholesky decomposition.
/// A is this matrix and must be positive or negative semidefinite.
/// @param b Independent term of the system.
template <class T>
Vector<T> Matrix<T>::solve_LDLT(const Vector<double>& b) const
{
   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__

   if(empty())
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "solve_LLT(const Vector<double>&) const method.\n"
             << "Matrix is empty.\n";

      throw std::logic_error(buffer.str());
   }

   if(rows_number != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "solve_LLT(const Vector<double>&) const method.\n"
             << "Matrix must be squared.\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   Vector<T> solution(rows_number);

   // Map this matrix, b and the solution as Eigen objects (no copies) and
   // solve with Eigen's robust LDL^T Cholesky decomposition. The (double*)
   // casts assume T is double — presumably only instantiated for double;
   // TODO(review) confirm. Note: b's size is not checked against rows_number.

   const Eigen::Map<Eigen::MatrixXd> this_eigen((double*)this->data(), rows_number, columns_number);
   const Eigen::Map<Eigen::VectorXd> b_eigen((double*)b.data(),rows_number);
   Eigen::Map<Eigen::VectorXd> solution_eigen(solution.data(), rows_number);

   solution_eigen = this_eigen.ldlt().solve(b_eigen);

   return(solution);
}


// double calculate_distance(const size_t&, const size_t&) const

/// Calculates the distance between two rows in the matrix.
/// @param first_index Index of the first row.
/// @param second_index Index of the second row.

template <class T>
double Matrix<T>::calculate_distance(const size_t& first_index, const size_t& second_index) const
{
   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__

   if(empty())
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "calculate_distance(const size_t&, const size_t&) const method.\n"
             << "Matrix is empty.\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   // Extract both rows as vectors and delegate to the vector distance
   // (row indices are not range-checked here).

   const Vector<T> first_row = arrange_row(first_index);
   const Vector<T> second_row = arrange_row(second_index);

   return(first_row.calculate_distance(second_row));
}


// Matrix<T> operator + (const T&) const method

/// Sum matrix+scalar arithmetic operator.
/// @param scalar Scalar value to be added to this matrix.
template <class T>
Matrix<T> Matrix<T>::operator + (const T& scalar) const
{
   Matrix<T> sum(rows_number, columns_number);

   // std::bind2nd is deprecated since C++11 and removed in C++17; a plain
   // element-wise loop (the same pattern operator* and operator/ already
   // use) is portable across all standards.

   for(size_t i = 0; i < this->size(); i++)
   {
      sum[i] = (*this)[i] + scalar;
   }

   return(sum);
}


// Matrix<T> operator + (const Vector<T>&) const method

/// Sum matrix+vector arithmetic operator.
/// The vector is added column-wise: element i of the vector is added to
/// every element of row i.
/// @param vector Vector to be added to this matrix.

template <class T>
Matrix<T> Matrix<T>::operator + (const Vector<T>& vector) const
{
   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__

   const size_t size = vector.size();

   if(size != rows_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "Matrix<T> operator + (const Vector<T>&) const.\n"
             << "Size of vector must be equal to number of rows.\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   Matrix<T> sum(rows_number, columns_number);

   for(size_t i = 0; i < rows_number; i++)
   {
      for(size_t j = 0; j < columns_number; j++)
      {
         sum(i,j) = (*this)(i,j) + vector[i];
      }
   }

   return(sum);
}


// Matrix<T> operator + (const Matrix<T>&) const method

/// Sum matrix+matrix arithmetic operator.
/// @param other_matrix Matrix to be added to this vector.
template <class T>
Matrix<T> Matrix<T>::operator + (const Matrix<T>& other_matrix) const
{
   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__

   const size_t other_rows_number = other_matrix.get_rows_number();
   const size_t other_columns_number = other_matrix.get_columns_number();

   if(other_rows_number != rows_number || other_columns_number != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "Matrix<T> operator + (const Matrix<T>&) const.\n"
             << "Sizes of other matrix (" << other_rows_number << "," << other_columns_number << ") must be the same than sizes of this matrix (" << rows_number << "," << columns_number << ").\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   Matrix<T> sum(rows_number, columns_number);

   // Element-wise addition over the flat storage.

   std::transform(this->begin(), this->end(), other_matrix.begin(), sum.begin(), std::plus<T>());

   return(sum);
}


// Matrix<T> operator - (const T&) const method

/// Difference matrix-scalar arithmetic operator.
/// @param scalar Scalar value to be subtracted to this matrix.

template <class T>
Matrix<T> Matrix<T>::operator - (const T& scalar) const
{
   Matrix<T> difference(rows_number, columns_number);

   // std::bind2nd is deprecated since C++11 and removed in C++17; a plain
   // element-wise loop (the same pattern operator* and operator/ already
   // use) is portable across all standards.

   for(size_t i = 0; i < this->size(); i++)
   {
      difference[i] = (*this)[i] - scalar;
   }

   return(difference);
}


// Matrix<T> operator - (const Vector<T>&) const method

/// Sum matrix-vector arithmetic operator.
/// @param vector Vector to be subtracted to this matrix.
template <class T>
Matrix<T> Matrix<T>::operator - (const Vector<T>& vector) const
{
   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__

   const size_t size = vector.size();

   if(size != rows_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "Matrix<T> operator - (const Vector<T>&) const.\n"
             << "Size of vector must be equal to number of rows.\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   Matrix<T> difference(rows_number, columns_number);

   // Element i of the vector is subtracted from every element of row i.

   for(size_t i = 0; i < rows_number; i++)
   {
      for(size_t j = 0; j < columns_number; j++)
      {
         difference(i,j) = (*this)(i,j) - vector[i];
      }
   }

   return(difference);
}


// Matrix<T> operator - (const Matrix<T>&) const method

/// Difference matrix-matrix arithmetic operator.
/// @param other_matrix Matrix to be subtracted to this matrix.

template <class T>
Matrix<T> Matrix<T>::operator - (const Matrix<T>& other_matrix) const
{
   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__

   const size_t other_rows_number = other_matrix.get_rows_number();
   const size_t other_columns_number = other_matrix.get_columns_number();

   if(other_rows_number != rows_number || other_columns_number != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "Matrix<T> operator - (const Matrix<T>&) const method.\n"
             << "Sizes of other matrix (" << other_rows_number << "," << other_columns_number << ") must be equal to sizes of this matrix ("<< rows_number << "," << columns_number <<").\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   Matrix<T> difference(rows_number, columns_number);

   // Element-wise subtraction over the flat storage.

   std::transform( this->begin(), this->end(), other_matrix.begin(), difference.begin(), std::minus<T>());

   return(difference);
}


// Matrix<T> operator * (const T&) const method

/// Product matrix*scalar arithmetic operator.
/// @param scalar Scalar value to be multiplied to this matrix.
template <class T>
Matrix<T> Matrix<T>::operator * (const T& scalar) const
{
   // Scale every element by the scalar, walking the storage row by row.

   Matrix<T> product(rows_number, columns_number);

   for(size_t row = 0; row < rows_number; row++)
   {
      for(size_t column = 0; column < columns_number; column++)
      {
         product(row,column) = (*this)(row,column)*scalar;
      }
   }

   return(product);
}


// Matrix<T> operator * (const Vector<T>&) const method

/// Row by element matrix*row arithmetic operator.
/// Element i of the vector multiplies every element of row i.
/// @param vector vector to be multiplied to this matrix.

template <class T>
Matrix<T> Matrix<T>::operator * (const Vector<T>& vector) const
{
   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__

   const size_t size = vector.size();

   if(size != rows_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "Matrix<T> operator * (const Vector<T>&) const method.\n"
             << "Vector size (" << size << ") must be equal to number of matrix rows (" << rows_number << ").\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   Matrix<T> product(rows_number, columns_number);

   for(size_t row = 0; row < rows_number; row++)
   {
      const T factor = vector[row];

      for(size_t column = 0; column < columns_number; column++)
      {
         product(row,column) = (*this)(row,column)*factor;
      }
   }

   return(product);
}


// Matrix<T> operator * (const Matrix<T>&) const method

/// Product matrix*matrix arithmetic operator.
/// @param other_matrix Matrix to be multiplied to this matrix.
template <class T>
Matrix<T> Matrix<T>::operator * (const Matrix<T>& other_matrix) const
{
   // NOTE: despite the name, this is the ELEMENT-WISE (Hadamard) product,
   // not matrix multiplication — both operands must have identical
   // dimensions and each element is multiplied by its counterpart.

   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__

   const size_t other_rows_number = other_matrix.get_rows_number();
   const size_t other_columns_number = other_matrix.get_columns_number();

   if(other_rows_number != rows_number || other_columns_number != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "Matrix<T> operator * (const Matrix<T>&) const method.\n"
             << "Sizes of other matrix (" << other_rows_number << "," << other_columns_number << ") must be equal to sizes of this matrix (" << rows_number << "," << columns_number << ").\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   Matrix<T> product(rows_number, columns_number);

   for(size_t i = 0; i < this->size(); i++)
   {
      product[i] = (*this)[i]*other_matrix[i];
   }

   return(product);
}


// Matrix<T> operator / (const T&) const method

/// Cocient Matrix/scalar arithmetic operator.
/// @param scalar Value of scalar.

template <class T>
Matrix<T> Matrix<T>::operator / (const T& scalar) const
{
   Matrix<T> results(rows_number, columns_number);

   // Element-wise division by the scalar (no zero check on scalar).

   for(size_t i = 0; i < results.size(); i++)
   {
      results[i] = (*this)[i]/scalar;
   }

   return(results);
}


// Matrix<T> operator / (const Vector<T>&) const method

/// Cocient matrix/vector arithmetic operator.
/// @param vector Vector to be divided to this matrix.
template <class T> Matrix<T> Matrix<T>::operator / (const Vector<T>& vector) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ const size_t size = vector.size(); if(size != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "Matrix<T> operator / (const Vector<T>&) const.\n" << "Size of vector must be equal to number of columns.\n"; throw std::logic_error(buffer.str()); } #endif Matrix<T> cocient(rows_number, columns_number); for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { cocient(i,j) = (*this)(i,j)/vector[j]; } } return(cocient); } // Matrix<T> operator / (const Matrix<T>&) const method /// Cocient matrix/matrix arithmetic operator. /// @param other_matrix Matrix to be divided to this vector. template <class T> Matrix<T> Matrix<T>::operator / (const Matrix<T>& other_matrix) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ const size_t other_rows_number = other_matrix.get_rows_number(); const size_t other_columns_number = other_matrix.get_columns_number(); if(other_rows_number != rows_number || other_columns_number != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "Matrix<T> operator / (const Matrix<T>&) const method.\n" << "Both matrix sizes must be the same.\n"; throw std::logic_error(buffer.str()); } #endif Matrix<T> cocient(rows_number, columns_number); for(size_t i = 0; i < rows_number; i++) { cocient[i] = (*this)[i]/other_matrix[i]; } return(cocient); } // void operator += (const T&) /// Scalar sum and assignment operator. /// @param value Scalar value to be added to this matrix. template <class T> void Matrix<T>::operator += (const T& value) { for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { (*this)(i,j) += value; } } } // void operator += (const Matrix<T>&) /// Matrix sum and assignment operator. /// @param other_matrix Matrix to be added to this matrix. 
template <class T> void Matrix<T>::operator += (const Matrix<T>& other_matrix) { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ const size_t other_rows_number = other_matrix.get_rows_number(); if(other_rows_number != rows_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "void operator += (const Matrix<T>&).\n" << "Both numbers of rows must be the same.\n"; throw std::logic_error(buffer.str()); } const size_t other_columns_number = other_matrix.get_columns_number(); if(other_columns_number != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "void operator += (const Matrix<T>&).\n" << "Both numbers of columns must be the same.\n"; throw std::logic_error(buffer.str()); } #endif for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { (*this)(i,j) += other_matrix(i,j); } } } // void operator -= (const T&) /// Scalar rest and assignment operator. /// @param value Scalar value to be subtracted to this matrix. template <class T> void Matrix<T>::operator -= (const T& value) { for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { (*this)(i,j) -= value; } } } // void operator -= (const Matrix<T>&) /// Matrix rest and assignment operator. /// @param other_matrix Matrix to be subtracted to this matrix. 
template <class T> void Matrix<T>::operator -= (const Matrix<T>& other_matrix) { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ const size_t other_rows_number = other_matrix.get_rows_number(); if(other_rows_number != rows_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "void operator -= (const Matrix<T>&).\n" << "Both numbers of rows must be the same.\n"; throw std::logic_error(buffer.str()); } const size_t other_columns_number = other_matrix.get_columns_number(); if(other_columns_number != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "void operator -= (const Matrix<T>&).\n" << "Both numbers of columns must be the same.\n"; throw std::logic_error(buffer.str()); } #endif for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { (*this)(i,j) -= other_matrix(i,j); } } } // void operator *= (const T&) /// Scalar product and assignment operator. /// @param value Scalar value to be multiplied to this matrix. template <class T> void Matrix<T>::operator *= (const T& value) { for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { (*this)(i,j) *= value; } } } // void operator *= (const Matrix<T>&) /// Matrix product and assignment operator. /// @param other_matrix Matrix to be multiplied to this matrix. 
template <class T>
void Matrix<T>::operator *= (const Matrix<T>& other_matrix)
{
   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__

   const size_t other_rows_number = other_matrix.get_rows_number();

   if(other_rows_number != rows_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "void operator *= (const Matrix<T>&).\n"
             << "The number of rows in the other matrix (" << other_rows_number << ")"
             << " is not equal to the number of rows in this matrix (" << rows_number << ").\n";

      throw std::logic_error(buffer.str());
   }

   // Bug fix: also validate the number of columns, consistently with the
   // element-wise operators +=, -= and /=.  Without this check an
   // element-wise product with mismatched columns would read past the end
   // of the narrower matrix in release builds.

   const size_t other_columns_number = other_matrix.get_columns_number();

   if(other_columns_number != columns_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Matrix Template.\n"
             << "void operator *= (const Matrix<T>&).\n"
             << "The number of columns in the other matrix (" << other_columns_number << ")"
             << " is not equal to the number of columns in this matrix (" << columns_number << ").\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   // Element-wise (Hadamard) product, performed in place.

   for(size_t i = 0; i < rows_number; i++)
   {
      for(size_t j = 0; j < columns_number; j++)
      {
         (*this)(i,j) *= other_matrix(i,j);
      }
   }
}


// void operator /= (const T&)

/// Scalar division and assignment operator.
/// Divides every element of this matrix by the given scalar, in place.
/// @param value Scalar value to be divided to this matrix.

template <class T>
void Matrix<T>::operator /= (const T& value)
{
   for(size_t i = 0; i < rows_number; i++)
   {
      for(size_t j = 0; j < columns_number; j++)
      {
         (*this)(i,j) /= value;
      }
   }
}


// void operator /= (const Matrix<T>&)

/// Matrix division and assignment operator.
/// @param other_matrix Matrix to be divided to this matrix.
template <class T> void Matrix<T>::operator /= (const Matrix<T>& other_matrix) { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ const size_t other_rows_number = other_matrix.get_rows_number(); if(other_rows_number != rows_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "void operator /= (const Matrix<T>&).\n" << "Both numbers of rows must be the same.\n"; throw std::logic_error(buffer.str()); } const size_t other_columns_number = other_matrix.get_columns_number(); if(other_columns_number != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "void operator /= (const Matrix<T>&).\n" << "Both numbers of columns must be the same.\n"; throw std::logic_error(buffer.str()); } #endif for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { (*this)(i,j) /= other_matrix(i,j); } } } // void sum_diagonal(const T&) method /* template <class T> void Matrix<T>::sum_diagonal(const T& value) { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(!is_square()) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "void sum_diagonal(const T&) method.\n" << "Matrix must be squared.\n"; throw std::logic_error(buffer.str()); } #endif for(size_t i = 0; i < rows_number; i++) { (*this)(i,i) += value; } } */ // Vector<double> dot(const Vector<double>&) const method /// Returns the dot product of this matrix with a vector. /// The size of the vector must be equal to the number of columns of the matrix. /// @param vector Vector to be multiplied to this matrix. 
template <class T> Vector<double> Matrix<T>::dot(const Vector<double>& vector) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ const size_t size = vector.size(); if(size != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "Vector<T> dot(const Vector<T>&) const method.\n" << "Vector size must be equal to matrix number of columns.\n"; throw std::logic_error(buffer.str()); } #endif // Calculate matrix-vector poduct Vector<double> product(rows_number); // for(size_t i = 0; i < rows_number; i++) // { // product[i] = 0; // for(size_t j = 0; j < columns_number; j++) // { // product[i] += vector[j]*(*this)(i,j); // } // } const Eigen::Map<Eigen::MatrixXd> matrix_eigen((double*)this->data(), rows_number, columns_number); const Eigen::Map<Eigen::VectorXd> vector_eigen((double*)vector.data(), columns_number); Eigen::Map<Eigen::VectorXd> product_eigen(product.data(), rows_number); product_eigen = matrix_eigen*vector_eigen; return(product); } // Matrix<double> dot(const Matrix<double>&) const method /// Returns the dot product of this matrix with another matrix. /// @param other_matrix Matrix to be multiplied to this matrix. 
template <class T> Matrix<double> Matrix<T>::dot(const Matrix<double>& other_matrix) const { const size_t other_columns_number = other_matrix.get_columns_number(); const size_t other_rows_number = other_matrix.get_rows_number(); // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(other_rows_number != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "Matrix<T> dot(const Matrix<T>&) const method.\n" << "The number of rows of the other matrix (" << other_rows_number << ") must be equal to the number of columns of this matrix (" << columns_number << ").\n"; throw std::logic_error(buffer.str()); } #endif Matrix<T> product(rows_number, other_columns_number); // for(size_t i = 0; i < rows_number; i++) { // for(size_t j = 0; j < other_columns_number; j++) { // for(size_t k = 0; k < columns_number; k++) { // product(i,j) += (*this)(i,k)*other_matrix(k,j); // } // } // } const Eigen::Map<Eigen::MatrixXd> this_eigen((double*)this->data(), rows_number, columns_number); const Eigen::Map<Eigen::MatrixXd> other_eigen((double*)other_matrix.data(), other_rows_number, other_columns_number); Eigen::Map<Eigen::MatrixXd> product_eigen(product.data(), rows_number, other_columns_number); product_eigen = this_eigen*other_eigen; return(product); } // Matrix<double> calculate_eigenvalues(void) const method /// Calculates the eigen values of this matrix, which must be squared. /// Returns a matrix with only one column and rows the same as this matrix with the eigenvalues. 
template<class T> Matrix<double> Matrix<T>::calculate_eigenvalues(void) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if((*this).get_columns_number() == 0) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "Matrix<T> calculate_eigen_values(void) const method.\n" << "Number of columns must be greater than zero.\n"; throw std::logic_error(buffer.str()); } #endif #ifdef __OPENNN_DEBUG__ if((*this).get_rows_number() == 0) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "Matrix<T> calculate_eigen_values(void) const method.\n" << "Number of rows must be greater than zero.\n"; throw std::logic_error(buffer.str()); } #endif #ifdef __OPENNN_DEBUG__ if((*this).get_columns_number() != (*this).get_rows_number()) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "Matrix<T> calculate_eigen_values(void) const method.\n" << "The matrix must be squared.\n"; throw std::logic_error(buffer.str()); } #endif Matrix<T> eigenvalues(rows_number, 1); const Eigen::Map<Eigen::MatrixXd> this_eigen((double*)this->data(), rows_number, columns_number); const Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> matrix_eigen(this_eigen, Eigen::EigenvaluesOnly); Eigen::Map<Eigen::MatrixXd> eigenvalues_eigen(eigenvalues.data(), rows_number, 1); eigenvalues_eigen = matrix_eigen.eigenvalues(); return(eigenvalues); } // Matrix<double> calculate_eigenvectors(void) const method /// Calculates the eigenvectors of this matrix, which must be squared. /// Returns a matrix whose columns are the eigenvectors. 
template<class T> Matrix<double> Matrix<T>::calculate_eigenvectors(void) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if((*this).get_columns_number() == 0) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "Matrix<T> calculate_eigen_values(void) const method.\n" << "Number of columns must be greater than zero.\n"; throw std::logic_error(buffer.str()); } #endif #ifdef __OPENNN_DEBUG__ if((*this).get_rows_number() == 0) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "Matrix<T> calculate_eigen_values(void) const method.\n" << "Number of rows must be greater than zero.\n"; throw std::logic_error(buffer.str()); } #endif #ifdef __OPENNN_DEBUG__ if((*this).get_columns_number() != (*this).get_rows_number()) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "Matrix<T> calculate_eigen_values(void) const method.\n" << "The matrix must be squared.\n"; throw std::logic_error(buffer.str()); } #endif Matrix<T> eigenvectors(rows_number, rows_number); const Eigen::Map<Eigen::MatrixXd> this_eigen((double*)this->data(), rows_number, columns_number); const Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> matrix_eigen(this_eigen, Eigen::ComputeEigenvectors); Eigen::Map<Eigen::MatrixXd> eigenvectors_eigen(eigenvectors.data(), rows_number, rows_number); eigenvectors_eigen = matrix_eigen.eigenvectors(); return(eigenvectors); } // Matrix<T> direct(const Matrix<T>&) const method /// Calculates the direct product of this matrix with another matrix. /// This product is also known as the Kronecker product. /// @param other_matrix Second product term. 
template <class T> Matrix<T> Matrix<T>::direct(const Matrix<T>& other_matrix) const { const size_t other_rows_number = other_matrix.get_rows_number(); const size_t other_columns_number = other_matrix.get_columns_number(); Matrix<T> direct(rows_number*other_rows_number, columns_number*other_columns_number); size_t alpha; size_t beta; for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { for(size_t k = 0; k < other_rows_number; k++) { for(size_t l = 0; l < other_columns_number; l++) { alpha = other_rows_number*i+k; beta = other_columns_number*j+l; direct(alpha,beta) = (*this)(i,j)*other_matrix(k,l); } } } } return(direct); } // bool empty(void) const method /// Returns true if number of rows and columns is zero. template <class T> bool Matrix<T>::empty(void) const { if(rows_number == 0 && columns_number == 0) { return(true); } else { return(false); } } // bool is_square(void) const method /// Returns true if this matrix is square. /// A square matrix has the same numbers of rows and columns. template <class T> bool Matrix<T>::is_square(void) const { if(rows_number == columns_number) { return(true); } else { return(false); } } // bool is_symmetric(void) const method /// Returns true if this matrix is symmetric. /// A symmetric matrix is a squared matrix which is equal to its transpose. template <class T> bool Matrix<T>::is_symmetric(void) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(rows_number != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "bool is_symmetric(void) const method.\n" << "Matrix must be squared.\n"; throw std::logic_error(buffer.str()); } #endif const Matrix<T> transpose = calculate_transpose(); if((*this) == transpose) { return(true); } else { return(false); } } // bool is_antisymmetric(void) const method /// Returns true if this matrix is antysymmetric. /// A symmetric matrix is a squared matrix which its opposed is equal to its transpose. 
template <class T> bool Matrix<T>::is_antisymmetric(void) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(rows_number != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "bool is_antisymmetric(void) const method.\n" << "Matrix must be squared.\n"; throw std::logic_error(buffer.str()); } #endif const Matrix<T> transpose = calculate_transpose(); if((*this) == transpose*(-1)) { return(true); } else { return(false); } } // bool is_diagonal(void) const method /// Returns true if this matrix is diagonal. /// A diagonal matrix is which the entries outside the main diagonal are zero. template <class T> bool Matrix<T>::is_diagonal(void) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(rows_number != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "bool is_diagonal(void) const method.\n" << "Matrix must be squared.\n"; throw std::logic_error(buffer.str()); } #endif for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { if(i != j && (*this)(i,j) != 0) { return(false); } } } return(true); } // bool is_scalar(void) const method /// Returns true if this matrix is scalar. /// A scalar matrix is a diagonal matrix whose diagonal elements all contain the same scalar. template <class T> bool Matrix<T>::is_scalar(void) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(rows_number != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "bool is_scalar(void) const method.\n" << "Matrix must be squared.\n"; throw std::logic_error(buffer.str()); } #endif // @todo return(false); } /// Returns true if this matrix is the identity. /// The identity matrix or unit matrix is a square matrix with ones on the main diagonal and zeros elsewhere. 
template <class T> bool Matrix<T>::is_identity(void) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(rows_number != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "bool is_unity(void) const method.\n" << "Matrix must be squared.\n"; throw std::logic_error(buffer.str()); } #endif for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { if(i != j && (*this)(i,j) != 0) { return(false); } else if(i == j && (*this)(i,j) != 1) { return(false); } } } return(true); } // bool is_binary(void) const method /// Returns true if this matrix has binary values. template <class T> bool Matrix<T>::is_binary(void) const { for(size_t i = 0; i < this->size(); i++) { if((*this)[i] != 0 && (*this)[i] != 1) { return(false); } } return(true); } // bool is_column_binary(const size_t) const method /// Returns true if a column this matrix has binary values. template <class T> bool Matrix<T>::is_column_binary(const size_t& j) const { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ const size_t columns_number = get_columns_number(); if(j >= columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "bool is_column_binary(const size_t) const method method.\n" << "Index of column (" << j << ") must be less than number of columns.\n"; throw std::logic_error(buffer.str()); } #endif const size_t rows_number = get_rows_number(); for(size_t i = 0; i < rows_number; i++) { if((*this)(i,j) != 0 && (*this)(i,j) != 1) { return(false); } } return(true); } // Matrix<T> filter(const size_t&, const T&, const T&) const method /// Returns a new matrix where a given column has been filtered. /// @param column_index Index of column. /// @param minimum Minimum filtering value. /// @param maximum Maximum filtering value. 
template <class T> Matrix<T> Matrix<T>::filter(const size_t& column_index, const T& minimum, const T& maximum) const { const Vector<T> column = arrange_column(column_index); const size_t new_rows_number = rows_number - column.count_less_than(minimum) - column.count_greater_than(maximum); Matrix<T> new_matrix(new_rows_number, columns_number); size_t row_index = 0; Vector<T> row(columns_number); for(size_t i = 0; i < rows_number; i++) { if((*this)(i,column_index) >= minimum && (*this)(i,column_index) <= maximum) { row = arrange_row(i); new_matrix.set_row(row_index, row); row_index++; } } return(new_matrix); } // void convert_time_series(const size_t&) method /// Arranges a time series data matrix in a proper format for forecasting. /// Note that this method sets new numbers of rows and columns in the matrix. /// @param lags_number Number of lags for the prediction. /// @todo template <class T> void Matrix<T>::convert_time_series(const size_t& lags_number) { const size_t new_rows_number = rows_number - lags_number; const size_t new_columns_number = columns_number*(1 + lags_number); Matrix<T> new_matrix(new_rows_number, new_columns_number); Vector<T> row(rows_number); for(size_t i = 0; i < new_rows_number; i++) { row = arrange_row(i); for(size_t j = 1; j <= lags_number; j++) { row = row.assemble(arrange_row(i+j)); } new_matrix.set_row(i, row); } set(new_matrix); } // void convert_association(void) method /// Arranges the matrix in a proper format for association. /// Note that this method sets new numbers of columns in the matrix. template <class T> void Matrix<T>::convert_association(void) { Matrix<T> copy(*this); set(copy.assemble_columns(copy)); } // void convert_angular_variables_degrees(const size_t&) method /// Converts a given column, representing angles in degrees, to two different columns with the sinus and the cosinus of the corresponding angles. /// Note that this method sets a new number of columns in the matrix. 
/// @param column_index Index of column to be converted. template <class T> void Matrix<T>::convert_angular_variables_degrees(const size_t& column_index) { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(column_index >= columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "void convert_angular_variables_degrees(const size_t&) method.\n" << "Index of column (" << column_index << ") must be less than number of columns.\n"; throw std::logic_error(buffer.str()); } #endif const double pi = 4.0*atan(1.0); Vector<T> sin_angle(rows_number); Vector<T> cos_angle(rows_number); double angle_rad; for(size_t i = 0; i < rows_number; i++) { if((*this)(i,column_index) != -99.9) { angle_rad = pi*(*this)(i,column_index)/180.0; sin_angle[i] = sin(angle_rad); cos_angle[i] = cos(angle_rad); } else { sin_angle[i] = (T)-99.9; cos_angle[i] = (T)-99.9; } } set_column(column_index, sin_angle); insert_column(column_index+1, cos_angle); } // void convert_angular_variables_radians(const size_t&) method /// Converts a given column, representing angles in radians, to two different columns with the sinus and the cosinus of the corresponding angles. /// Note that this method sets a new number of columns in the matrix. /// @param column_index Index of column to be converted. 
template <class T> void Matrix<T>::convert_angular_variables_radians(const size_t& column_index) { // Control sentence (if debug) #ifdef __OPENNN_DEBUG__ if(column_index >= columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "void convert_angular_variables_radians(const size_t&) method.\n" << "Index of column (" << column_index << ") must be less than number of columns.\n"; throw std::logic_error(buffer.str()); } #endif Vector<T> sin_angle(rows_number); Vector<T> cos_angle(rows_number); for(size_t i = 0; i < rows_number; i++) { sin_angle[i] = sin((*this)(i,column_index)); cos_angle[i] = cos((*this)(i,column_index)); } set_column(column_index, sin_angle); insert_column(column_index+1, cos_angle); } // void print(void) const method /// Prints to the screen in the matrix object. template <class T> void Matrix<T>::print(void) const { std::cout << *this; } // void load(const std::string&) method /// Loads the numbers of rows and columns and the values of the matrix from a data file. /// @param file_name File name. 
template <class T> void Matrix<T>::load(const std::string& file_name) { std::ifstream file(file_name.c_str()); if(!file.is_open()) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "void load(const std::string&) method.\n" << "Cannot open matrix data file: " << file_name << "\n"; throw std::logic_error(buffer.str()); } if(file.peek() == std::ifstream::traits_type::eof()) { //std::ostringstream buffer; //buffer << "OpenNN Exception: Matrix template.\n" // << "void load(const std::string&) method.\n" // << "Data file " << file_name << " is empty.\n"; //throw std::logic_error(buffer.str()); this->set(); return; } //file.is // Set matrix sizes std::string line; std::getline(file, line); if(line.empty()) { set(); } else { std::istringstream buffer(line); std::istream_iterator<std::string> it(buffer); std::istream_iterator<std::string> end; const std::vector<std::string> results(it, end); const size_t new_columns_number = (size_t)results.size(); size_t new_rows_number = 1; while(file.good()) { getline(file, line); if(!line.empty()) { new_rows_number++; } } set(new_rows_number, new_columns_number); // Clear file file.clear(); file.seekg(0, std::ios::beg); for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { file >> (*this)(i,j); } } } // Close file file.close(); } // void load_binary(const std::string&) method /// Loads the numbers of rows and columns and the values of the matrix from a binary file. /// @param file_name Name of binary file. 
template <class T> void Matrix<T>::load_binary(const std::string& file_name) { std::ifstream file; file.open(file_name.c_str(), std::ios::binary); if(!file.is_open()) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template.\n" << "void load_binary(const std::string&) method.\n" << "Cannot open binary file: " << file_name << "\n"; throw std::logic_error(buffer.str()); } std::streamsize size = sizeof(size_t); size_t columns_number; size_t rows_number; file.read(reinterpret_cast<char*>(&columns_number), size); file.read(reinterpret_cast<char*>(&rows_number), size); size = sizeof(double); double value; this->set(rows_number, columns_number); for(size_t i = 0; i < columns_number; i++) { for(size_t j = 0; j < rows_number; j++) { file.read(reinterpret_cast<char*>(&value), size); (*this)(j,i) = value; } } file.close(); } // void save(const std::string&) const method /// Saves the values of the matrix to a data file separated by spaces. /// @param file_name File name. template <class T> void Matrix<T>::save(const std::string& file_name) const { std::ofstream file(file_name.c_str()); if(!file.is_open()) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template." << std::endl << "void save(const std::string) method." << std::endl << "Cannot open matrix data file." << std::endl; throw std::logic_error(buffer.str()); } // Write file for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { file << (*this)(i,j) << " "; } file << std::endl; } // Close file file.close(); } // void save_binary(const std::string&) const method /// Saves the values of the matrix to a binary file. /// @param file_name File name. template <class T> void Matrix<T>::save_binary(const std::string& file_name) const { std::ofstream file(file_name.c_str(), std::ios::binary); if(!file.is_open()) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template." << std::endl << "void save(const std::string) method." 
<< std::endl << "Cannot open matrix binary file." << std::endl; throw std::logic_error(buffer.str()); } // Write data std::streamsize size = sizeof(size_t); size_t m = columns_number; size_t n = rows_number; file.write(reinterpret_cast<char*>(&m), size); file.write(reinterpret_cast<char*>(&n), size); size = sizeof(double); double value; for(int i = 0; i < this->size(); i++) { value = (*this)[i]; file.write(reinterpret_cast<char*>(&value), size); } // Close file file.close(); } // void save_csv(const std::string&, const Vector<std::string>&) const method /// Saves the values of the matrix to a data file separated by commas. /// @param file_name File name. /// @param column_names Names of the columns. template <class T> void Matrix<T>::save_csv(const std::string& file_name, const Vector<std::string>& column_names) const { std::ofstream file(file_name.c_str()); if(!file.is_open()) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template." << std::endl << "void save_csv(const std::string, const Vector<std::string>&) method." << std::endl << "Cannot open matrix data file." << std::endl; throw std::logic_error(buffer.str()); } if(column_names.size() != 0 && column_names.size() != columns_number) { std::ostringstream buffer; buffer << "OpenNN Exception: Matrix template." << std::endl << "void save_csv(const std::string, const Vector<std::string>&) method." << std::endl << "Column names must have size 0 or " << columns_number << "." 
<< std::endl; throw std::logic_error(buffer.str()); } // Write file if(column_names.size() == 0) { for(size_t j = 0; j < columns_number; j++) { file << "c" << j+1; if(j != columns_number-1) { file << ","; } } } else { for(size_t j = 0; j < columns_number; j++) { file << column_names[j]; if(j != columns_number-1) { file << ","; } } } file << std::endl; file.precision(20); for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { file << (*this)(i,j); if(j != columns_number-1) { file << ","; } } file << std::endl; } // Close file file.close(); } // void parse(const std::string&) method /// This method takes a string representation of a matrix and sets this matrix /// from that data. /// @param str String to be parsed. template <class T> void Matrix<T>::parse(const std::string& str) { if(str.empty()) { set(); } else { // Set matrix sizes std::istringstream str_buffer(str); std::string line; std::getline(str_buffer, line); std::istringstream line_buffer(line); std::istream_iterator<std::string> it(line_buffer); std::istream_iterator<std::string> end; const std::vector<std::string> results(it, end); const size_t new_columns_number = (size_t)results.size(); size_t new_rows_number = 1; while(str_buffer.good()) { getline(str_buffer, line); if(!line.empty()) { new_rows_number++; } } set(new_rows_number, new_columns_number); // Clear file str_buffer.clear(); str_buffer.seekg(0, std::ios::beg); for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { str_buffer >> (*this)(i,j); } } } } // std::string to_string(const std::string&) const method /// Returns a string representation of this matrix. /// The elements are separated by spaces. /// The rows are separated by the character "\n". 
template <class T> std::string Matrix<T>::to_string(const std::string& separator) const { std::ostringstream buffer; if(rows_number > 0 && columns_number > 0) { buffer << arrange_row(0).to_string(separator); for(size_t i = 1; i < rows_number; i++) { buffer << "\n" << arrange_row(i).to_string(separator); } } return(buffer.str()); } // Matrix<std::string> write_string_matrix(const size_t&) const /// Returns a new matrix in which each entry has been converted to a string. template <class T> Matrix<std::string> Matrix<T>::write_string_matrix(const size_t& precision) const { Matrix<std::string> string_matrix(rows_number, columns_number); std::ostringstream buffer; for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { buffer.str(""); buffer << std::setprecision(precision) << (*this)(i,j); string_matrix(i,j) = buffer.str(); } } return(string_matrix); } // vector<T> to_std_vector(void) const /// Returns a std::vector representation of this matrix. /// The size of the new vector is equal to the number of elements of this matrix. /// The entries of the new vector are the entries of this matrix ordered by rows. template <class T> std::vector<T> Matrix<T>::to_std_vector(void) const { const std::vector<T> std_vector((*this).begin(), (*this).end()); return(std_vector); } // Vector<T> to_vector(void) const /// Returns a vector representation of this matrix. /// The size of the new vector is equal to the number of elements of this matrix. /// The entries of the new vector are the entries of this matrix ordered by rows. 
template <class T> Vector<T> Matrix<T>::to_vector(void) const { Vector<T> vector(rows_number*columns_number); for(size_t i = 0; i < rows_number*columns_number; i++) { vector[i] = (*this)[i]; } return(vector); } // void print_preview(void) const method /// Prints to the sceen a preview of the matrix, /// i.e., the first, second and last rows template <class T> void Matrix<T>::print_preview(void) const { std::cout << "Rows number: " << rows_number << std::endl << "Columns number: " << columns_number << std::endl; if(rows_number > 0) { const Vector<T> first_row = arrange_row(0); std::cout << "Row 0:\n" << first_row << std::endl; } if(rows_number > 1) { const Vector<T> second_row = arrange_row(1); std::cout << "Row 1:\n" << second_row << std::endl; } if(rows_number > 2) { const Vector<T> last_row = arrange_row(rows_number-1); std::cout << "Row " << rows_number << ":\n" << last_row << std::endl; } } /// This method re-writes the input operator >> for the Matrix template. /// @param is Input stream. /// @param m Input matrix. template<class T> std::istream& operator >> (std::istream& is, Matrix<T>& m) { const size_t rows_number = m.get_rows_number(); const size_t columns_number = m.get_columns_number(); for(size_t i = 0; i < rows_number; i++) { for(size_t j = 0; j < columns_number; j++) { is >> m(i,j); } } return(is); } // Output operator /// This method re-writes the output operator << for the Matrix template. /// @param os Output stream. /// @param m Output matrix. template<class T> std::ostream& operator << (std::ostream& os, const Matrix<T>& m) { const size_t rows_number = m.get_rows_number(); const size_t columns_number = m.get_columns_number(); if(rows_number > 0 && columns_number > 0) { os << m.arrange_row(0); for(size_t i = 1; i < rows_number; i++) { os << "\n" << m.arrange_row(i); } } return(os); } // Output operator /// This method re-writes the output operator << for matrices of vectors. /// @param os Output stream. /// @param m Output matrix of vectors. 
template<class T>
std::ostream& operator << (std::ostream& os, const Matrix< Vector<T> >& m)
{
   const size_t rows_number = m.get_rows_number();
   const size_t columns_number = m.get_columns_number();

   // Each inner vector is printed under a "subvector_<row>_<column>" heading.
   for(size_t i = 0; i < rows_number; i++)
   {
      for(size_t j = 0; j < columns_number; j++)
      {
         os << "subvector_" << i << "_" << j << "\n"
            << m(i,j) << std::endl;
      }
   }

   return(os);
}


// Output operator

/// This method re-writes the output operator << for matrices of matrices.
/// @param os Output stream.
/// @param m Output matrix of matrices.

template<class T>
std::ostream& operator << (std::ostream& os, const Matrix< Matrix<T> >& m)
{
   const size_t rows_number = m.get_rows_number();
   const size_t columns_number = m.get_columns_number();

   // Each inner matrix is printed under a "submatrix_<row>_<column>" heading.
   for(size_t i = 0; i < rows_number; i++)
   {
      for(size_t j = 0; j < columns_number; j++)
      {
         os << "submatrix_" << i << "_" << j << "\n"
            << m(i,j);
      }
   }

   return(os);
}

} // end namespace

#endif


// OpenNN: Open Neural Networks Library.
// Copyright (c) 2005-2016 Roberto Lopez.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.

// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
simd.c
#include <stdio.h>

#define N 100

/* Offload test: fill a[] on the target device with a SIMD loop and
 * compare against the same computation done on the host.
 * Returns the number of mismatches (0 on success). */
int main() {
  int a[N], aa[N];
  int i, error = 0;

  // initialize both arrays to a sentinel value
  for (i = 0; i < N; i++)
    aa[i] = a[i] = -1;

  // offload: device writes a[k] = k
  // (map clause now uses N instead of the hard-coded literal 100)
#pragma omp target map(tofrom: a[0:N])
  {
    int k;
#pragma omp simd
    for (k = 0; k < N; k++)
      a[k] = k;
  }

  // host reference result
  for (i = 0; i < N; i++)
    aa[i] = i;

  // check: record first/last mismatching index and total count
  int first = -1;
  int last = -1;
  for (i = 0; i < N; i++) {
    if (a[i] != aa[i]) {
      if (first == -1)
        first = i;
      last = i;
      ++error;
    }
  }

  if (error) {
    if (error == 1)
      printf("one mismatch: [index:%d]: a %d != %d\n",
             first, a[first], aa[first]);
    else {
      printf("first mismatch: [index:%d]: a %d != %d (total errors: %d)\n",
             first, a[first], aa[first], error);
      printf("last mismatch: [index:%d]: a %d != %d (total errors %d)\n",
             last, a[last], aa[last], error);
    }
    // Bug fix: report failure through the exit status; this path previously
    // returned 0 (success) even though mismatches were detected.
    return error;
  }

  // report
  printf("Done with %d errors\n", error);
  return error;
}
bml_allocate.c
#include "bml_allocate.h"
#include "bml_introspection.h"
#include "bml_logger.h"
#include "bml_parallel.h"
#include "dense/bml_allocate_dense.h"
#include "ellpack/bml_allocate_ellpack.h"
#include "ellblock/bml_allocate_ellblock.h"
#include "ellsort/bml_allocate_ellsort.h"
#include "csr/bml_allocate_csr.h"
#ifdef DO_MPI
#include "distributed2d/bml_allocate_distributed2d.h"
#endif

#include <errno.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/** Check if matrix is allocated.
 *
 * \ingroup allocate_group_C
 *
 * \param A[in,out] Matrix
 * \return \f$ > 0 \f$ if allocated, else -1
 */
int
bml_allocated(
    bml_matrix_t * A)
{
    return bml_get_N(A);
}

/** Allocate and zero a chunk of memory.
 *
 * \ingroup allocate_group_C
 *
 * \param size The size of the memory.
 * \return A pointer to the allocated chunk, or NULL on failure (after
 *         logging the error).
 */
void *
bml_allocate_memory(
    size_t size)
{
#if defined(INTEL_OPT)
    char *ptr = _mm_malloc(size, MALLOC_ALIGNMENT);
    // Bug fix: only zero the buffer when the allocation succeeded; the
    // original code wrote through ptr before the NULL check below.
    if (ptr != NULL)
    {
#pragma omp parallel for simd
#pragma vector aligned
        for (size_t i = 0; i < size; i++)
        {
            __assume_aligned(ptr, MALLOC_ALIGNMENT);
            ptr[i] = 0;
        }
    }
#elif defined(HAVE_POSIX_MEMALIGN)
    char *ptr = NULL;
    // Bug fix: check the posix_memalign() return value; on failure ptr was
    // previously left uninitialized and then written to.
    if (posix_memalign((void **) &ptr, MALLOC_ALIGNMENT, size) != 0)
    {
        ptr = NULL;
    }
    else
    {
        memset(ptr, 0, size);
    }
#else
    void *ptr = calloc(1, size);
#endif
    if (ptr == NULL)
    {
        // %zu is the correct conversion specifier for size_t (was %d)
        LOG_ERROR("error allocating memory of size %zu: %s\n",
                  size, strerror(errno));
    }
    return (void *) ptr;
}

/** Allocate a chunk of memory without initialization.
 *
 * \ingroup allocate_group_C
 *
 * \param size The size of the memory.
 * \return A pointer to the allocated chunk, or NULL on failure (after
 *         logging the error).
 */
void *
bml_noinit_allocate_memory(
    size_t size)
{
#if defined(INTEL_OPT)
    void *ptr = _mm_malloc(size, MALLOC_ALIGNMENT);
#elif defined(HAVE_POSIX_MEMALIGN)
    void *ptr = NULL;
    // Bug fix: posix_memalign() does not set ptr on failure; make sure the
    // NULL check below sees a well-defined value.
    if (posix_memalign(&ptr, MALLOC_ALIGNMENT, size) != 0)
    {
        ptr = NULL;
    }
#else
    void *ptr = malloc(size);
#endif
    if (ptr == NULL)
    {
        LOG_ERROR("error allocating memory: %s\n", strerror(errno));
    }
    return ptr;
}

/** Reallocate a chunk of memory.
 *
 * \ingroup allocate_group_C
 *
 * \param ptr A pointer to the chunk to be resized.
 * \param size The size of the memory.
 * \return A pointer to the reallocated chunk.
 */
void *
bml_reallocate_memory(
    void *ptr,
    const size_t size)
{
    void *ptr_new = realloc(ptr, size);
    if (ptr_new == NULL)
    {
        LOG_ERROR("error reallocating memory: %s\n", strerror(errno));
    }
    return ptr_new;
}

/** Deallocate a chunk of memory.
 *
 * \ingroup allocate_group_C
 *
 * \param ptr A pointer to the previously allocated chunk.
 */
void
bml_free_memory(
    void *ptr)
{
#ifdef INTEL_OPT
    // memory obtained from _mm_malloc() must be released with _mm_free()
    _mm_free(ptr);
#else
    free(ptr);
#endif
}

/** De-allocate a chunk of memory that was allocated inside a C
 * function. This is used by the Fortran bml_free_C interface. Note
 * the "pointer to pointer" in the API.
 *
 * \ingroup allocate_group_C
 *
 * \param ptr A pointer to the previously allocated chunk.
 */
void
bml_free_ptr(
    void **ptr)
{
    bml_free_memory(*ptr);
}

/** Deallocate a matrix.
 *
 * \ingroup allocate_group_C
 *
 * \param A[in,out] The matrix.
 */
void
bml_deallocate(
    bml_matrix_t ** A)
{
    if (A == NULL)
    {
        LOG_DEBUG("A is NULL\n");
    }
    else if (*A == NULL)
    {
        LOG_DEBUG("*A is NULL\n");
    }
    else
    {
        LOG_DEBUG("deallocating bml matrix\n");
        // dispatch on the concrete storage format
        switch (bml_get_type(*A))
        {
            case dense:
                bml_deallocate_dense(*A);
                break;
            case ellpack:
                bml_deallocate_ellpack(*A);
                break;
            case ellsort:
                bml_deallocate_ellsort(*A);
                break;
            case ellblock:
                bml_deallocate_ellblock(*A);
                break;
            case csr:
                bml_deallocate_csr(*A);
                break;
#ifdef DO_MPI
            case distributed2d:
                bml_deallocate_distributed2d(*A);
                break;
#endif
            default:
                LOG_ERROR("unknown matrix type (%d)\n", bml_get_type(*A));
                break;
        }
        // mark the caller's handle as released
        *A = NULL;
    }
}

/** Deallocate a domain.
 *
 * \ingroup allocate_group_C
 *
 * \param D[in,out] The domain.
 */
void
bml_deallocate_domain(
    bml_domain_t * D)
{
    // release the per-rank arrays, then the domain struct itself
    bml_free_memory(D->localRowMin);
    bml_free_memory(D->localRowMax);
    bml_free_memory(D->localRowExtent);
    bml_free_memory(D->localDispl);
    bml_free_memory(D->localElements);
    bml_free_memory(D);
}

/** Clear a matrix.
 *
 * \ingroup allocate_group_C
 *
 * \param A[in,out] The matrix.
 */
void
bml_clear(
    bml_matrix_t * A)
{
    // dispatch on the concrete storage format
    switch (bml_get_type(A))
    {
        case dense:
            bml_clear_dense(A);
            break;
        case ellpack:
            bml_clear_ellpack(A);
            break;
        case ellsort:
            bml_clear_ellsort(A);
            break;
        case ellblock:
            bml_clear_ellblock(A);
            break;
        case csr:
            bml_clear_csr(A);
            break;
#ifdef DO_MPI
        case distributed2d:
            bml_clear_distributed2d(A);
            break;
#endif
        default:
            LOG_ERROR("unknown matrix type (%d)\n", bml_get_type(A));
            break;
    }
}

/** Allocate a matrix without initializing.
 *
 * Note that the matrix \f$ A \f$ will be newly allocated. The
 * function does not check whether the matrix is already allocated.
 *
 * \ingroup allocate_group_C
 *
 * \param matrix_type The matrix type.
 * \param matrix_precision The precision of the matrix.
 * \param matrix_dimension The matrix size.
 * \param distrib_mode The distribution mode.
 * \return The matrix.
 */
bml_matrix_t *
bml_noinit_rectangular_matrix(
    bml_matrix_type_t matrix_type,
    bml_matrix_precision_t matrix_precision,
    bml_matrix_dimension_t matrix_dimension,
    bml_distribution_mode_t distrib_mode)
{
    LOG_DEBUG("noinit matrix of size %d (or zero matrix for dense)\n",
              matrix_dimension.N_rows);
#ifdef DO_MPI
    if (distrib_mode == distributed)
        return bml_zero_matrix_distributed2d(matrix_type, matrix_precision,
                                             matrix_dimension.N_rows,
                                             matrix_dimension.N_nz_max);
    else
#endif
        switch (matrix_type)
        {
            case dense:
                // dense has no "noinit" variant; a zero matrix is returned
                // instead (see the LOG_DEBUG message above)
                return bml_zero_matrix_dense(matrix_precision,
                                             matrix_dimension, distrib_mode);
                break;
            case ellpack:
                return bml_noinit_matrix_ellpack(matrix_precision,
                                                 matrix_dimension,
                                                 distrib_mode);
                break;
            case ellsort:
                return bml_noinit_matrix_ellsort(matrix_precision,
                                                 matrix_dimension,
                                                 distrib_mode);
                break;
            case ellblock:
                return bml_noinit_matrix_ellblock(matrix_precision,
                                                  matrix_dimension,
                                                  distrib_mode);
                break;
            case csr:
                return bml_noinit_matrix_csr(matrix_precision,
                                             matrix_dimension, distrib_mode);
                break;
            default:
                LOG_ERROR("unknown matrix type\n");
                break;
        }
    return NULL;
}

/** Allocate a block matrix
 *
 * \param matrix_type The matrix type.
 * \param matrix_precision The precision of the matrix.
 * \param NB The number of blocks in a row.
 * \param MB The number of non-zeroes blocks per row.
 * \param M The number of non-zeroes per row.
 * \param bsizes The sizes of each block
 * \param distrib_mode The distribution mode.
 * \return The matrix.
 */
bml_matrix_t *
bml_block_matrix(
    bml_matrix_type_t matrix_type,
    bml_matrix_precision_t matrix_precision,
    int NB,
    int MB,
    int M,
    int *bsizes,
    bml_distribution_mode_t distrib_mode)
{
    LOG_DEBUG("block matrix with %d blocks\n", NB);

    switch (matrix_type)
    {
        case ellpack:
            // NOTE(review): ellpack is routed to the ellblock implementation
            // (there is no ellpack block constructor here) — presumably
            // intentional; confirm upstream.
            return bml_block_matrix_ellblock(matrix_precision, NB, MB, M,
                                             bsizes, distrib_mode);
            break;
        case ellblock:
            return bml_block_matrix_ellblock(matrix_precision, NB, MB, M,
                                             bsizes, distrib_mode);
            break;
        default:
            LOG_ERROR("unsupported matrix type (type ID %d)\n", matrix_type);
            break;
    }
    return NULL;
}

/** Allocate a matrix without initializing.
 *
 * Note that the matrix \f$ A \f$ will be newly allocated. The
 * function does not check whether the matrix is already allocated.
 *
 * \ingroup allocate_group_C
 *
 * \param matrix_type The matrix type.
 * \param matrix_precision The precision of the matrix.
 * \param N The matrix size.
 * \param M The number of non-zeroes per row.
 * \param distrib_mode The distribution mode.
 * \return The matrix.
 */
bml_matrix_t *
bml_noinit_matrix(
    bml_matrix_type_t matrix_type,
    bml_matrix_precision_t matrix_precision,
    int N,
    int M,
    bml_distribution_mode_t distrib_mode)
{
    // square-matrix convenience wrapper around the rectangular variant
    bml_matrix_dimension_t matrix_dimension = { N, N, M };
    return bml_noinit_rectangular_matrix(matrix_type, matrix_precision,
                                         matrix_dimension, distrib_mode);
}

/** Allocate the zero matrix.
 *
 * Note that the matrix \f$ A \f$ will be newly allocated. The
 * function does not check whether the matrix is already allocated.
 *
 * \ingroup allocate_group_C
 *
 * \param matrix_type The matrix type.
 * \param matrix_precision The precision of the matrix.
 * \param N The matrix size.
 * \param M The number of non-zeroes per row.
 * \param distrib_mode The distribution mode.
 * \return The matrix.
 */
bml_matrix_t *
bml_zero_matrix(
    bml_matrix_type_t matrix_type,
    bml_matrix_precision_t matrix_precision,
    int N,
    int M,
    bml_distribution_mode_t distrib_mode)
{
    LOG_DEBUG("zero matrix of size %d\n", N);
#ifdef DO_MPI
    // distributed matrices are handled wholesale by the distributed2d backend
    if (distrib_mode == distributed)
        return bml_zero_matrix_distributed2d(matrix_type, matrix_precision,
                                             N, M);
    else
#endif
    {
        bml_matrix_dimension_t matrix_dimension = { N, N, M };
        switch (matrix_type)
        {
            case dense:
                return bml_zero_matrix_dense(matrix_precision,
                                             matrix_dimension, distrib_mode);
                break;
            case ellpack:
                return bml_zero_matrix_ellpack(matrix_precision, N, M,
                                               distrib_mode);
                break;
            case ellsort:
                return bml_zero_matrix_ellsort(matrix_precision, N, M,
                                               distrib_mode);
                break;
            case ellblock:
                return bml_zero_matrix_ellblock(matrix_precision, N, M,
                                                distrib_mode);
                break;
            case csr:
                return bml_zero_matrix_csr(matrix_precision, N, M,
                                           distrib_mode);
                break;
            default:
                LOG_ERROR("unknown matrix type\n");
                break;
        }
    }
    return NULL;
}

/** Allocate a random matrix.
 *
 * Note that the matrix \f$ A \f$ will be newly allocated. The
 * function does not check whether the matrix is already allocated.
 *
 * \ingroup allocate_group_C
 *
 * \param matrix_type The matrix type.
 * \param matrix_precision The precision of the matrix.
 * \param N The matrix size.
 * \param M The number of non-zeroes per row.
 * \param distrib_mode The distribution mode.
 * \return The matrix.
 */
bml_matrix_t *
bml_random_matrix(
    bml_matrix_type_t matrix_type,
    bml_matrix_precision_t matrix_precision,
    int N,
    int M,
    bml_distribution_mode_t distrib_mode)
{
#ifdef DO_MPI
    if (distrib_mode == distributed)
        return bml_random_matrix_distributed2d(matrix_type, matrix_precision,
                                               N, M);
    else
#endif
        switch (matrix_type)
        {
            case dense:
                // the dense constructor takes no M argument
                return bml_random_matrix_dense(matrix_precision, N,
                                               distrib_mode);
                break;
            case ellpack:
                return bml_random_matrix_ellpack(matrix_precision, N, M,
                                                 distrib_mode);
                break;
            case ellsort:
                return bml_random_matrix_ellsort(matrix_precision, N, M,
                                                 distrib_mode);
                break;
            case ellblock:
                return bml_random_matrix_ellblock(matrix_precision, N, M,
                                                  distrib_mode);
                break;
            case csr:
                return bml_random_matrix_csr(matrix_precision, N, M,
                                             distrib_mode);
                break;
            default:
                LOG_ERROR("unknown matrix type (type ID %d)\n", matrix_type);
                break;
        }
    return NULL;
}

/** Allocate a banded matrix.
 *
 * Note that the matrix \f$ A \f$ will be newly allocated. The
 * function does not check whether the matrix is already allocated.
 *
 * \ingroup allocate_group_C
 *
 * \param matrix_type The matrix type.
 * \param matrix_precision The precision of the matrix.
 * \param N The matrix size.
 * \param M The bandwidth of the matrix.
 * \param distrib_mode The distribution mode.
 * \return The matrix.
 */
bml_matrix_t *
bml_banded_matrix(
    bml_matrix_type_t matrix_type,
    bml_matrix_precision_t matrix_precision,
    int N,
    int M,
    bml_distribution_mode_t distrib_mode)
{
    LOG_DEBUG("banded matrix of size %d\n", N);
    // NOTE(review): unlike zero/random/identity, there is no DO_MPI
    // distributed2d branch here — confirm whether that is intentional.
    switch (matrix_type)
    {
        case dense:
            return bml_banded_matrix_dense(matrix_precision, N, M,
                                           distrib_mode);
            break;
        case ellpack:
            return bml_banded_matrix_ellpack(matrix_precision, N, M,
                                             distrib_mode);
            break;
        case ellsort:
            return bml_banded_matrix_ellsort(matrix_precision, N, M,
                                             distrib_mode);
            break;
        case ellblock:
            return bml_banded_matrix_ellblock(matrix_precision, N, M,
                                              distrib_mode);
            break;
        case csr:
            return bml_banded_matrix_csr(matrix_precision, N, M,
                                         distrib_mode);
            break;
        default:
            LOG_ERROR("unknown matrix type (type ID %d)\n", matrix_type);
            break;
    }
    return NULL;
}

/** Allocate the identity matrix.
 *
 * Note that the matrix \f$ A \f$ will be newly allocated. The
 * function does not check whether the matrix is already allocated.
 *
 * \ingroup allocate_group_C
 *
 * \param matrix_type The matrix type.
 * \param matrix_precision The precision of the matrix.
 * \param N The matrix size.
 * \param M The number of non-zeroes per row.
 * \param distrib_mode The distribution mode.
 * \return The matrix.
 */
bml_matrix_t *
bml_identity_matrix(
    bml_matrix_type_t matrix_type,
    bml_matrix_precision_t matrix_precision,
    int N,
    int M,
    bml_distribution_mode_t distrib_mode)
{
    LOG_DEBUG("identity matrix of size %d\n", N);
#ifdef DO_MPI
    if (distrib_mode == distributed)
        return bml_identity_matrix_distributed2d(matrix_type,
                                                 matrix_precision, N, M);
    else
#endif
        switch (matrix_type)
        {
            case dense:
                // the dense constructor takes no M argument
                return bml_identity_matrix_dense(matrix_precision, N,
                                                 distrib_mode);
                break;
            case ellpack:
                return bml_identity_matrix_ellpack(matrix_precision, N, M,
                                                   distrib_mode);
                break;
            case ellsort:
                return bml_identity_matrix_ellsort(matrix_precision, N, M,
                                                   distrib_mode);
                break;
            case ellblock:
                return bml_identity_matrix_ellblock(matrix_precision, N, M,
                                                    distrib_mode);
                break;
            case csr:
                return bml_identity_matrix_csr(matrix_precision, N, M,
                                               distrib_mode);
                break;
            default:
                LOG_ERROR("unknown matrix type (type ID %d)\n", matrix_type);
                break;
        }
    return NULL;
}

/** Allocate a default domain for a bml matrix.
 *
 * \ingroup allocate_group_C
 *
 * \param N The number of rows
 * \param M The number of columns
 * \param distrib_mode The distribution mode
 * \return The domain
 */
bml_domain_t *
bml_default_domain(
    int N,
    int M,
    bml_distribution_mode_t distrib_mode)
{
    int avgExtent, nleft;
    int nRanks = bml_getNRanks();

    bml_domain_t *domain = bml_allocate_memory(sizeof(bml_domain_t));

    // per-rank bookkeeping arrays
    domain->localRowMin = bml_allocate_memory(nRanks * sizeof(int));
    domain->localRowMax = bml_allocate_memory(nRanks * sizeof(int));
    domain->localRowExtent = bml_allocate_memory(nRanks * sizeof(int));
    domain->localDispl = bml_allocate_memory(nRanks * sizeof(int));
    domain->localElements = bml_allocate_memory(nRanks * sizeof(int));

    domain->totalProcs = nRanks;
    domain->totalRows = N;
    domain->totalCols = M;

    domain->globalRowMin = 0;
    domain->globalRowMax = domain->totalRows;
    domain->globalRowExtent = domain->globalRowMax - domain->globalRowMin;

    switch (distrib_mode)
    {
        case sequential:
        {
            // Default - each rank contains the entire matrix, even when
            // running distributed
            for (int i = 0; i < nRanks; i++)
            {
                domain->localRowMin[i] = domain->globalRowMin;
                domain->localRowMax[i] = domain->globalRowMax;
                domain->localRowExtent[i] =
                    domain->localRowMax[i] - domain->localRowMin[i];
                domain->localElements[i] =
                    domain->localRowExtent[i] * domain->totalCols;
                domain->localDispl[i] = 0;
            }
        }
            break;

        case distributed:
        {
            // For completely distributed: split rows as evenly as possible,
            // handing one extra row to the first (N mod nRanks) ranks.
            avgExtent = N / nRanks;
            domain->maxLocalExtent = ceil((float) N / (float) nRanks);
            domain->minLocalExtent = avgExtent;

            for (int i = 0; i < nRanks; i++)
            {
                domain->localRowExtent[i] = avgExtent;
            }
            nleft = N - nRanks * avgExtent;
            if (nleft > 0)
            {
                for (int i = 0; i < nleft; i++)
                {
                    domain->localRowExtent[i]++;
                }
            }

            /** For first rank */
            domain->localRowMin[0] = domain->globalRowMin;
            domain->localRowMax[0] = domain->localRowExtent[0];

            /** For middle ranks */
            for (int i = 1; i < (nRanks - 1); i++)
            {
                domain->localRowMin[i] = domain->localRowMax[i - 1];
                domain->localRowMax[i] =
                    domain->localRowMin[i] + domain->localRowExtent[i];
            }

            /** For last rank */
            if (nRanks > 1)
            {
                int last = nRanks - 1;
                domain->localRowMin[last] = domain->localRowMax[last - 1];
                domain->localRowMax[last] =
                    domain->localRowMin[last] + domain->localRowExtent[last];
            }

            /** Number of elements and displacement per rank */
            for (int i = 0; i < nRanks; i++)
            {
                domain->localElements[i] =
                    domain->localRowExtent[i] * domain->totalCols;
                domain->localDispl[i] = (i == 0) ? 0
                    : domain->localDispl[i - 1] + domain->localElements[i - 1];
            }
        }
            break;

        case graph_distributed:
            // NOTE(review): "distibuted" typo in the message text below
            LOG_ERROR("graph_distibuted not available\n");
            break;

        default:
            LOG_ERROR("unknown distribution method\n");
            break;
    }

    return domain;
}

/** Update a domain for a bml matrix.
* * \ingroup allocate_group_C * * \param A Matrix with domain * \param localPartMin First part on each rank * \param localPartMax Last part on each rank * \param nnodesInPart Number of nodes in each part */ void bml_update_domain( bml_matrix_t * A, int *localPartMin, int *localPartMax, int *nnodesInPart) { switch (bml_get_type(A)) { case dense: bml_update_domain_dense(A, localPartMin, localPartMax, nnodesInPart); break; case ellpack: bml_update_domain_ellpack(A, localPartMin, localPartMax, nnodesInPart); break; case ellsort: bml_update_domain_ellsort(A, localPartMin, localPartMax, nnodesInPart); break; default: LOG_ERROR("unknown matrix type (%d)\n", bml_get_type(A)); break; } }
GB_binop__isge_fp64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB (_AaddB__isge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__isge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__isge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_fp64)
// A*D function (colscale): GB (_AxD__isge_fp64)
// D*A function (rowscale): GB (_DxB__isge_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_fp64)
// C=scalar+B GB (_bind1st__isge_fp64)
// C=scalar+B' GB (_bind1st_tran__isge_fp64)
// C=A+scalar GB (_bind2nd__isge_fp64)
// C=A'+scalar GB (_bind2nd_tran__isge_fp64)

// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij >= bij)

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    double bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x >= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGE || GxB_NO_FP64 || GxB_NO_ISGE_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISGE is none of those, so this variant is compiled out.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isge_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable; emitted by the code generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isge_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__isge_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isge_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isge_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b if B is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        double bij = Bx [p] ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isge_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        double aij = Ax [p] ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = (x >= aij) ; \
}

GrB_Info GB (_bind1st_tran__isge_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = (aij >= y) ; \
}

GrB_Info GB (_bind2nd_tran__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
opencl_keychain_fmt_plug.c
/*
 * Modified by Dhiru Kholia <dhiru at openwall.com> for Keychain format.
 *
 * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

#ifdef HAVE_OPENCL

#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_keychain;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_keychain);
#else

#include <stdint.h>
#include <string.h>
#include <openssl/des.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#include "arch.h"
#include "formats.h"
#include "common.h"
#include "misc.h"
#include "options.h"
#include "jumbo.h"
#include "common-opencl.h"

#define FORMAT_LABEL "keychain-opencl"
#define FORMAT_NAME "Mac OS X Keychain"
#define FORMAT_TAG "$keychain$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL 3DES"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
// byte-swap a 32-bit word
#define SWAP(n) \
    (((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24))
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(*salt_struct)
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_ALIGN 4
// on-disk field sizes of the keychain blob: salt, IV, ciphertext
#define SALTLEN 20
#define IVLEN 8
#define CTLEN 48

// host<->device structures; layouts presumably mirror the OpenCL kernel's
// definitions — confirm against the kernel source before changing.
typedef struct {
    uint32_t length;
    uint8_t v[PLAINTEXT_LENGTH];
} keychain_password;

typedef struct {
    uint32_t v[32/4];
} keychain_hash;

typedef struct {
    uint32_t iterations;
    uint32_t outlen;
    uint32_t skip_bytes;
    uint8_t length;
    uint8_t salt[64];
} keychain_salt;

static int *cracked;
static int any_cracked;
static struct fmt_main *self;

// self-test vectors: "$keychain$*<salt>*<iv>*<ciphertext>", plaintext
static struct fmt_tests keychain_tests[] = {
    {"$keychain$*10f7445c8510fa40d9ef6b4e0f8c772a9d37e449*f3d19b2a45cdcccb*8c3c3b1c7d48a24dad4ccbd4fd794ca9b0b3f1386a0a4527f3548bfe6e2f1001804b082076641bbedbc9f3a7c33c084b", "password"},
    // these were generated with pass_gen.pl. NOTE, they ALL have the data
    // (which gets encrypted) which was decrypted from the above hash.
    {"$keychain$*a88cd6fbaaf40bc5437eee015a0f95ab8ab70545*b12372b1b7cb5c1f*1f5c596bcdd015afc126bc86f42dd092cb9d531d14a0aafaa89283f1bebace60562d497332afbd952fd329cc864144ec", "password"},
    {"$keychain$*23328e264557b93204dc825c46a25f7fb1e17d4a*19a9efde2ca98d30*6ac89184134758a95c61bd274087ae0cffcf49f433c7f91edea98bd4fd60094e2936d99e4d985dec98284379f23259c0", "hhh"},
    {"$keychain$*927717d8509db73aa47c5e820e3a381928b5e048*eef33a4a1483ae45*a52691580f17e295b8c2320947968503c605b2784bfe4851077782139f0de46f71889835190c361870baa56e2f4e9e43", "JtR-Jumbo"},
    {"$keychain$*1fab88d0b8ea1a3d303e0aef519796eb29e46299*3358b0e77d60892f*286f975dcd191024227514ed9939d0fa94034294ba1eca6d5c767559e75e944b5a2fcb54fd696be64c64f9d069ce628a", "really long password -----------------------------"},
    {NULL}
};

// parsed salt for the current candidate hash
static struct custom_salt {
    unsigned char salt[SALTLEN];
    unsigned char iv[IVLEN];
    unsigned char ct[CTLEN];
} *salt_struct;

static cl_int cl_error;
static keychain_password *inbuffer;
static keychain_hash *outbuffer;
static keychain_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
size_t insize, outsize, settingsize, cracked_size;

#define STEP 0
#define SEED 256

// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl_autotune.h" #include "memdbg.h" static const char * warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static void create_clobj(size_t gws, struct fmt_main *self) { insize = sizeof(keychain_password) * gws; outsize = sizeof(keychain_hash) * gws; settingsize = sizeof(keychain_salt); cracked_size = sizeof(*cracked) * gws; inbuffer = mem_calloc(1, insize); outbuffer = mem_alloc(outsize); cracked = mem_calloc(1, cracked_size); /// Allocate memory mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem setting"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); } static void release_clobj(void) { if (cracked) { HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); MEM_FREE(inbuffer); MEM_FREE(outbuffer); MEM_FREE(cracked); } } static void done(void) { if (autotuned) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); autotuned--; } } static void init(struct fmt_main *_self) { self 
= _self; opencl_prepare_dev(gpu_id); } static void reset(struct db_main *db) { if (!autotuned) { char build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d", PLAINTEXT_LENGTH, (int)sizeof(currentsalt.salt), (int)sizeof(outbuffer->v)); opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl", gpu_id, build_opts); crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); // Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj, release_clobj, sizeof(keychain_password), 0, db); // Auto tune execution from shared/included code. autotune_run(self, 1, 0, 1000); } } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; int extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; if ((p = strtokm(ctcopy, "*")) == NULL) /* salt */ goto err; if (hexlenl(p, &extra) != SALTLEN * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iv */ goto err; if (hexlenl(p, &extra) != IVLEN * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* ciphertext */ goto err; if (hexlenl(p, &extra) != CTLEN * 2 || extra) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static struct custom_salt *salt_struct; if (!salt_struct) salt_struct = mem_calloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD); ctcopy += FORMAT_TAG_LEN; /* skip over "$keychain$*" */ p = strtokm(ctcopy, "*"); for (i = 0; i < SALTLEN; i++) salt_struct->salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); for (i = 0; i < IVLEN; i++) salt_struct->iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = 
strtokm(NULL, "*"); for (i = 0; i < CTLEN; i++) salt_struct->ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)salt_struct; } static void set_salt(void *salt) { salt_struct = (struct custom_salt *)salt; memcpy((char*)currentsalt.salt, salt_struct->salt, 20); currentsalt.length = 20; currentsalt.iterations = 1000; currentsalt.outlen = 24; currentsalt.skip_bytes = 0; HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting, CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL), "Copy salt to gpu"); } #undef set_key static void set_key(char *key, int index) { uint8_t length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; inbuffer[index].length = length; memcpy(inbuffer[index].v, key, length); } static char *get_key(int index) { static char ret[PLAINTEXT_LENGTH + 1]; uint8_t length = inbuffer[index].length; memcpy(ret, inbuffer[index].v, length); ret[length] = '\0'; return ret; } static int kcdecrypt(unsigned char *key, unsigned char *iv, unsigned char *data) { unsigned char out[CTLEN]; DES_cblock key1, key2, key3; DES_cblock ivec; DES_key_schedule ks1, ks2, ks3; memset(out, 0, sizeof(out)); memcpy(key1, key, 8); memcpy(key2, key + 8, 8); memcpy(key3, key + 16, 8); DES_set_key((DES_cblock *) key1, &ks1); DES_set_key((DES_cblock *) key2, &ks2); DES_set_key((DES_cblock *) key3, &ks3); memcpy(ivec, iv, 8); DES_ede3_cbc_encrypt(data, out, CTLEN, &ks1, &ks2, &ks3, &ivec, DES_DECRYPT); /* possible bug here, is this assumption (pad of 4) always valid? */ if (out[47] != 4 || check_pkcs_pad(out, CTLEN, 8) < 0) return -1; return 0; } #if 0 //#ifdef DEBUG static void print_hex(unsigned char *str, int len) { int i; for (i = 0; i < len; ++i) printf("%02x", str[i]); printf("\n"); } #endif static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index; size_t *lws = local_work_size ? 
&local_work_size : NULL; global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size); if (any_cracked) { memset(cracked, 0, cracked_size); any_cracked = 0; } /// Copy data to gpu BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); /// Run kernel BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run kernel"); /// Read the result back BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back"); if (ocl_autotune_running) return count; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) if (!kcdecrypt((unsigned char*)outbuffer[index].v, salt_struct->iv, salt_struct->ct)) { cracked[index] = 1; #ifdef _OPENMP #pragma omp atomic #endif any_cracked |= 1; } return count; } static int cmp_all(void *binary, int count) { return any_cracked; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_opencl_keychain = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT, { NULL }, { FORMAT_TAG }, keychain_tests }, { init, done, reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
filter.c
/* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  * Neither the name of NVIDIA CORPORATION nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * OpenACC 5x5 blur tutorial kernels.  Each variant applies the same
 * 5x5 weighted filter (weights sum to 35) to a `ch`-channel image of
 * `w` x `h` pixels with row pitch `step`, writing the photo-negative
 * (255 - blurred value) of the first three channels to `out`.
 *
 * NOTE(fix): the original declared parameters as `unsigned restrict char *`.
 * C11 6.7.3 only allows `restrict` on pointer types (`unsigned char
 * *restrict`), so the file did not compile.  The qualifier is dropped
 * here for portability (it was a hint only; re-add as `*restrict` in a
 * C99-only build if desired).
 */

#define MAX(X,Y) ((X>Y) ? X:Y)
#define MIN(X,Y) ((X<Y) ? X:Y)

/* Single-region version: whole image copied to the device at once. */
void blur5(unsigned char *imgData, unsigned char *out,
           long w, long h, long ch, long step)
{
    long x, y;
    const int filtersize = 5;
    double filter[5][5] = {
        1, 1, 1, 1, 1,
        1, 2, 2, 2, 1,
        1, 2, 3, 2, 1,
        1, 2, 2, 2, 1,
        1, 1, 1, 1, 1
    };
    // The denominator for scale should be the sum
    // of non-zero elements in the filter.
    float scale = 1.0 / 35.0;

#pragma acc parallel loop collapse(2) gang vector copyin(imgData[0:w * h * ch]) copyout(out[0:w * h * ch])
    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            float blue = 0.0, green = 0.0, red = 0.0;
            for (int fy = 0; fy < filtersize; fy++) {
                long iy = y - (filtersize / 2) + fy;
                for (int fx = 0; fx < filtersize; fx++) {
                    long ix = x - (filtersize / 2) + fx;
                    /* Skip taps that fall outside the image. */
                    if ((iy < 0) || (ix < 0) || (iy >= h) || (ix >= w))
                        continue;
                    blue  += filter[fy][fx] * (float)imgData[iy * step + ix * ch];
                    green += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 1];
                    red   += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 2];
                }
            }
            out[y * step + x * ch]     = 255 - (scale * blue);
            out[y * step + x * ch + 1] = 255 - (scale * green);
            out[y * step + x * ch + 2] = 255 - (scale * red);
        }
    }
}

/* Blocked version: image processed in `nblocks` horizontal strips, but
 * the whole image still lives in one enclosing data region. */
void blur5_blocked(unsigned char *imgData, unsigned char *out,
                   long w, long h, long ch, long step)
{
    long x, y;
    const int filtersize = 5, nblocks = 8;
    double filter[5][5] = {
        1, 1, 1, 1, 1,
        1, 2, 2, 2, 1,
        1, 2, 3, 2, 1,
        1, 2, 2, 2, 1,
        1, 1, 1, 1, 1
    };
    // The denominator for scale should be the sum
    // of non-zero elements in the filter.
    float scale = 1.0 / 35.0;
    long blocksize = h / nblocks;

#pragma acc data copyin(imgData[:w*h*ch], filter) copyout(out[:w*h*ch])
    for (long blocky = 0; blocky < nblocks; blocky++) {
        // For data copies we need to include the ghost zones for the filter
        long starty = blocky * blocksize;
        long endy = starty + blocksize;
#pragma acc parallel loop collapse(2) gang vector
        for (y = starty; y < endy; y++) {
            for (x = 0; x < w; x++) {
                float blue = 0.0, green = 0.0, red = 0.0;
                for (int fy = 0; fy < filtersize; fy++) {
                    long iy = y - (filtersize / 2) + fy;
                    for (int fx = 0; fx < filtersize; fx++) {
                        long ix = x - (filtersize / 2) + fx;
                        if ((iy < 0) || (ix < 0) || (iy >= h) || (ix >= w))
                            continue;
                        blue  += filter[fy][fx] * (float)imgData[iy * step + ix * ch];
                        green += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 1];
                        red   += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 2];
                    }
                }
                out[y * step + x * ch]     = 255 - (scale * blue);
                out[y * step + x * ch + 1] = 255 - (scale * green);
                out[y * step + x * ch + 2] = 255 - (scale * red);
            }
        }
    }
}

/* Update version: device arrays are created once, then each strip is
 * moved with `acc update` (ghost rows included on the way in).
 *
 * NOTE(fix): `create(imgData[w*h*ch])` named a single element; an array
 * section needs the `[:length]` form, matching blur5_blocked above. */
void blur5_update(unsigned char *imgData, unsigned char *out,
                  long w, long h, long ch, long step)
{
    long x, y;
    const int filtersize = 5, nblocks = 8;
    double filter[5][5] = {
        1, 1, 1, 1, 1,
        1, 2, 2, 2, 1,
        1, 2, 3, 2, 1,
        1, 2, 2, 2, 1,
        1, 1, 1, 1, 1
    };
    // The denominator for scale should be the sum
    // of non-zero elements in the filter.
    float scale = 1.0 / 35.0;
    long blocksize = h / nblocks;

#pragma acc data create(imgData[:w*h*ch], out[:w*h*ch]) copyin(filter)
    {
        for (long blocky = 0; blocky < nblocks; blocky++) {
            // For data copies we need to include the ghost zones for the filter
            long starty = MAX(0, blocky * blocksize - filtersize / 2);
            long endy = MIN(h, starty + blocksize + filtersize / 2);
#pragma acc update device(imgData[starty*step:(endy-starty)*step])
            starty = blocky * blocksize;
            endy = starty + blocksize;
#pragma acc parallel loop collapse(2) gang vector
            for (y = starty; y < endy; y++) {
                for (x = 0; x < w; x++) {
                    float blue = 0.0, green = 0.0, red = 0.0;
                    for (int fy = 0; fy < filtersize; fy++) {
                        long iy = y - (filtersize / 2) + fy;
                        for (int fx = 0; fx < filtersize; fx++) {
                            long ix = x - (filtersize / 2) + fx;
                            if ((iy < 0) || (ix < 0) || (iy >= h) || (ix >= w))
                                continue;
                            blue  += filter[fy][fx] * (float)imgData[iy * step + ix * ch];
                            green += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 1];
                            red   += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 2];
                        }
                    }
                    out[y * step + x * ch]     = 255 - (scale * blue);
                    out[y * step + x * ch + 1] = 255 - (scale * green);
                    out[y * step + x * ch + 2] = 255 - (scale * red);
                }
            }
#pragma acc update self(out[starty*step:blocksize*step])
        }
    }
}

/* Pipelined version: same as blur5_update but transfers and compute are
 * spread over three async queues so they overlap. */
void blur5_pipelined(unsigned char *imgData, unsigned char *out,
                     long w, long h, long ch, long step)
{
    long x, y;
    const int filtersize = 5, nblocks = 8;
    double filter[5][5] = {
        1, 1, 1, 1, 1,
        1, 2, 2, 2, 1,
        1, 2, 3, 2, 1,
        1, 2, 2, 2, 1,
        1, 1, 1, 1, 1
    };
    // The denominator for scale should be the sum
    // of non-zero elements in the filter.
    float scale = 1.0 / 35.0;
    long blocksize = h / nblocks;

#pragma acc data create(imgData[:w*h*ch], out[:w*h*ch])
    {
        for (long blocky = 0; blocky < nblocks; blocky++) {
            // For data copies we need to include the ghost zones for the filter
            long starty = MAX(0, blocky * blocksize - filtersize / 2);
            long endy = MIN(h, starty + blocksize + filtersize / 2);
#pragma acc update device(imgData[starty*step:(endy-starty)*step]) async(blocky%3+1)
            starty = blocky * blocksize;
            endy = starty + blocksize;
#pragma acc parallel loop collapse(2) gang vector async(blocky%3+1)
            for (y = starty; y < endy; y++) {
                for (x = 0; x < w; x++) {
                    float blue = 0.0, green = 0.0, red = 0.0;
                    for (int fy = 0; fy < filtersize; fy++) {
                        long iy = y - (filtersize / 2) + fy;
                        for (int fx = 0; fx < filtersize; fx++) {
                            long ix = x - (filtersize / 2) + fx;
                            if ((iy < 0) || (ix < 0) || (iy >= h) || (ix >= w))
                                continue;
                            blue  += filter[fy][fx] * (float)imgData[iy * step + ix * ch];
                            green += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 1];
                            red   += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 2];
                        }
                    }
                    out[y * step + x * ch]     = 255 - (scale * blue);
                    out[y * step + x * ch + 1] = 255 - (scale * green);
                    out[y * step + x * ch + 2] = 255 - (scale * red);
                }
            }
#pragma acc update self(out[starty*step:blocksize*step]) async(blocky%3+1)
        }
#pragma acc wait
    }
}

/* Multi-GPU pipelined version: one OpenMP thread per NVIDIA device.
 * The OpenACC/OpenMP runtime calls are guarded so the file still builds
 * (serially) with compilers that provide neither. */
#ifdef _OPENACC
#include <openacc.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif

void blur5_pipelined_multi(unsigned char *imgData, unsigned char *out,
                           long w, long h, long ch, long step)
{
    const int filtersize = 5, nblocks = 32;
    double filter[5][5] = {
        1, 1, 1, 1, 1,
        1, 2, 2, 2, 1,
        1, 2, 3, 2, 1,
        1, 2, 2, 2, 1,
        1, 1, 1, 1, 1
    };
    // The denominator for scale should be the sum
    // of non-zero elements in the filter.
    float scale = 1.0 / 35.0;
    long blocksize = h / nblocks;

#if defined(_OPENMP) && defined(_OPENACC)
#pragma omp parallel num_threads(acc_get_num_devices(acc_device_nvidia))
#endif
    {
#ifdef _OPENMP
        int myid = omp_get_thread_num();
#else
        int myid = 0;
#endif
#ifdef _OPENACC
        acc_set_device_num(myid, acc_device_nvidia);
#else
        (void)myid;
#endif
        int queue = 1;
#pragma acc data create(imgData[:w*h*ch], out[:w*h*ch])
        {
#ifdef _OPENMP
#pragma omp for schedule(static)
#endif
            for (long blocky = 0; blocky < nblocks; blocky++) {
                // For data copies we need to include the ghost zones for the filter
                long starty = MAX(0, blocky * blocksize - filtersize / 2);
                long endy = MIN(h, starty + blocksize + filtersize / 2);
#pragma acc update device(imgData[starty*step:(endy-starty)*step]) async(queue)
                starty = blocky * blocksize;
                endy = starty + blocksize;
#pragma acc parallel loop collapse(2) gang vector async(queue)
                for (long y = starty; y < endy; y++) {
                    for (long x = 0; x < w; x++) {
                        float blue = 0.0, green = 0.0, red = 0.0;
                        for (int fy = 0; fy < filtersize; fy++) {
                            long iy = y - (filtersize / 2) + fy;
                            for (int fx = 0; fx < filtersize; fx++) {
                                long ix = x - (filtersize / 2) + fx;
                                if ((iy < 0) || (ix < 0) || (iy >= h) || (ix >= w))
                                    continue;
                                blue  += filter[fy][fx] * (float)imgData[iy * step + ix * ch];
                                green += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 1];
                                red   += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 2];
                            }
                        }
                        out[y * step + x * ch]     = 255 - (scale * blue);
                        out[y * step + x * ch + 1] = 255 - (scale * green);
                        out[y * step + x * ch + 2] = 255 - (scale * red);
                    }
                }
#pragma acc update self(out[starty*step:blocksize*step]) async(queue)
                queue = (queue % 3) + 1;
            }
#pragma acc wait
        }
    }
}
convolution_3x3_pack16.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Winograd F(6,3) kernel transform for pack-16 (AVX-512) layout.
// Expands each 3x3 kernel to an 8x8 tile (G * g * G^T via the two passes
// below), then interleaves into the 16b-16a blocked layout consumed by
// convolution_winograd_dot_pack16_avx512.  Assumes inch and outch are
// multiples of 16.
static void conv3x3s1_winograd63_transform_kernel_pack16_avx512(const Mat& kernel, Mat& kernel_tm_pack16, int inch, int outch, const Option& opt)
{
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // G matrix rows for F(6,3) (applied to each kernel row/column)
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : first pass, transform each of the 3 kernel rows -> 8x3
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : second pass, transform columns -> full 8x8 tile
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = 16b-16a-inch/16a-64-outch/16b
    kernel_tm_pack16.create(inch / 16, 64, outch / 16, (size_t)4u * 16 * 16, 16 * 16);

    for (int q = 0; q + 15 < outch; q += 16)
    {
        Mat g0 = kernel_tm_pack16.channel(q / 16);

        for (int k = 0; k < 64; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p + 15 < inch; p += 16)
            {
                // gather coefficient k for a 16x16 in/out channel tile
                for (int i = 0; i < 16; i++)
                {
                    for (int j = 0; j < 16; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);

                        g00[0] = k00[k];

                        g00++;
                    }
                }
            }
        }
    }
}

// Winograd F(6,3) 3x3 stride-1 convolution driver (pack-16 / AVX-512):
// pad input to a multiple of 6 (+2 border), transform input tiles,
// run the transformed-domain dot product, transform output tiles back,
// and crop to the requested output size.
static void conv3x3s1_winograd63_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2 (each 8x8 input tile yields a 6x6 output tile)
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 6;
        int h_tiles = outh / 6;
        const int tiles = w_tiles * h_tiles;

        bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        conv3x3s1_winograd63_transform_input_pack16_avx512(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    bottom_blob_bordered = Mat();  // release padded input early
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    convolution_winograd_dot_pack16_avx512(bottom_blob_tm, outch, kernel_tm, top_blob_tm, opt);
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;  // no crop needed, write in place
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        conv3x3s1_winograd63_transform_output_pack16_avx512(top_blob_tm, top_blob_bordered, bias, opt);
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// Winograd F(4,3) kernel transform for pack-16 (AVX-512) layout.
// Same structure as the F(6,3) variant above, with 6x6 tiles.
static void conv3x3s1_winograd43_transform_kernel_pack16_avx512(const Mat& kernel, Mat& kernel_tm_pack16, int inch, int outch, const Option& opt)
{
    // winograd43 transform kernel
    Mat kernel_tm(6 * 6, inch, outch);

    // G matrix rows for F(4,3)
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : row pass -> 6x3
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U : column pass -> full 6x6 tile
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = 16b-16a-inch/16a-36-outch/16b
    kernel_tm_pack16.create(inch / 16, 36, outch / 16, (size_t)4u * 16 * 16, 16 * 16);

    for (int q = 0; q + 15 < outch; q += 16)
    {
        Mat g0 = kernel_tm_pack16.channel(q / 16);

        for (int k = 0; k < 36; k++)
        {
            float* g00 = g0.row<float>(k);

            for (int p = 0; p + 15 < inch; p += 16)
            {
                for (int i = 0; i < 16; i++)
                {
                    for (int j = 0; j < 16; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);

                        g00[0] = k00[k];

                        g00++;
                    }
                }
            }
        }
    }
}

// Winograd F(4,3) 3x3 stride-1 convolution driver (pack-16 / AVX-512).
// Identical pipeline to the F(6,3) driver, with 4x4 output tiles.
static void conv3x3s1_winograd43_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 4;
        int h_tiles = outh / 4;
        const int tiles = w_tiles * h_tiles;

        bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        conv3x3s1_winograd43_transform_input_pack16_avx512(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    convolution_winograd_dot_pack16_avx512(bottom_blob_tm, outch, kernel_tm, top_blob_tm, opt);
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        conv3x3s1_winograd43_transform_output_pack16_avx512(top_blob_tm, top_blob_bordered, bias, opt);
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// Winograd F(2,3) kernel transform for pack-16 (AVX-512) layout.
// Same structure again, with 4x4 tiles.
static void conv3x3s1_winograd23_transform_kernel_pack16_avx512(const Mat& kernel, Mat& kernel_tm_pack16, int inch, int outch, const Option& opt)
{
    // winograd23 transform kernel
    Mat kernel_tm(4 * 4, inch, outch);

    // G matrix rows for F(2,3)
    const float ktm[4][3] = {
        {1.0f, 0.0f, 0.0f},
        {1.0f / 2, 1.0f / 2, 1.0f / 2},
        {1.0f / 2, -1.0f / 2, 1.0f / 2},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : row pass -> 4x3
            float tmp[4][3];
            for (int i = 0; i < 4; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U : column pass -> full 4x4 tile
            for (int j = 0; j < 4; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 4; i++)
                {
                    kernel_tm0[j * 4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 16-inch-outch
    // dst = pb-pa-inch/pa-16-outch/pb
    kernel_tm_pack16.create(inch / 16, 16, outch / 16, (size_t)4u * 16 * 16, 16 * 16);

    for (int q = 0; q + 15 < outch; q += 16)
    {
        Mat g0 = kernel_tm_pack16.channel(q / 16);

        for (int k = 0; k < 16; k++)
        {
            float* g00 = g0.row<float>(k);

            for (int p = 0; p + 15 < inch; p += 16)
            {
                for (int i = 0; i < 16; i++)
                {
                    for (int j = 0; j < 16; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);

                        g00[0] = k00[k];

                        g00++;
                    }
                }
            }
        }
    }
}

// Winograd F(2,3) 3x3 stride-1 convolution driver (pack-16 / AVX-512).
// Identical pipeline, with 2x2 output tiles.
static void conv3x3s1_winograd23_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 2n+2
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 1) / 2 * 2;
    outh = (outh + 1) / 2 * 2;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 2;
        int h_tiles = outh / 2;
        int tiles = w_tiles * h_tiles;

        bottom_blob_tm.create(tiles, 16, inch, elemsize, elempack, opt.workspace_allocator);
        conv3x3s1_winograd23_transform_input_pack16_avx512(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    convolution_winograd_dot_pack16_avx512(bottom_blob_tm, outch, kernel_tm, top_blob_tm, opt);
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        conv3x3s1_winograd23_transform_output_pack16_avx512(top_blob_tm, top_blob_bordered, bias, opt);
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
lenet.c
#include "lenet.h" #include <memory.h> #include <time.h> #include <stdlib.h> #include <math.h> #define GETLENGTH(array) (sizeof(array)/sizeof(*(array))) #define GETCOUNT(array) (sizeof(array)/sizeof(double)) #define FOREACH(i,count) for (int i = 0; i < count; ++i) #define CONVOLUTE_VALID(input,output,weight) \ { \ FOREACH(o0,GETLENGTH(output)) \ FOREACH(o1,GETLENGTH(*(output))) \ FOREACH(w0,GETLENGTH(weight)) \ FOREACH(w1,GETLENGTH(*(weight))) \ (output)[o0][o1] += (input)[o0 + w0][o1 + w1] * (weight)[w0][w1]; \ } #define CONVOLUTE_FULL(input,output,weight) \ { \ FOREACH(i0,GETLENGTH(input)) \ FOREACH(i1,GETLENGTH(*(input))) \ FOREACH(w0,GETLENGTH(weight)) \ FOREACH(w1,GETLENGTH(*(weight))) \ (output)[i0 + w0][i1 + w1] += (input)[i0][i1] * (weight)[w0][w1]; \ } #define CONVOLUTION_FORWARD(input,output,weight,bias,action) \ { \ for (int x = 0; x < GETLENGTH(weight); ++x) \ for (int y = 0; y < GETLENGTH(*weight); ++y) \ CONVOLUTE_VALID(input[x], output[y], weight[x][y]); \ FOREACH(j, GETLENGTH(output)) \ FOREACH(i, GETCOUNT(output[j])) \ ((double *)output[j])[i] = action(((double *)output[j])[i] + bias[j]); \ } #define CONVOLUTION_BACKWARD(input,inerror,outerror,weight,wd,bd,actiongrad)\ { \ for (int x = 0; x < GETLENGTH(weight); ++x) \ for (int y = 0; y < GETLENGTH(*weight); ++y) \ CONVOLUTE_FULL(outerror[y], inerror[x], weight[x][y]); \ FOREACH(i, GETCOUNT(inerror)) \ ((double *)inerror)[i] *= actiongrad(((double *)input)[i]); \ FOREACH(j, GETLENGTH(outerror)) \ FOREACH(i, GETCOUNT(outerror[j])) \ bd[j] += ((double *)outerror[j])[i]; \ for (int x = 0; x < GETLENGTH(weight); ++x) \ for (int y = 0; y < GETLENGTH(*weight); ++y) \ CONVOLUTE_VALID(input[x], wd[x][y], outerror[y]); \ } #define SUBSAMP_MAX_FORWARD(input,output) \ { \ const int len0 = GETLENGTH(*(input)) / GETLENGTH(*(output)); \ const int len1 = GETLENGTH(**(input)) / GETLENGTH(**(output)); \ FOREACH(i, GETLENGTH(output)) \ FOREACH(o0, GETLENGTH(*(output))) \ FOREACH(o1, GETLENGTH(**(output))) \ { \ int 
x0 = 0, x1 = 0, ismax; \ FOREACH(l0, len0) \ FOREACH(l1, len1) \ { \ ismax = input[i][o0*len0 + l0][o1*len1 + l1] > input[i][o0*len0 + x0][o1*len1 + x1];\ x0 += ismax * (l0 - x0); \ x1 += ismax * (l1 - x1); \ } \ output[i][o0][o1] = input[i][o0*len0 + x0][o1*len1 + x1]; \ } \ } #define SUBSAMP_MAX_BACKWARD(input,inerror,outerror) \ { \ const int len0 = GETLENGTH(*(inerror)) / GETLENGTH(*(outerror)); \ const int len1 = GETLENGTH(**(inerror)) / GETLENGTH(**(outerror)); \ FOREACH(i, GETLENGTH(outerror)) \ FOREACH(o0, GETLENGTH(*(outerror))) \ FOREACH(o1, GETLENGTH(**(outerror))) \ { \ int x0 = 0, x1 = 0, ismax; \ FOREACH(l0, len0) \ FOREACH(l1, len1) \ { \ ismax = input[i][o0*len0 + l0][o1*len1 + l1] > input[i][o0*len0 + x0][o1*len1 + x1];\ x0 += ismax * (l0 - x0); \ x1 += ismax * (l1 - x1); \ } \ inerror[i][o0*len0 + x0][o1*len1 + x1] = outerror[i][o0][o1]; \ } \ } #define DOT_PRODUCT_FORWARD(input,output,weight,bias,action) \ { \ for (int x = 0; x < GETLENGTH(weight); ++x) \ for (int y = 0; y < GETLENGTH(*weight); ++y) \ ((double *)output)[y] += ((double *)input)[x] * weight[x][y]; \ FOREACH(j, GETLENGTH(bias)) \ ((double *)output)[j] = action(((double *)output)[j] + bias[j]); \ } #define DOT_PRODUCT_BACKWARD(input,inerror,outerror,weight,wd,bd,actiongrad) \ { \ for (int x = 0; x < GETLENGTH(weight); ++x) \ for (int y = 0; y < GETLENGTH(*weight); ++y) \ ((double *)inerror)[x] += ((double *)outerror)[y] * weight[x][y]; \ FOREACH(i, GETCOUNT(inerror)) \ ((double *)inerror)[i] *= actiongrad(((double *)input)[i]); \ FOREACH(j, GETLENGTH(outerror)) \ bd[j] += ((double *)outerror)[j]; \ for (int x = 0; x < GETLENGTH(weight); ++x) \ for (int y = 0; y < GETLENGTH(*weight); ++y) \ wd[x][y] += ((double *)input)[x] * ((double *)outerror)[y]; \ } double relu(double x) { return x*(x > 0); } double relugrad(double y) { return y > 0; } static void forward(LeNet5 *lenet, Feature *features, double(*action)(double)) { CONVOLUTION_FORWARD(features->input, features->layer1, 
lenet->weight0_1, lenet->bias0_1, action); SUBSAMP_MAX_FORWARD(features->layer1, features->layer2); CONVOLUTION_FORWARD(features->layer2, features->layer3, lenet->weight2_3, lenet->bias2_3, action); SUBSAMP_MAX_FORWARD(features->layer3, features->layer4); CONVOLUTION_FORWARD(features->layer4, features->layer5, lenet->weight4_5, lenet->bias4_5, action); DOT_PRODUCT_FORWARD(features->layer5, features->output, lenet->weight5_6, lenet->bias5_6, action); } static void backward(LeNet5 *lenet, LeNet5 *deltas, Feature *errors, Feature *features, double(*actiongrad)(double)) { DOT_PRODUCT_BACKWARD(features->layer5, errors->layer5, errors->output, lenet->weight5_6, deltas->weight5_6, deltas->bias5_6, actiongrad); CONVOLUTION_BACKWARD(features->layer4, errors->layer4, errors->layer5, lenet->weight4_5, deltas->weight4_5, deltas->bias4_5, actiongrad); SUBSAMP_MAX_BACKWARD(features->layer3, errors->layer3, errors->layer4); CONVOLUTION_BACKWARD(features->layer2, errors->layer2, errors->layer3, lenet->weight2_3, deltas->weight2_3, deltas->bias2_3, actiongrad); SUBSAMP_MAX_BACKWARD(features->layer1, errors->layer1, errors->layer2); CONVOLUTION_BACKWARD(features->input, errors->input, errors->layer1, lenet->weight0_1, deltas->weight0_1, deltas->bias0_1, actiongrad); } static inline void load_input(Feature *features, image input) { double (*layer0)[LENGTH_FEATURE0][LENGTH_FEATURE0] = features->input; const long sz = sizeof(image) / sizeof(**input); double mean = 0, std = 0; FOREACH(j, sizeof(image) / sizeof(*input)) FOREACH(k, sizeof(*input) / sizeof(**input)) { mean += input[j][k]; std += input[j][k] * input[j][k]; } mean /= sz; std = sqrt(std / sz - mean*mean); FOREACH(j, sizeof(image) / sizeof(*input)) FOREACH(k, sizeof(*input) / sizeof(**input)) { layer0[0][j + PADDING][k + PADDING] = (input[j][k] - mean) / std; } } static inline void softmax(double input[OUTPUT], double loss[OUTPUT], int label, int count) { double inner = 0; for (int i = 0; i < count; ++i) { double res = 0; for 
(int j = 0; j < count; ++j) { res += exp(input[j] - input[i]); } loss[i] = 1. / res; inner -= loss[i] * loss[i]; } inner += loss[label]; for (int i = 0; i < count; ++i) { loss[i] *= (i == label) - loss[i] - inner; } } static void load_target(Feature *features, Feature *errors, int label) { double *output = (double *)features->output; double *error = (double *)errors->output; softmax(output, error, label, GETCOUNT(features->output)); } static uint8 get_result(Feature *features, uint8 count) { double *output = (double *)features->output; const int outlen = GETCOUNT(features->output); uint8 result = 0; double maxvalue = *output; for (uint8 i = 1; i < count; ++i) { if (output[i] > maxvalue) { maxvalue = output[i]; result = i; } } return result; } static double f64rand() { static int randbit = 0; if (!randbit) { srand((unsigned)time(0)); for (int i = RAND_MAX; i; i >>= 1, ++randbit); } unsigned long long lvalue = 0x4000000000000000L; int i = 52 - randbit; for (; i > 0; i -= randbit) lvalue |= (unsigned long long)rand() << i; lvalue |= (unsigned long long)rand() >> -i; return *(double *)&lvalue - 3; } void TrainBatch(LeNet5 *lenet, image *inputs, uint8 *labels, int batchSize) { double buffer[GETCOUNT(LeNet5)] = { 0 }; int i = 0; #pragma omp parallel for for (i = 0; i < batchSize; ++i) { Feature features = { 0 }; Feature errors = { 0 }; LeNet5 deltas = { 0 }; load_input(&features, inputs[i]); forward(lenet, &features, relu); load_target(&features, &errors, labels[i]); backward(lenet, &deltas, &errors, &features, relugrad); #pragma omp critical { FOREACH(j, GETCOUNT(LeNet5)) buffer[j] += ((double *)&deltas)[j]; } } double k = ALPHA / batchSize; FOREACH(i, GETCOUNT(LeNet5)) ((double *)lenet)[i] += k * buffer[i]; } void Train(LeNet5 *lenet, image input, uint8 label) { Feature features = { 0 }; Feature errors = { 0 }; LeNet5 deltas = { 0 }; load_input(&features, input); forward(lenet, &features, relu); load_target(&features, &errors, label); backward(lenet, &deltas, &errors, 
&features, relugrad); FOREACH(i, GETCOUNT(LeNet5)) ((double *)lenet)[i] += ALPHA * ((double *)&deltas)[i]; } uint8 Predict(LeNet5 *lenet, image input,uint8 count) { Feature features = { 0 }; load_input(&features, input); forward(lenet, &features, relu); return get_result(&features, count); } void Initial(LeNet5 *lenet) { for (double *pos = (double *)lenet->weight0_1; pos < (double *)lenet->bias0_1; *pos++ = f64rand()); for (double *pos = (double *)lenet->weight0_1; pos < (double *)lenet->weight2_3; *pos++ *= sqrt(6.0 / (LENGTH_KERNEL * LENGTH_KERNEL * (INPUT + LAYER1)))); for (double *pos = (double *)lenet->weight2_3; pos < (double *)lenet->weight4_5; *pos++ *= sqrt(6.0 / (LENGTH_KERNEL * LENGTH_KERNEL * (LAYER2 + LAYER3)))); for (double *pos = (double *)lenet->weight4_5; pos < (double *)lenet->weight5_6; *pos++ *= sqrt(6.0 / (LENGTH_KERNEL * LENGTH_KERNEL * (LAYER4 + LAYER5)))); for (double *pos = (double *)lenet->weight5_6; pos < (double *)lenet->bias0_1; *pos++ *= sqrt(6.0 / (LAYER5 + OUTPUT))); for (int *pos = (int *)lenet->bias0_1; pos < (int *)(lenet + 1); *pos++ = 0); }
trap-omp.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

/* Demo program for OpenMP: computes trapezoidal approximation to an integral */

const double pi = 3.141592653589793238462643383079;

double f(double x); // declaration of the function f, defined below

/*
 * The integral from 0 to pi of sine(x) should be equal to 2.0.
 * This program computes that using the 'trapezoidal rule' by summing rectangles.
 *
 * Usage: prog [n] [threadct]
 *   n        number of subdivisions (default 2^20); must be positive
 *   threadct number of OpenMP threads (default 1);  must be positive
 *
 * Output: only the elapsed time with a trailing space (consumed by timing
 * scripts, so the stdout format must not change).
 */
int main(int argc, char** argv)
{
    /* Variables */
    double a = 0.0, b = pi;    /* limits of integration */
    unsigned long n = 1048576; /* number of subdivisions, default = 2^20 */
    double integral;           /* accumulates answer */
    int threadct = 1;          /* number of threads to use */

    /* Parse command-line arg for number of rectangles.
     * The original code used atoi() with no validation (its own comment
     * admitted the missing positivity check); a negative or non-numeric
     * argument would silently wrap to a huge unsigned n.  Validate instead. */
    if (argc > 1)
    {
        long requested = strtol(argv[1], NULL, 10);
        if (requested <= 0)
        {
            fprintf(stderr, "number of subdivisions must be a positive integer\n");
            return EXIT_FAILURE;
        }
        n = (unsigned long)requested;
    }

    double h = (b - a) / n; /* width of subdivision */

    /* Parse command-line arg for number of threads, also validated. */
    if (argc > 2)
    {
        threadct = atoi(argv[2]);
        if (threadct <= 0)
        {
            fprintf(stderr, "thread count must be a positive integer\n");
            return EXIT_FAILURE;
        }
    }

#ifdef _OPENMP
    omp_set_num_threads(threadct);
#else
    printf("OMP not defined\n");
#endif

    /* Trapezoidal rule: the two endpoints each count with weight 1/2. */
    integral = (f(a) + f(b)) / 2.0;

    /* Loop index matches the type of n, avoiding the old signed/unsigned
     * comparison between int i and unsigned long n. */
    unsigned long i;

    /* for timing */
    double start = omp_get_wtime();

    /* compute each rectangle, adding area to the accumulator;
     * reduction(+:integral) makes the sum race-free */
#pragma omp parallel for default(none) shared(a, h, n) private(i) reduction(+:integral)
    for (i = 1; i < n; i++)
    {
        integral += f(a + i * h);
    }
    integral = integral * h;

    /* Measuring the elapsed time */
    double end = omp_get_wtime();

    /* Time calculation (in seconds) */
    double elapsed_time = end - start;

    /* output: only print out the time result with a trailing space */
    printf("%lf ", elapsed_time);
    return 0;
}

/*
 * Function that simply computes the sine of x.
 */
double f(double x)
{
    return sin(x);
}
remapping_example.c
#include <stdlib.h>
#include "example_helper.h"

// Problem size
enum { WIDTH = 500, HEIGHT = 200, LINE_SIZE = 100 };

/* Apply some pixel value inversion in a 1D array.
   NOTE(review): the constant 500 happens to equal WIDTH above — presumably it
   is the intended maximum pixel value; confirm whether it should track WIDTH
   or be an independent constant. */
void invert_vector(int line_size, int input_line[line_size], int output_line[line_size])
{
  for(int i = 0; i < line_size; i++)
    output_line[i] = 500 - input_line[i];
}

/* The main host program controlling and representing the whole application */
int main(int argc, char* argv[])
{
  int image[HEIGHT][WIDTH];
  unsigned char output[HEIGHT][WIDTH];

  // Initialize with some values
  init_image(WIDTH, HEIGHT, image);

  // Draw 70 horizontal lines and map operation on 8 PEs:
#pragma omp parallel for num_threads(8)
  for(int proc = 0; proc < 70; proc++)
    // Each iteration is on a different PE in parallel:
    // (proc & 7 cycles the 70 iterations across the 8 PEs)
#pragma smecy map(PE, proc & 7)        \
  arg(2, in, [1][LINE_SIZE])           \
  arg(3, out, [1][LINE_SIZE])
    // Invert an horizontal line:
    // (same pointer passed as input and output: the inversion is in place)
    invert_vector(LINE_SIZE,
                  &image[HEIGHT - 20 - proc][WIDTH/2 + 2*proc],
                  &image[HEIGHT - 20 - proc][WIDTH/2 + 2*proc]);

  /* Here we guess we have 5 hardware accelerators and we launch operations
     on them: */
#pragma omp parallel for num_threads(5)
  for(int proc = 0; proc < 5; proc++) {
    /* This is need to express the fact that our accelerator only accept
       continuous data but we want apply them on non contiguous data in the
       array */
    int input_line[LINE_SIZE];
    int output_line[LINE_SIZE];
    /* We need to remap data in the good shape. The compiler should use the
       remapping information to generate DMA transfer for example and remove
       input_line array */
    // Copy a LINE_SIZE x 1 window of `image` (row HEIGHT/3, starting column
    // 30 + 20*proc) into the contiguous buffer:
    SMECY_remap_int2D_to_int1D(HEIGHT, WIDTH, HEIGHT/3, 30 + 20*proc, LINE_SIZE, 1, image, LINE_SIZE, input_line);
    // Each iteration is on a different PE in parallel:
#pragma smecy map(PE, proc) arg(2, in, [LINE_SIZE]) arg(3, out, [LINE_SIZE])
    invert_vector(LINE_SIZE, input_line, output_line);
    // Copy the result back into the same window of `image`:
    SMECY_remap_int1D_to_int2D(LINE_SIZE, output_line, HEIGHT, WIDTH, HEIGHT/3, 30 + 20*proc, LINE_SIZE, 1, image);
  }

  // Convert int image to char image:
  normalize_to_char(WIDTH, HEIGHT, image, output);
  write_pgm_image("remapping_example-output.pgm", WIDTH, HEIGHT, output);

  return EXIT_SUCCESS;
}
start.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <libconfig.h>
#include "inventory.h"
#include "config.h"
#include "recipes.h"
#include "FTPManagement.h"
#include "start.h"
#include "calculator.h"
#include <time.h>
#include "cJSON.h"
#include <curl/curl.h>
#include "logger.h"
#include <sys/stat.h>
#include <sys/types.h>

// Fastest roadmap (in frames) found locally so far; lower is better.
int current_frame_record;
// Version string read from the config file; compared against GitHub releases.
const char *local_ver;

// Accessor for the local frame record (read by other translation units).
int getLocalRecord() {
	return current_frame_record;
}

// Update the local frame record after a faster roadmap is found.
void setLocalRecord(int frames) {
	current_frame_record = frames;
}

// Accessor for the locally configured version string.
const char *getLocalVersion() {
	return local_ver;
}

/* Program entry point: load configuration, check for updates and the current
   server record, restore the user's personal best from results/PB.txt, then
   spawn workerCount OpenMP threads that search for faster roadmaps forever. */
int main() {
	int cycle_count = 1;	// NOTE(review): unused here — presumably legacy; verify before removing
	current_frame_record = 9999;	// sentinel "no record yet" value
	initConfig();

	// If select and randomise are both 0, the same roadmap will be calculated on every thread, so set threads = 1
	// The debug setting can only be meaningfully used with one thread as well.
	int workerCount = (getConfigInt("select") || getConfigInt("randomise")) && !getConfigInt("debug") ? getConfigInt("workerCount") : 1;
	local_ver = getConfigStr("Version");
	init_level_cfg();
	curl_global_init(CURL_GLOBAL_DEFAULT); // Initialize libcurl
	int update = checkForUpdates(local_ver);

	// Greeting message to user
	printf("Welcome to Recipes@Home!\n");
	printf("Leave this program running as long as you want to search for new recipe orders.\n");

	// Ask the server for the global fastest record (0 signals a network error).
	int blob_record = getFastestRecordOnBlob();
	if (blob_record == 0) {
		printf("There was an error contacting the server to retrieve the fastest time.\n");
		printf("Please check your internet connection, but we'll continue for now.\n");
	}
	else {
		printf("The current fastest record is %d frames. Happy cooking!\n", blob_record);
	}

	// update == -1: version check failed; update == 1: a newer release exists.
	// Both cases stop the program (roadmap submission requires a known-good version).
	if (update == -1) {
		printf("Could not check version on Github. Please check your internet connection.\n");
		printf("Otherwise, we can't submit completed roadmaps to the server!\n");
		printf("Alternatively you may have been rate-limited. Please wait a while and try again.\n");
		printf("Press ENTER to quit.\n");
		char exitChar = getchar();	// NOTE(review): exitChar is unused; getchar() only pauses for the user
		return -1;
	}
	else if (update == 1) {
		printf("Please visit https://github.com/SevenChords/CipesAtHome/releases to download the newest version of this program!\n");
		printf("Press ENTER to quit.\n");
		char exitChar = getchar();
		return -1;
	}

	// Verify that username field is not malformed,
	// as this would cause errors when a roadmap is submitted to the servers
	if (getConfigStr("Username") == NULL) {
		printf("Username field is malformed. Please verify that your username is within quotation marks next to \"Username = \"\n");
		printf("Press ENTER to exit the program.\n");
		char exitChar = getchar();
		exit(1);
	}

	// Verify that the results folder exists
	// If not, create the directory
	mkdir("./results", 0777);

	// To avoid generating roadmaps that are slower than the user's record best,
	// use PB.txt to identify the user's current best
	FILE* fp = fopen("results/PB.txt", "r");

	// The PB file may not have been created yet, so ignore the case where it is missing
	if (fp != NULL) {
		int PB_record;
		if (fscanf(fp, "%d", &PB_record) == 1) {
			current_frame_record = PB_record;
			testRecord(current_frame_record);
		}
		fclose(fp);
		// Submit the user's fastest roadmap to the server for leaderboard purposes
	}

	// Initialize global variables in calculator.c
	// This does not need to be done in parallel, as these globals will
	// persist through all parallel calls to calculator.c
	initializeInvFrames();
	initializeRecipeList();

	// Create workerCount threads
	omp_set_num_threads(workerCount);
	#pragma omp parallel
	{
		int ID = omp_get_thread_num();

		// Seed each thread's PRNG for the select and randomise config options
		srand(((int)time(NULL)) ^ ID);

		// Each worker searches forever; records are reported via testRecord().
		while (1) {
			struct Result result = calculateOrder(ID);
			// result might store -1 frames for errors that might be recoverable
			if (result.frames > -1) {
				testRecord(result.frames);
			}
		}
	}

	return 0;
}
pr80394.c
/* PR libgomp/80394 */
/* Regression test: an undeferred task (if(0)) carrying an inout dependence on
   x must not start until the earlier sibling task with the same dependence has
   completed.  If the runtime wrongly ignores the dependence for undeferred
   tasks, the encountering thread reaches the check before the first task has
   added 5, and x != 5 aborts. */

int
main ()
{
  int x = 0;
  #pragma omp parallel shared(x)
  #pragma omp single
  {
    /* First task: spin for a while so it is still running when the second
       task construct is encountered, then update x. */
    #pragma omp task depend(inout: x)
    {
      for (int i = 0; i < 100000; i++)
	asm volatile ("" : : : "memory");	/* keep the delay loop from being optimized away */
      x += 5;
    }
    /* if (0) makes this task undeferred: the encountering thread must block
       here until the dependence on x is satisfied.  The task body is empty —
       only the synchronization matters. */
    #pragma omp task if (0) depend(inout: x)
    ;
    if (x != 5)
      __builtin_abort ();
  }
  return 0;
}
GB_binop__isgt_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isgt_int8 // A.*B function (eWiseMult): GB_AemultB__isgt_int8 // A*D function (colscale): GB_AxD__isgt_int8 // D*A function (rowscale): GB_DxB__isgt_int8 // C+=B function (dense accum): GB_Cdense_accumB__isgt_int8 // C+=b function (dense accum): GB_Cdense_accumb__isgt_int8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isgt_int8 // C=scalar+B GB_bind1st__isgt_int8 // C=scalar+B' GB_bind1st_tran__isgt_int8 // C=A+scalar GB_bind2nd__isgt_int8 // C=A'+scalar GB_bind2nd_tran__isgt_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x > y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGT || GxB_NO_INT8 || GxB_NO_ISGT_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isgt_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isgt_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isgt_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isgt_int8 ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isgt_int8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__isgt_int8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = 
NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isgt_int8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isgt_int8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd 
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isgt_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB_bind1st_tran__isgt_int8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB_bind2nd_tran__isgt_int8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unaryop__ainv_uint64_int8.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint64_int8
// op(A') function:  GB_tran__ainv_uint64_int8

// C type:   uint64_t
// A type:   int8_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (cast (aij))
// Note the order visible in the expansion below: aij is first cast from
// int8_t to uint64_t (GB_CASTING), and the negation (GB_OP) is then applied
// in the uint64_t domain, i.e. it wraps modulo 2^64.
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint64_int8
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    int8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // each entry is independent, so a static schedule splits the work evenly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint64_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template does the work, using the macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
mixed_tentusscher_myo_epi_2004_S2_12.c
// Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S2_12.h"

// Report basic model metadata (resting potential and number of state
// variables) to the framework, depending on which flags are requested.
GET_CELL_MODEL_DATA(init_cell_model_data)
{
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

// Set the 17 initial state values for one cell.  extra_data must carry a
// per-cell mapping array: mapping[sv_id] == 0 selects the myocardium
// steady-state, anything else selects the epicardium steady-state.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
    static bool first_call = true;

    if(first_call)
    {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }

    // Get the mapping array (exits the program if it was not supplied)
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0)
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.6743585456438,0.00126116515238777,0.782285143101146,0.781885737321280,0.000172267497323657,0.486193660951379,0.00291820808108493,0.999998382455018,1.89973078307127e-08,1.86451321167615e-05,0.999780198191440,1.00782702931804,0.999999754763967,2.76599036686923e-05,0.357538249293263,10.7085717792583,139.021384569998};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
}

// Advance num_steps time steps of size dt for every cell listed in
// cells_to_solve (or all cells when that list is NULL), dispatching each
// cell to the myocardium or epicardium kernel according to the mapping.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
    // Get the mapping array (exits the program if it was not supplied)
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        for (int j = 0; j < num_steps; ++j)
        {
            // NOTE(review): the mapping is indexed with the loop index i,
            // while the state vector is indexed with sv_id; the initial-
            // condition function uses mapping[sv_id].  When cells_to_solve
            // is non-NULL and not the identity these disagree — confirm
            // which index the mapping is keyed on.
            if (mapping[i] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One time step for a myocardium cell.  RHS_cpu_myo returns the NEW state
// values (not derivatives), so they are copied straight back into sv.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_myo(rY, rDY, stim_current, dt);

    // rDY already holds the updated states — direct assignment, no Euler
    // step here (the integration is done inside RHS_cpu_myo).
    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 myocardium cell: computes the state at t+dt from the
// state at t.  Gates use an exponential (Rush-Larsen-style) update, the
// membrane voltage uses an explicit Euler step, and the ionic
// concentrations are updated in place before being stored in rDY_.
// sv  : input state (V, gates, concentrations; 17 entries)
// rDY_: output — updated state values, same layout as sv
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];   // membrane voltage (mV)
    real sm    = sv[1];
    real sh    = sv[2];
    real sj    = sv[3];
    real sxr1  = sv[4];
    real sxr2  = sv[5];
    real sxs   = sv[6];
    real ss    = sv[7];
    real sr    = sv[8];
    real sd    = sv[9];
    real sf    = sv[10];
    real sfca  = sv[11];
    real sg    = sv[12];
    real Cai   = sv[13];
    real CaSR  = sv[14];
    real Nai   = sv[15];
    real Ki    = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL;
    real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak;

    real dNai; real dKi; real dCai; real dCaSR;

    real A;
//    real BufferFactorc;
//    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek; real Ena; real Eks; real Eca;
    real CaCSQN; real bjsr; real cjsr;
    real CaBuf; real bc; real cc;
    real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK;
    real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2;
    real AJ_1; real BJ_1; real AJ_2; real BJ_2;
    real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J;
    real axr1; real bxr1; real axr2; real bxr2;
    real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2;
    real Axs; real Bxs; real Xs_INF; real TAU_Xs;
    real R_INF; real TAU_R; real S_INF; real TAU_S;
    real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF;
    real FCa_INF; real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
//    real BufcKbufc=Bufc*Kbufc;
//    real Kbufcsquare=Kbufc*Kbufc;
//    real Kbufc2=2*Kbufc;
//    real BufsrKbufsr=Bufsr*Kbufsr;
//    const real Kbufsrsquare=Kbufsr*Kbufsr;
//    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents (reversal potentials and rectifier factors)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    // analytic solution of the SR buffering quadratic
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (exponential / Rush-Larsen-style integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g gates may only relax (not re-activate) above -37 mV
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (explicit Euler)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}

// One time step for an epicardium cell; same copy-in/copy-out pattern as
// the myocardium wrapper above.
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_epi(rY, rDY, stim_current, dt);

    // rDY already holds the updated states — direct assignment
    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 epicardium cell: identical structure to RHS_cpu_myo,
// except that most conductances and the SR release/leak parameters are
// overridden from a fitted "parameters" table (scenario S2_12) instead of
// the hard-coded defaults.
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];   // membrane voltage (mV)
    real sm    = sv[1];
    real sh    = sv[2];
    real sj    = sv[3];
    real sxr1  = sv[4];
    real sxr2  = sv[5];
    real sxs   = sv[6];
    real ss    = sv[7];
    real sr    = sv[8];
    real sd    = sv[9];
    real sf    = sv[10];
    real sfca  = sv[11];
    real sg    = sv[12];
    real Cai   = sv[13];
    real CaSR  = sv[14];
    real Nai   = sv[15];
    real Ki    = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Epicardium cell
    real Gks=0.245;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Epicardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Scenario-specific fitted parameter set; overrides the defaults above
    real parameters []={14.4941061664816,0.000306940351318330,0.000126486160649835,0.000251593758331556,0.231852653636147,0.170492615868249,0.109036079095606,4.44796487754522,0.0111149661882113,1.23956736157302,1099.91017026794,0.000314927815763443,0.381236416535235,0.0193513922111542,0.00539385037460332,9.81890868796030e-06};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];   // SR release amplitude scale
    real crel=parameters[14];   // SR release baseline
    real Vleak=parameters[15];  // SR leak rate

    real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL;
    real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak;

    real dNai; real dKi; real dCai; real dCaSR;

    real A;
//    real BufferFactorc;
//    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek; real Ena; real Eks; real Eca;
    real CaCSQN; real bjsr; real cjsr;
    real CaBuf; real bc; real cc;
    real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK;
    real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2;
    real AJ_1; real BJ_1; real AJ_2; real BJ_2;
    real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J;
    real axr1; real bxr1; real axr2; real bxr2;
    real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2;
    real Axs; real Bxs; real Xs_INF; real TAU_Xs;
    real R_INF; real TAU_R; real S_INF; real TAU_S;
    real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF;
    real FCa_INF; real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
//    real BufcKbufc=Bufc*Kbufc;
//    real Kbufcsquare=Kbufc*Kbufc;
//    real Kbufc2=2*Kbufc;
//    real BufsrKbufsr=Bufsr*Kbufsr;
//    const real Kbufsrsquare=Kbufsr*Kbufsr;
//    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents (reversal potentials and rectifier factors)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    // fitted arel/crel/Vleak replace the hard-coded myo SR constants
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (exponential / Rush-Larsen-style integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g gates may only relax (not re-activate) above -37 mV
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (explicit Euler)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
/* ==== file: par_gsmg.c ==== */
/****************************************************************************** * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Geometrically smooth interpolation multigrid * *****************************************************************************/ #include <stdio.h> #include <math.h> #include "_hypre_parcsr_ls.h" #include "par_amg.h" #include "_hypre_lapack.h" #ifndef ABS #define ABS(x) ((x)>0 ? (x) : -(x)) #endif #ifndef MAX #define MAX(a,b) ((a)>(b)?(a):(b)) #endif static HYPRE_Real mydnrm2(HYPRE_Int n, HYPRE_Real *x) { HYPRE_Real temp = 0.; HYPRE_Int i; for (i = 0; i < n; i++) { temp = temp + x[i] * x[i]; } return sqrt(temp); } static void mydscal(HYPRE_Int n, HYPRE_Real a, HYPRE_Real *x) { HYPRE_Int i; for (i = 0; i < n; i++) { x[i] = a * x[i]; } } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixFillSmooth * - fill in smooth matrix * - this function will scale the smooth vectors *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixFillSmooth(HYPRE_Int nsamples, HYPRE_Real *samples, hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A, HYPRE_Int num_functions, HYPRE_Int *dof_func) { hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Real *S_offd_data = 
hypre_CSRMatrixData(S_offd); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int i, j, k, ii, index, start; HYPRE_Int num_cols_offd; HYPRE_Int num_sends; HYPRE_Int *dof_func_offd; HYPRE_Int *int_buf_data; HYPRE_Real temp; HYPRE_Real *p; HYPRE_Real *p_offd; HYPRE_Real *p_ptr; HYPRE_Real *buf_data; HYPRE_Real nm; #if 0 HYPRE_Real mx = 0., my = 1.e+10; #endif /* normalize each sample vector and divide by number of samples */ for (k = 0; k < nsamples; k++) { nm = mydnrm2(n, samples + k * n); nm = 1. / nm / nsamples; mydscal(n, nm, samples + k * n); } num_cols_offd = hypre_CSRMatrixNumCols(S_offd); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); p_offd = hypre_CTAlloc(HYPRE_Real, nsamples * num_cols_offd, HYPRE_MEMORY_HOST); p_ptr = p_offd; p = samples; for (k = 0; k < nsamples; k++) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) buf_data[index++] = p[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, p_offd); hypre_ParCSRCommHandleDestroy(comm_handle); p = p + n; p_offd = p_offd + num_cols_offd; } hypre_TFree(buf_data, HYPRE_MEMORY_HOST); if (num_functions > 1) { dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = 
dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } for (i = 0; i < n; i++) { for (j = S_diag_i[i] + 1; j < S_diag_i[i + 1]; j++) { ii = S_diag_j[j]; /* only interpolate between like functions */ if (num_functions > 1 && dof_func[i] != dof_func[ii]) { S_diag_data[j] = 0.; continue; } /* explicit zeros */ if (A_diag_data[j] == 0.) { S_diag_data[j] = 0.; continue; } temp = 0.; p = samples; for (k = 0; k < nsamples; k++) { temp = temp + ABS(p[i] - p[ii]); p = p + n; } /* explicit zeros in matrix may cause this */ if (temp == 0.) { S_diag_data[j] = 0.; continue; } temp = 1. / temp; /* reciprocal */ #if 0 my = hypre_min(my, temp); mx = hypre_max(mx, temp); #endif S_diag_data[j] = temp; } for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++) { ii = S_offd_j[j]; /* only interpolate between like functions */ if (num_functions > 1 && dof_func[i] != dof_func_offd[ii]) { S_offd_data[j] = 0.; continue; } /* explicit zeros */ if (A_offd_data[j] == 0.) { S_offd_data[j] = 0.; continue; } temp = 0.; p = samples; p_offd = p_ptr; for (k = 0; k < nsamples; k++) { temp = temp + ABS(p[i] - p_offd[ii]); p = p + n; p_offd = p_offd + num_cols_offd; } /* explicit zeros in matrix may cause this */ if (temp == 0.) { S_offd_data[j] = 0.; continue; } temp = 1. 
/ temp; /* reciprocal */ #if 0 my = hypre_min(my, temp); mx = hypre_max(mx, temp); #endif S_offd_data[j] = temp; } } #if 0 hypre_printf("MIN, MAX: %f %f\n", my, mx); #endif hypre_TFree(p_ptr, HYPRE_MEMORY_HOST); if (num_functions > 1) { hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); } return 0; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixChooseThresh *--------------------------------------------------------------------------*/ HYPRE_Real hypre_ParCSRMatrixChooseThresh(hypre_ParCSRMatrix *S) { MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd); HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int i, j; HYPRE_Real mx, minimax = 1.e+10; HYPRE_Real minmin; for (i = 0; i < n; i++) { mx = 0.; for (j = S_diag_i[i]; j < S_diag_i[i + 1]; j++) { mx = hypre_max(mx, S_diag_data[j]); } for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++) { mx = hypre_max(mx, S_offd_data[j]); } if (mx != 0.) 
{ minimax = hypre_min(minimax, mx); } } hypre_MPI_Allreduce(&minimax, &minmin, 1, HYPRE_MPI_REAL, hypre_MPI_MIN, comm); return minmin; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixThreshold *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixThreshold(hypre_ParCSRMatrix *A, HYPRE_Real thresh) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_nonzeros_diag = A_diag_i[n]; HYPRE_Int num_nonzeros_offd = A_offd_i[n]; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; HYPRE_Real *S_diag_data; HYPRE_Int *S_offd_i; HYPRE_Int *S_offd_j; HYPRE_Real *S_offd_data; HYPRE_Int count, i, jS, jA; /* first count the number of nonzeros we will need */ count = 0; for (i = 0; i < num_nonzeros_diag; i++) if (A_diag_data[i] >= thresh) { count++; } /* allocate vectors */ S_diag_i = hypre_CTAlloc(HYPRE_Int, n + 1, HYPRE_MEMORY_HOST); S_diag_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); S_diag_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST); jS = 0; for (i = 0; i < n; i++) { S_diag_i[i] = jS; for (jA = A_diag_i[i]; jA < A_diag_i[i + 1]; jA++) { if (A_diag_data[jA] >= thresh) { S_diag_data[jS] = A_diag_data[jA]; S_diag_j[jS] = A_diag_j[jA]; jS++; } } } S_diag_i[n] = jS; hypre_CSRMatrixNumNonzeros(A_diag) = jS; /* free the vectors we don't need */ hypre_TFree(A_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(A_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(A_diag_data, HYPRE_MEMORY_HOST); /* assign the new vectors */ hypre_CSRMatrixI(A_diag) = S_diag_i; hypre_CSRMatrixJ(A_diag) 
= S_diag_j; hypre_CSRMatrixData(A_diag) = S_diag_data; /* * Offd part */ /* first count the number of nonzeros we will need */ count = 0; for (i = 0; i < num_nonzeros_offd; i++) if (A_offd_data[i] >= thresh) { count++; } /* allocate vectors */ S_offd_i = hypre_CTAlloc(HYPRE_Int, n + 1, HYPRE_MEMORY_HOST); S_offd_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); S_offd_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST); jS = 0; for (i = 0; i < n; i++) { S_offd_i[i] = jS; for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { if (A_offd_data[jA] >= thresh) { S_offd_data[jS] = A_offd_data[jA]; S_offd_j[jS] = A_offd_j[jA]; jS++; } } } S_offd_i[n] = jS; hypre_CSRMatrixNumNonzeros(A_offd) = jS; /* free the vectors we don't need */ hypre_TFree(A_offd_i, HYPRE_MEMORY_HOST); hypre_TFree(A_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(A_offd_data, HYPRE_MEMORY_HOST); /* assign the new vectors */ hypre_CSRMatrixI(A_offd) = S_offd_i; hypre_CSRMatrixJ(A_offd) = S_offd_j; hypre_CSRMatrixData(A_offd) = S_offd_data; return 0; } /*-------------------------------------------------------------------------- * CreateSmoothVecs * - smoother depends on the level being used *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSmoothVecs(void *data, hypre_ParCSRMatrix *A, HYPRE_Int num_sweeps, HYPRE_Int level, HYPRE_Real **SmoothVecs_p) { hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_ParVector *Zero; hypre_ParVector *Temp; hypre_ParVector *U; hypre_ParVector *Qtemp = NULL; HYPRE_Int i; HYPRE_BigInt n = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int n_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt *starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int sample; HYPRE_Int nsamples = hypre_ParAMGDataNumSamples(amg_data); HYPRE_Int ret; HYPRE_Real *datax, *bp, 
*p; HYPRE_Int rlx_type; HYPRE_Int smooth_type; HYPRE_Int smooth_option = 0; HYPRE_Int smooth_num_levels; HYPRE_Solver *smoother; HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data); HYPRE_Int num_threads; num_threads = hypre_NumThreads(); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (debug_flag >= 1) hypre_printf("Creating smooth dirs, %d sweeps, %d samples\n", num_sweeps, nsamples); smooth_type = hypre_ParAMGDataSmoothType(amg_data); smooth_num_levels = hypre_ParAMGDataSmoothNumLevels(amg_data); if (smooth_num_levels > level) { smooth_option = smooth_type; smoother = hypre_ParAMGDataSmoother(amg_data); num_sweeps = hypre_ParAMGDataSmoothNumSweeps(amg_data); } rlx_type = hypre_ParAMGDataGridRelaxType(amg_data)[0]; /* rlx_wt = hypre_ParAMGDataRelaxWeight(amg_data)[level]; */ /* omega = hypre_ParAMGDataOmega(amg_data)[level]; */ /* generate par vectors */ Zero = hypre_ParVectorCreate(comm, n, starts); hypre_ParVectorInitialize(Zero); datax = hypre_VectorData(hypre_ParVectorLocalVector(Zero)); for (i = 0; i < n_local; i++) { datax[i] = 0.; } Temp = hypre_ParVectorCreate(comm, n, starts); hypre_ParVectorInitialize(Temp); datax = hypre_VectorData(hypre_ParVectorLocalVector(Temp)); for (i = 0; i < n_local; i++) { datax[i] = 0.; } U = hypre_ParVectorCreate(comm, n, starts); hypre_ParVectorInitialize(U); datax = hypre_VectorData(hypre_ParVectorLocalVector(U)); if (num_threads > 1) { Qtemp = hypre_ParVectorCreate(comm, n, starts); hypre_ParVectorInitialize(Qtemp); } /* allocate space for the vectors */ bp = hypre_CTAlloc(HYPRE_Real, nsamples * n_local, HYPRE_MEMORY_HOST); p = bp; /* generate random vectors */ for (sample = 0; sample < nsamples; sample++) { for (i = 0; i < n_local; i++) { datax[i] = hypre_Rand() - .5; } for (i = 0; i < num_sweeps; i++) { if (smooth_option == 6) { HYPRE_SchwarzSolve(smoother[level], (HYPRE_ParCSRMatrix) A, (HYPRE_ParVector) Zero, (HYPRE_ParVector) U); } else { ret = 
hypre_BoomerAMGRelax(A, Zero, NULL /*CFmarker*/, rlx_type, 0 /*rel pts*/, 1.0 /*weight*/, 1.0 /*omega*/, NULL, U, Temp, Qtemp); hypre_assert(ret == 0); } } /* copy out the solution */ for (i = 0; i < n_local; i++) { *p++ = datax[i]; } } hypre_ParVectorDestroy(Zero); hypre_ParVectorDestroy(Temp); hypre_ParVectorDestroy(U); if (num_threads > 1) { hypre_ParVectorDestroy(Qtemp); } *SmoothVecs_p = bp; return 0; } /*-------------------------------------------------------------------------- * CreateSmoothDirs replaces CreateS in AMG * - smoother depends on the level being used * - in this version, CreateSmoothVecs must be called prior to this function *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSmoothDirs(void *data, hypre_ParCSRMatrix *A, HYPRE_Real *SmoothVecs, HYPRE_Real thresh, HYPRE_Int num_functions, HYPRE_Int *dof_func, hypre_ParCSRMatrix **S_ptr) { hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data; hypre_ParCSRMatrix *S; HYPRE_Real minimax; HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data); S = hypre_ParCSRMatrixClone(A, 0); /* Traverse S and fill in differences */ hypre_ParCSRMatrixFillSmooth( hypre_ParAMGDataNumSamples(amg_data), SmoothVecs, S, A, num_functions, dof_func); minimax = hypre_ParCSRMatrixChooseThresh(S); if (debug_flag >= 1) { hypre_printf("Minimax chosen: %f\n", minimax); } /* Threshold and compress */ hypre_ParCSRMatrixThreshold(S, thresh * minimax); *S_ptr = S; return 0; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGNormalizeVecs * * Normalize the smooth vectors and also make the first vector the constant * vector * * inputs: * n = length of smooth vectors * num = number of smooth vectors * V = smooth vectors (array of length n*num), also an output * * output: * V = adjusted smooth vectors *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGNormalizeVecs(HYPRE_Int n, 
HYPRE_Int num, HYPRE_Real *V)
{
   HYPRE_Int  i, j;
   HYPRE_Real nrm;

   /* change first vector to the constant vector */
   for (i = 0; i < n; i++)
   {
      V[i] = 1.0;
   }

   /* scale each of the num vectors to unit 2-norm */
   for (j = 0; j < num; j++)
   {
      nrm = mydnrm2(n, &V[j * n]);
      mydscal(n, 1. / nrm, &V[j * n]);
   }

   return 0;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGFitVectors
 *
 * Construct interpolation weights based on fitting smooth vectors:
 * least-squares solve for weights val such that the weighted combination
 * of the smooth vectors' values at the coarse points ind[] best matches
 * their values at fine row ip.
 *
 * inputs:
 * ip  = row number of row in P being processed (0-based)
 * n   = length of smooth vectors
 * num = number of smooth vectors
 * V   = smooth vectors (array of length n*num)
 * nc  = number of coarse grid points
 * ind = indices of coarse grid points (0-based)
 *
 * output:
 * val = interpolation weights for the coarse grid points
 *
 * Returns the LAPACK dgels info code (0 on success; 0 immediately if nc==0).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGFitVectors(HYPRE_Int ip, HYPRE_Int n, HYPRE_Int num,
                          const HYPRE_Real *V,
                          HYPRE_Int nc, const HYPRE_Int *ind,
                          HYPRE_Real *val)
{
   HYPRE_Real *a, *b;
   HYPRE_Real *ap;
   HYPRE_Int   i, j;
   HYPRE_Real *work;
   HYPRE_Int   work_size;
   HYPRE_Int   info;
   HYPRE_Int   temp;

   /*
      hypre_printf("Fit: row %d, n %d num %d, nc = %d ", ip, n, num, nc);
      for (i=0; i<nc; i++) hypre_printf("%d ", ind[i]);
      hypre_printf("\n");
   */

   /* nothing to fit without coarse points */
   if (nc == 0)
   {
      return 0;
   }

   /* fixed LAPACK workspace -- NOTE(review): magic size, assumed large
      enough for all (num, nc) seen in practice; confirm */
   work_size = 2000 * 64;
   work = hypre_CTAlloc(HYPRE_Real, work_size, HYPRE_MEMORY_HOST);

   /* a: num x nc matrix in column-major order;
      a(i,j) = value of smooth vector i at coarse point ind[j] */
   a = hypre_CTAlloc(HYPRE_Real, num * nc, HYPRE_MEMORY_HOST);
   ap = a;
   for (j = 0; j < nc; j++)
   {
      for (i = 0; i < num; i++)
      {
         *ap = V[i * n + ind[j]];
         ap++;
      }
   }

   /* b: right-hand side, values of the smooth vectors at row ip;
      allocated max(nc, num) long because dgels overwrites b with the
      solution, which may be longer than the input */
   temp = MAX(nc, num);
   b = hypre_CTAlloc(HYPRE_Real, temp, HYPRE_MEMORY_HOST);
   for (i = 0; i < num; i++)
   {
      b[i] = V[i * n + ip];
   }

   {
      char trans = 'N';
      HYPRE_Int one = 1;
      /* least-squares solve: min || a x - b ||_2 */
      hypre_dgels(&trans, &num, &nc, &one, a, &num,
                  b, &temp, work, &work_size, &info);
      if (info != 0)
      {
         /* NOTE(review): format string contains %d but no matching
            argument is passed -- the message will print garbage; bug */
         hypre_error_w_msg(HYPRE_ERROR_GENERIC, "par_gsmg: dgels returned %d\n");
      }

      /* copy solution into output vector */
      for (j = 0; j < nc; j++)
      {
         val[j] = b[j];
      }
   }

   hypre_TFree(b, HYPRE_MEMORY_HOST);
   hypre_TFree(a, HYPRE_MEMORY_HOST);
   hypre_TFree(work, HYPRE_MEMORY_HOST);

   return info;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpLS
 *
 * Interpolation built from fitting smooth vectors
 * - sequential version only (the off-diagonal part of P is not built)
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGBuildInterpLS( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                              hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
                              HYPRE_Int num_functions, HYPRE_Int *dof_func,
                              HYPRE_Int debug_flag, HYPRE_Real trunc_factor,
                              HYPRE_Int num_smooth, HYPRE_Real *SmoothVecs,
                              hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(S);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
   hypre_ParCSRCommHandle *comm_handle;

   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   /* HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); */
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   /* HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
      HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
      HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); */
   HYPRE_Int        num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd);
   /* HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); */

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt       *col_map_offd_P;
   HYPRE_Int          *tmp_map_offd = NULL;
   HYPRE_Int          *CF_marker_offd;
   HYPRE_Int          *dof_func_offd = NULL;

   hypre_CSRMatrix *S_ext;
   //HYPRE_Real *S_ext_data;
   //HYPRE_Int *S_ext_i;
   //HYPRE_BigInt *S_ext_j;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   HYPRE_Real *P_diag_data;
   HYPRE_Int  *P_diag_i;
   HYPRE_Int  *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int  *P_offd_i;
   HYPRE_Int  *P_offd_j;

   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;

   HYPRE_Int *P_marker;
   /* HYPRE_Int
*P_marker_offd; */

   HYPRE_Int  jj_counter, jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;
   /* HYPRE_Int jj_begin_row,jj_begin_row_offd;
      HYPRE_Int jj_end_row,jj_end_row_offd; */

   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */

   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag);

   HYPRE_Int *fine_to_coarse;
   //HYPRE_BigInt *fine_to_coarse_offd;
   HYPRE_Int *coarse_counter;
   HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   //HYPRE_BigInt my_first_cpt;

   HYPRE_Int i, i1;
   HYPRE_Int j, jl, jj;
   HYPRE_Int start;

   HYPRE_Real one = 1.0;

   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;
   HYPRE_Int *int_buf_data;
   //HYPRE_BigInt *big_buf_data;

   HYPRE_Real wall_time; /* for debugging instrumentation */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   num_threads = hypre_NumThreads();

   //my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/

   if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }

   CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);

   if (num_functions > 1 && num_cols_S_offd)
   {
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
   }

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(S);
      comm_pkg = hypre_ParCSRMatrixCommPkg(S);
   }

   /* pack the CF markers of the local points this rank sends */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                HYPRE_MEMORY_HOST);

   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* same exchange for dof_func in the systems (num_functions > 1) case */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }

   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*----------------------------------------------------------------------
    * Get the ghost rows of S
    *---------------------------------------------------------------------*/

   if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }

   if (num_procs > 1)
   {
      /* external rows are fetched but not used below (the off-diagonal
         interpolation in this routine is not implemented); S_ext is only
         freed at the end */
      S_ext = hypre_ParCSRMatrixExtractBExt(S, S, 1);
      //S_ext_i = hypre_CSRMatrixI(S_ext);
      //S_ext_j = hypre_CSRMatrixBigJ(S_ext);
      //S_ext_data = hypre_CSRMatrixData(S_ext);
   }

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/

   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; }

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/

   /* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* partition the fine rows [ns, ne) as evenly as possible over the
         threads; the first `rest` threads get one extra row */
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector (thread-local coarse numbering for now).
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }

         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is from the C-points that
          * strongly influence i.
          *--------------------------------------------------------------------*/
         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }

            if (num_procs > 1)
            {
               /* removed */
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/

   /* prefix-sum the per-thread counters so the totals land in the last slot */
   for (i = 0; i < num_threads - 1; i++)
   {
      coarse_counter[i + 1] += coarse_counter[i];
      jj_count[i + 1] += jj_count[i];
      jj_count_offd[i + 1] += jj_count_offd[i];
   }
   i = num_threads - 1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;

   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/

   if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }

   /*fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_S_offd, HYPRE_MEMORY_HOST);
   big_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);*/

#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* convert each thread's local coarse numbering to the on-rank
         numbering by adding the preceding threads' coarse counts */
      coarse_shift = 0;
      if (j > 0) { coarse_shift = coarse_counter[j - 1]; }
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }
      for (i = ns; i < ne; i++)
      {
         fine_to_coarse[i] += coarse_shift;
      }
   }

   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         big_buf_data[index++] = my_first_cpt+(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, fine_to_coarse_offd);

   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   if (debug_flag==4) wall_time = time_getWallclockSeconds();*/

   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
*-----------------------------------------------------------------------*/

#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,jj_counter,jj_counter_offd) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      /* same row partition as the counting pass */
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (jl < rest)
      {
         ns = jl * size + jl;
         ne = (jl + 1) * size + jl + 1;
      }
      else
      {
         ns = jl * size + rest;
         ne = (jl + 1) * size + rest;
      }
      /* each thread starts filling at the end of the previous threads'
         counted entries (prefix sums from pass 1) */
      jj_counter = 0;
      if (jl > 0) { jj_counter = jj_count[jl - 1]; }
      jj_counter_offd = 0;
      if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter] = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }

         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            HYPRE_Int kk;
            HYPRE_Int indices[1000]; /* kludge */
            /* NOTE(review): fixed-size buffer; a row with more than 1000
               strong C-neighbors would overflow it -- confirm an upper
               bound is enforced elsewhere */

            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            kk = 0;
            for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  jj_counter++;

                  indices[kk] = i1;
                  kk++;
               }
            }

            /* weights come from the least-squares fit of the smooth
               vectors at this row against its strong C-neighbors */
            hypre_BoomerAMGFitVectors(i, n_fine, num_smooth, SmoothVecs,
                                      kk, indices, &P_diag_data[P_diag_i[i]]);

            /* Off-Diagonal part of P */
            /* undone */
         }
      }
   }
   /* check that this is in right place for threads */
   /* NOTE(review): this appears intended to close the row pointer at
      P_diag_i[n_fine] (already set during allocation), but `i` here is the
      loop variable after the parallel region, which is listed private in
      the OpenMP pragma -- confirm its value is well-defined here */
   P_diag_i[i] = jj_counter;

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(S),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(S),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */

   if (trunc_factor != 0.0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0);
      /* truncation may reallocate; refresh the local views */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* build the compressed off-diagonal column map:
      sort a copy of P_offd_j, keep unique values, then renumber
      P_offd_j entries to indices into that unique list */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < P_offd_size; i++)
      {
         P_marker[i] = P_offd_j[i];
      }

      hypre_qsort0(P_marker, 0, P_offd_size - 1);

      num_cols_P_offd = 1;
      index = P_marker[0];
      for (i = 1; i < P_offd_size; i++)
      {
         if (P_marker[i] > index)
         {
            index = P_marker[i];
            P_marker[num_cols_P_offd++] = index;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      /* NOTE(review): col_map_offd_P entries are never assigned in this
         routine before being installed on P below -- presumably tolerable
         because the off-diagonal part is "undone" above; confirm */
      for (i = 0; i < num_cols_P_offd; i++)
      {
         tmp_map_offd[i] = P_marker[i];
      }

#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_GetCommPkgRTFromCommPkgA(P, S, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   //hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   if (num_procs > 1) { hypre_CSRMatrixDestroy(S_ext); }

   return (0);
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpGSMG
 *
 * Difference with hypre_BoomerAMGBuildInterp is that S contains values
 * and is used to build interpolation weights. Matrix A is not used.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildInterpGSMG( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd); HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); HYPRE_Int *tmp_map_offd = NULL; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *CF_marker_offd; HYPRE_Int *dof_func_offd = NULL; hypre_CSRMatrix *S_ext; HYPRE_Real *S_ext_data; HYPRE_Int *S_ext_i; HYPRE_BigInt *S_ext_j; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int jj_begin_row, jj_begin_row_offd; HYPRE_Int jj_end_row, jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int strong_f_marker; HYPRE_Int *fine_to_coarse; HYPRE_Int *coarse_counter; //HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; //HYPRE_BigInt my_first_cpt; HYPRE_BigInt big_i2; 
HYPRE_Int i, i1, i2; HYPRE_Int j, jl, jj, jj1; HYPRE_Int start; HYPRE_Int c_num; HYPRE_Real sum; HYPRE_Real distribute; HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(S); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(S_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); num_threads = hypre_NumThreads(); //my_first_cpt = num_cpts_global[0]; total_global_cpts = 0; /* we will set this later for the matrix in the setup */ /* if (myid == (num_procs -1)) total_global_cpts = coarse_pts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);*/ /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); } CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); if (num_functions > 1 && num_cols_S_offd) { dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); } if (!comm_pkg) { hypre_MatvecCommPkgCreate(S); comm_pkg = hypre_ParCSRMatrixCommPkg(S); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); 
hypre_ParCSRCommHandleDestroy(comm_handle); if (num_functions > 1) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*---------------------------------------------------------------------- * Get the ghost rows of S *---------------------------------------------------------------------*/ if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); } if (num_procs > 1) { S_ext = hypre_ParCSRMatrixExtractBExt(S, S, 1); S_ext_i = hypre_CSRMatrixI(S_ext); S_ext_j = hypre_CSRMatrixBigJ(S_ext); S_ext_data = hypre_CSRMatrixData(S_ext); } if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i. 
*--------------------------------------------------------------------*/ else { for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i = 0; i < num_threads - 1; i++) { coarse_counter[i + 1] += coarse_counter[i]; jj_count[i + 1] += jj_count[i]; jj_count_offd[i + 1] += jj_count_offd[i]; } i = num_threads - 1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); } /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (jl < rest) { ns = jl * size + jl; ne = (jl + 1) * size + jl + 1; } else { ns = jl * size + rest; ne = (jl + 1) * size + rest; } jj_counter = 0; if (jl > 0) { jj_counter = jj_count[jl - 1]; } jj_counter_offd = 0; if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; } P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_S_offd; i++) { P_marker_offd[i] = -1; } strong_f_marker = -2; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; jj_begin_row = jj_counter; for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } /*-------------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *--------------------------------------------------------------*/ else { P_marker[i1] = strong_f_marker; } } jj_end_row = jj_counter; /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; jj_begin_row_offd = jj_counter_offd; if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++) { i1 = S_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } /*----------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *-----------------------------------------------------------*/ else { P_marker_offd[i1] = strong_f_marker; } } } jj_end_row_offd = jj_counter_offd; /* Loop over ith row of S. 
First, the diagonal part of S */ for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += S_diag_data[jj]; } /*-------------------------------------------------------------- * Case 2: neighbor i1 is an F-point and strongly influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. *--------------------------------------------------------------*/ else if (P_marker[i1] == strong_f_marker) { sum = zero; /*----------------------------------------------------------- * Loop over row of S for point i1 and calculate the sum * of the connections to c-points that strongly influence i. *-----------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1 + 1]; jj1++) { i2 = S_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row) { sum += S_diag_data[jj1]; } } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1 + 1]; jj1++) { i2 = S_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd) { sum += S_offd_data[jj1]; } } } if (sum != 0) { distribute = S_diag_data[jj] / sum; /*----------------------------------------------------------- * Loop over row of S for point i1 and do the distribution. 
*-----------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1 + 1]; jj1++) { i2 = S_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row) P_diag_data[P_marker[i2]] += distribute * S_diag_data[jj1]; } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1 + 1]; jj1++) { i2 = S_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i2]] += distribute * S_offd_data[jj1]; } } } else { /* do nothing */ } } /*-------------------------------------------------------------- * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1} * into the diagonal. *--------------------------------------------------------------*/ else { /* do nothing */ } } /*---------------------------------------------------------------- * Still looping over ith row of S. Next, loop over the * off-diagonal part of S *---------------------------------------------------------------*/ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++) { i1 = S_offd_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker_offd[i1] >= jj_begin_row_offd) { P_offd_data[P_marker_offd[i1]] += S_offd_data[jj]; } /*------------------------------------------------------------ * Case 2: neighbor i1 is an F-point and strongly influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. 
*-----------------------------------------------------------*/ else if (P_marker_offd[i1] == strong_f_marker) { sum = zero; /*--------------------------------------------------------- * Loop over row of S_ext for point i1 and calculate the sum * of the connections to c-points that strongly influence i. *---------------------------------------------------------*/ /* find row number */ c_num = S_offd_j[jj]; for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num + 1]; jj1++) { big_i2 = S_ext_j[jj1]; if (big_i2 >= col_1 && big_i2 < col_n) { /* in the diagonal block */ if (P_marker[(HYPRE_Int)(big_i2 - col_1)] >= jj_begin_row) { sum += S_ext_data[jj1]; } } else { /* in the off_diagonal block */ j = hypre_BigBinarySearch(col_map_offd, big_i2, num_cols_S_offd); if (j != -1) { if (P_marker_offd[j] >= jj_begin_row_offd) { sum += S_ext_data[jj1]; } } } } if (sum != 0) { distribute = S_offd_data[jj] / sum; /*--------------------------------------------------------- * Loop over row of S_ext for point i1 and do * the distribution. *--------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num + 1]; jj1++) { big_i2 = S_ext_j[jj1]; if (big_i2 >= col_1 && big_i2 < col_n) /* in the diagonal block */ { if (P_marker[(HYPRE_Int)(big_i2 - col_1)] >= jj_begin_row) P_diag_data[P_marker[(HYPRE_Int)(big_i2 - col_1)]] += distribute * S_ext_data[jj1]; } else { /* check to see if it is in the off_diagonal block */ j = hypre_BigBinarySearch(col_map_offd, big_i2, num_cols_S_offd); if (j != -1) { if (P_marker_offd[j] >= jj_begin_row_offd) P_offd_data[P_marker_offd[j]] += distribute * S_ext_data[jj1]; } } } } else { /* do nothing */ } } /*----------------------------------------------------------- * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1} * into the diagonal. 
*-----------------------------------------------------------*/ else { /* do nothing */ } } } /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. *-----------------------------------------------------------------*/ sum = 0.; for (jj = jj_begin_row; jj < jj_end_row; jj++) { sum += P_diag_data[jj]; } for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { sum += P_offd_data[jj]; } for (jj = jj_begin_row; jj < jj_end_row; jj++) { P_diag_data[jj] /= sum; } for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { P_offd_data[jj] /= sum; } } strong_f_marker--; P_offd_i[i + 1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(S), total_global_cpts, hypre_ParCSRMatrixColStarts(S), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < P_offd_size; i++) { P_marker[i] = P_offd_j[i]; } hypre_qsort0(P_marker, 0, 
P_offd_size - 1); num_cols_P_offd = 1; index = P_marker[0]; for (i = 1; i < P_offd_size; i++) { if (P_marker[i] > index) { index = P_marker[i]; P_marker[num_cols_P_offd++] = index; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_P_offd; i++) { tmp_map_offd[i] = P_marker[i]; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P, S, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_CSRMatrixDestroy(S_ext); } return (0); }
create_pairs_parallel.c
#include "utils.h"
#include "algorithm.h"
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * create_pairs: scatter one chunk's inverse suffix-array records into
 * per-destination "pairs_*" bucket files.
 *
 * Reads <ranks_dir>/ranks_<chunk_id> (long ranks) and
 * <ranks_dir>/sa_<chunk_id> (int suffix-array entries). For each SA entry,
 * an (absolute index, rank) InverseRecord is appended to
 * <output_dir>/pairs_<rank / WORKING_CHUNK_SIZE>. Consecutive records that
 * target the same bucket reuse the already-open FILE* instead of reopening
 * the file per record.
 *
 * NOTE(review): ranks appear to be stored negated on disk (they are
 * multiplied by -1 before use) -- confirm against the ranks-file producer.
 */
void create_pairs(char * ranks_dir, char * output_dir, int chunk_id)
{
    int i, writing_file;
    InverseRecord output;
    char ranks_file_name[MAX_PATH_LENGTH];
    char sa_file_name[MAX_PATH_LENGTH];
    char output_file_name[MAX_PATH_LENGTH];

    long * current_rank = (long*)Calloc(WORKING_CHUNK_SIZE * sizeof(long));
    int * sa_buffer = (int *) Calloc(WORKING_CHUNK_SIZE * sizeof(int));

    FILE * ranksFP = NULL;
    FILE * saFP = NULL;
    FILE * outputFP = NULL;

    sprintf(ranks_file_name, "%s/ranks_%d", ranks_dir, chunk_id);
    OpenBinaryFileRead(&ranksFP, ranks_file_name);
    /* Capture how many ranks were actually read (previously ignored) so a
     * truncated ranks file cannot make us read uninitialized memory below. */
    size_t num_ranks = fread(current_rank, sizeof(long), WORKING_CHUNK_SIZE, ranksFP);
    fclose(ranksFP);

    sprintf(sa_file_name, "%s/sa_%d", ranks_dir, chunk_id);
    OpenBinaryFileRead(&saFP, sa_file_name);
    /* sizeof(int) matches sa_buffer's element type (was sizeof(unsigned int)). */
    int num_elements = (int) fread(sa_buffer, sizeof(int), WORKING_CHUNK_SIZE, saFP);
    fclose(saFP);

    int last_file = -1;
    for (i = 0; i < num_elements; i++) {
        int sa = sa_buffer[i];
        /* Skip entries whose rank was never read (truncated/corrupt input). */
        if (sa < 0 || (size_t) sa >= num_ranks) {
            fprintf(stderr, "create_pairs: chunk %d: SA entry %d out of range\n",
                    chunk_id, sa);
            continue;
        }
        long rank = -1 * current_rank[sa];   /* ranks stored negated on disk */
        writing_file = (int)(rank / WORKING_CHUNK_SIZE);
        if (last_file != writing_file) {
            /* Destination bucket changed: close the old file, open the new. */
            if (outputFP) {
                fclose(outputFP);
            }
            sprintf(output_file_name, "%s/pairs_%d", output_dir, writing_file);
            OpenBinaryFileAppend(&outputFP, output_file_name);
            last_file = writing_file;
        }
        output.index = sa + chunk_id * WORKING_CHUNK_SIZE;
        output.value = rank;
        fwrite(&output, sizeof(InverseRecord), 1, outputFP);
    }

    if (outputFP) {
        fclose(outputFP);
    }
    free(current_rank);
    free(sa_buffer);
}

/*
 * Entry point: ./create_pairs <rank_dir> <output_dir> <total_chunks>
 * Processes every chunk, distributing chunks across an OpenMP thread team.
 * Returns SUCCESS, or FAILURE on bad usage.
 */
int main(int argc, char ** args)
{
    char * ranks_dir;
    char * output_dir;
    int total_chunks, chunk_id;

    if (argc < 4) {
        puts ("Run ./create_pairs <rank_dir> <output_dir> <total_chunks>");
        return FAILURE;
    }

    ranks_dir = args[1];
    output_dir = args[2];
    total_chunks = atoi(args[3]);

    omp_set_num_threads(NUM_THREADS);

    /* One loop iteration per chunk. The previous hand-rolled
     * chunk_id + omp_get_thread_num() scheme silently skipped chunks whenever
     * the runtime granted fewer than NUM_THREADS threads; a worksharing
     * parallel-for covers every chunk regardless of the actual team size. */
    #pragma omp parallel for schedule(dynamic)
    for (chunk_id = 0; chunk_id < total_chunks; chunk_id++) {
        create_pairs(ranks_dir, output_dir, chunk_id);
    }

    return SUCCESS;
}
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/OpenMPClause.h" #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. 
/// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. 
Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFENVHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. 
This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// RAII class that manages the template parameter depth. class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } void setAddedDepth(unsigned D) { Depth = Depth - AddedLevels + D; AddedLevels = D; } unsigned getDepth() const { return Depth; } unsigned getOriginalDepth() const { return Depth - AddedLevels; } }; /// Factory object for creating ParsedAttr objects. AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; /// Identifiers which have been declared within a tentative parse. 
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. SpaceBeforeLess = 0x0, /// No space before the '<' token NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name. /// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. 
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc, Priority Prio) { if (!Locs.empty() && Locs.back().isActive(P)) { if (Locs.back().Priority <= Prio) { Locs.back().TemplateName = TemplateName; Locs.back().LessLoc = LessLoc; Locs.back().Priority = Prio; } } else { Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount, P.BracketCount, P.BraceCount}); } } /// Mark the current potential missing template location as having been /// handled (this happens if we pass a "corresponding" '>' or '>>' token /// or leave a bracket scope). void clear(Parser &P) { while (!Locs.empty() && Locs.back().isActiveOrNested(P)) Locs.pop_back(); } /// Get the current enclosing expression that might hve been intended to be /// a template name. Loc *getCurrent(Parser &P) { if (!Locs.empty() && Locs.back().isActive(P)) return &Locs.back(); return nullptr; } }; AngleBracketTracker AngleBrackets; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. /// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; /// Whether to skip parsing of function bodies. /// /// This option can be used, for example, to speed up searches for /// declarations/definitions when indexing. bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. SourceLocation ExprStatementTokLoc; /// Flags describing a context in which we're parsing a statement. enum class ParsedStmtContext { /// This context permits declarations in language modes where declarations /// are not statements. AllowDeclarationsInC = 0x1, /// This context permits standalone OpenMP directives. 
AllowStandaloneOpenMPDirectives = 0x2, /// This context is at the top level of a GNU statement expression. InStmtExpr = 0x4, /// The context of a regular substatement. SubStmt = 0, /// The context of a compound-statement. Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives, LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr) }; /// Act on an expression statement that might be the last statement in a /// GNU statement expression. Checks whether we are actually at the end of /// a statement expression and builds a suitable expression statement. StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx); public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. 
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. 
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.isOneOf(tok::l_square, tok::r_square); } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation(); } /// Returns true if the current token is '=' or is a type of '='. /// For typos, give a fixit to '=' bool isTokenEqualOrEqualTypo(); /// Return the current token to the token stream and make the given /// token the current token. void UnconsumeToken(Token &Consumed) { Token Next = Tok; PP.EnterToken(Consumed, /*IsReinject*/true); PP.Lex(Tok); PP.EnterToken(Next, /*IsReinject*/true); } SourceLocation ConsumeAnnotationToken() { assert(Tok.isAnnotation() && "wrong consume method"); SourceLocation Loc = Tok.getLocation(); PrevTokLocation = Tok.getAnnotationEndLoc(); PP.Lex(Tok); return Loc; } /// ConsumeParen - This consume method keeps the paren count up-to-date. 
/// SourceLocation ConsumeParen() { assert(isTokenParen() && "wrong consume method"); if (Tok.getKind() == tok::l_paren) ++ParenCount; else if (ParenCount) { AngleBrackets.clear(*this); --ParenCount; // Don't let unbalanced )'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBracket - This consume method keeps the bracket count up-to-date. /// SourceLocation ConsumeBracket() { assert(isTokenBracket() && "wrong consume method"); if (Tok.getKind() == tok::l_square) ++BracketCount; else if (BracketCount) { AngleBrackets.clear(*this); --BracketCount; // Don't let unbalanced ]'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBrace - This consume method keeps the brace count up-to-date. /// SourceLocation ConsumeBrace() { assert(isTokenBrace() && "wrong consume method"); if (Tok.getKind() == tok::l_brace) ++BraceCount; else if (BraceCount) { AngleBrackets.clear(*this); --BraceCount; // Don't let unbalanced }'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeStringToken - Consume the current 'peek token', lexing a new one /// and returning the token kind. This method is specific to strings, as it /// handles string literal concatenation, as per C99 5.1.1.2, translation /// phase #6. SourceLocation ConsumeStringToken() { assert(isTokenStringLiteral() && "Should only consume string literals with this method"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// Consume the current code-completion token. /// /// This routine can be called to consume the code-completion token and /// continue processing in special cases where \c cutOffParsing() isn't /// desired, such as token caching or completion with lookahead. 
SourceLocation ConsumeCodeCompletionToken() { assert(Tok.is(tok::code_completion)); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } ///\ brief When we are consuming a code-completion token without having /// matched specific position in the grammar, provide code-completion results /// based on context. /// /// \returns the source location of the code-completion token. SourceLocation handleUnexpectedCodeCompletionToken(); /// Abruptly cut off parsing; mainly used when we have reached the /// code-completion point. void cutOffParsing() { if (PP.isCodeCompletionEnabled()) PP.setCodeCompletionReached(); // Cut off parsing by acting as if we reached the end-of-file. Tok.setKind(tok::eof); } /// Determine if we're at the end of the file or at a transition /// between modules. bool isEofOrEom() { tok::TokenKind Kind = Tok.getKind(); return Kind == tok::eof || Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include; } /// Checks if the \p Level is valid for use in a fold expression. bool isFoldOperator(prec::Level Level) const; /// Checks if the \p Kind is a valid operator for fold expressions. bool isFoldOperator(tok::TokenKind Kind) const; /// Initialize all pragma handlers. void initializePragmaHandlers(); /// Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); /// Handle the annotation token produced for /// #pragma comment... 
void HandlePragmaMSComment(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... void HandlePragmaFEnvAccess(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. 
/// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. static ParsedType getTypeAnnotation(const Token &Tok) { return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, ParsedType T) { Tok.setAnnotationValue(T.getAsOpaquePtr()); } /// Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. 
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);

/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords.  This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                     const char *&PrevSpec, unsigned &DiagID,
                     bool &isInvalid) {
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;

  // Fast path: only 'vector', 'bool' and (AltiVec-only) 'pixel' are
  // context-sensitive; anything else needs no out-of-line handling.
  if (Tok.getIdentifierInfo() != Ident_vector &&
      Tok.getIdentifierInfo() != Ident_bool &&
      (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
    return false;

  return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}

/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
      Tok.getIdentifierInfo() != Ident_vector) return false;
  return TryAltiVecVectorTokenOutOfLine();
}

bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
                              const char *&PrevSpec, unsigned &DiagID,
                              bool &isInvalid);

/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
  assert(getLangOpts().ObjC);
  if (Tok.isAnnotation())
    return false;
  // Lazily look up the IdentifierInfo the first time it is needed.
  if (!Ident_instancetype)
    Ident_instancetype = PP.getIdentifierInfo("instancetype");
  return Tok.getIdentifierInfo() == Ident_instancetype;
}

/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit.  This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword); /// Get the TemplateIdAnnotation from the token. TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... /// TPA.Revert(); /// class TentativeParsingAction { Parser &P; PreferredTypeBuilder PrevPreferredType; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser& p) : P(p) { PrevPreferredType = P.PreferredType; PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.PreferredType = PrevPreferredType; P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. 
class RevertingTentativeParsingAction
    : private Parser::TentativeParsingAction {
public:
  RevertingTentativeParsingAction(Parser &P)
      : Parser::TentativeParsingAction(P) {}
  ~RevertingTentativeParsingAction() { Revert(); }
};

class UnannotatedTentativeParsingAction;

/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
  Parser &P;
  Decl *DC;
  SaveAndRestore<bool> WithinObjCContainer;
public:
  explicit ObjCDeclContextSwitch(Parser &p)
    : P(p), DC(p.getObjCDeclContext()),
      WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
  }
  ~ObjCDeclContextSwitch() {
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
  }
};

/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input.  If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
                      unsigned Diag = diag::err_expected,
                      StringRef DiagMsg = "");

/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);

/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
  OutsideFunction = 0,
  InsideStruct = 1,
  InstanceVariableList = 2,
  AfterMemberFunctionDefinition = 3
};

/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);

/// Return false if the next token is an identifier.
/// An 'expected identifier' error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();

public:
//===--------------------------------------------------------------------===//
// Scope manipulation

/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  Parser *Self;
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // ParseScope - Construct a new object to manage a scope in the
  // parser Self where the new Scope is created with the flags
  // ScopeFlags, but only when we aren't about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
    : Self(Self) {
    if (EnteredScope && !BeforeCompoundStmt)
      Self->EnterScope(ScopeFlags);
    else {
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();

      // Null out Self so Exit()/the destructor do nothing.
      this->Self = nullptr;
    }
  }

  // Exit - Exit the scope associated with this object now, rather
  // than waiting until the object is destroyed.
  void Exit() {
    if (Self) {
      Self->ExitScope();
      Self = nullptr;
    }
  }

  ~ParseScope() {
    Exit();
  }
};

/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);

/// ExitScope - Pop a scope off the scope stack.
void ExitScope();

private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
  Scope *CurScope;
  unsigned OldFlags;
  ParseScopeFlags(const ParseScopeFlags &) = delete;
  void operator=(const ParseScopeFlags &) = delete;

public:
  ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
  ~ParseScopeFlags();
};

//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.

public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
/// Emit a diagnostic at the location of the current token.
DiagnosticBuilder Diag(unsigned DiagID) {
  return Diag(Tok, DiagID);
}

private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
                        SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);

public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
  StopAtSemi = 1 << 0,  ///< Stop skipping at semicolon
  /// Stop skipping at specified token, but don't skip the token itself
  StopBeforeMatch = 1 << 1,
  StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};

friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                     static_cast<unsigned>(R));
}

/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified).  Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point.  If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2, T3};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();

private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.

struct ParsingClass;

/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
  virtual ~LateParsedDeclaration();

  virtual void ParseLexedMethodDeclarations();
  virtual void ParseLexedMemberInitializers();
  virtual void ParseLexedMethodDefs();
  virtual void ParseLexedAttributes();
};

/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
  LateParsedClass(Parser *P, ParsingClass *C);
  ~LateParsedClass() override;

  void ParseLexedMethodDeclarations() override;
  void ParseLexedMemberInitializers() override;
  void ParseLexedMethodDefs() override;
  void ParseLexedAttributes() override;

private:
  Parser *Self;
  ParsingClass *Class;
};

/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// member declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
  Parser *Self;
  CachedTokens Toks;
  IdentifierInfo &AttrName;
  IdentifierInfo *MacroII = nullptr;
  SourceLocation AttrNameLoc;
  SmallVector<Decl*, 2> Decls;

  explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                               SourceLocation Loc)
    : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

  void ParseLexedAttributes() override;

  void addDecl(Decl *D) { Decls.push_back(D); }
};

// A list of late-parsed attributes.  Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

  bool parseSoon() { return ParseSoon; }

private:
  bool ParseSoon;  // Are we planning to parse these shortly after creation?
};

/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
  Parser *Self;
  Decl *D;
  CachedTokens Toks;

  /// Whether this member function had an associated template
  /// scope. When true, D is a template declaration.
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  explicit LexedMethod(Parser* P, Decl *MD)
    : Self(P), D(MD), TemplateScope(false) {}

  void ParseLexedMethodDefs() override;
};

/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  explicit LateParsedDefaultArgument(Decl *P,
                                     std::unique_ptr<CachedTokens> Toks = nullptr)
    : Param(P), Toks(std::move(Toks)) { }

  /// Param - The parameter declaration for this parameter.
  Decl *Param;

  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','. This will be NULL for parameters that have no
  /// default argument.
  std::unique_ptr<CachedTokens> Toks;
};

/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
    : Self(P), Method(M), TemplateScope(false),
      ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser* Self;

  /// Method - The method declaration.
  Decl *Method;

  /// Whether this member function had an associated template
  /// scope. When true, D is a template declaration.
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
  SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

  /// The set of tokens that make up an exception-specification that
  /// has not yet been parsed.
  CachedTokens *ExceptionSpecTokens;
};

/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must to be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD)
    : Self(P), Field(FD) { }

  void ParseLexedMemberInitializers() override;

  Parser *Self;

  /// Field - The field declaration.
  Decl *Field;

  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};

/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;

/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
    : TopLevelClass(TopLevelClass), TemplateScope(false),
      IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }

  /// Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// Whether this class had an associated template
  /// scope. When true, TagOrTemplate is a template declaration;
  /// otherwise, it is a tag declaration.
  bool TemplateScope : 1;

  /// Whether this class is an __interface.
  bool IsInterface : 1;

  /// The class or class template whose definition we are parsing.
  Decl *TagOrTemplate;

  /// LateParsedDeclarations - Method declarations, inline definitions and
  /// nested classes that contain pieces whose parsing will be delayed until
  /// the top-level class is fully defined.
  LateParsedDeclarationsContainer LateParsedDeclarations;
};

/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;

ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}

/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
  Parser &P;
  bool Popped;
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
    : P(P), Popped(false),
      State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }

  /// Pop this class of the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  ~ParsingClassDefinition() {
    if (!Popped)
      P.PopParsingClass(State);
  }
};

/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  ParsedTemplateInfo()
    : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }

  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
    : Kind(isSpecialization? ExplicitSpecialization : Template),
      TemplateParams(TemplateParams),
      LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
    : Kind(ExplicitInstantiation), TemplateParams(nullptr),
      ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
      LastParameterListWasEmpty(false){ }

  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;

  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;

  SourceRange getSourceRange() const LLVM_READONLY;
};

void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);

static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);

Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);

enum CachedInitKind {
  CIK_DefaultArgument,
  CIK_DefaultInitializer
};

NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
                                   ParsedAttributes &AccessAttrs,
                                   ParsingDeclarator &D,
                                   const ParsedTemplateInfo &TemplateInfo,
                                   const VirtSpecifiers &VS,
                                   SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
                             bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
                         bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
// Convenience overload: a single stop token.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true) {
  return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true);

//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.

struct ParsedAttributesWithRange : ParsedAttributes {
  ParsedAttributesWithRange(AttributeFactory &factory)
    : ParsedAttributes(factory) {}

  void clear() {
    ParsedAttributes::clear();
    Range = SourceRange();
  }

  SourceRange Range;
};
struct ParsedAttributesViewWithRange : ParsedAttributesView {
  ParsedAttributesViewWithRange() : ParsedAttributesView() {}
  void clearListOnly() {
    ParsedAttributesView::clearListOnly();
    Range = SourceRange();
  }

  SourceRange Range;
};

DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
                                        ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
    ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr,
    AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
                                              ParsingDeclSpec &DS,
                                              AccessSpecifier AS);

void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
             const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
             LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
ExprResult ParseAsmStringLiteral();

// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
                                      ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
    ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
    SmallVectorImpl<IdentifierLocPair> &protocolIdents,
    SourceLocation &rAngleLoc, bool mayBeProtocolList = true);

void HelperActionsForIvarDeclarations(Decl *interfaceDecl,
                                      SourceLocation atLoc,
                                      BalancedDelimiterTracker &T,
                                      SmallVectorImpl<Decl *> &AllIvarDecls,
                                      bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
                                     tok::ObjCKeywordKind visibility,
                                     SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
                                 SmallVectorImpl<SourceLocation> &PLocs,
                                 bool WarnOnDeclarations,
                                 bool ForObjCContainer,
                                 SourceLocation &LAngleLoc,
                                 SourceLocation &EndProtoLoc,
                                 bool consumeLastToken);

/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
       ParsedType baseType,
       SourceLocation &typeArgsLAngleLoc,
       SmallVectorImpl<ParsedType> &typeArgs,
       SourceLocation &typeArgsRAngleLoc,
       SourceLocation &protocolLAngleLoc,
       SmallVectorImpl<Decl *> &protocols,
       SmallVectorImpl<SourceLocation> &protocolLocs,
       SourceLocation &protocolRAngleLoc,
       bool consumeLastToken,
       bool warnOnIncompleteProtocols);

/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
       ParsedType baseType,
       SourceLocation &typeArgsLAngleLoc,
       SmallVectorImpl<ParsedType> &typeArgs,
       SourceLocation &typeArgsRAngleLoc,
       SourceLocation &protocolLAngleLoc,
       SmallVectorImpl<Decl *> &protocols,
       SmallVectorImpl<SourceLocation> &protocolLocs,
       SourceLocation &protocolRAngleLoc,
       bool consumeLastToken);

/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);

/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
                                                  ParsedType type,
                                                  bool consumeLastToken,
                                                  SourceLocation &endLoc);

void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
                                Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
                                              ParsedAttributes &prefixAttrs);

/// RAII helper that tracks state while parsing an @implementation,
/// collecting late-parsed method bodies; finish() replays them.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
    : P(parser), Dcl(D), HasCFunction(false) {
    P.CurParsedObjCImpl = this;
    Finished = false;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);

DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
                                                    ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);

IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);

// Definitions for Objective-c context sensitive keywords
// recognition.
enum ObjCTypeQual {
  objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
  objc_nonnull, objc_nullable, objc_null_unspecified,
  objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];

bool isTokIdentifier_in() const;

ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
                             ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
          tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
          bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
          tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
          bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);

Decl *ParseObjCMethodDefinition();

public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.

/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
  NotTypeCast = 0,
  MaybeTypeCast,
  IsTypeCast
};

ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
    TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);

ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
                                unsigned &NumLineToksConsumed,
                                bool IsUnevaluated);

private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);

ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);

ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
                                      prec::Level MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression,
                               bool isAddressOfOperand,
                               bool &NotCastExpr,
                               TypeCastState isTypeCast,
                               bool isVectorLiteral = false);
ExprResult ParseCastExpression(bool isUnaryExpression,
                               bool isAddressOfOperand = false,
                               TypeCastState isTypeCast = NotTypeCast,
                               bool isVectorLiteral = false);

/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();

/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
  tok::TokenKind K = Tok.getKind();
  return (K == tok::l_square || K == tok::l_paren ||
          K == tok::period || K == tok::arrow ||
          K == tok::plusplus || K == tok::minusminus);
}

bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
                                         const Token &OpToken);
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
  // Only bother checking when an angle bracket is currently being tracked.
  if (auto *Info = AngleBrackets.getCurrent(*this))
    return checkPotentialAngleBracketDelimiter(*Info, OpToken);
  return false;
}

ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();

ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
                                              bool &isCastExpr,
                                              ParsedType &CastTy,
                                              SourceRange &CastRange);

typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;

/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
                         SmallVectorImpl<SourceLocation> &CommaLocs,
                         llvm::function_ref<void()> ExpressionStarts =
                             llvm::function_ref<void()>());

/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
                               SmallVectorImpl<SourceLocation> &CommaLocs);

/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
  SimpleExpr,      // Only parse '(' expression ')'
  FoldExpr,        // Also allow fold-expression <anything>
  CompoundStmt,    // Also allow '(' compound-statement ')'
  CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
  CastExpr         // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
                                bool stopIfCastExpr,
                                bool isTypeCast,
                                ParsedType &CastTy,
                                SourceLocation &RParenLoc);

/// Disambiguate and parse a parenthesized construct that might be either
/// a cast or an expression (C++-specific ambiguity handling).
ExprResult ParseCXXAmbiguousParenExpression(
    ParenParseOption &ExprType, ParsedType &CastTy,
    BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
                                          SourceLocation LParenLoc,
                                          SourceLocation RParenLoc);

ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);

ExprResult ParseGenericSelectionExpression();

ExprResult ParseObjCBoolLiteral();

ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);

//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
                                   Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);

/// Returns true if tokens \p A and \p B are directly adjacent in the
/// source (no whitespace between them).
bool areTokensAdjacent(const Token &A, const Token &B);

void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
                                bool EnteringContext, IdentifierInfo &II,
                                CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
                                    ParsedType ObjectType,
                                    bool EnteringContext,
                                    bool *MayBePseudoDestructor = nullptr,
                                    bool IsTypename = false,
                                    IdentifierInfo **LastII = nullptr,
                                    bool OnlyNamespace = false);

//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions

/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
  /// This appears to be a lambda-introducer, which has been fully parsed.
  Success,
  /// This is a lambda-introducer, but has not been fully parsed, and this
  /// function needs to be called again to parse it.
  Incomplete,
  /// This is definitely an Objective-C message send expression, rather than
  /// a lambda-introducer, attribute-specifier, or array designator.
  MessageSend,
  /// This is not a lambda-introducer.
  Invalid,
};

// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
                      LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);

//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts

ExprResult ParseCXXCasts();

/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();

//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification

ExprResult ParseCXXTypeid();

//===--------------------------------------------------------------------===//
//  C++ : Microsoft __uuidof Expression

ExprResult ParseCXXUuidof();

//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions

ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
                                    tok::TokenKind OpKind,
                                    CXXScopeSpec &SS,
                                    ParsedType ObjectType);

//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer

ExprResult ParseCXXThis();

//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression

ExprResult ParseThrowExpression();

/// Parse an (optional) exception-specification (dynamic or noexcept),
/// optionally delaying parsing of its tokens (\p ExceptionSpecTokens).
ExceptionSpecificationType tryParseExceptionSpecification(
    bool Delayed, SourceRange &SpecificationRange,
    SmallVectorImpl<ParsedType> &DynamicExceptions,
    SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
    ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens);

// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
    SourceRange &SpecificationRange,
    SmallVectorImpl<ParsedType> &Exceptions,
    SmallVectorImpl<SourceRange> &Ranges);

//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type

TypeResult ParseTrailingReturnType(SourceRange &Range,
                                   bool MayBeFollowedByDirectInit);

//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals

ExprResult ParseCXXBoolLiteral();

//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)

ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);

/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);

bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);

//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete

bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
                                 Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
                                    SourceLocation Start);

//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
                                        SourceLocation Loc,
                                        Sema::ConditionKind CK,
                                        ForRangeInfo *FRI = nullptr);

//===--------------------------------------------------------------------===//
// C++ Coroutines

ExprResult ParseCoyieldExpression();

//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... ExprResult ParseInitializer() { if (Tok.isNot(tok::l_brace)) return ParseAssignmentExpression(); return ParseBraceInitializer(); } bool MayBeDesignationStart(); ExprResult ParseBraceInitializer(); ExprResult ParseInitializerWithPotentialDesignator(); //===--------------------------------------------------------------------===// // clang Expressions ExprResult ParseBlockLiteralExpression(); // ^{...} //===--------------------------------------------------------------------===// // Objective-C Expressions ExprResult ParseObjCAtExpression(SourceLocation AtLocation); ExprResult ParseObjCStringLiteral(SourceLocation AtLoc); ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc); ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc); ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue); ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc); ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc); ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc); ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc); ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc); bool isSimpleObjCMessageExpression(); ExprResult ParseObjCMessageExpression(); ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); ExprResult ParseAssignmentExprWithObjCMessageExprStart( SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr); //===--------------------------------------------------------------------===// // C99 6.8: Statements and Blocks. /// A SmallVector of statements, with stack size 32 (as that is the only one /// used.) 
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;

StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
               ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
    StmtVector &Stmts, ParsedStmtContext StmtCtx,
    SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
                                       StmtVector &Stmts,
                                       ParsedStmtContext StmtCtx,
                                       SourceLocation *TrailingElseLoc,
                                       ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
                                 ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
                              bool MissingCase = false,
                              ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
                                  unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
                               Sema::ConditionResult &CondResult,
                               SourceLocation Loc,
                               Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
                               ParsedStmtContext StmtCtx,
                               SourceLocation *TrailingElseLoc,
                               ParsedAttributesWithRange &Attrs);

/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
  /// Parse the block; this code is always used.
  IEB_Parse,
  /// Skip the block entirely; this code is never used.
  IEB_Skip,
  /// Parse the block as a dependent block, which may be used in
  /// some template instantiations but not others.
  IEB_Dependent
};

/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
  /// The location of the initial keyword.
  SourceLocation KeywordLoc;
  /// Whether this is an __if_exists block (rather than an
  /// __if_not_exists block).
  bool IsIfExists;

  /// Nested-name-specifier preceding the name.
  CXXScopeSpec SS;

  /// The name we're looking for.
  UnqualifiedId Name;

  /// The behavior this __if_exists or __if_not_exists block
  /// should follow.
  IfExistsBehavior Behavior;
};

bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
                                            ParsedAttributes &AccessAttrs,
                                            AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
                                            bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
                         SmallVectorImpl<Expr *> &Constraints,
                         SmallVectorImpl<Expr *> &Exprs);

//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks

StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);

//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks

StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();

//===--------------------------------------------------------------------===//
// Objective-C Statements

StmtResult ParseObjCAtStatement(SourceLocation atLoc,
                                ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);

//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.

/// A context for parsing declaration specifiers.  TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
  DSC_normal,         // normal context
  DSC_class,          // class context, enables 'friend'
  DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
  DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
  DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
  DSC_top_level,         // top-level/namespace declaration context
  DSC_template_param,    // template parameter context
  DSC_template_type_arg, // template type argument context
  DSC_objc_method_result, // ObjC method result context, enables
                          // 'instancetype'
  DSC_condition           // condition declaration context
};

/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  // Only the contexts that consist purely of a type-specifier sequence
  // answer true; all general declaration contexts answer false.
  switch (DSC) {
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_template_type_arg:
    return true;

  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}

/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  // Deduction is disallowed where the written type must name a type
  // exactly (template arguments, trailing return types, aliases, and
  // ObjC method results); everywhere else it is permitted.
  switch (DSC) {
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;

  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}

/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  SourceLocation ColonLoc;
  ExprResult RangeExpr;

  // A for-range declaration was parsed iff the ':' was seen.
  bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
/// Extends ForRangeInit with the loop-variable statement for a
/// for-range-declaration.
struct ForRangeInfo : ForRangeInit {
  StmtResult LoopVar;
};

DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                SourceLocation &DeclEnd,
                                ParsedAttributesWithRange &attrs,
                                SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
                       ParsedAttributesWithRange &attrs, bool RequireSemi,
                       ForRangeInit *FRI = nullptr,
                       SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                              SourceLocation *DeclEnd = nullptr,
                              ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
             const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
    Declarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();

/// Handle the C89 "implicit int" rule: decide how to recover when a
/// declaration is missing a type specifier.
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                      const ParsedTemplateInfo &TemplateInfo,
                      AccessSpecifier AS, DeclSpecContext DSC,
                      ParsedAttributesWithRange &Attrs);
/// Map a declarator context to the corresponding decl-specifier context.
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
    DeclSpec &DS,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    AccessSpecifier AS = AS_none,
    DeclSpecContext DSC = DeclSpecContext::DSC_normal,
    LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
    DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
    LateParsedAttrList *LateAttrs = nullptr);

void ParseSpecifierQualifierList(
    DeclSpec &DS, AccessSpecifier AS = AS_none,
    DeclSpecContext DSC = DeclSpecContext::DSC_normal);

void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
                                DeclaratorContext Context);

void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
                          Decl *TagDecl);

void ParseStructDeclaration(
    ParsingDeclSpec &DS,
    llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();

/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier.  Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;

/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  if (getLangOpts().CPlusPlus)
    return isCXXDeclarationSpecifier() == TPResult::True;
  return isDeclarationSpecifier(true);
}

/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  if (getLangOpts().CPlusPlus)
    return isCXXDeclarationStatement();
  return isDeclarationSpecifier(true);
}

/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  // OpenMP needs to know a loop is starting before disambiguation.
  if (getLangOpts().OpenMP)
    Actions.startOpenMPLoop();
  if (getLangOpts().CPlusPlus)
    return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
  return isDeclarationSpecifier(true);
}

/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();

/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();

/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);

/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
  TypeIdInParens,
  TypeIdUnambiguous,
  TypeIdAsTemplateArgument
};

/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdInParens, isAmbiguous);
  // In C there is no ambiguity: it is a type-id iff a specifier-qualifier
  // starts here.
  isAmbiguous = false;
  return isTypeSpecifierQualifier();
}
bool isTypeIdInParens() {
  bool isAmbiguous;
  return isTypeIdInParens(isAmbiguous);
}

/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
  bool IsAmbiguous;
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
  return isTypeSpecifierQualifier();
}

/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();

/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
  Expression,    ///< Disambiguated as an expression (either kind).
  ConditionDecl, ///< Disambiguated as the declaration form of condition.
  InitStmtDecl,  ///< Disambiguated as a simple-declaration init-statement.
  ForRangeDecl,  ///< Disambiguated as a for-range declaration.
  Error          ///< Can't be any of the above!
};

/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
                                         bool CanBeForRangeDecl);

bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool isAmbiguous;
  return isCXXTypeId(Context, isAmbiguous);
}

/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
  True, False, Ambiguous, Error
};

/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPR_true if this token starts an expression, \c TPR_false if
/// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
/// tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);

/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
                          bool *InvalidAsDeclSpec = nullptr);

/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();

/// Determine whether the current token sequence might be
///   '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);

/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);

// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract,
                            bool mayHaveIdentifier = true,
                            bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
                                   bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();

public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
                         DeclaratorContext Context
                           = DeclaratorContext::TypeNameContext,
                         AccessSpecifier AS = AS_none,
                         Decl **OwnedType = nullptr,
                         ParsedAttributes *Attrs = nullptr);

private:
void ParseBlockId(SourceLocation CaretLoc);

/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
  const LangOptions &LO = getLangOpts();
  return LO.DoubleSquareBracketAttributes;
}

// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only '[[' (double square bracket) introduces a C++11 attribute, and
  // only when standard attributes are enabled.
  if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
    return false;
  return DiagnoseProhibitedCXX11Attribute();
}

bool DiagnoseProhibitedCXX11Attribute();

/// If the current position looks like a C++11 attribute (or 'alignas')
/// that belongs at \p CorrectLocation instead, diagnose it.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
      Tok.isNot(tok::kw_alignas))
    return;
  DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                     SourceLocation CorrectLocation);

void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                    DeclSpec &DS, Sema::TagUseKind TUK);

// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  Attrs.clear();
}

void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  // View types do not own the attribute list; clear only the list.
  Attrs.clearListOnly();
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                  SourceLocation FixItLoc);

// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which standard permits but we don't support yet, for example, attributes
// appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                             unsigned DiagID);

/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();

/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes(); /// Parses syntax-generic attribute arguments for attributes which are /// known to the implementation, and adds them to the given ParsedAttributes /// list with the given attribute syntax. Returns the number of arguments /// parsed for the attribute. unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseGNUAttributes(Declarator &D, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributes attrs(AttrFactory); SourceLocation endLoc; ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D); D.takeAttributes(attrs, endLoc); } } void MaybeParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) ParseGNUAttributes(attrs, endLoc, LateAttrs); } void ParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseCXX11Attributes(Declarator &D) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } void MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (standardAttributesAllowed() 
      // NOTE(review): continuation of MaybeParseCXX11Attributes(ParsedAttributes&,
      // SourceLocation*), whose opening condition precedes this fragment.
      && isCXX11AttributeSpecifier()) {
    ParsedAttributesWithRange attrsWithRange(AttrFactory);
    ParseCXX11Attributes(attrsWithRange, endLoc);
    attrs.takeAllFrom(attrsWithRange);
  }
}

// Overload that already has a ranged attribute list to fill in place.
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (standardAttributesAllowed() &&
      isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
    ParseCXX11Attributes(attrs, endLoc);
}

void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
                                  SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                          SourceLocation *EndLoc = nullptr);

/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
                             SourceLocation AttrNameLoc,
                             ParsedAttributes &Attrs, SourceLocation *EndLoc,
                             IdentifierInfo *ScopeName,
                             SourceLocation ScopeLoc);

IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);

// Microsoft [attribute] syntax — only under -fms-extensions and when the
// current token is '['.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
    ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
                              SourceLocation *endLoc = nullptr);

// __declspec(...) — gated on the DeclSpecKeyword language option.
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  const auto &LO = getLangOpts();
  if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
    ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                             SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
                                SourceLocation AttrNameLoc,
                                ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);

/// Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
  if (getLangOpts().OpenCL)
    return ParseOpenCLUnrollHintAttribute(Attrs);
  return true;  // nothing to do outside OpenCL — not an error
}

/// Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);

void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);

// Availability / external-source-symbol / bridge-related / type-tag
// attributes all share the (name, loc, attrs, end, scope, syntax) shape.
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
                                SourceLocation AvailabilityLoc,
                                ParsedAttributes &attrs,
                                SourceLocation *endLoc,
                                IdentifierInfo *ScopeName,
                                SourceLocation ScopeLoc,
                                ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);

void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
                                        SourceLocation Loc,
                                        ParsedAttributes &Attrs,
                                        SourceLocation *EndLoc,
                                        IdentifierInfo *ScopeName,
                                        SourceLocation ScopeLoc,
                                        ParsedAttr::Syntax Syntax);

void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
                                     SourceLocation ObjCBridgeRelatedLoc,
                                     ParsedAttributes &attrs,
                                     SourceLocation *endLoc,
                                     IdentifierInfo *ScopeName,
                                     SourceLocation ScopeLoc,
                                     ParsedAttr::Syntax Syntax);

void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
                                      SourceLocation AttrNameLoc,
                                      ParsedAttributes &Attrs,
                                      SourceLocation *EndLoc,
                                      IdentifierInfo *ScopeName,
                                      SourceLocation ScopeLoc,
                                      ParsedAttr::Syntax Syntax);

void ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
                               SourceLocation AttrNameLoc,
                               ParsedAttributes &Attrs,
                               SourceLocation *EndLoc,
                               IdentifierInfo *ScopeName,
                               SourceLocation ScopeLoc,
                               ParsedAttr::Syntax
// NOTE(review): the trailing "Syntax);" closes the ParseAttributeWithTypeArg
// declaration begun in the preceding fragment.
                               Syntax);

void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);

ExprResult ParseAlignArgument(SourceLocation Start,
                              SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
                             SourceLocation *endLoc = nullptr);

// Virt-specifier (override/final) classification helpers.
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
  return isCXX11VirtSpecifier(Tok);  // classify the current token
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
                                        SourceLocation FriendLoc);

bool isCXX11FinalKeyword() const;

/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
  Parser &P;
  CXXScopeSpec &SS;
  bool EnteredScope;   // ActOnCXXEnterDeclaratorScope succeeded
  bool CreatedScope;   // P.EnterScope was called; must ExitScope on destroy
public:
  DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
    : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}

  void EnterDeclaratorScope() {
    assert(!EnteredScope && "Already entered the scope!");
    assert(SS.isSet() && "C++ scope was not set!");

    CreatedScope = true;
    P.EnterScope(0); // Not a decl scope.

    if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
      EnteredScope = true;
  }

  ~DeclaratorScopeObj() {
    if (EnteredScope) {
      assert(SS.isSet() && "C++ scope was cleared ?");
      P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
    }
    if (CreatedScope)
      P.ExitScope();
  }
};

/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
                             DirectDeclParseFunction DirectDeclParser);

// Bit flags describing which vendor attribute families a type-qualifier
// list may legally contain at a given grammar position.
enum AttrRequirements {
  AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
  AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
  AR_GNUAttributesParsed = 1 << 1,
  AR_CXX11AttributesParsed = 1 << 2,
  AR_DeclspecAttributesParsed = 1 << 3,
  AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed |
                           AR_DeclspecAttributesParsed,
  AR_VendorAttributesParsed = AR_GNUAttributesParsed |
                              AR_DeclspecAttributesParsed
};

void ParseTypeQualifierListOpt(
    DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
    bool AtomicAllowed = true, bool IdentifierRequired = false,
    Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs,
                             BalancedDelimiterTracker &Tracker,
                             bool IsAmbiguous, bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
                       SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
    Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
    Declarator &D, ParsedAttributes &attrs,
    SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
    SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);

//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]

/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
  /// This is not an attribute specifier.
  CAK_NotAttributeSpecifier,
  /// This should be treated as an attribute-specifier.
  CAK_AttributeSpecifier,
  /// The next tokens are '[[', but this is not an attribute-specifier. This
  /// is ill-formed by C++11 [dcl.attr.grammar]p6.
  CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
                          bool OuterMightBeMessageSend = false);

void DiagnoseUnexpectedNamespace(NamedDecl *Context);

DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
                              SourceLocation &DeclEnd,
                              SourceLocation InlineLoc = SourceLocation());

// Location bundle for one level of a nested (possibly inline) namespace.
struct InnerNamespaceInfo {
  SourceLocation NamespaceLoc;
  SourceLocation InlineLoc;
  SourceLocation IdentLoc;
  IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;

void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
                         unsigned int index, SourceLocation &InlineLoc,
                         ParsedAttributes &attrs,
                         BalancedDelimiterTracker &Tracker);

Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
    DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
    SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc,
                          SourceLocation &DeclEnd, ParsedAttributes &attrs);

// One declarator within a using-declaration (which may declare several).
struct UsingDeclarator {
  SourceLocation TypenameLoc;
  CXXScopeSpec SS;
  UnqualifiedId Name;
  SourceLocation EllipsisLoc;

  // Reset all fields so the struct can be reused for the next declarator.
  void clear() {
    TypenameLoc = EllipsisLoc = SourceLocation();
    SS.clear();
    Name.clear();
  }
};

bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
                                     const ParsedTemplateInfo &TemplateInfo,
                                     SourceLocation UsingLoc,
                                     SourceLocation &DeclEnd,
                                     AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
    const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
    UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
    ParsedAttributes &Attrs, Decl **OwnedType = nullptr);

// Declaration continues in the following fragment ("Decl" + "*Parse...").
Decl
// NOTE(review): "ParseBaseClause" completes the declaration whose return type
// ("void") ends the preceding fragment.
ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;

bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
                                  SourceLocation TemplateKWLoc,
                                  IdentifierInfo *Name,
                                  SourceLocation NameLoc,
                                  bool EnteringContext,
                                  ParsedType ObjectType,
                                  UnqualifiedId &Id,
                                  bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
                                ParsedType ObjectType,
                                UnqualifiedId &Result);

//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
                                          CachedTokens &Toks,
                                          SourceLocation Loc);
/// Parses OpenMP context selectors and calls \p Callback for each
/// successfully parsed context selector.
bool parseOpenMPContextSelectors(
    SourceLocation Loc,
    llvm::function_ref<
        void(SourceRange,
             const Sema::OpenMPDeclareVariantCtsSelectorData &)>
        Callback);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
                                   SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
                                       SourceLocation Loc);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
    AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
    DeclSpec::TST TagType = DeclSpec::TST_unspecified,
    Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);

/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
                                           DeclarationName &Name,
                                           AccessSpecifier AS = AS_none);
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
    OpenMPDirectiveKind Kind,
    const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
        Callback,
    bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
                             OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
                                       bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
                                              bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
                                    OpenMPClauseKind Kind, bool ParseOnly);

public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
                                 bool IsAddressOfOperand = false);

/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
  Expr *TailExpr = nullptr;
  SourceLocation ColonLoc;
  SourceLocation RLoc;
  CXXScopeSpec ReductionOrMapperIdScopeSpec;
  DeclarationNameInfo ReductionOrMapperId;
  OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
  OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val;
  SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers>
      MapTypeModifiers;
  SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers>
      MapTypeModifiersLoc;
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
  bool IsMapTypeImplicit = false;
  SourceLocation DepLinMapLoc;
};

/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
                        SmallVectorImpl<Expr *> &Vars,
                        OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
                        bool AllowDestructorName, bool AllowConstructorName,
                        bool AllowDeductionGuide, ParsedType ObjectType,
                        SourceLocation *TemplateKWLoc,
                        UnqualifiedId &Result);

/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);

private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]

// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
                                           SourceLocation &DeclEnd,
                                           ParsedAttributes &AccessAttrs,
                                           AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
                                               SourceLocation &DeclEnd,
                                               ParsedAttributes &AccessAttrs,
                                               AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
    DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
    ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
    ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(unsigned Depth,
                             SmallVectorImpl<NamedDecl *> &TemplateParams,
                             SourceLocation &LAngleLoc,
                             SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
                                SmallVectorImpl<NamedDecl*> &TemplateParams);
bool isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
                               SourceLocation CorrectLoc,
                               bool AlreadyHasEllipsis,
                               bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
                                           Declarator &D);

// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;

bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
                                    bool ConsumeLastToken,
                                    bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
                                      SourceLocation &LAngleLoc,
                                      TemplateArgList &TemplateArgs,
                                      SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
                             CXXScopeSpec &SS,
                             SourceLocation TemplateKWLoc,
                             UnqualifiedId &TemplateName,
                             bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType(bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
                                 SourceLocation ExternLoc,
                                 SourceLocation TemplateLoc,
                                 SourceLocation &DeclEnd,
                                 ParsedAttributes &AccessAttrs,
                                 AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
                             SourceLocation &DeclEnd);

//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
// Fast-path wrapper: only annotation tokens produced by the module machinery
// can start a misplaced module import.
bool tryParseMisplacedModuleImport() {
  tok::TokenKind Kind = Tok.getKind();
  if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
      Kind == tok::annot_module_include)
    return parseMisplacedModuleImport();
  return false;
}

bool ParseModuleName(
    SourceLocation UseLoc,
    SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
    bool IsImport);

//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();

//===--------------------------------------------------------------------===//
// Embarcadero: Arary and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();

//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
                               unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
};

} // end namespace clang

#endif
/* ==== concatenated-file boundary — next file: GB_binop__second_bool.c ==== */
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this translation unit instantiates the generic binop kernels
// for the SECOND operator on bool: z = SECOND(x,y) = y.  All kernel bodies are
// spliced in via #include of shared templates, specialized by the macros below.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__second_bool)
// A.*B function (eWiseMult):       GB (_AemultB_08__second_bool)
// A.*B function (eWiseMult):       GB (_AemultB_02__second_bool)
// A.*B function (eWiseMult):       GB (_AemultB_04__second_bool)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__second_bool)
// A*D function (colscale):         GB (_AxD__second_bool)
// D*A function (rowscale):         GB (_DxB__second_bool)
// C+=B function (dense accum):     GB (_Cdense_accumB__second_bool)
// C+=b function (dense accum):     GB (_Cdense_accumb__second_bool)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__second_bool)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   bool
// A type:   bool
// A pattern? 1
// B type:   bool
// B pattern? 0

// BinaryOp: cij = bij

#define GB_ATYPE \
    bool

#define GB_BTYPE \
    bool

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// (expands to nothing: SECOND never reads A's values, only its pattern)
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// true if values of A are not used
#define GB_A_IS_PATTERN \
    1 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    bool bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: SECOND returns its second operand unchanged
#define GB_BINOP(z,x,y,i,j) \
    z = y ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    1

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SECOND || GxB_NO_BOOL || GxB_NO_SECOND_BOOL)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (SECOND is not in that list, so this kernel is compiled out.)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__second_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__second_bool)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__second_bool)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__second_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__second_bool)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__second_bool)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only defined for eWiseUnion (fill values for entries
    // present in just one of A or B)
    bool alpha_scalar ;
    bool beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((bool *) alpha_scalar_in)) ;
        beta_scalar = (*((bool *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__second_bool)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__second_bool)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (GB_BINOP_FLIP is 0 for SECOND, so only this branch is compiled.)
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__second_bool)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__second_bool)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// NOTE(review): this kernel is compiled out (#if 0) for SECOND, since
// SECOND(x,bij) == bij; its text is truncated at the end of this chunk.
#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool x = (*((bool *) x_input)) ;
    bool *Bx = (bool *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ;
p++) { if (!GBB (Bb, p)) continue ; bool bij = GBX (Bx, p, false) ; Cx [p] = bij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; bool *Ax = (bool *) Ax_input ; bool y = (*((bool *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = y ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = GBX (Ax, pA, false) ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ bool #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool x = (*((const bool *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ bool } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = y ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool y = (*((const bool *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
descriptors.h
/* Copyright (c) 2012, MAURO BIANCO, UGO VARETTO, SWISS NATIONAL SUPERCOMPUTING CENTRE (CSCS) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Swiss National Supercomputing Centre (CSCS) nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MAURO BIANCO, UGO VARETTO, OR SWISS NATIONAL SUPERCOMPUTING CENTRE (CSCS), BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#ifndef _DESCRIPTORS_H_
#define _DESCRIPTORS_H_

#include <utils/array.h>
#include <vector>
#include <proc_grids_2D.h>
#include <Halo_Exchange_2D.h>
#include <proc_grids_3D.h>
#include <Halo_Exchange_3D.h>
#include <utils/make_array.h>
#include <assert.h>
#include <boost/type_traits/remove_pointer.hpp>
// #include <boost/type_traits.hpp>
// #include <boost/utility/enable_if.hpp>
#include <utils/boollist.h>
#include <gcl_parameters.h>
#include <empty_field_base.h>
#include <translate.h>
#include <boost/preprocessor/arithmetic/inc.hpp>
#include <boost/preprocessor/repetition/enum_params.hpp>
#include <boost/preprocessor/repetition/repeat.hpp>
#include <boost/preprocessor/repetition/enum_binary_params.hpp>
#include <boost/preprocessor/punctuation/comma_if.hpp>
#include <utils/numerics.h>
#include <descriptors_fwd.h>
#include <descriptor_base.h>
#include "helpers_impl.h"
#include <access.h>

namespace GCL {

  /** \class empty_field_no_dt
      Class containing the information about a data field (grid).
      It does not contain any reference to the actual data of the field;
      it only describes the field through the halo descriptors: for each
      dimension, the size of the non-halo data, the halo widths before and
      after the actual data, and the padding. This information is encoded
      in halo_descriptor.

      A dimension of the field is described as:
      \code
      |-----|------|---------------|---------|----|
      | pad0|minus |    length     |  plus   |pad1|
            ^begin                 ^end
      |               total_length                |
      \endcode

      \tparam DIMS the number of dimensions of the data field
  */
  template <int DIMS>
  class empty_field_no_dt: public empty_field_base<int,DIMS> {

    typedef empty_field_base<int,DIMS> base_type;

  public:
    /** Default constructor. Halo descriptors are registered afterwards
        through the base class (see register_halo on the handler classes).
    */
    explicit empty_field_no_dt() {}

    /// No per-field setup is needed for this descriptor.
    void setup() const {}

    /// Read-only access to the halo descriptors (one per dimension).
    const halo_descriptor* raw_array() const {return &(base_type::halos[0]);}

    /** Pack the elements of a 2D data field (pointed to by field_ptr) into
        the send buffer pointed to by it. On exit, it points to the position
        after the last element written.

        \param[in] eta the eta parameter as indicated in \link MULTI_DIM_ACCESS \endlink
        \param[in] field_ptr iterator pointing to data field data
        \param[in,out] it iterator pointing to the buffer data
    */
    template <typename iterator_in, typename iterator_out>
    void pack(GCL::array<int, 2> const& eta, iterator_in const* field_ptr, iterator_out *& it) const {
      for (int j=base_type::halos[1].loop_low_bound_inside(eta[1]);
           j<=base_type::halos[1].loop_high_bound_inside(eta[1]); ++j) {
        for (int i=base_type::halos[0].loop_low_bound_inside(eta[0]);
             i<=base_type::halos[0].loop_high_bound_inside(eta[0]); ++i) {
          *(reinterpret_cast<iterator_in*>(it)) =
            field_ptr[GCL::access (i,j,
                                   base_type::halos[0].total_length()
                                   ,base_type::halos[1].total_length())];
          // advance the type-erased buffer cursor by one element
          reinterpret_cast<char*&>(it) += sizeof(iterator_in);
        }
      }
    }

    /** 3D overload of pack; see the 2D overload for the contract. */
    template <typename iterator_in, typename iterator_out>
    void pack(GCL::array<int, 3> const& eta, iterator_in const* field_ptr, iterator_out *& it) const {
      for (int k=base_type::halos[2].loop_low_bound_inside(eta[2]);
           k<=base_type::halos[2].loop_high_bound_inside(eta[2]); ++k) {
        for (int j=base_type::halos[1].loop_low_bound_inside(eta[1]);
             j<=base_type::halos[1].loop_high_bound_inside(eta[1]); ++j) {
          for (int i=base_type::halos[0].loop_low_bound_inside(eta[0]);
               i<=base_type::halos[0].loop_high_bound_inside(eta[0]); ++i) {
            *(reinterpret_cast<iterator_in*>(it)) =
              field_ptr[GCL::access (i,j,k,
                                     base_type::halos[0].total_length(),
                                     base_type::halos[1].total_length(),
                                     base_type::halos[2].total_length())];
            reinterpret_cast<char*&>(it) += sizeof(iterator_in);
          }
        }
      }
    }

    /** Unpack received elements from the buffer pointed to by it into the
        halo region of the 2D data field pointed to by field_ptr. On exit,
        it points to the position after the last element read.

        \param[in] eta the eta parameter as explained in \link MULTI_DIM_ACCESS \endlink of the sending neighbor
        \param[in] field_ptr iterator pointing to data field data
        \param[in,out] it iterator pointing to the data in buffers
    */
    template <typename iterator_in, typename iterator_out>
    void unpack(GCL::array<int, 2> const& eta, iterator_in * field_ptr, iterator_out *& it) const {
      for (int j=base_type::halos[1].loop_low_bound_outside(eta[1]);
           j<=base_type::halos[1].loop_high_bound_outside(eta[1]); ++j) {
        for (int i=base_type::halos[0].loop_low_bound_outside(eta[0]);
             i<=base_type::halos[0].loop_high_bound_outside(eta[0]); ++i) {
          field_ptr[GCL::access(i,j,
                                base_type::halos[0].total_length(),
                                base_type::halos[1].total_length())] =
            *(reinterpret_cast<iterator_in*>(it));
          reinterpret_cast<char*&>(it) += sizeof(iterator_in);
        }
      }
    }

    /** 3D overload of unpack; see the 2D overload for the contract. */
    template <typename iterator_in, typename iterator_out>
    void unpack(GCL::array<int, 3> const& eta, iterator_in * field_ptr, iterator_out* &it) const {
      for (int k=base_type::halos[2].loop_low_bound_outside(eta[2]);
           k<=base_type::halos[2].loop_high_bound_outside(eta[2]); ++k) {
        for (int j=base_type::halos[1].loop_low_bound_outside(eta[1]);
             j<=base_type::halos[1].loop_high_bound_outside(eta[1]); ++j) {
          for (int i=base_type::halos[0].loop_low_bound_outside(eta[0]);
               i<=base_type::halos[0].loop_high_bound_outside(eta[0]); ++i) {
            field_ptr[GCL::access(i,j,k,
                                  base_type::halos[0].total_length(),
                                  base_type::halos[1].total_length(),
                                  base_type::halos[2].total_length())] =
              *(reinterpret_cast<iterator_in*>(it));
            reinterpret_cast<char*&>(it) += sizeof(iterator_in);
          }
        }
      }
    }

    /// Recursion terminator for the variadic pack_all below.
    template <typename iterator>
    void pack_all(GCL::array<int, DIMS> const&, iterator &it) const {}

    /** This method takes a tuple eta identifying a neighbor
        \link MULTI_DIM_ACCESS \endlink and a list of data fields, and packs
        all the data corresponding to the halo described by the class.
        The data is packed starting at the position pointed to by the
        iterator, which points to the next free position on exit.

        \param[in] eta the eta parameter as explained in \link MULTI_DIM_ACCESS \endlink of the receiving neighbor
        \param[in,out] it iterator pointing to storage area where data is packed
        \param[in] field the first data field to be processed
        \param[in] args the rest of the list of data fields to be packed (they may have different datatypes)
    */
#ifdef __GXX_EXPERIMENTAL_CXX0X__
    template <typename iterator, typename FIRST, typename... FIELDS>
    void pack_all(GCL::array<int, DIMS> const& eta, iterator &it, FIRST const & field, const FIELDS&... args) const {
      pack(eta, field, it);
      pack_all(eta, it, args...);
    }
#else
    // Pre-C++11: generate pack_all overloads for 1..GCL_MAX_FIELDS fields.
#define MACRO_IMPL(z, n, _)                                             \
    template <typename iterator,                                        \
              BOOST_PP_ENUM_PARAMS_Z(z, BOOST_PP_INC(n), typename FIELD)> \
    void pack_all(GCL::array<int, DIMS> const& eta,                     \
                  iterator &it,                                         \
                  BOOST_PP_ENUM_BINARY_PARAMS_Z(z, BOOST_PP_INC(n), FIELD, const &arg)) const { \
      pack_all(eta, it BOOST_PP_COMMA_IF(n) BOOST_PP_ENUM_PARAMS_Z(z, n, arg)); \
      pack(eta, arg ## n, it);                                          \
    }

    BOOST_PP_REPEAT(GCL_MAX_FIELDS, MACRO_IMPL, all)
#undef MACRO_IMPL
#endif

    /// Recursion terminator for the variadic unpack_all below.
    template <typename iterator>
    void unpack_all(GCL::array<int, DIMS> const&, iterator &it) const {}

    /** This method takes a tuple eta identifying a neighbor
        \link MULTI_DIM_ACCESS \endlink and a list of data fields, and unpacks
        all the data corresponding to the halo described by the class.
        Data is read starting at the position pointed to by the iterator,
        which points to the next unread position on exit.

        \param[in] eta the eta parameter as explained in \link MULTI_DIM_ACCESS \endlink of the sending neighbor
        \param[in,out] it iterator pointing to the data to be unpacked
        \param[in] field the first data field to be processed
        \param[in] args the rest of the list of data fields where data has to be unpacked into (they may have different datatypes)
    */
#ifdef __GXX_EXPERIMENTAL_CXX0X__
    template <typename iterator, typename FIRST, typename... FIELDS>
    void unpack_all(GCL::array<int, DIMS> const& eta, iterator &it, FIRST const & field, const FIELDS&... args) const {
      unpack(eta, field, it);
      unpack_all(eta, it, args...);
    }
#else
    // Pre-C++11: generate unpack_all overloads for 1..GCL_MAX_FIELDS fields.
#define MACRO_IMPL(z, n, _)                                             \
    template <typename iterator,                                        \
              BOOST_PP_ENUM_PARAMS_Z(z, BOOST_PP_INC(n), typename FIELD)> \
    void unpack_all(GCL::array<int, DIMS> const& eta,                   \
                    iterator &it,                                       \
                    BOOST_PP_ENUM_BINARY_PARAMS_Z(z, BOOST_PP_INC(n), FIELD, const &arg)) const { \
      unpack_all(eta, it BOOST_PP_COMMA_IF(n) BOOST_PP_ENUM_PARAMS_Z(z, n, arg)); \
      unpack(eta, arg ## n, it);                                        \
    }

    BOOST_PP_REPEAT(GCL_MAX_FIELDS, MACRO_IMPL, all)
#undef MACRO_IMPL
#endif
  };

  /// Stream the halo descriptors of an empty_field_no_dt (debugging aid).
  template <int I>
  std::ostream& operator<<(std::ostream &s, empty_field_no_dt<I> const &ef) {
    s << "empty_field_no_dt ";
    for (int i=0; i<I; ++i)
      s << ef.raw_array()[i] << ", " ;
    return s;
  }

  /** \class field_descriptor_no_dt
      Class containing the information about a data field (grid).
      It contains a pointer to the first element of the data field, the
      number of dimensions as a template argument and, per dimension, the
      size of the non-halo data field and the halo widths before and after
      the actual data. This information is encoded in halo_descriptor.
A dimension of the field is described as:
      \code
      |-----|------|---------------|---------|----|
      | pad0|minus |    length     |  plus   |pad1|
            ^begin                 ^end
      |               total_length                |
      \endcode

      \tparam DataType type of elements of the datafield
      \tparam DIMS the number of dimensions of the data field
  */
  template <typename DataType, int DIMS>
  class field_descriptor_no_dt: public empty_field_no_dt<DIMS> {

    DataType* fieldptr; // Pointer to the data field

    typedef empty_field_no_dt<DIMS> base_type;

  public:
    /** Constructor that receives the pointer to the data. This is explicit
        and must then be called.
        \param[in] _fp DataType* pointer to the data field
    */
    explicit field_descriptor_no_dt(DataType *_fp): fieldptr(_fp) {}

    /** Pack the elements to be sent using the iterator passed in. On exit
        the iterator points to the position after the last element written.
        \param[in] eta the eta parameter as indicated in \link MULTI_DIM_ACCESS \endlink
        \param[in,out] it iterator pointing to the buffer data
    */
    template <typename iterator>
    void pack(GCL::array<int, DIMS> const& eta, iterator &it) const {
      base_type::pack(eta, fieldptr, it);
    }

    /** Unpack the elements received using the iterator passed in. On exit
        the iterator points to the position after the last element read.
        \param[in] eta the eta parameter as explained in \link MULTI_DIM_ACCESS \endlink of the sending neighbor
        \param[in,out] it iterator pointing to the data in buffers
    */
    template <typename iterator>
    void unpack(GCL::array<int, DIMS> const& eta, iterator &it) const {
      base_type::unpack(eta, fieldptr, it);
    }
  };

  /** Class containing the list of data fields associated with a handler.
      A handler identifies the data fields that must be updated together
      in the computation.
The _ut suffix stands for "uniform type": all the data fields in this
      descriptor have the same data type, which is equal to the template
      argument.

      The order in which data fields are registered is important, since it
      dictates the order in which the data is packed, transferred and
      unpacked. All processes must register the data fields in the same
      order and with the same corresponding sizes.

      \tparam DataType type of the elements of the data fields associated to the handler
      \tparam DIMS Number of dimensions of the grids
      \tparam HaloExch Communication pattern with halo exchange
  */
  template <typename DataType, int DIMS, typename HaloExch >
  class hndlr_descriptor_ut : public descriptor_base<HaloExch> {

    typedef hndlr_descriptor_ut<DataType,DIMS,HaloExch> this_type;

    // Registered data fields, in registration (i.e. pack/unpack) order.
    std::vector<field_descriptor_no_dt<DataType, DIMS> > field;

    // 3^DIMS buffers, one per neighbor direction (the center entry is unused).
    GCL::array<DataType*, _impl::static_pow3<DIMS>::value> send_buffer; //One entry will not be used...
    GCL::array<DataType*, _impl::static_pow3<DIMS>::value> recv_buffer;

  public:
    typedef descriptor_base<HaloExch> base_type;
    typedef typename base_type::pattern_type pattern_type;

    /** Type of the computing grid associated to the pattern */
    typedef typename pattern_type::grid_type grid_type;

    /** Type of the translation used to map dimensions to buffer addresses */
    typedef translate_t<DIMS, typename default_layout_map<DIMS>::type > translate;

  private:
    grid_type procgrid;

    // Non-copyable: copy constructor is private and empty.
    hndlr_descriptor_ut(hndlr_descriptor_ut const &) {}

  public:
    /** Constructor
        \param[in] c The object of the class used to specify periodicity in each dimension
        \param[in] comm MPI communicator (typically MPI_Comm_world)
    */
    // NOTE(review): base classes are initialized before members, so
    // base_type(procgrid) runs before procgrid's constructor; this is only
    // safe if descriptor_base merely stores the reference — verify.
    explicit hndlr_descriptor_ut(typename grid_type::period_type const &c, MPI_Comm comm)
      : field()
      , procgrid(c, comm)
      , base_type(procgrid)
    {}

    /** Constructor
        \param[in] c The object of the class used to specify periodicity in each dimension
        \param[in] _P Number of processors the pattern is running on (numbered from 0 to _P-1)
        \param[in] _pid Integer identifier of the process calling the constructor
    */
    explicit hndlr_descriptor_ut(typename grid_type::period_type const &c, int _P, int _pid)
      : field()
      , procgrid(c,_P,_pid)
      , base_type(procgrid)
    {}

    /** Constructor
        \param[in] g A processor grid that will execute the pattern
    */
    explicit hndlr_descriptor_ut(grid_type const &g)
      : field()
      , procgrid(g)
      , base_type(procgrid)
    {}

    /** Add a data field to the handler descriptor. Returns the index of the
        field for later use.
        \param[in] ptr pointer to the datafield
        \return index of the field in the handler descriptor
    */
    size_t register_field(DataType *ptr) {
      field.push_back(field_descriptor_no_dt<DataType, DIMS>(ptr));
      return field.size()-1;
    }

    /** Register the halo relative to a given dimension with a given data field.
        \param[in] D index of data field to be affected
        \param[in] I index of dimension for which the information is passed
        \param[in] minus Please see field_descriptor_no_dt, halo_descriptor or \link MULTI_DIM_ACCESS \endlink for details
        \param[in] plus Please see field_descriptor_no_dt, halo_descriptor or \link MULTI_DIM_ACCESS \endlink for details
        \param[in] begin Please see field_descriptor_no_dt, halo_descriptor or \link MULTI_DIM_ACCESS \endlink for details
        \param[in] end Please see field_descriptor_no_dt, halo_descriptor or \link MULTI_DIM_ACCESS \endlink for details
        \param[in] t_len Please see field_descriptor_no_dt, halo_descriptor or \link MULTI_DIM_ACCESS \endlink for details
    */
    void register_halo(size_t D, size_t I, int minus, int plus, int begin, int end, int t_len) {
      field[D].add_halo(I, minus, plus, begin, end, t_len);
    }

    /// Number of registered data fields.
    int size() const { return field.size(); }

    /// Read-only access to the I-th registered data field.
    field_descriptor_no_dt<DataType, DIMS> const & data_field(int I) const {return field[I];}

    /** Given the coordinates of a neighbor, return the total number of
        elements to be sent to that neighbor across all registered fields.
    */
    template <typename ARRAY>
    int total_pack_size(ARRAY const & tuple) const {
      int S=0;
      for (int i=0; i < size(); ++i)
        S += data_field(i).send_buffer_size(tuple);
      return S;
    }

    /** Given the coordinates of a neighbor, return the total number of
        elements to be received from that neighbor across all registered
        fields.
    */
    template <typename ARRAY>
    int total_unpack_size(ARRAY const &tuple) const {
      int S=0;
      for (int i=0; i < size(); ++i)
        S += data_field(i).recv_buffer_size(tuple);
      return S;
    }

    /** Function to setup internal data structures for data exchange and
        preparing eventual underlying layers.
        The use of this function is deprecated; use setup() instead.
    */
    void allocate_buffers() {
      setup();
    }

    /** Function to setup internal data structures for data exchange and
        preparing eventual underlying layers.
    */
    void setup() {
      _impl::allocation_service<this_type>()(this);
    }

    /** Function to pack data to be sent */
    void pack() const {
      _impl::pack_service<this_type>()(this);
    }

    /** Function to unpack received data */
    void unpack() const {
      _impl::unpack_service<this_type>()(this);
    }

    /// Utilities

    /** Retrieve the pattern from which the computing grid and other
        information can be retrieved. The function is available only if the
        underlying communication library is a Level 3 pattern. It would not
        make much sense otherwise.

        If used to get process grid information additional information can
        be found in \link GRIDS_INTERACTION \endlink
    */
    pattern_type const & pattern() const {return base_type::haloexch;}

    // FRIENDING
    friend class _impl::allocation_service<this_type>;
    friend class _impl::pack_service<this_type>;
    friend class _impl::unpack_service<this_type>;
  };

  /** Class containing the description of one halo and a communication
      pattern. A communication is triggered when a list of data fields are
      passed to the exchange functions, when the data according to the halo
      descriptors are exchanged.
This class is needed when the addresses and the number of the data fields changes dynamically but the sizes are constant. Data elements for each hndlr_dynamic_ut must be the same. \tparam DIMS Number of dimensions of the grids. \tparam HaloExch Communication pattern with halo exchange. */ template <typename DataType, int DIMS, typename HaloExch, typename proc_layout> class hndlr_dynamic_ut<DataType, DIMS, HaloExch, proc_layout, gcl_cpu, 2> : public descriptor_base<HaloExch> { typedef hndlr_dynamic_ut<DataType,DIMS,HaloExch,proc_layout, gcl_cpu, 2> this_type; public: empty_field_no_dt<DIMS> halo; private: GCL::array<DataType*, _impl::static_pow3<DIMS>::value> send_buffer; //One entry will not be used... GCL::array<DataType*, _impl::static_pow3<DIMS>::value> recv_buffer; public: typedef gcl_cpu arch_type; typedef descriptor_base<HaloExch> base_type; typedef typename base_type::pattern_type pattern_type; /** Type of the computin grid associated to the pattern */ typedef typename pattern_type::grid_type grid_type; /** Type of the translation used to map dimensions to buffer addresses */ typedef translate_t<DIMS, typename default_layout_map<DIMS>::type > translate; private: hndlr_dynamic_ut(hndlr_dynamic_ut const &) {} public: /** Constructor \param[in] c The object of the class used to specify periodicity in each dimension \param[in] comm MPI communicator (typically MPI_Comm_world) */ explicit hndlr_dynamic_ut(typename grid_type::period_type const &c, MPI_Comm comm) : base_type(c,comm) , halo() {} ~hndlr_dynamic_ut() { #ifdef _GCL_CHECK_DESTRUCTOR std::cout << "Destructor " << __FILE__ << ":" << __LINE__ << std::endl; #endif for (int i = -1; i <= 1; ++i) for (int j = -1; j <= 1; ++j) for (int k = -1; k <= 1; ++k) { if (!send_buffer[translate()(i,j,k)]) _impl::gcl_alloc<DataType, arch_type>::free(send_buffer[translate()(i,j,k)]); if (!recv_buffer[translate()(i,j,k)]) _impl::gcl_alloc<DataType, arch_type>::free(recv_buffer[translate()(i,j,k)]); } } /** Constructor 
\param[in] c The object of the class used to specify periodicity in each dimension \param[in] _P Number of processors the pattern is running on (numbered from 0 to _P-1 \param[in] _pid Integer identifier of the process calling the constructor */ explicit hndlr_dynamic_ut(typename grid_type::period_type const &c, int _P, int _pid) : halo() , base_type::haloexch(grid_type(c,_P,_pid))//procgrid) {} /** Constructor \param[in] g A processor grid that will execute the pattern */ explicit hndlr_dynamic_ut(grid_type const &g) : halo() , base_type::haloexch(g) {} /** Function to setup internal data structures for data exchange and preparing eventual underlying layers The use of this function is deprecated \param max_fields_n Maximum number of data fields that will be passed to the communication functions */ void allocate_buffers(int max_fields_n) { setup(max_fields_n); } /** Function to setup internal data structures for data exchange and preparing eventual underlying layers \param max_fields_n Maximum number of data fields that will be passed to the communication functions */ void setup(int max_fields_n) { _impl::allocation_service<this_type>()(this, max_fields_n); } #ifdef GCL_TRACE void set_pattern_tag(int tag) { base_type::haloexch.set_pattern_tag(tag); }; #endif /** Function to pack data to be sent \param[in] _fields data fields to be packed */ #ifdef __GXX_EXPERIMENTAL_CXX0X__ template <typename... FIELDS> void pack(const FIELDS&... _fields) const { pack_dims<DIMS,0>()(*this, _fields... 
); } #else #define MACRO_IMPL(z, n, _) \ template <BOOST_PP_ENUM_PARAMS_Z(z, BOOST_PP_INC(n), typename FIELD)> \ void pack(BOOST_PP_ENUM_BINARY_PARAMS_Z(z, BOOST_PP_INC(n), FIELD, const &_field)) const { \ pack_dims<DIMS,0>()(*this, BOOST_PP_ENUM_PARAMS_Z(z, BOOST_PP_INC(n), _field)); \ } BOOST_PP_REPEAT(GCL_MAX_FIELDS, MACRO_IMPL, all) #undef MACRO_IMPL #endif /** Function to unpack received data \param[in] _fields data fields where to unpack data */ #ifdef __GXX_EXPERIMENTAL_CXX0X__ template <typename... FIELDS> void unpack(const FIELDS&... _fields) const { unpack_dims<DIMS,0>()(*this, _fields... ); } #else #define MACRO_IMPL(z, n, _) \ template <BOOST_PP_ENUM_PARAMS_Z(z, BOOST_PP_INC(n), typename FIELD)> \ void unpack(BOOST_PP_ENUM_BINARY_PARAMS_Z(z, BOOST_PP_INC(n), FIELD, const &_field)) const { \ unpack_dims<DIMS,0>()(*this, BOOST_PP_ENUM_PARAMS_Z(z, BOOST_PP_INC(n), _field)); \ } BOOST_PP_REPEAT(GCL_MAX_FIELDS, MACRO_IMPL, all) #undef MACRO_IMPL #endif /** Function to unpack received data \param[in] fields vector with data fields pointers to be packed from */ void pack(std::vector<DataType*> const& fields) { pack_vector_dims<DIMS,0>()(*this, fields); } /** Function to unpack received data \param[in] fields vector with data fields pointers to be unpacked into */ void unpack(std::vector<DataType*> const& fields) { unpack_vector_dims<DIMS,0>()(*this, fields); } /// Utilities /** Retrieve the pattern from which the computing grid and other information can be retrieved. The function is available only if the underlying communication library is a Level 3 pattern. It would not make much sense otherwise. 
If used to get process grid information additional information can be found in \link GRIDS_INTERACTION \endlink */ pattern_type const & pattern() const {return base_type::haloexch;} // FRIENDING friend class _impl::allocation_service<this_type>; //friend class _impl::pack_service<this_type>; //friend class _impl::unpack_service<this_type>; private: template <int I, int dummy> struct pack_dims {}; template <int dummy> struct pack_dims<2, dummy> { #ifdef __GXX_EXPERIMENTAL_CXX0X__ template <typename T, typename... FIELDS> void operator()(const T& hm, const FIELDS&... _fields) const { #pragma omp parallel for schedule(dynamic, 1) collapse(2) for (int ii=-1; ii<=1; ++ii) { for (int jj=-1; jj<=1; ++jj) { if ((ii!=0 || jj!=0) && (hm.pattern().proc_grid().proc(ii,jj) != -1)) { DataType *it = &(hm.send_buffer[translate()(ii,jj)][0]); hm.halo.pack_all(gcl_utils::make_array(ii,jj), it, _fields...); } } } } #else #ifndef _GCL_GPU_ #define PUT_OMP _Pragma("omp parallel for schedule(dynamic) collapse(2)") #else #define PUT_OMP #endif #define MACRO_IMPL(z, n, _) \ template <typename T, BOOST_PP_ENUM_PARAMS_Z(z, BOOST_PP_INC(n), typename FIELD)> \ void operator()(const T& hm, BOOST_PP_ENUM_BINARY_PARAMS_Z(z, BOOST_PP_INC(n), FIELD, const &_field) ) const { \ PUT_OMP \ for (int ii=-1; ii<=1; ++ii) { \ for (int jj=-1; jj<=1; ++jj) { \ if ((ii!=0 || jj!=0) && (hm.pattern().proc_grid().proc(ii,jj) != -1)) { \ DataType *it = &(hm.send_buffer[translate()(ii,jj)][0]); \ hm.halo.pack_all(gcl_utils::make_array(ii,jj), it, BOOST_PP_ENUM_PARAMS_Z(z, BOOST_PP_INC(n), _field)); \ } \ } \ } \ } BOOST_PP_REPEAT(GCL_MAX_FIELDS, MACRO_IMPL, all) #undef MACRO_IMPL #undef PUT_OMP #endif }; template <int dummy> struct pack_dims<3, dummy> { #ifdef __GXX_EXPERIMENTAL_CXX0X__ template <typename T, typename... FIELDS> void operator()(const T& hm, const FIELDS&... 
_fields) const { #pragma omp parallel for schedule(dynamic, 1) collapse(3) for (int ii=-1; ii<=1; ++ii) { for (int jj=-1; jj<=1; ++jj) { for (int kk=-1; kk<=1; ++kk) { typedef proc_layout map_type; const int ii_P = map_type().template select<0>(ii,jj,kk); const int jj_P = map_type().template select<1>(ii,jj,kk); const int kk_P = map_type().template select<2>(ii,jj,kk); if ((ii!=0 || jj!=0 || kk!=0) && (hm.pattern().proc_grid().proc(ii_P,jj_P,kk_P) != -1)) { DataType *it = &(hm.send_buffer[translate()(ii,jj,kk)][0]); hm.halo.pack_all(gcl_utils::make_array(ii,jj,kk), it, _fields...); } } } } } #else #ifndef _GCL_GPU_ #define PUT_OMP _Pragma("omp parallel for schedule(dynamic) collapse(3)") #else #define PUT_OMP #endif #define MACRO_IMPL(z, n, _) \ template <typename T, BOOST_PP_ENUM_PARAMS_Z(z, BOOST_PP_INC(n), typename FIELD)> \ void operator()(const T& hm, BOOST_PP_ENUM_BINARY_PARAMS_Z(z, BOOST_PP_INC(n), FIELD, const &_field) ) const { \ PUT_OMP \ for (int ii=-1; ii<=1; ++ii) { \ for (int jj=-1; jj<=1; ++jj) { \ for (int kk=-1; kk<=1; ++kk) { \ typedef proc_layout map_type; \ const int ii_P = map_type().template select<0>(ii,jj,kk); \ const int jj_P = map_type().template select<1>(ii,jj,kk); \ const int kk_P = map_type().template select<2>(ii,jj,kk); \ if ((ii!=0 || jj!=0 || kk!=0) && (hm.pattern().proc_grid().proc(ii_P,jj_P,kk_P) != -1)) { \ DataType *it = &(hm.send_buffer[translate()(ii,jj,kk)][0]); \ hm.halo.pack_all(gcl_utils::make_array(ii,jj,kk), it, BOOST_PP_ENUM_PARAMS_Z(z, BOOST_PP_INC(n), _field)); \ } \ } \ } \ } \ } BOOST_PP_REPEAT(GCL_MAX_FIELDS, MACRO_IMPL, all) #undef MACRO_IMPL #undef PUT_OMP #endif }; template <int I, int dummy> struct unpack_dims {}; template <int dummy> struct unpack_dims<2, dummy> { #ifdef __GXX_EXPERIMENTAL_CXX0X__ template <typename T, typename... FIELDS> void operator()(const T& hm, const FIELDS&... 
_fields) const { #pragma omp parallel for schedule(dynamic, 1) collapse(2) for (int ii=-1; ii<=1; ++ii) { for (int jj=-1; jj<=1; ++jj) { if ((ii!=0 || jj!=0) && (hm.pattern().proc_grid().proc(ii,jj) != -1)) { DataType *it = &(hm.recv_buffer[translate()(ii,jj)][0]); hm.halo.unpack_all(gcl_utils::make_array(ii,jj), it, _fields...); } } } } #else #ifndef _GCL_GPU_ #define PUT_OMP _Pragma("omp parallel for schedule(dynamic) collapse(2)") #else #define PUT_OMP #endif #define MACRO_IMPL(z, n, _) \ template <typename T, BOOST_PP_ENUM_PARAMS_Z(z, BOOST_PP_INC(n), typename FIELD)> \ void operator()(const T& hm, BOOST_PP_ENUM_BINARY_PARAMS_Z(z, BOOST_PP_INC(n), FIELD, const &_field) ) const { \ PUT_OMP \ for (int ii=-1; ii<=1; ++ii) { \ for (int jj=-1; jj<=1; ++jj) { \ if ((ii!=0 || jj!=0) && (hm.pattern().proc_grid().proc(ii,jj) != -1)) { \ DataType *it = &(hm.recv_buffer[translate()(ii,jj)][0]); \ hm.halo.unpack_all(gcl_utils::make_array(ii,jj), it, BOOST_PP_ENUM_PARAMS_Z(z, BOOST_PP_INC(n), _field)); \ } \ } \ } \ } BOOST_PP_REPEAT(GCL_MAX_FIELDS, MACRO_IMPL, all) #undef MACRO_IMPL #undef PUT_OMP #endif }; template <int dummy> struct unpack_dims<3, dummy> { #ifdef __GXX_EXPERIMENTAL_CXX0X__ template <typename T, typename... FIELDS> void operator()(const T& hm, const FIELDS&... 
_fields) const { #pragma omp parallel for schedule(dynamic, 1) collapse(3) for (int ii=-1; ii<=1; ++ii) { for (int jj=-1; jj<=1; ++jj) { for (int kk=-1; kk<=1; ++kk) { typedef proc_layout map_type; const int ii_P = map_type().template select<0>(ii,jj,kk); const int jj_P = map_type().template select<1>(ii,jj,kk); const int kk_P = map_type().template select<2>(ii,jj,kk); if ((ii!=0 || jj!=0 || kk!=0) && (hm.pattern().proc_grid().proc(ii_P,jj_P,kk_P) != -1)) { DataType *it = &(hm.recv_buffer[translate()(ii,jj,kk)][0]); hm.halo.unpack_all(gcl_utils::make_array(ii,jj,kk), it, _fields...); } } } } } #else #ifndef _GCL_GPU_ #define PUT_OMP _Pragma("omp parallel for schedule(dynamic) collapse(3)") #else #define PUT_OMP #endif #define MACRO_IMPL(z, n, _) \ template <typename T, BOOST_PP_ENUM_PARAMS_Z(z, BOOST_PP_INC(n), typename FIELD)> \ void operator()(const T& hm, BOOST_PP_ENUM_BINARY_PARAMS_Z(z, BOOST_PP_INC(n), FIELD, const &_field) ) const { \ PUT_OMP \ for (int ii=-1; ii<=1; ++ii) { \ for (int jj=-1; jj<=1; ++jj) { \ for (int kk=-1; kk<=1; ++kk) { \ typedef proc_layout map_type; \ const int ii_P = map_type().template select<0>(ii,jj,kk); \ const int jj_P = map_type().template select<1>(ii,jj,kk); \ const int kk_P = map_type().template select<2>(ii,jj,kk); \ if ((ii!=0 || jj!=0 || kk!=0) && (hm.pattern().proc_grid().proc(ii_P,jj_P,kk_P) != -1)) { \ DataType *it = &(hm.recv_buffer[translate()(ii,jj,kk)][0]); \ hm.halo.unpack_all(gcl_utils::make_array(ii,jj,kk), it, BOOST_PP_ENUM_PARAMS_Z(z, BOOST_PP_INC(n), _field)); \ } \ } \ } \ } \ } BOOST_PP_REPEAT(GCL_MAX_FIELDS, MACRO_IMPL, all) #undef MACRO_IMPL #undef PUT_OMP #endif }; template <int I, int dummy> struct pack_vector_dims {}; template <int dummy> struct pack_vector_dims<2, dummy> { template <typename T> void operator()(const T& hm, std::vector<DataType*> const& fields) const { #pragma omp parallel for schedule(dynamic, 1) collapse(2) for (int ii=-1; ii<=1; ++ii) { for (int jj=-1; jj<=1; ++jj) { if ((ii!=0 || 
jj!=0) && (hm.pattern().proc_grid().proc(ii,jj) != -1)) { DataType *it = &(hm.send_buffer[translate()(ii,jj)][0]); for (size_t i=0; i<fields.size(); ++i) { hm.halo.pack(gcl_utils::make_array(ii,jj), fields[i], it); } } } } } }; template <int dummy> struct pack_vector_dims<3, dummy> { template <typename T> void operator()(const T& hm, std::vector<DataType*> const& fields) const { #pragma omp parallel for schedule(dynamic, 1) collapse(3) for (int ii=-1; ii<=1; ++ii) { for (int jj=-1; jj<=1; ++jj) { for (int kk=-1; kk<=1; ++kk) { typedef proc_layout map_type; const int ii_P = map_type().template select<0>(ii,jj,kk); const int jj_P = map_type().template select<1>(ii,jj,kk); const int kk_P = map_type().template select<2>(ii,jj,kk); if ((ii!=0 || jj!=0 || kk!=0) && (hm.pattern().proc_grid().proc(ii_P,jj_P,kk_P) != -1)) { DataType *it = &(hm.send_buffer[translate()(ii,jj,kk)][0]); for (size_t i=0; i<fields.size(); ++i) { hm.halo.pack(gcl_utils::make_array(ii,jj,kk), fields[i], it); } } } } } } }; template <int I, int dummy> struct unpack_vector_dims {}; template <int dummy> struct unpack_vector_dims<2, dummy> { template <typename T> void operator()(const T& hm, std::vector<DataType*> const& fields) const { #pragma omp parallel for schedule(dynamic, 1) collapse(2) for (int ii=-1; ii<=1; ++ii) { for (int jj=-1; jj<=1; ++jj) { if ((ii!=0 || jj!=0) && (hm.pattern().proc_grid().proc(ii,jj) != -1)) { DataType *it = &(hm.recv_buffer[translate()(ii,jj)][0]); for (size_t i=0; i<fields.size(); ++i) { hm.halo.unpack(gcl_utils::make_array(ii,jj), fields[i], it); } } } } } }; template <int dummy> struct unpack_vector_dims<3, dummy> { template <typename T> void operator()(const T& hm, std::vector<DataType*> const& fields) const { #pragma omp parallel for schedule(dynamic, 1) collapse(3) for (int ii=-1; ii<=1; ++ii) { for (int jj=-1; jj<=1; ++jj) { for (int kk=-1; kk<=1; ++kk) { typedef proc_layout map_type; const int ii_P = map_type().template select<0>(ii,jj,kk); const int jj_P = 
map_type().template select<1>(ii,jj,kk); const int kk_P = map_type().template select<2>(ii,jj,kk); if ((ii!=0 || jj!=0 || kk!=0) && (hm.pattern().proc_grid().proc(ii_P,jj_P,kk_P) != -1)) { DataType *it = &(hm.recv_buffer[translate()(ii,jj,kk)][0]); for (size_t i=0; i<fields.size(); ++i) { hm.halo.unpack(gcl_utils::make_array(ii,jj,kk), fields[i], it); } } } } } } }; }; } // namespace #endif
sievePar.c
/*
 * Adapted from: https://www.geeksforgeeks.org/sieve-of-eratosthenes
 * (the URL was redacted in the original comment; presumably GeeksforGeeks)
 */
/*
 * Author's timings:
 *   Sequential:                      4.044s
 *   Parallel, default schedule:      3.851s
 *   Parallel, chosen schedule:       2.638s
 *   Speedup: 1.53
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <math.h>
#include <omp.h>

/*
 * Count the primes in [2, n] with a parallel Sieve of Eratosthenes.
 *
 * prime[i] starts true and is set to false when i is found composite;
 * at the end the surviving entries are counted with a reduction.
 *
 * Returns the number of primes <= n, or -1 if the sieve array cannot
 * be allocated.
 */
int sieveOfEratosthenes(int n)
{
    int primes = 0;
    bool *prime = (bool*) malloc((n+1)*sizeof(bool));
    if (prime == NULL)
        return -1;                /* report allocation failure instead of crashing */
    int sqrt_n = sqrt(n);
    memset(prime, true, (n+1)*sizeof(bool));

    /* Each outer iteration crosses out the multiples of one candidate.
       Concurrent iterations may store `false` into the same slot, but
       they all store the same value, so the result is still correct. */
    #pragma omp parallel for schedule(dynamic, 100) num_threads(2)
    for (int p = 2; p <= sqrt_n; p++)
    {
        /* If prime[p] has not been cleared yet, p is prime. */
        if (prime[p] == true)
        {
            /* Start at p*p: every smaller multiple of p has a smaller
               prime factor and was already crossed out. */
            for (int i = p*p; i <= n; i += p)
                prime[i] = false;
        }
    }

    /* Count the prime numbers. */
    #pragma omp parallel for reduction(+:primes) num_threads(2)
    for (int p = 2; p <= n; p++)
    {
        if (prime[p])
            primes++;
    }

    free(prime);                  /* fix: the original leaked this buffer */
    return primes;
}

int main()
{
    int n = 100000000;
    printf("%d\n", sieveOfEratosthenes(n));
    return 0;
}
nqueens-test.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ /* * Original code from the Cilk project (by Keith Randall) * * Copyright (c) 2000 Massachusetts Institute of Technology * Copyright (c) 2000 Matteo Frigo */ #include <stdlib.h> #include <stdio.h> #include <memory.h> #include <alloca.h> #include "bots.h" #include <omp.h> static int solutions[] = { 1, 0, 0, 2, 10, /* 5 */ 4, 40, 92, 352, 724, /* 10 */ 2680, 14200, 73712, 365596, }; #define MAX_SOLUTIONS sizeof(solutions)/sizeof(int) int mycount=0; #pragma omp threadprivate(mycount) int total_count; /* * <a> contains array of <n> queen positions. Returns 1 * if none of the queens conflict, and returns 0 otherwise. 
*/ int ok(int n, char *a) { int i, j; char p, q; for (i = 0; i < n; i++) { p = a[i]; for (j = i + 1; j < n; j++) { q = a[j]; if (q == p || q == p - (j - i) || q == p + (j - i)) return 0; } } return 1; } void nqueens_ser (int n, int j, char *a, int *solutions) { int i,res; if (n == j) { /* good solution, count it */ *solutions = 1; return; } *solutions = 0; /* try each possible position for queen <j> */ for (i = 0; i < n; i++) { a[j] = i; if (ok(j + 1, a)) { nqueens_ser(n, j + 1, a,&res); *solutions += res; } } } //It seems entirely possible to convert the allocas and arrays to a single int, // and some atomic increments for the solution. // It will also require tracking of depth, so that calculation of all states with // a state that has been determined invalid will not continue. void nqueens(int n, int j, char *a, int *solutions) { int i; int *csols; if (n == j) { *solutions = 1; return; } *solutions = 0; csols = alloca(n*sizeof(int)); memset(csols,0,n*sizeof(int)); /* try each possible position for queen <j> */ for (i = 0; i < n; i++) { #pragma omp task untied { char * b = alloca((j + 1) * sizeof(char)); memcpy(b, a, j * sizeof(char)); b[j] = i; if (ok(j + 1, b)) nqueens(n, j + 1, b,&csols[i]); } } #pragma omp taskwait } void find_queens (int size) { total_count=0; bots_message("Computing N-Queens algorithm (n=%d) ", size); #pragma omp parallel { #pragma omp single { char *a = alloca(size * sizeof(char)); nqueens(size, 0, a, &total_count); } } bots_message(" completed!\n"); } int verify_queens (int size) { if( size > MAX_SOLUTIONS ) return BOTS_RESULT_NA; if( total_count == solutions[size-1]) return BOTS_RESULT_SUCCESSFUL; return BOTS_RESULT_UNSUCCESSFUL; }
task_unitied_thread_threadid.c
// RUN: %libomp-compile-and-run // REQUIRES: abt #include "omp_testsuite.h" #include <string.h> #include <stdio.h> int test_task_untied_thread_threadid(int num_threads) { int vals[num_threads]; memset(vals, 0, sizeof(int) * num_threads); omp_set_max_active_levels(2); #pragma omp parallel num_threads(num_threads / 2 + 1) #pragma omp master { int i; for (i = 0; i < num_threads; i++) { #pragma omp task firstprivate(i) untied { ABT_thread abt_thread; ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread)); int local_vals[num_threads]; memset(local_vals, 0, sizeof(int) * num_threads); int j; #pragma omp parallel for num_threads(num_threads) for (j = 0; j < num_threads; j++) { int l2_omp_thread_id = omp_get_thread_num(); ABT_thread l2_abt_thread; ABT_EXIT_IF_FAIL(ABT_thread_self(&l2_abt_thread)); // Context switching in OpenMP. #pragma omp taskyield int l2_omp_thread_id2 = omp_get_thread_num(); if (l2_omp_thread_id == l2_omp_thread_id2) { local_vals[j] += 1; } ABT_thread l2_abt_thread2; ABT_EXIT_IF_FAIL(ABT_thread_self(&l2_abt_thread2)); ABT_bool l2_abt_thread_equal; ABT_EXIT_IF_FAIL(ABT_thread_equal(l2_abt_thread, l2_abt_thread2, &l2_abt_thread_equal)); if (l2_abt_thread_equal == ABT_TRUE) { local_vals[j] += 2; } // Context switching in Argobots. ABT_EXIT_IF_FAIL(ABT_thread_yield()); int l2_omp_thread_id3 = omp_get_thread_num(); if (l2_omp_thread_id2 == l2_omp_thread_id3) { local_vals[j] += 4; } } // Check child threads. 
int child_fail = 0; for (j = 0; j < num_threads; j++) { if (local_vals[i] != 7) { child_fail = 1; } } if (!child_fail) { vals[i] += 1; } ABT_thread abt_thread2; ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread2)); ABT_bool abt_thread_equal; ABT_EXIT_IF_FAIL(ABT_thread_equal(abt_thread, abt_thread2, &abt_thread_equal)); if (abt_thread_equal == ABT_TRUE) { vals[i] += 2; } } } } int index; for (index = 0; index < num_threads; index++) { if (vals[index] != 3) { printf("vals[%d] == %d\n", index, vals[index]); return 0; } } return 1; } int main() { int i, num_failed = 0; for (i = 0; i < REPETITIONS; i++) { if (!test_task_untied_thread_threadid(i + 1)) { num_failed++; } } return num_failed; }
sudoValidator.c
/*
 Universidad del Valle de Guatemala
 Sistemas operativos
 Ing. Erick Pineda

 sudokuValidator.c
 Purpose: given a solution in a txt file, verify whether it is correct.
 Lab3
*/
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/shm.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <omp.h>

/* The sudoku board as characters '1'..'9'. */
char sudoku[9][9];
/* 0 when the corresponding check passed, -1 otherwise. */
int correct_columns, correct_rows;

/* Check that every row contains each digit 1..9.
   Returns 0 when all rows are valid, -1 otherwise. */
int verify_rows()
{
    omp_set_nested(1);       /* enable nested parallelism */
    omp_set_num_threads(9);  /* one thread per row */
    int valid = 0;

    #pragma omp parallel for schedule(dynamic)
    for (int i = 0; i < 9; i++) {
        char nums_validos[] = "123456789";
        /* For each required digit, scan row i until it is found. */
        for (char *num = &nums_validos[0]; *num != '\0'; num++) {
            int not_num = 0;
            int j = 0;
            while (not_num == 0 && j < 9) {
                if (sudoku[i][j] == *num)
                    not_num = 1;
                j++;
            }
            /* All failing iterations store the same value (-1), so the
               concurrent write is benign. */
            if (not_num == 0)
                valid = -1;
        }
        printf("En la verificacion de las filas el hilo en proceso es: %ld \n", syscall(SYS_gettid));
    }
    return valid;
}

/* Check that every column contains each digit 1..9.
   Returns 0 when all columns are valid, -1 otherwise. */
int verify_columns()
{
    omp_set_nested(1);
    omp_set_num_threads(9);
    int valid = 0;

    #pragma omp parallel for schedule(dynamic)
    for (int i = 0; i < 9; i++) {
        char nums_validos[] = "123456789";
        for (char *num = &nums_validos[0]; *num != '\0'; num++) {
            int not_num = 0;
            int j = 0;
            while (not_num == 0 && j < 9) {
                if (sudoku[j][i] == *num)   /* column i, row j */
                    not_num = 1;
                j++;
            }
            if (not_num == 0)
                valid = -1;
        }
        printf("En la verificacion de las columnas el hilo en proceso es: %ld \n", syscall(SYS_gettid));
    }
    return valid;
}

/* Row-validity check on a caller-supplied board (used for the flattened
   subgrids).  Returns 0 when every row of temp is valid, -1 otherwise. */
int verify_rows_nums(char temp[9][9])
{
    omp_set_nested(1);
    omp_set_num_threads(9);
    int valid = 0;

    #pragma omp parallel for schedule(dynamic)
    for (int i = 0; i < 9; i++) {
        char nums_validos[] = "123456789";
        for (char *num = &nums_validos[0]; *num != '\0'; num++) {
            int not_num = 0;
            int j = 0;
            while (not_num == 0 && j < 9) {
                if (temp[i][j] == *num)
                    not_num = 1;
                j++;
            }
            if (not_num == 0)
                valid = -1;
        }
    }
    return valid;
}

/* Validate the nine 3x3 subgrids.  Based on:
   https://www.geeksforgeeks.org/check-if-given-sudoku-solution-is-valid-or-not/
   Each subgrid (x, y) is flattened into row x*3+y of temp_sudoku, which
   is then validated with verify_rows_nums().  Returns 0 if valid. */
int verify_Subarrays()
{
    omp_set_nested(1);
    omp_set_num_threads(3);
    char temp_sudoku[9][9];

    for (int x = 0; x < 3; x++) {
        for (int y = 0; y < 3; y++) {
            for (int i = 0; i < 3; i++) {
                for (int j = 0; j < 3; j++) {
                    /* fix: the original wrote to temp_sudoku[9][9] — an
                       out-of-bounds cell — so the copy never happened
                       and the check ran on uninitialized memory. */
                    temp_sudoku[x * 3 + y][i * 3 + j] =
                        sudoku[i + (x * 3)][j + (y * 3)];
                }
            }
        }
    }
    return verify_rows_nums(temp_sudoku);
}

/* pthread entry point: run the column check and store the result. */
void *complete_column_verification()
{
    printf("Hijo Columna ID: %ld \n", syscall(SYS_gettid));
    correct_columns = verify_columns();
    pthread_exit(0);
}

/* pthread entry point: run the row check and store the result. */
void *complete_row_verification()
{
    printf("Hijo Fila ID: %ld \n", syscall(SYS_gettid));
    correct_rows = verify_rows();
    pthread_exit(0);
}

/* Maps the sudoku text file into the global sudoku matrix.
   NOTE(review): assumes the file stores the 81 digits contiguously with
   no separators, as the original sequential read implied — TODO confirm
   the input format. */
void mapping_Sudoku(int fd)
{
    struct stat stat_s;
    fstat(fd, &stat_s);
    int size = stat_s.st_size;

    char *ptr = (char *)mmap(0, size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (ptr == MAP_FAILED) {
        perror("mmap");
        close(fd);
        exit(1);
    }

    /* fix: the original incremented a shared char_pos inside nested
       parallel loops — a data race that could scramble the board.  The
       file offset is now derived from (i, j), which is race-free. */
    #pragma omp parallel for schedule(dynamic)
    for (int i = 0; i < 9; i++) {
        for (int j = 0; j < 9; j++) {
            sudoku[i][j] = ptr[i * 9 + j];
        }
    }
    munmap(ptr, size);
    close(fd);
}

int main(int argc, char *argv[])
{
    omp_set_num_threads(1);
    /* build: gcc -fopenmp -pthread -o sudokuValidator sudokuValidator.c */
    if (argc < 2) {
        printf("Archivo de suduko ingresado incorrectamente. \n");
        return 1;
    }

    int input;
    /* Open the txt file; report an error if it cannot be opened. */
    if ((input = open(argv[1], O_RDONLY)) < 0) {
        perror("Archivo de sudoko que fue ingresado no se puede abrir. \n");
        return 1;
    } else {
        mapping_Sudoku(input);

        /* Remember the parent's identity before the first fork. */
        pid_t parent_pid = getpid();
        int child = fork();
        if (child < 0) {
            perror("Error de hacer Fork.");
            return 1;
        } else if (child == 0) {
            /* Child: show the parent's threads with ps. */
            char p_pid[16];   /* fix: 6 bytes could overflow for large PIDs */
            sprintf(p_pid, "%d", (int)parent_pid);
            execlp("ps", "ps", "-p", p_pid, "-lLf", NULL);
        } else {
            /* Parent: verify columns on a dedicated thread. */
            pthread_t col_verification;
            if (pthread_create(&col_verification, NULL, complete_column_verification, NULL)) {
                perror("Error al crear el Hilo");
                return 1;
            }
            if (pthread_join(col_verification, NULL)) {
                perror("Error al intentar unirse el hilo.");
                return 1;
            }
            printf("Hijo principal es : %ld \n", syscall(SYS_gettid));
            usleep(30000);
            printf("Hijo terminado ... \n");

            /* Then verify rows on another thread. */
            pthread_t row_verification;
            if (pthread_create(&row_verification, NULL, complete_row_verification, NULL)) {
                perror("Error al crear el Hilo");
                return 1;
            }
            if (pthread_join(row_verification, NULL)) {
                perror("Error al intentar unirse el hilo.");
                return 1;
            }

            /* Final verdict. */
            if (correct_rows == 0 && correct_columns == 0) {
                printf("|--- Solucion Valida ---| \n");
            } else {
                printf("|--- Solucion Invalida :/ ---|\n");
            }

            /* Second child: same ps report as before. */
            int child2 = fork();
            if (child2 == 0) {
                char p_pid[16];
                sprintf(p_pid, "%d", (int)parent_pid);
                execlp("ps", "ps", "-p", p_pid, "-lLf", NULL);
            } else {
                usleep(30000);
                printf("Hijo terminado... \n");
                return 0;
            }
        }
    }
}
errors.h
#ifndef __errors_h__
#define __errors_h__

#include <iostream>
#include <string>
#include <cstdlib> // fix: std::exit is used throughout but <cstdlib> was never included

static constexpr int ERROR_POSITIVE         = 1;   ///< Return Error
static constexpr int ERROR_RANGE            = 2;   ///< Return Error
static constexpr int ERROR_ACCURACY         = 3;   ///< Return Error
static constexpr int ERROR_EXACT            = 4;   ///< Return Error
static constexpr int ERROR_INFINITE         = 5;   ///< Return Error
static constexpr int ERROR_WEIGHTS          = 6;   ///< Return Error
static constexpr int ERROR_PATTERN          = 7;   ///< Return Error
static constexpr int ERROR_PROTOCOL         = 8;   ///< Return Error
static constexpr int ERROR_MESSAGES         = 91;  ///< Return Error
static constexpr int ERROR_INVALID_MESSAGES = 92;  ///< Return Error
// ERRORS 101-105 are related to raising errors in parse_args.h
static constexpr int ERROR_MAGNETIZATION    = 111; ///< Return Error
static constexpr int ERROR_ATANHERF_FILE    = 112; ///< Return Error

/**
 * @brief Raise error because N is not positive.
 * @param N The given value of N.
 */
inline void error_Npositive (const long int & N)
{
  std::cerr << "N must be positive; given: " << N << std::endl;
  std::exit(ERROR_POSITIVE);
}

/**
 * @brief Raise error because K is not positive.
 * @param K The given value of K.
 */
inline void error_Kpositive (const int & K)
{
  std::cerr << "K must be positive; given: " << K << std::endl;
  std::exit(ERROR_POSITIVE);
}

/**
 * @brief Raise error because max_iters is negative.
 * @param max_iters The given value of max_iters.
 */
inline void error_maxiters (const long int & max_iters)
{
  std::cerr << "max_iters must be non-negative; given: " << max_iters << std::endl;
  std::exit(ERROR_POSITIVE);
}

/**
 * @brief Raise error because max_steps is negative.
 * @param max_steps The given value of max_steps.
 */
inline void error_maxsteps (const long int & max_steps)
{
  std::cerr << "max_steps must be non-negative; given: " << max_steps << std::endl;
  std::exit(ERROR_POSITIVE);
}

/**
 * @brief Raise error because damping is not bounded in [0, 1).
 * @param damping The given value of damping.
 */
inline void error_damping (const double & damping)
{
  std::cerr << "damping must be in [0,1); given: " << damping << std::endl;
  std::exit(ERROR_RANGE);
}

/**
 * @brief Raise error because randfact is not bounded in [0, 1).
 * @param randfact The given value of randfact.
 */
inline void error_randfact (const double & randfact)
{
  std::cerr << "randfact must be in [0,1); given: " << randfact << std::endl;
  std::exit(ERROR_RANGE);
}

/**
 * @brief Raise error because accuracy has not a valid value.
 * @param accuracy The given value of accuracy.
 */
inline void error_accuracy (const std::string & accuracy)
{
  std::cerr << "Invalid accuracy variable given. Given : " << accuracy << std::endl;
  std::exit(ERROR_ACCURACY);
}

/**
 * @brief Raise error because too many accuracies were given.
 */
inline void error_num_accuracy ()
{
  std::cerr << "Too many accuracy variables given. Needed two." << std::endl;
  std::exit(ERROR_ACCURACY);
}

/**
 * @brief Raise error because accuracy1 has not a valid value.
 * @param accuracy1 The given value of accuracy1.
 */
inline void error_accuracy1 (const std::string & accuracy1)
{
  std::cerr << "accuracy1 must be one of 'exact', 'accurate' or 'none'; given: " << accuracy1 << std::endl;
  std::exit(ERROR_ACCURACY);
}

/**
 * @brief Raise error because accuracy2 has not a valid value.
 * @param accuracy2 The given value of accuracy2.
 */
inline void error_accuracy2 (const std::string & accuracy2)
{
  std::cerr << "accuracy2 must be one of 'exact', 'accurate' or 'none'; given: " << accuracy2 << std::endl;
  std::exit(ERROR_ACCURACY);
}

/**
 * @brief Raise error because with accuracy1 == 'exact' the value of N must be odd.
 * @param N The given value of N.
 */
inline void error_Nexact (const int & N)
{
  std::cerr << "when accuracy1 == 'exact', N must be odd; given: " << N << std::endl;
  std::exit(ERROR_EXACT);
}

/**
 * @brief Raise error because with accuracy2 == 'exact' the value of K must be odd.
 * @param K The given value of K.
 */
inline void error_Kexact (const int & K)
{
  std::cerr << "when accuracy2 == 'exact', K must be odd; given: " << K << std::endl;
  std::exit(ERROR_EXACT);
}

/**
 * @brief Raise error because beta is finite (not yet supported).
 * @param beta The given value of beta.
 */
inline void error_infinite (const double & beta)
{
#ifdef _OPENMP
#pragma omp single
#endif
  // `omp single` so only one thread of a parallel region prints.
  std::cerr << "finite beta not yet supported; given: " << beta << std::endl;
  std::exit(ERROR_INFINITE);
}

/**
 * @brief Raise error because weight filename is not found.
 * @param filename The given value of filename.
 */
inline void error_message_weights (const std::string & filename)
{
#ifdef _OPENMP
#pragma omp single
#endif
  std::cerr << "Weights file not found! Given : " << filename << std::endl;
  std::exit(ERROR_WEIGHTS);
}

/**
 * @brief Raise error because input filename is not found.
 * @param filename The given value of filename.
 */
inline void error_pattern (const std::string & filename)
{
  std::cerr << "Input file not found! Given: " << filename << std::endl;
  std::exit(ERROR_PATTERN);
}

/**
 * @brief Raise error because protocol type is invalid.
 * @param prot The given value of protocol type.
 */
inline void error_protocol (const std::string & prot)
{
  std::cerr << "Invalid protocol parameter. Given: " << prot
            << ". Possible values are \"scoping\", \"pseudo_reinforcement\", \"free_scoping\" and \"standard_reinforcement\""
            << std::endl;
  // fix: exit with ERROR_PROTOCOL; the original exited with ERROR_PATTERN,
  // making a bad protocol indistinguishable from a missing input file.
  std::exit(ERROR_PROTOCOL);
}

/**
 * @brief Raise error because messages filename is not found.
 * @param filename The given value of filename.
 */
inline void error_messages (const std::string & filename)
{
  std::cerr << "Messages file not found! Given: " << filename << std::endl;
  std::exit(ERROR_MESSAGES);
}

/**
 * @brief Raise error because messages filename is not well formatted.
 * @param filename The given value of filename.
 */
inline void error_invalid_messages (const std::string & filename)
{
  std::cerr << "Invalid messages file! Given: " << filename << std::endl;
  std::exit(ERROR_INVALID_MESSAGES);
}

/**
 * @brief Raise error because Mag parameter is not valid.
 * @param mag The given value of mag.
 */
inline void error_magnetization (const int & mag)
{
  std::cerr << "Invalid magnetization found. Given : " << mag << std::endl;
  std::exit(ERROR_MAGNETIZATION);
}

/**
 * @brief Raise error because atanherf interpolation filename is not found.
 * It suggests how to download it.
 */
inline void error_atanherf_file ()
{
  std::cerr << "Atanherf interpolation coefficients file not found."
               " Please download it before running to prevent some troubles."
               " The appropriated script could be used and you can find it in the 'scripts' directory."
               " Just run 'python download_atanherf.py'" << std::endl;
  std::exit(ERROR_ATANHERF_FILE);
}

#endif // __errors_h__
gemm_symm_int8.h
// chgemm is pleased to support the open source community by supporting ncnn available.
//
// author:tpoisonooo (https://github.com/tpoisonooo/chgemm) implement symmetric int8 GEMM on aarch64.
//
// Copyright (C) 2019 tpoisonooo. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#pragma once

// Everything below is aarch64-only packing code and inline-assembly kernels.
#if __aarch64__

// Split the reduction length k into the chunk counts consumed by the kernels:
// k8 = number of 8-wide chunks, then (from the remainder) k4/k2/k1 are 0/1
// flags for one 4-, 2- and 1-wide tail chunk.  k8_even is 0 when k8 is even
// and 1 when it is odd (the kernels peel one iteration for an odd count).
// k itself is restored at the end via ktmp.
#define DECOMPOSE_K \
    int ktmp = k; \
    int k8 = k >> 3; \
    int k8_even = (k8 % 2 == 0) ? 0 : 1; \
    k -= (k8 << 3); \
    int k4 = k >> 2; \
    k -= (k4 << 2); \
    int k2 = k >> 1; \
    k -= (k2 << 1); \
    int k1 = k; \
    k = ktmp;

// Same idea for the output width n: n4 four-column panels, then at most one
// two-column (n2) and one single-column (n1) tail panel.  n is restored.
#define DECOMPOSE_N \
    int ntmp = n; \
    int n4 = n >> 2; \
    n -= (n4 << 2); \
    int n2 = n >> 1; \
    n -= (n2 << 1); \
    int n1 = n; \
    n = ntmp;

// Set to 1 to dump matrices before/after packing (debug only).
#define PRINT_MATRIX 0

#if PRINT_MATRIX
// Print an m x k int8 matrix stored with row stride ldx.
static void print_int8_matrix(char* name, const int8_t* a, int m, int k, int ldx)
{
    fprintf(stdout, "------------- %s \n", name);
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < k; ++j) {
            fprintf(stdout, "%d \t", a[i * ldx + j]);
        }
        fprintf(stdout, "\n\n");
    }
}

// Print an m x k int32 matrix stored with row stride ldx.
static void print_int32_matrix(char* name, const int32_t* a, int m, int k, int ldx)
{
    fprintf(stdout, "------------- %s \n", name);
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < k; ++j) {
            fprintf(stdout, "%d \t", a[i * ldx + j]);
        }
        fprintf(stdout, "\n\n");
    }
}

// Print a float vector of the given length.
static void print_fp32_vec(char* name, const float* a, int len)
{
    fprintf(stdout, "------------- %s \n", name);
    for (int i = 0; i < len; ++i) {
        fprintf(stdout, "%f \t", a[i]);
    }
    fprintf(stdout, "\n\n");
}
#endif

// Pack B (k rows x n cols, row stride ldx) into sb for the micro-kernels.
// Columns are grouped into panels of 4, then 2, then 1; inside each panel
// the k dimension is consumed in groups of 8/4/2/1 rows, and each group is
// stored column-interleaved (all panel rows of column c, then column c+1,..).
static void reorder_b(const int8_t* b, int8_t* sb, const int k, const int n, const int ldx)
{
#if PRINT_MATRIX
    print_int8_matrix("b", b, k, n, ldx);
    int8_t* origin = sb;
#endif
    int i = 0;
    // ---- panels of 4 columns ----
    for (; i + 3 < n; i += 4) {
        // p0..p7 walk 8 consecutive rows of B, starting at column i.
        const int8_t* p0 = b + i;
        const int8_t* p1 = b + 1 * ldx + i;
        const int8_t* p2 = b + 2 * ldx + i;
        const int8_t* p3 = b + 3 * ldx + i;
        const int8_t* p4 = b + 4 * ldx + i;
        const int8_t* p5 = b + 5 * ldx + i;
        const int8_t* p6 = b + 6 * ldx + i;
        const int8_t* p7 = b + 7 * ldx + i;
        int j = 0;
        for (; j + 7 < k; j += 8) {
            // 8 rows x 4 cols: rows 0..7 of column i, then column i+1, ...
            sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p2[0]; sb[3] = p3[0];
            sb[4] = p4[0]; sb[5] = p5[0]; sb[6] = p6[0]; sb[7] = p7[0];
            sb[8] = p0[1]; sb[9] = p1[1]; sb[10] = p2[1]; sb[11] = p3[1];
            sb[12] = p4[1]; sb[13] = p5[1]; sb[14] = p6[1]; sb[15] = p7[1];
            sb[16] = p0[2]; sb[17] = p1[2]; sb[18] = p2[2]; sb[19] = p3[2];
            sb[20] = p4[2]; sb[21] = p5[2]; sb[22] = p6[2]; sb[23] = p7[2];
            sb[24] = p0[3]; sb[25] = p1[3]; sb[26] = p2[3]; sb[27] = p3[3];
            sb[28] = p4[3]; sb[29] = p5[3]; sb[30] = p6[3]; sb[31] = p7[3];
            sb += 32;
            p0 += 8 * ldx; p1 += 8 * ldx; p2 += 8 * ldx; p3 += 8 * ldx;
            p4 += 8 * ldx; p5 += 8 * ldx; p6 += 8 * ldx; p7 += 8 * ldx;
        }
        if (j + 3 < k) { // 4-row tail
            j += 4;
            sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p2[0]; sb[3] = p3[0];
            sb[4] = p0[1]; sb[5] = p1[1]; sb[6] = p2[1]; sb[7] = p3[1];
            sb[8] = p0[2]; sb[9] = p1[2]; sb[10] = p2[2]; sb[11] = p3[2];
            sb[12] = p0[3]; sb[13] = p1[3]; sb[14] = p2[3]; sb[15] = p3[3];
            sb += 16;
            p0 += 4 * ldx; p1 += 4 * ldx; p2 += 4 * ldx; p3 += 4 * ldx;
        }
        if (j + 1 < k) { // 2-row tail
            j += 2;
            sb[0] = p0[0]; sb[1] = p1[0];
            sb[2] = p0[1]; sb[3] = p1[1];
            sb[4] = p0[2]; sb[5] = p1[2];
            sb[6] = p0[3]; sb[7] = p1[3];
            sb += 8;
            p0 += 2 * ldx; p1 += 2 * ldx;
        }
        if (j < k) { // final single row
            sb[0] = p0[0]; sb[1] = p0[1]; sb[2] = p0[2]; sb[3] = p0[3];
            sb += 4;
            p0 += ldx;
        }
    }
    // ---- one panel of 2 columns, when n % 4 >= 2 ----
    if (i + 1 < n) {
        const int8_t* p0 = b + i;
        const int8_t* p1 = b + 1 * ldx + i;
        const int8_t* p2 = b + 2 * ldx + i;
        const int8_t* p3 = b + 3 * ldx + i;
        const int8_t* p4 = b + 4 * ldx + i;
        const int8_t* p5 = b + 5 * ldx + i;
        const int8_t* p6 = b + 6 * ldx + i;
        const int8_t* p7 = b + 7 * ldx + i;
        int j = 0;
        for (; j + 7 < k; j += 8) {
            sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p2[0]; sb[3] = p3[0];
            sb[4] = p4[0]; sb[5] = p5[0]; sb[6] = p6[0]; sb[7] = p7[0];
            sb[8] = p0[1]; sb[9] = p1[1]; sb[10] = p2[1]; sb[11] = p3[1];
            sb[12] = p4[1]; sb[13] = p5[1]; sb[14] = p6[1]; sb[15] = p7[1];
            sb += 16;
            p0 += 8 * ldx; p1 += 8 * ldx; p2 += 8 * ldx; p3 += 8 * ldx;
            p4 += 8 * ldx; p5 += 8 * ldx; p6 += 8 * ldx; p7 += 8 * ldx;
        }
        if (j + 3 < k) {
            j += 4;
            sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p2[0]; sb[3] = p3[0];
            sb[4] = p0[1]; sb[5] = p1[1]; sb[6] = p2[1]; sb[7] = p3[1];
            sb += 8;
            p0 += 4 * ldx; p1 += 4 * ldx; p2 += 4 * ldx; p3 += 4 * ldx;
        }
        if (j + 1 < k) {
            j += 2;
            sb[0] = p0[0]; sb[1] = p1[0];
            sb[2] = p0[1]; sb[3] = p1[1];
            sb += 4;
            p0 += 2 * ldx; p1 += 2 * ldx;
        }
        if (j < k) {
            sb[0] = p0[0]; sb[1] = p0[1];
            sb += 2;
            p0 += ldx;
        }
        i += 2;
    }
    // ---- final single column, when n is odd ----
    if (i < n) {
        const int8_t* p0 = b + i;
        const int8_t* p1 = b + 1 * ldx + i;
        const int8_t* p2 = b + 2 * ldx + i;
        const int8_t* p3 = b + 3 * ldx + i;
        const int8_t* p4 = b + 4 * ldx + i;
        const int8_t* p5 = b + 5 * ldx + i;
        const int8_t* p6 = b + 6 * ldx + i;
        const int8_t* p7 = b + 7 * ldx + i;
        int j = 0;
        for (; j + 7 < k; j += 8) {
            sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p2[0]; sb[3] = p3[0];
            sb[4] = p4[0]; sb[5] = p5[0]; sb[6] = p6[0]; sb[7] = p7[0];
            sb += 8;
            p0 += 8 * ldx; p1 += 8 * ldx; p2 += 8 * ldx; p3 += 8 * ldx;
            p4 += 8 * ldx; p5 += 8 * ldx; p6 += 8 * ldx; p7 += 8 * ldx;
        }
        if (j + 3 < k) {
            j += 4;
            sb[0] = p0[0]; sb[1] = p1[0]; sb[2] = p2[0]; sb[3] = p3[0];
            sb += 4;
            p0 += 4 * ldx; p1 += 4 * ldx; p2 += 4 * ldx; p3 += 4 * ldx;
        }
        if (j + 1 < k) {
            j += 2;
            sb[0] = p0[0]; sb[1] = p1[0];
            sb += 2;
            p0 += 2 * ldx; p1 += 2 * ldx;
        }
        if (j < k) {
            sb[0] = p0[0];
            sb += 1;
            p0 += ldx;
        }
    }
#if PRINT_MATRIX
    print_int8_matrix("sb", origin, k, n, n);
#endif
}

// Pack A (m rows x k cols, row stride ldx) into sa: rows are grouped into
// panels of 4 / 2 / 1 and interleaved along k in chunks of 8/4/2/1, matching
// the load order of the micro-kernels below.
static void reorder_a(int8_t* a, int8_t* sa, int m, const int k, const int ldx)
{
#if PRINT_MATRIX
    print_int8_matrix("a", a, m, k, ldx);
    int8_t* origin = sa;
#endif
    int i
= 0; for (; i + 3 < m; i += 4) { int8_t* p0 = a; int8_t* p1 = a + ldx; int8_t* p2 = a + 2 * ldx; int8_t* p3 = a + 3 * ldx; int j = 0; for (; j + 7 < k; j += 8) { asm volatile( "ld1 {v0.8b}, [%0], #8 \n" "ld1 {v1.8b}, [%1], #8 \n" "ld1 {v2.8b}, [%2], #8 \n" "ld1 {v3.8b}, [%3], #8 \n" "st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [%4], #32\n" : "=r"(p0), "=r"(p1), "=r"(p2), "=r"(p3), "=r"(sa) : "0"(p0), "1"(p1), "2"(p2), "3"(p3), "4"(sa) : "cc", "memory", "v0", "v1", "v2", "v3"); } if (j + 3 < k) { j += 4; asm volatile( "ld1 {v0.8b}, [%0] \n" "add %0, %0, #4 \n" "ld1 {v1.8b}, [%1] \n" "add %1, %1, #4 \n" "ld1 {v2.8b}, [%2] \n" "add %2, %2, #4 \n" "ld1 {v3.8b}, [%3] \n" "add %3, %3, #4 \n" "trn1 v0.2s, v0.2s, v1.2s \n" "st1 {v0.8b}, [%4], #8 \n" "trn1 v2.2s, v2.2s, v3.2s \n" "st1 {v2.8b}, [%4], #8 \n" : "=r"(p0), "=r"(p1), "=r"(p2), "=r"(p3), "=r"(sa) : "0"(p0), "1"(p1), "2"(p2), "3"(p3), "4"(sa) : "cc", "memory", "v0", "v1", "v2", "v3"); } if (j + 1 < k) { j += 2; asm volatile( "ld1 {v0.8b}, [%0] \n" "add %0, %0, #2 \n" "ld1 {v1.8b}, [%1] \n" "add %1, %1, #2 \n" "ld1 {v2.8b}, [%2] \n" "add %2, %2, #2 \n" "ld1 {v3.8b}, [%3] \n" "add %3, %3, #2 \n" "trn1 v0.4h, v0.4h, v1.4h \n" "trn1 v2.4h, v2.4h, v3.4h \n" "trn1 v0.2s, v0.2s, v2.2s \n" "st1 {v0.8b}, [%4], #8 \n" : "=r"(p0), "=r"(p1), "=r"(p2), "=r"(p3), "=r"(sa) : "0"(p0), "1"(p1), "2"(p2), "3"(p3), "4"(sa) : "cc", "memory", "v0", "v1", "v2", "v3"); } if (j < k) { *sa++ = *p0; *sa++ = *p1; *sa++ = *p2; *sa++ = *p3; } a += 4 * ldx; } if (i + 1 < m) { i += 2; int8_t* p0 = a; int8_t* p1 = a + ldx; int j = 0; for (; j + 7 < k; j += 8) { asm volatile( "ld1 {v0.8b}, [%0], #8 \n" "ld1 {v1.8b}, [%1], #8 \n" "st1 {v0.8b, v1.8b}, [%2], #16\n" : "=r"(p0), "=r"(p1), "=r"(sa) : "0"(p0), "1"(p1), "2"(sa) : "cc", "memory", "v0", "v1"); } if (j + 3 < k) { j += 4; asm volatile( "ld1 {v0.8b}, [%0] \n" "add %0, %0, #4 \n" "ld1 {v1.8b}, [%1] \n" "add %1, %1, #4 \n" "trn1 v0.2s, v0.2s, v1.2s \n" "st1 {v0.8b}, [%2], #8 \n" : "=r"(p0), "=r"(p1), 
"=r"(sa) : "0"(p0), "1"(p1), "2"(sa) : "cc", "memory", "v0", "v1"); } if (j + 1 < k) { j += 2; sa[0] = p0[0]; sa[1] = p0[1]; sa[2] = p1[0]; sa[3] = p1[1]; sa += 4; p0 += 2; p1 += 2; } if (j < k) { sa[0] = p0[0]; sa[1] = p1[0]; sa += 2; } a += 2 * ldx; } if (i < m) { memcpy(sa, a, sizeof(int8_t) * ldx); } #if PRINT_MATRIX print_int8_matrix("sa", origin, m, k, k); #endif } static void int8kernel_m1(void* dst, int8_t* sa, int8_t* sb, int, int k, int n, int, float* scales, float* bias) { void* pc = dst; int8_t* pa = sa; int8_t* pb = sb; DECOMPOSE_K DECOMPOSE_N if (n4 > 0) { asm volatile( "9: \n" " eor v8.16b, v8.16b, v8.16b \n" " eor v9.16b, v9.16b, v9.16b \n" " eor v10.16b, v10.16b, v10.16b\n" " eor v11.16b, v11.16b, v11.16b\n" " mov x8, %0 // PanelA\n" " cmp %w4, #0 \n" " beq 1f \n" " mov w19, %w4 \n" " cmp %w3, #0 \n" " beq 2f// loop number is even \n" " // start loopm1_kd8_nd4\n" " subs w19, w19, #1 \n" " ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 // load four lines of B\n" " ld1 {v2.8b}, [%0], #8 // load two lines of PanelA\n" " smull v0.8h, v4.8b, v2.8b \n" " saddlp v8.4s, v0.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " saddlp v9.4s, v0.8h \n" " smull v0.8h, v6.8b, v2.8b \n" " saddlp v10.4s, v0.8h \n" " smull v0.8h, v7.8b, v2.8b \n" " saddlp v11.4s, v0.8h \n" " cmp w19, #0 \n" " beq 3f \n" " 2: \n" " ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 \n" " ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [%1], #32\n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v2.8b, v4.8b \n" " smlal v0.8h, v3.8b, v12.8b \n" " sadalp v8.4s, v0.8h \n" " smull v1.8h, v2.8b, v5.8b \n" " smlal v1.8h, v3.8b, v13.8b \n" " sadalp v9.4s, v1.8h \n" " smull v0.8h, v2.8b, v6.8b \n" " smlal v0.8h, v3.8b, v14.8b \n" " sadalp v10.4s, v0.8h \n" " smull v1.8h, v2.8b, v7.8b \n" " smlal v1.8h, v3.8b, v15.8b \n" " sadalp v11.4s, v1.8h \n" " subs w19, w19, #2 \n" " bne 2b \n" " 3: \n" " addp v8.4s, v8.4s, v9.4s \n" " addp v10.4s, v10.4s, v11.4s\n" " addp v8.4s, v8.4s, v10.4s \n" " // start process kd4 kd2 kd1 
cases\n" " 1: \n" " cmp %w5, #0 \n" " beq 4f \n" " // start subkernel_m1n4k4 \n" " ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n" " sxtl v4.8h, v4.8b \n" " sxtl v5.8h, v5.8b \n" " mov v6.d[0], v4.d[1] \n" " mov v7.d[0], v5.d[1] \n" " ld1 {v2.8b}, [%0] // load A1x4\n" " add %0, %0, #4 \n" " sxtl v2.8h, v2.8b \n" " smull v12.4s, v2.4h, v4.4h \n" " smull v13.4s, v2.4h, v6.4h \n" " smull v14.4s, v2.4h, v5.4h \n" " smull v15.4s, v2.4h, v7.4h \n" " addp v12.4s, v12.4s, v13.4s\n" " addp v14.4s, v14.4s, v15.4s\n" " addp v12.4s, v12.4s, v14.4s\n" " add v8.4s, v8.4s, v12.4s \n" " 4: \n" " cmp %w6, #0 \n" " beq 5f \n" " // start subkernel_m1n4k2\n" " ld1 {v4.8b}, [%0] // load A1x2 \n" " add %0, %0, #2 \n" " ld1 {v0.8b}, [%1], #8 // load B2x4 \n" " mov v4.h[1], v4.h[0] \n" " mov v4.s[1], v4.s[0] \n" " smull v0.8h, v0.8b, v4.8b \n" " sadalp v8.4s, v0.8h \n" " 5: \n" " cmp %w7, #0 \n" " beq 6f \n" " // start subkernel_m1n4k1 \n" " ld1 {v4.8b}, [%1] // load B1x4\n" " add %1, %1, #4 \n" " ld1 {v2.8b}, [%0] // load A1x1\n" " add %0, %0, #1 \n" " sxtl v4.8h, v4.8b \n" " sxtl v2.8h, v2.8b \n" " smlal v8.4s, v4.4h, v2.h[0]\n" " 6: \n" " cmp %9, #0 \n" " beq 7f \n" " ldr w24, [%9] \n" " // int32 => fp32 \n" " scvtf v8.4s, v8.4s \n" " // fp32 *= scale_tm \n" " mov v12.s[0], w24 \n" " fmul v8.4s, v8.4s, v12.s[0]\n" " cmp %10, #0 \n" " beq 8f \n" " // fp32 += bias_tm \n" " ldr w24, [%10] \n" " dup v15.4s, w24 \n" " fadd v8.4s, v8.4s, v15.4s \n" " 8: \n" " // fp32 -> int32 \n" " fcvtas v8.4s, v8.4s\n" " // int32 -> int16 \n" " sqxtn v8.4h, v8.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v8.8h \n" " // save \n" " st1 {v8.s}[0], [%2]\n" " add %2, %2, #4 \n" " b 10f\n" " 7: \n" " st1 {v8.4s}, [%2], #16 \n" " 10: \n" " subs %w8, %w8, #1 \n" " mov %0, x8 \n" " bne 9b \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc), // %2 "=r"(k8_even), // %3 "=r"(k8), // %4 "=r"(k4), // %5 "=r"(k2), // %6 "=r"(k1), // %7 "=r"(n4), // %8 "=r"(scales), // %9 "=r"(bias) // %10 : "0"(pa), "1"(pb), "2"(pc), 
"3"(k8_even), "4"(k8), "5"(k4), "6"(k2), "7"(k1), "8"(n4), "9"(scales), "10"(bias)
            : "cc", "memory", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
    }
    // ---- two-column tail (n % 4 >= 2): same reduction pipeline, two
    // accumulators; requantized results stored as 2 int8 bytes, raw as 2 int32.
    if (n2 > 0) {
        asm volatile(
            "9: \n"
            " eor v8.16b, v8.16b, v8.16b \n"
            " eor v9.16b, v9.16b, v9.16b \n"
            " eor v10.16b, v10.16b, v10.16b\n"
            " eor v11.16b, v11.16b, v11.16b\n"
            " mov x8, %0 // PanelA\n"
            " cmp %w4, #0 \n"
            " beq 1f // k <= 7\n"
            " mov w19, %w4\n"
            " cmp %w3, #0 \n"
            " beq 2f // loop number is even \n"
            " // start loopmd1_kd8_nd2 \n"
            " subs w19, w19, #1 \n"
            " ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n"
            " ld1 {v2.8b}, [%0], #8 // load two lines of PanelA\n"
            " smull v0.8h, v4.8b, v2.8b \n"
            " saddlp v8.4s, v0.8h \n"
            " smull v0.8h, v5.8b, v2.8b \n"
            " saddlp v9.4s, v0.8h \n"
            " cmp w19, #0 \n"
            " beq 3f \n"
            " 2: \n"
            " ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32\n"
            " ld1 {v2.8b, v3.8b}, [%0], #16 \n"
            " smull v0.8h, v2.8b, v4.8b \n"
            " smlal v0.8h, v3.8b, v6.8b \n"
            " sadalp v8.4s, v0.8h \n"
            " smull v1.8h, v2.8b, v5.8b \n"
            " smlal v1.8h, v3.8b, v7.8b \n"
            " sadalp v9.4s, v1.8h \n"
            " subs w19, w19, #2 \n"
            " bne 2b \n"
            " 3: \n"
            " addp v8.4s, v8.4s, v9.4s \n"
            " addp v8.4s, v8.4s, v8.4s \n"
            " // start process kd4 kd2 kd1 cases \n"
            " 1: \n"
            " cmp %w5, 0 \n"
            " beq 4f \n"
            " // start subkernel_m1n2k4 \n"
            " ld1 {v4.8b}, [%1], #8 // load B4x2\n"
            " sxtl v4.8h, v4.8b \n"
            " mov v6.d[0], v4.d[1] \n"
            " ld1 {v2.8b}, [%0] // load A1x4\n"
            " add %0, %0, #4 \n"
            " sxtl v2.8h, v2.8b \n"
            " smull v9.4s, v2.4h, v4.4h \n"
            " smull v10.4s, v2.4h, v6.4h \n"
            " addp v9.4s, v9.4s, v10.4s \n"
            " addp v9.4s, v9.4s, v9.4s \n"
            " add v8.4s, v8.4s, v9.4s \n"
            " 4: \n"
            " cmp %w6, 0 \n"
            " beq 5f \n"
            " // start subkernel_m1n2k2 \n"
            " ld1 {v4.8b}, [%0] // load A1x2\n"
            " add %0, %0, #2 \n"
            " ld1 {v0.8b}, [%1] // load B2x2\n"
            " add %1, %1, #4 \n"
            " mov v4.h[1], v4.h[0] \n"
            " smull v0.8h, v4.8b, v0.8b \n"
            " saddlp v0.4s, v0.8h \n"
            " add v8.4s, v8.4s, v0.4s \n"
            " 5: \n"
            " cmp %w7, 0 \n"
            " beq 6f \n"
            " // start subkernel_m1n2k1 \n"
            " ld1 {v4.8b}, [%1] // load B1x2\n"
            " add %1, %1, #2 \n"
            " ld1 {v2.8b}, [%0] // load A1x1\n"
            " add %0, %0, #2 \n"
            " sxtl v4.8h, v4.8b \n"
            " sxtl v2.8h, v2.8b \n"
            " smlal v8.4s, v4.4h, v2.h[0]\n"
            " 6: \n"
            " cmp %9, #0 \n"
            " beq 7f \n"
            " // v12: s0 s1 \n"
            " ldr w24, [%9] \n"
            " mov v12.s[0], w24 \n"
            " mov v12.s[1], v12.s[0] \n"
            " // int32 => fp32 \n"
            " scvtf v8.2s, v8.2s \n"
            " // fp32 *= scale_tm \n"
            " fmul v8.2s, v8.2s, v12.2s \n"
            " cmp %10, #0 \n"
            " beq 8f \n"
            " // fp32 += bias_tm \n"
            " ldr w24, [%10] \n"
            " mov v12.s[0], w24 \n"
            " mov v12.s[1], v12.s[0] \n"
            " fadd v8.2s, v8.2s, v12.2s \n"
            " 8:\n"
            " // fp32 -> int32 \n"
            " fcvtas v8.2s, v8.2s\n"
            " // int32 -> int16 \n"
            " sqxtn v8.4h, v8.4s \n"
            " // int16 -> int8 \n"
            " sqxtn v8.8b, v8.8h \n"
            " // save \n"
            " st1 {v8.h}[0], [%2]\n"
            " add %2, %2, #2 \n"
            " b 10f\n"
            " 7: \n"
            " st1 {v8.2s}, [%2], #8 \n"
            " 10: \n"
            " mov %0, x8 \n"
            : "=r"(pa), // %0
            "=r"(pb), // %1
            "=r"(pc), // %2
            "=r"(k8_even), // %3
            "=r"(k8), // %4
            "=r"(k4), // %5
            "=r"(k2), // %6
            "=r"(k1), // %7
            "=r"(n4), // %8
            "=r"(scales), // %9
            "=r"(bias) // %10
            : "0"(pa), "1"(pb), "2"(pc), "3"(k8_even), "4"(k8), "5"(k4), "6"(k2), "7"(k1), "8"(n4), "9"(scales), "10"(bias)
            : "cc", "memory", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
    }
    // ---- single-column tail (n odd): one dot product of the packed A row
    // with the last B column; result stored as 1 int8 byte or 1 int32.
    if (n1 > 0) {
        asm volatile(
            "9: \n"
            " eor v8.16b, v8.16b, v8.16b \n"
            " eor v9.16b, v9.16b, v9.16b \n"
            " eor v10.16b, v10.16b, v10.16b\n"
            " eor v11.16b, v11.16b, v11.16b\n"
            " cmp %w4, #0 \n"
            " beq 1f // k <= 7 \n"
            " mov w19, %w4\n"
            " cmp %w3, #0 \n"
            " beq 2f // loop number is even \n"
            " // start loopkd8_nd1 \n"
            " subs w19, w19, #1 \n"
            " ld1 {v4.8b}, [%1], #8 // load B line \n"
            " ld1 {v2.8b}, [%0], #8 // load A line \n"
            " smull v0.8h, v4.8b, v2.8b \n"
            " saddlp v8.4s, v0.8h \n"
            " cmp w19, #0 \n"
            " beq 3f \n"
            " 2: \n"
            " ld1 {v4.8b, v5.8b}, [%1], #16 \n"
            " ld1 {v24.8b, v25.8b}, [%0], #16\n"
            " smull v0.8h, v24.8b, v4.8b \n"
            " smlal v0.8h, v25.8b, v5.8b \n"
            " sadalp v8.4s, v0.8h \n"
            " subs w19, w19, #2 \n"
            " bne 2b \n"
            " 3: \n"
            " addp v8.4s, v8.4s, v8.4s \n"
            " addp v8.4s, v8.4s, v8.4s \n"
            " // start process kd4 kd2 kd1 cases\n"
            " 1: \n"
            " cmp %w5, 0 \n"
            " beq 4f \n"
            " // start subkernel_m1n1k4 \n"
            " ld1 {v4.8b}, [%1] // load B4x1\n"
            " add %1, %1, #4 \n"
            " sxtl v4.8h, v4.8b // extend B4x1 to v4\n"
            " ld1 {v2.8b}, [%0] // load A1x4\n"
            " add %0, %0, #4 \n"
            " sxtl v2.8h, v2.8b \n"
            " smull v9.4s, v2.4h, v4.4h \n"
            " addp v9.4s, v9.4s, v9.4s \n"
            " addp v9.4s, v9.4s, v9.4s \n"
            " add v8.4s, v8.4s, v9.4s \n"
            " 4: \n"
            " cmp %w6, 0 \n"
            " beq 5f \n"
            " // start subkernel_m1n1k2 \n"
            " ld1 {v4.8b}, [%0] // load A1x2\n"
            " add %0, %0, #2 \n"
            " ld1 {v0.8b}, [%1] // load B2x1\n"
            " add %1, %1, #2 \n"
            " smull v0.8h, v0.8b, v4.8b \n"
            " saddlp v0.4s, v0.8h \n"
            " add v8.4s, v8.4s, v0.4s \n"
            " 5: \n"
            " cmp %w7, 0 \n"
            " beq 6f \n"
            " // start subkernel_m1n1k1 \n"
            " ld1 {v0.8b}, [%1] // load B1x1 \n"
            " add %1, %1, #1 \n"
            " ld1 {v1.8b}, [%0] // load A1x1 \n"
            " add %0, %0, #1 \n"
            " sxtl v1.8h, v1.8b \n"
            " sxtl v0.8h, v0.8b \n"
            " smull v0.4s, v1.4h, v0.h[0] \n"
            " add v8.4s, v8.4s, v0.4s \n"
            " 6: \n"
            " cmp %9, #0 \n"
            " beq 7f \n"
            " // int32 => fp32 \n"
            " scvtf v8.2s, v8.2s \n"
            " // fp32 *= scale_tm\n"
            " ldr w24, [%9] \n"
            " mov v12.s[0], w24 \n"
            " fmul v8.2s, v8.2s, v12.2s \n"
            " cmp %10, #0 \n"
            " beq 8f \n"
            " // fp32 += bias_tm \n"
            " ldr w24, [%10] \n"
            " mov v12.s[0], w24 \n"
            " fadd v8.2s, v8.2s, v12.2s \n"
            " 8: \n"
            " // fp32 -> int32 \n"
            " fcvtas v8.2s, v8.2s\n"
            " // int32 -> int16 \n"
            " sqxtn v8.4h, v8.4s \n"
            " // int16 -> int8 \n"
            " sqxtn v8.8b, v8.8h \n"
            " // save \n"
            " st1 {v8.b}[0], [%2]\n"
            " b 10f \n"
            " 7: \n"
            " st1 {v8.s}[0], [%2] \n"
            " 10: \n"
            " mov x0, #0 \n"
            : "=r"(pa), // %0
            "=r"(pb),
// %1
            "=r"(pc), // %2
            "=r"(k8_even), // %3
            "=r"(k8), // %4
            "=r"(k4), // %5
            "=r"(k2), // %6
            "=r"(k1), // %7
            "=r"(n4), // %8
            "=r"(scales), // %9
            "=r"(bias) // %10
            : "0"(pa), "1"(pb), "2"(pc), "3"(k8_even), "4"(k8), "5"(k4), "6"(k2), "7"(k1), "8"(n4), "9"(scales), "10"(bias)
            : "cc", "memory", "x0", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
    }
}

// Micro-kernel for TWO packed rows of A: produces output rows pc0 and pc1.
// When scales == 0 the outputs are raw int32 rows (row stride ldc int32
// elements); otherwise results are requantized (fp32 scale per row from
// scales[0..1], optional bias from bias[0..1], saturating narrow to int8)
// and stored as int8 rows (row stride ldc bytes).
static void int8kernel_m2(void* dst, int8_t* sa, int8_t* sb, int, int k, int n, int ldc, float* scales, float* bias)
{
    void *pc0, *pc1;
    if (scales == 0) {
        // raw int32 output: pointer arithmetic in 4-byte units
        pc0 = (int32_t*)dst;
        pc1 = ((int32_t*)pc0) + ldc;
    } else {
        // requantized int8 output: pointer arithmetic in bytes
        pc0 = dst;
        pc1 = ((int8_t*)pc0) + ldc;
    }
    int8_t* pa = sa;
    int8_t* pb = sb;
    DECOMPOSE_K
    DECOMPOSE_N
    // ---- 4-column panels; %9 (n4) is consumed as the loop counter ----
    if (n4 > 0) {
        asm volatile(
            "9: \n"
            " eor v8.16b, v8.16b, v8.16b \n"
            " eor v9.16b, v9.16b, v9.16b \n"
            " eor v10.16b, v10.16b, v10.16b \n"
            " eor v11.16b, v11.16b, v11.16b \n"
            " eor v12.16b, v12.16b, v12.16b \n"
            " eor v13.16b, v13.16b, v13.16b \n"
            " eor v14.16b, v14.16b, v14.16b \n"
            " eor v15.16b, v15.16b, v15.16b \n"
            " eor v16.16b, v16.16b, v16.16b \n"
            " eor v17.16b, v17.16b, v17.16b \n"
            " eor v18.16b, v18.16b, v18.16b \n"
            " eor v19.16b, v19.16b, v19.16b \n"
            " eor v20.16b, v20.16b, v20.16b \n"
            " eor v21.16b, v21.16b, v21.16b \n"
            " eor v22.16b, v22.16b, v22.16b \n"
            " eor v23.16b, v23.16b, v23.16b \n"
            " mov x8, %0 // PanelA \n"
            " cmp %w5, #0 \n"
            " beq 1f \n"
            " mov w17, %w5 \n"
            " cmp %w4, #0 \n"
            " beq 2f // loop number is even \n"
            " // start loopm2_kd8_nd4\n"
            " subs w17, w17, #1 \n"
            " ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 // load four lines of B\n"
            " ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA \n"
            " smull v0.8h, v4.8b, v2.8b \n"
            " smull v1.8h, v4.8b, v3.8b \n"
            " saddlp v8.4s, v0.8h \n"
            " saddlp v12.4s, v1.8h \n"
            " smull v0.8h, v5.8b, v2.8b \n"
            " smull v1.8h, v5.8b, v3.8b \n"
            " saddlp v9.4s, v0.8h \n"
            " saddlp v13.4s, v1.8h \n"
            " smull v0.8h, v6.8b, v2.8b \n"
            " smull v1.8h, v6.8b, v3.8b \n"
            " saddlp v10.4s, v0.8h \n"
            " saddlp v14.4s, v1.8h \n"
            " smull v0.8h, v7.8b, v2.8b \n"
            " smull v1.8h, v7.8b, v3.8b \n"
            " saddlp v11.4s, v0.8h \n"
            " saddlp v15.4s, v1.8h \n"
            " cmp w17, #0 \n"
            " beq 3f \n"
            " 2: \n"
            " add x12, %1, #32 \n"
            " ld1 {v4.8b, v5.8b}, [%1], #16 \n"
            " ld1 {v2.8b, v3.8b}, [%0], #16 \n"
            " smull v0.8h, v4.8b, v2.8b \n"
            " smull v1.8h, v5.8b, v2.8b \n"
            " ld1 {v6.8b, v7.8b}, [x12], #16 \n"
            " ld1 {v24.8b, v25.8b}, [%0], #16\n"
            " smlal v0.8h, v6.8b, v24.8b \n"
            " smlal v1.8h, v7.8b, v24.8b \n"
            " sadalp v8.4s, v0.8h\n"
            " sadalp v9.4s, v1.8h\n"
            " smull v0.8h, v4.8b, v3.8b \n"
            " smull v1.8h, v5.8b, v3.8b \n"
            " smlal v0.8h, v6.8b, v25.8b \n"
            " smlal v1.8h, v7.8b, v25.8b \n"
            " sadalp v12.4s, v0.8h\n"
            " sadalp v13.4s, v1.8h\n"
            " // start v10v11, v14v15, v18v19, v22v23, error here!\n"
            " ld1 {v4.8b, v5.8b}, [%1], #16 \n"
            " smull v0.8h, v4.8b, v2.8b \n"
            " smull v1.8h, v5.8b, v2.8b \n"
            " ld1 {v6.8b, v7.8b}, [x12], #16 \n"
            " smlal v0.8h, v6.8b, v24.8b \n"
            " smlal v1.8h, v7.8b, v24.8b \n"
            " sadalp v10.4s, v0.8h \n"
            " sadalp v11.4s, v1.8h \n"
            " smull v0.8h, v4.8b, v3.8b \n"
            " smull v1.8h, v5.8b, v3.8b \n"
            " smlal v0.8h, v6.8b, v25.8b \n"
            " smlal v1.8h, v7.8b, v25.8b \n"
            " sadalp v14.4s, v0.8h \n"
            " sadalp v15.4s, v1.8h \n"
            " add %1, %1, #32 \n"
            " subs w17, w17, #2 \n"
            " bne 2b \n"
            " 3: \n"
            " addp v8.4s, v8.4s, v9.4s \n"
            " addp v10.4s, v10.4s, v11.4s\n"
            " addp v12.4s, v12.4s, v13.4s\n"
            " addp v14.4s, v14.4s, v15.4s\n"
            " addp v8.4s, v8.4s, v10.4s \n"
            " addp v9.4s, v12.4s, v14.4s \n"
            " // start process kd4 kd2 kd1 cases \n"
            " 1: \n"
            " cmp %w6, #0 \n"
            " beq 4f \n"
            " // start subkernel_m2n4k4 \n"
            " ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n"
            " sxtl v4.8h, v4.8b \n"
            " sxtl v5.8h, v5.8b \n"
            " mov v6.d[0], v4.d[1] \n"
            " mov v7.d[0], v5.d[1] \n"
            " ld1 {v2.8b}, [%0], #8 // load A2x4\n"
            " sxtl v2.8h, v2.8b \n"
            " mov v3.d[0], v2.d[1] \n"
            " smull v12.4s, v2.4h, v4.4h \n"
            " smull v13.4s, v2.4h, v6.4h \n"
" smull v14.4s, v2.4h, v5.4h \n"
            " smull v15.4s, v2.4h, v7.4h \n"
            " addp v12.4s, v12.4s, v13.4s\n"
            " addp v14.4s, v14.4s, v15.4s\n"
            " addp v12.4s, v12.4s, v14.4s\n"
            " add v8.4s, v8.4s, v12.4s \n"
            " smull v16.4s, v3.4h, v4.4h \n"
            " smull v17.4s, v3.4h, v6.4h \n"
            " smull v18.4s, v3.4h, v5.4h \n"
            " smull v19.4s, v3.4h, v7.4h \n"
            " addp v16.4s, v16.4s, v17.4s\n"
            " addp v18.4s, v18.4s, v19.4s\n"
            " addp v16.4s, v16.4s, v18.4s\n"
            " add v9.4s, v9.4s, v16.4s \n"
            " 4: \n"
            " cmp %w7, #0 \n"
            " beq 5f \n"
            " // start subkernel_m2n4k2 \n"
            " ld1 {v4.8b}, [%0] // load A2x2 \n"
            " add %0, %0, #4 \n"
            " ld1 {v0.8b}, [%1], #8 // load B2x4 \n"
            " // 00 11 22 33 \n"
            " rev32 v1.4h, v0.4h // 11 00 33 22 \n"
            " rev64 v2.2s, v0.2s // 22 33 00 11 \n"
            " rev64 v3.4h, v0.4h // 33 22 11 00 \n"
            " smull v12.8h, v4.8b, v0.8b \n"
            " smull v13.8h, v4.8b, v1.8b \n"
            " smull v14.8h, v4.8b, v2.8b \n"
            " smull v15.8h, v4.8b, v3.8b \n"
            " saddlp v12.4s, v12.8h \n"
            " saddlp v13.4s, v13.8h \n"
            " saddlp v14.4s, v14.8h \n"
            " saddlp v15.4s, v15.8h \n"
            " mov v16.s[0], v12.s[0] \n"
            " mov v16.s[1], v13.s[0] \n"
            " mov v16.s[2], v14.s[0] \n"
            " mov v16.s[3], v15.s[0] \n"
            " mov v17.s[0], v13.s[1] \n"
            " mov v17.s[1], v12.s[1] \n"
            " mov v17.s[2], v15.s[1] \n"
            " mov v17.s[3], v14.s[1] \n"
            " add v8.4s, v8.4s, v16.4s \n"
            " add v9.4s, v9.4s, v17.4s \n"
            " 5: \n"
            " cmp %w8, #0 \n"
            " beq 6f \n"
            " // start subkernel_m2n4k1 \n"
            " ld1 {v4.8b}, [%1] // load B1x4\n"
            " add %1, %1, #4 \n"
            " ld1 {v2.8b}, [%0] // load A2x1\n"
            " add %0, %0, #2 \n"
            " sxtl v4.8h, v4.8b \n"
            " sxtl v2.8h, v2.8b \n"
            " smlal v8.4s, v4.4h, v2.h[0]\n"
            " smlal v9.4s, v4.4h, v2.h[1]\n"
            " 6: \n"
            " cmp %10, #0 \n"
            " beq 7f \n"
            " ld1 {v12.2s}, [%10] \n"
            " // int32 => fp32 \n"
            " scvtf v8.4s, v8.4s \n"
            " scvtf v9.4s, v9.4s \n"
            " // fp32 *= scale_tm \n"
            " fmul v8.4s, v8.4s, v12.s[0]\n"
            " fmul v9.4s, v9.4s, v12.s[1]\n"
            " cmp %11, #0 \n"
            " beq 8f \n"
            " // fp32 += scales_tm \n"
            " ld1 {v14.2s}, [%11] \n"
            " dup v15.4s, v14.s[0] \n"
            " fadd v8.4s, v8.4s, v15.4s \n"
            " dup v15.4s, v14.s[1] \n"
            " fadd v9.4s, v9.4s, v15.4s \n"
            " 8: \n"
            " // fp32 -> int32 \n"
            " fcvtas v8.4s, v8.4s\n"
            " fcvtas v9.4s, v9.4s\n"
            " // int32 -> int16 \n"
            " sqxtn v6.4h, v8.4s \n"
            " sqxtn2 v6.8h, v9.4s\n"
            " // int16 -> int8 \n"
            " sqxtn v8.8b, v6.8h \n"
            " // save \n"
            " st1 {v8.s}[0], [%2] \n"
            " add %2, %2, #4 \n"
            " st1 {v8.s}[1], [%3] \n"
            " add %3, %3, #4 \n"
            " b 10f \n"
            " 7: \n"
            " st1 {v8.4s}, [%2], #16 \n"
            " st1 {v9.4s}, [%3], #16 \n"
            " 10: \n"
            " subs %w9, %w9, #1 \n"
            " mov %0, x8 \n"
            " bne 9b \n"
            : "=r"(pa), // %0
            "=r"(pb), // %1
            "=r"(pc0), // %2
            "=r"(pc1), // %3
            "=r"(k8_even), // %4
            "=r"(k8), // %5
            "=r"(k4), // %6
            "=r"(k2), // %7
            "=r"(k1), // %8
            "=r"(n4), // %9
            "=r"(scales), // %10
            "=r"(bias) // %11
            : "0"(pa), "1"(pb), "2"(pc0), "3"(pc1), "4"(k8_even), "5"(k8), "6"(k4), "7"(k2), "8"(k1), "9"(n4), "10"(scales), "11"(bias)
            : "cc", "memory", "x8", "w17", "x12", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
    }
    // ---- two-column tail (n % 4 >= 2), two rows of A ----
    if (n2 > 0) {
        asm volatile(
            "eor v8.16b, v8.16b, v8.16b \n"
            "eor v9.16b, v9.16b, v9.16b \n"
            "eor v10.16b, v10.16b, v10.16b \n"
            "eor v11.16b, v11.16b, v11.16b \n"
            "eor v12.16b, v12.16b, v12.16b \n"
            "eor v13.16b, v13.16b, v13.16b \n"
            "eor v14.16b, v14.16b, v14.16b \n"
            "eor v15.16b, v15.16b, v15.16b \n"
            "eor v16.16b, v16.16b, v16.16b \n"
            "eor v17.16b, v17.16b, v17.16b \n"
            "eor v18.16b, v18.16b, v18.16b \n"
            "eor v19.16b, v19.16b, v19.16b \n"
            "eor v20.16b, v20.16b, v20.16b \n"
            "eor v21.16b, v21.16b, v21.16b \n"
            "eor v22.16b, v22.16b, v22.16b \n"
            "eor v23.16b, v23.16b, v23.16b \n"
            "9: \n"
            " mov x8, %0 // PanelA \n"
            " cmp %w5, #0 \n"
            " beq 1f \n"
            " mov w17, %w5 \n"
            " cmp %w4, #0 \n"
            " beq 2f // loop number is even \n"
            " // start loopmd2_kd8_nd2 \n"
            " subs w17, w17, #1 \n"
            " ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n"
            " ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n"
            " smull v0.8h, v4.8b, v2.8b \n"
            " smull v1.8h, v4.8b, v3.8b \n"
            " saddlp v8.4s, v0.8h \n"
            " saddlp v12.4s, v1.8h \n"
            " smull v0.8h, v5.8b, v2.8b \n"
            " smull v1.8h, v5.8b, v3.8b \n"
            " saddlp v9.4s, v0.8h \n"
            " saddlp v13.4s, v1.8h \n"
            " cmp w17, #0 \n"
            " beq 3f \n"
            " 2: \n"
            " ld1 {v4.8b, v5.8b}, [%1], #16 \n"
            " ld1 {v2.8b, v3.8b}, [%0], #16 \n"
            " smull v0.8h, v4.8b, v2.8b \n"
            " ld1 {v6.8b, v7.8b}, [%1], #16 \n"
            " smull v1.8h, v5.8b, v2.8b \n"
            " ld1 {v24.8b, v25.8b}, [%0], #16\n"
            " smlal v0.8h, v6.8b, v24.8b \n"
            " smlal v1.8h, v7.8b, v24.8b \n"
            " sadalp v8.4s, v0.8h\n"
            " sadalp v9.4s, v1.8h\n"
            " smull v0.8h, v4.8b, v3.8b \n"
            " smull v1.8h, v5.8b, v3.8b \n"
            " smlal v0.8h, v6.8b, v25.8b \n"
            " smlal v1.8h, v7.8b, v25.8b \n"
            " sadalp v12.4s, v0.8h \n"
            " sadalp v13.4s, v1.8h \n"
            " subs w17, w17, #2 \n"
            " bne 2b \n"
            " 3: \n"
            " addp v8.4s, v8.4s, v9.4s \n"
            " addp v12.4s, v12.4s, v13.4s\n"
            " addp v8.4s, v8.4s, v8.4s \n"
            " addp v12.4s, v12.4s, v12.4s\n"
            " // start process kd4 kd2 kd1 cases\n"
            " 1: \n"
            " cmp %w6, #0 \n"
            " beq 4f \n"
            " // start subkernel_m2n2k4 \n"
            " ld1 {v4.8b}, [%1], #8 // load B4x2\n"
            " sxtl v4.8h, v4.8b \n"
            " mov v6.d[0], v4.d[1] \n"
            " ld1 {v2.8b}, [%0], #8 // load first A2x4\n"
            " sxtl v2.8h, v2.8b \n"
            " mov v3.d[0], v2.d[1] \n"
            " smull v9.4s, v2.4h, v4.4h \n"
            " smull v10.4s, v2.4h, v6.4h \n"
            " addp v9.4s, v9.4s, v10.4s \n"
            " addp v9.4s, v9.4s, v9.4s \n"
            " add v8.4s, v8.4s, v9.4s \n"
            " smull v13.4s, v3.4h, v4.4h \n"
            " smull v14.4s, v3.4h, v6.4h \n"
            " addp v13.4s, v13.4s, v14.4s\n"
            " addp v13.4s, v13.4s, v13.4s\n"
            " add v12.4s, v12.4s, v13.4s \n"
            " 4: \n"
            " cmp %w7, 0 \n"
            " beq 5f \n"
            " // start subkernel_m2n2k2 \n"
            " ld1 {v4.8b}, [%0] // load A2x2\n"
            " add %0, %0, #4 \n"
            " ld1 {v0.8b}, [%1] // load B2x2\n"
            " add %1, %1, #4 \n"
            " // 00 11\n"
            " rev32 v1.4h, v0.4h // 11 00\n"
            " smull v21.8h, v4.8b, v0.8b \n"
            " smull v22.8h, v4.8b, v1.8b \n"
            " saddlp v21.4s, v21.8h \n"
            " saddlp v22.4s, v22.8h \n"
            " mov v9.s[0], v21.s[0] \n"
            " mov v9.s[1], v22.s[0] \n"
            " add v8.4s, v8.4s, v9.4s \n"
            " mov v13.s[0], v22.s[1] \n"
            " mov v13.s[1], v21.s[1] \n"
            " add v12.4s, v12.4s, v13.4s \n"
            " 5: \n"
            " cmp %w8, #0 \n"
            " beq 6f \n"
            " // start subkernel_m2n2k1 \n"
            " ld1 {v4.8b}, [%1] // load B1x2\n"
            " add %1, %1, #2 \n"
            " ld1 {v2.8b}, [%0] // load A4x1\n"
            " add %0, %0, #2 \n"
            " sxtl v4.8h, v4.8b \n"
            " sxtl v2.8h, v2.8b \n"
            " smlal v8.4s, v4.4h, v2.h[0]\n"
            " smlal v12.4s, v4.4h, v2.h[1] \n"
            " 6: \n"
            " cmp %9, #0 \n"
            " beq 7f \n"
            " mov v8.d[1], v12.d[0] \n"
            " // v12: 0 1 \n"
            " ld1 {v12.2s}, [%9] \n"
            " zip1 v12.4s, v12.4s, v12.4s\n"
            " // v12: 0 0 1 1 \n"
            " // int32 => fp32 \n"
            " scvtf v8.4s, v8.4s \n"
            " // fp32 *= scale_tm \n"
            " fmul v8.4s, v8.4s, v12.4s \n"
            " cmp %10, #0 \n"
            " beq 8f \n"
            " // fp32 += bias_tm \n"
            " ld1 {v12.2s}, [%10] \n"
            " zip1 v12.4s, v12.4s, v12.4s\n"
            " fadd v8.4s, v8.4s, v12.4s \n"
            " 8: \n"
            " // fp32 -> int32 \n"
            " fcvtas v8.4s, v8.4s \n"
            " // int32 -> int16 \n"
            " sqxtn v8.4h, v8.4s \n"
            " // int16 -> int8 \n"
            " sqxtn v8.8b, v8.8h \n"
            " // save \n"
            " st1 {v8.h}[0], [%2] \n"
            " add %2, %2, #2 \n"
            " st1 {v8.h}[1], [%3] \n"
            " add %3, %3, #2 \n"
            " b 10f \n"
            " 7:"
            " st1 {v8.2s}, [%2], #8 \n"
            " st1 {v12.2s}, [%3], #8 \n"
            " 10: \n"
            " mov %0, x8 \n"
            : "=r"(pa), // %0
            "=r"(pb), // %1
            "=r"(pc0), // %2
            "=r"(pc1), // %3
            "=r"(k8_even), // %4
            "=r"(k8), // %5
            "=r"(k4), // %6
            "=r"(k2), // %7
            "=r"(k1), // %8
            "=r"(scales), // %9
            "=r"(bias) // %10
            : "0"(pa), "1"(pb), "2"(pc0), "3"(pc1), "4"(k8_even), "5"(k8), "6"(k4), "7"(k2), "8"(k1), "9"(scales), "10"(bias)
            : "cc", "memory", "x8", "x12", "w17", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
    }
    // ---- single-column tail (n odd), two rows of A ----
    if (n1 > 0) {
        asm volatile(
            "eor v8.16b, v8.16b, v8.16b \n"
            "eor v9.16b, v9.16b, v9.16b \n"
            "eor v10.16b, v10.16b, v10.16b \n"
            "eor v11.16b, v11.16b, v11.16b \n"
            "eor v12.16b, v12.16b, v12.16b \n"
            "eor v13.16b, v13.16b, v13.16b \n"
            "eor v14.16b, v14.16b, v14.16b \n"
            "eor v15.16b, v15.16b, v15.16b \n"
            "eor v16.16b, v16.16b, v16.16b \n"
            "eor v17.16b, v17.16b, v17.16b \n"
            "eor v18.16b, v18.16b, v18.16b \n"
            "eor v19.16b, v19.16b, v19.16b \n"
            "eor v20.16b, v20.16b, v20.16b \n"
            "eor v21.16b, v21.16b, v21.16b \n"
            "eor v22.16b, v22.16b, v22.16b \n"
            "eor v23.16b, v23.16b, v23.16b \n"
            "9: \n"
            " cmp %w5, #0 \n"
            " beq 1f // k <=7\n"
            " mov w17, %w5\n"
            " cmp %w4, #0 \n"
            " beq 2f // loop number is even \n"
            " // start loopkd8_nd1 \n"
            " subs w17, w17, #1 \n"
            " ld1 {v4.8b}, [%1], #8 // load four lines of B\n"
            " ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n"
            " smull v0.8h, v4.8b, v2.8b \n"
            " smull v1.8h, v4.8b, v3.8b \n"
            " saddlp v8.4s, v0.8h \n"
            " saddlp v12.4s, v1.8h \n"
            " cmp w17, #0 \n"
            " beq 3f \n"
            " 2: \n"
            " ld1 {v4.8b, v5.8b}, [%1], #16 \n"
            " ld1 {v24.8b, v25.8b, v26.8b, v27.8b}, [%0], #32\n"
            " smull v0.8h, v24.8b, v4.8b \n"
            " smlal v0.8h, v26.8b, v5.8b \n"
            " sadalp v8.4s, v0.8h \n"
            " smull v1.8h, v25.8b, v4.8b \n"
            " smlal v1.8h, v27.8b, v5.8b \n"
            " sadalp v12.4s, v1.8h \n"
            " subs w17, w17, #2 \n"
            " bne 2b \n"
            " 3: \n"
            " addp v8.4s, v8.4s, v8.4s \n"
            " addp v8.4s, v8.4s, v8.4s \n"
            " addp v12.4s, v12.4s, v12.4s\n"
            " addp v12.4s, v12.4s, v12.4s\n"
            " // start process kd4 kd2 kd1 cases\n"
            " 1: \n"
            " cmp %w6, #0 \n"
            " beq 4f \n"
            " // start subkernel_m2n1k2 \n"
            " ld1 {v4.8b}, [%1] // load B4x1\n"
            " add %1, %1, #4 \n"
            " sxtl v4.8h, v4.8b // extend B4x1 to v4\n"
            " ld1 {v2.8b}, [%0], #8 // load A2x4 \n"
            " sxtl v2.8h, v2.8b \n"
            " mov v5.d[0], v2.d[1] \n"
            " smull v9.4s, v2.4h, v4.4h \n"
            " addp v9.4s, v9.4s, v9.4s \n"
            " addp v9.4s, v9.4s, v9.4s \n"
            " add v8.4s, v8.4s, v9.4s \n"
            " smull v13.4s, v5.4h, v4.4h \n"
            " addp v13.4s, v13.4s, v13.4s\n"
            " addp v13.4s, v13.4s, v13.4s\n"
            " add v12.4s, v12.4s, v13.4s \n"
            " 4: \n"
            " cmp %w7, 0 \n"
            " beq 5f \n"
            " // start subkernel_m2n1k2 \n"
            " ld1 {v4.8b}, [%0] // load A2x2\n"
            " add %0, %0, #4 \n"
            " ld1 {v0.8b}, [%1] // load B2x1\n"
            " add %1, %1, #2 \n"
            " mov v0.h[1], v0.h[0] \n"
            " smull v0.8h, v0.8b, v4.8b \n"
            " saddlp v0.4s, v0.8h \n"
            " mov v9.s[0], v0.s[0] \n"
            " add v8.4s, v8.4s, v9.4s \n"
            " mov v13.s[0], v0.s[1] \n"
            " add v12.4s, v12.4s, v13.4s \n"
            " 5: \n"
            " cmp %w8, 0 \n"
            " beq 6f \n"
            " // start subkernel_m2n1k1 \n"
            " ld1 {v0.8b}, [%1] // load B1x1\n"
            " add %1, %1, #1 \n"
            " ld1 {v1.8b}, [%0] // load A2x1\n"
            " add %0, %0, #2 \n"
            " sxtl v1.8h, v1.8b \n"
            " sxtl v0.8h, v0.8b \n"
            " smull v0.4s, v1.4h, v0.h[0]\n"
            " mov v1.s[0], v0.s[1] \n"
            " add v8.4s, v8.4s, v0.4s \n"
            " add v12.4s, v12.4s, v1.4s \n"
            " 6: \n"
            " cmp %w9, #0 \n"
            " beq 7f \n"
            " mov v8.s[1], v12.s[0] \n"
            " // v12: s0 s1 \n"
            " ld1 {v12.2s}, [%9] \n"
            " // int32 => fp32 \n"
            " scvtf v8.2s, v8.2s \n"
            " // fp32 *= scale_tm \n"
            " fmul v8.2s, v8.2s, v12.2s \n"
            " cmp %10, #0 \n"
            " beq 8f \n"
            " // fp32 += bias_tm \n"
            " ld1 {v12.2s}, [%10] \n"
            " fadd v8.2s, v8.2s, v12.2s \n"
            " 8: \n"
            " // fp32 -> int32 \n"
            " fcvtas v8.2s, v8.2s \n"
            " // int32 -> int16 \n"
            " sqxtn v8.4h, v8.4s \n"
            " // int16 -> int8 \n"
            " sqxtn v8.8b, v8.8h \n"
            " // save \n"
            " st1 {v8.b}[0], [%2] \n"
            " st1 {v8.b}[1], [%3] \n"
            " b 10f \n"
            " 7: \n"
            " st1 {v8.s}[0], [%2] \n"
            " st1 {v12.s}[0], [%3] \n"
            " 10: \n"
            " mov x0, #0 \n"
            : "=r"(pa), // %0
            "=r"(pb), // %1
            "=r"(pc0), // %2
            "=r"(pc1), // %3
            "=r"(k8_even), // %4
            "=r"(k8), // %5
            "=r"(k4), // %6
            "=r"(k2), // %7
            "=r"(k1), // %8
            "=r"(scales), // %9
            "=r"(bias) // %10
            : "0"(pa), "1"(pb), "2"(pc0), "3"(pc1), "4"(k8_even), "5"(k8), "6"(k4), "7"(k2), "8"(k1), "9"(scales), "10"(bias)
            : "cc", "memory", "x0", "x8", "w17", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
    }
}

// Micro-kernel for FOUR packed rows of A (definition continues beyond this
// chunk).  Output pointer setup mirrors int8kernel_m2: int32 rows when
// scales == 0, requantized int8 rows otherwise.
static void int8kernel_m4(void* dst, int8_t* sa, int8_t* sb, int, int k, int n, int ldc, float* scales, float* bias)
{
    void *pc0, *pc1, *pc2, *pc3;
    if (scales == 0) {
        pc0 = (int32_t*)dst;
        pc1 = ((int32_t*)pc0) + ldc;
        pc2 = ((int32_t*)pc1) + ldc;
        pc3 =
((int32_t*)pc2) + ldc; } else { pc0 = dst; pc1 = ((int8_t*)pc0) + ldc; pc2 = ((int8_t*)pc1) + ldc; pc3 = ((int8_t*)pc2) + ldc; } int8_t* pa = sa; int8_t* pb = sb; DECOMPOSE_K DECOMPOSE_N if (n4 > 0) { asm volatile( "8: \n" " eor v8.8b, v8.8b, v8.8b \n" " eor v9.8b, v9.8b, v9.8b \n" " eor v10.8b, v10.8b, v10.8b \n" " eor v11.8b, v11.8b, v11.8b \n" " eor v12.8b, v12.8b, v12.8b \n" " eor v13.8b, v13.8b, v13.8b \n" " eor v14.8b, v14.8b, v14.8b \n" " eor v15.8b, v15.8b, v15.8b \n" " eor v16.8b, v16.8b, v16.8b \n" " eor v17.8b, v17.8b, v17.8b \n" " eor v18.8b, v18.8b, v18.8b \n" " eor v19.8b, v19.8b, v19.8b \n" " eor v20.8b, v20.8b, v20.8b \n" " eor v21.8b, v21.8b, v21.8b \n" " eor v22.8b, v22.8b, v22.8b \n" " eor v23.8b, v23.8b, v23.8b \n" " mov x8, %0 \n" " cmp %w7, #0 \n" " beq 1f \n" " mov w20, %w7 \n" " cmp %w6, #0 \n" " beq 2f \n" " subs w20, w20, #1 \n" " ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v8.4s, v0.8h \n" " saddlp v12.4s, v1.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " saddlp v9.4s, v0.8h \n" " saddlp v13.4s, v1.8h \n" " smull v0.8h, v6.8b, v2.8b \n" " smull v1.8h, v6.8b, v3.8b \n" " saddlp v10.4s, v0.8h \n" " saddlp v14.4s, v1.8h \n" " smull v0.8h, v7.8b, v2.8b \n" " smull v1.8h, v7.8b, v3.8b \n" " saddlp v11.4s, v0.8h \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " saddlp v15.4s, v1.8h \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v16.4s, v0.8h \n" " saddlp v20.4s, v1.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " saddlp v17.4s, v0.8h \n" " saddlp v21.4s, v1.8h \n" " smull v0.8h, v6.8b, v2.8b \n" " smull v1.8h, v6.8b, v3.8b \n" " saddlp v18.4s, v0.8h \n" " saddlp v22.4s, v1.8h \n" " smull v0.8h, v7.8b, v2.8b \n" " smull v1.8h, v7.8b, v3.8b \n" " saddlp v19.4s, v0.8h \n" " saddlp v23.4s, v1.8h \n" " cmp w20, #0 \n" " beq 3f \n" " 2: \n" " add x15, %x1, #32 \n" " 
add x14, %x0, #32 \n" " ld1 {v4.8b, v5.8b}, [%1], #16\n" " ld1 {v2.8b, v3.8b}, [%0], #16\n" " smull v0.8h, v4.8b, v2.8b \n" " ld1 {v6.8b, v7.8b}, [x15], #16 \n" " smull v1.8h, v5.8b, v2.8b \n" " ld1 {v24.8b, v25.8b}, [x14], #16\n" " smlal v0.8h, v6.8b, v24.8b\n" " smlal v1.8h, v7.8b, v24.8b\n" " sadalp v8.4s, v0.8h\n" " sadalp v9.4s, v1.8h\n" " smull v0.8h, v4.8b, v3.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " smlal v0.8h, v6.8b, v25.8b \n" " smlal v1.8h, v7.8b, v25.8b \n" " sadalp v12.4s, v0.8h\n" " sadalp v13.4s, v1.8h\n" " // finish v8v9 v12v13, start proc v16v17,v20v21\n" " ld1 {v28.8b, v29.8b}, [%0], #16 \n" " smull v0.8h, v4.8b, v28.8b \n" " smull v1.8h, v5.8b, v28.8b \n" " ld1 {v26.8b, v27.8b}, [x14], #16\n" " smlal v0.8h, v6.8b, v26.8b \n" " smlal v1.8h, v7.8b, v26.8b \n" " sadalp v16.4s, v0.8h \n" " sadalp v17.4s, v1.8h \n" " smull v0.8h, v4.8b, v29.8b \n" " smull v1.8h, v5.8b, v29.8b \n" " smlal v0.8h, v6.8b, v27.8b \n" " smlal v1.8h, v7.8b, v27.8b \n" " sadalp v20.4s, v0.8h \n" " sadalp v21.4s, v1.8h \n" " // start v10v11, v14v15, v18v19, v22v23\n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v5.8b, v2.8b \n" " ld1 {v6.8b, v7.8b}, [x15], #16 \n" " smlal v0.8h, v6.8b, v24.8b \n" " smlal v1.8h, v7.8b, v24.8b \n" " sadalp v10.4s, v0.8h \n" " sadalp v11.4s, v1.8h \n" " smull v0.8h, v4.8b, v3.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " smlal v0.8h, v6.8b, v25.8b \n" " smlal v1.8h, v7.8b, v25.8b \n" " sadalp v14.4s, v0.8h \n" " sadalp v15.4s, v1.8h \n" " smull v0.8h, v4.8b, v28.8b \n" " smull v1.8h, v5.8b, v28.8b \n" " smlal v0.8h, v6.8b, v26.8b \n" " smlal v1.8h, v7.8b, v26.8b \n" " sadalp v18.4s, v0.8h \n" " sadalp v19.4s, v1.8h \n" " smull v0.8h, v4.8b, v29.8b \n" " smull v1.8h, v5.8b, v29.8b \n" " smlal v0.8h, v6.8b, v27.8b \n" " smlal v1.8h, v7.8b, v27.8b \n" " sadalp v22.4s, v0.8h \n" " sadalp v23.4s, v1.8h \n" " add %0, %0, #32 \n" " add %1, %1, #32 \n" " subs w20, w20, #2 \n" " bne 2b \n" // start nd2 " 3: \n" " addp 
v8.4s, v8.4s, v9.4s \n" " addp v10.4s, v10.4s, v11.4s\n" " addp v12.4s, v12.4s, v13.4s\n" " addp v14.4s, v14.4s, v15.4s\n" " addp v16.4s, v16.4s, v17.4s\n" " addp v18.4s, v18.4s, v19.4s\n" " addp v20.4s, v20.4s, v21.4s\n" " addp v22.4s, v22.4s, v23.4s\n" " addp v8.4s, v8.4s, v10.4s \n" " addp v9.4s, v12.4s, v14.4s \n" " addp v10.4s, v16.4s, v18.4s\n" " addp v11.4s, v20.4s, v22.4s\n" " // start process kd4 kd2 kd1 cases\n" " 1: \n" " cmp %w8, #0 \n" " beq 4f \n" " // start subkernel_m4n4k4\n" " ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n" " sxtl v4.8h, v4.8b \n" " mov v6.d[0], v4.d[1] \n" " sxtl v5.8h, v5.8b \n" " mov v7.d[0], v5.d[1] \n" " ld1 {v2.8b}, [%0], #8 // load A2x4\n" " sxtl v2.8h, v2.8b \n" " mov v3.d[0], v2.d[1] \n" " smull v12.4s, v2.4h, v4.4h \n" " smull v13.4s, v2.4h, v6.4h \n" " smull v14.4s, v2.4h, v5.4h \n" " addp v12.4s, v12.4s, v13.4s\n" " smull v15.4s, v2.4h, v7.4h \n" " addp v14.4s, v14.4s, v15.4s\n" " addp v12.4s, v12.4s, v14.4s\n" " smull v16.4s, v3.4h, v4.4h \n" " add v8.4s, v8.4s, v12.4s \n" " smull v17.4s, v3.4h, v6.4h \n" " smull v18.4s, v3.4h, v5.4h \n" " addp v16.4s, v16.4s, v17.4s\n" " smull v19.4s, v3.4h, v7.4h \n" " addp v18.4s, v18.4s, v19.4s\n" " addp v16.4s, v16.4s, v18.4s\n" " add v9.4s, v9.4s, v16.4s \n" " ld1 {v2.8b}, [%0], #8 // load next A2x4\n" " sxtl v2.8h, v2.8b \n" " mov v3.d[0], v2.d[1] \n" " smull v12.4s, v2.4h, v4.4h \n" " smull v13.4s, v2.4h, v6.4h \n" " smull v14.4s, v2.4h, v5.4h \n" " addp v12.4s, v12.4s, v13.4s\n" " smull v15.4s, v2.4h, v7.4h \n" " addp v14.4s, v14.4s, v15.4s\n" " addp v12.4s, v12.4s, v14.4s\n" " smull v16.4s, v3.4h, v4.4h \n" " add v10.4s, v10.4s, v12.4s \n" " smull v17.4s, v3.4h, v6.4h \n" " smull v18.4s, v3.4h, v5.4h \n" " addp v16.4s, v16.4s, v17.4s\n" " smull v19.4s, v3.4h, v7.4h \n" " addp v18.4s, v18.4s, v19.4s\n" " addp v16.4s, v16.4s, v18.4s\n" " add v11.4s, v11.4s, v16.4s \n" " 4: \n" " cmp %w9, #0 \n" " beq 5f \n" " // start subkernel_m4n4k2 \n" " ld1 {v0.8b}, [%1], #8 // load B2x4 \n" 
" // 00 11 22 33 \n" " rev32 v1.4h, v0.4h // 11 00 33 22 \n" " rev64 v2.2s, v0.2s // 22 33 00 11 \n" " ld1 {v4.8b}, [%0], #8 // load A4x2 \n" " rev64 v3.4h, v0.4h // 33 22 11 00 \n" " smull v12.8h, v4.8b, v0.8b \n" " smull v13.8h, v4.8b, v1.8b \n" " saddlp v12.4s, v12.8h \n" " smull v14.8h, v4.8b, v2.8b \n" " saddlp v13.4s, v13.8h \n" " smull v15.8h, v4.8b, v3.8b \n" " saddlp v14.4s, v14.8h \n" " saddlp v15.4s, v15.8h \n" " mov v16.s[0], v12.s[0] \n" " mov v16.s[1], v13.s[0] \n" " mov v16.s[2], v14.s[0] \n" " mov v16.s[3], v15.s[0] \n" " mov v17.s[0], v13.s[1] \n" " mov v17.s[1], v12.s[1] \n" " mov v17.s[2], v15.s[1] \n" " mov v17.s[3], v14.s[1] \n" " mov v18.s[0], v14.s[2] \n" " mov v18.s[1], v15.s[2] \n" " mov v18.s[2], v12.s[2] \n" " mov v18.s[3], v13.s[2] \n" " mov v19.s[0], v15.s[3] \n" " mov v19.s[1], v14.s[3] \n" " mov v19.s[2], v13.s[3] \n" " mov v19.s[3], v12.s[3] \n" " add v8.4s, v8.4s, v16.4s \n" " add v9.4s, v9.4s, v17.4s \n" " add v10.4s, v10.4s, v18.4s \n" " add v11.4s, v11.4s, v19.4s \n" " 5: \n" " cmp %w10, #0 \n" " beq 6f \n" " // start subkernel_m4n4k1\n" " ld1 {v4.8b}, [%1] // load B1x4\n" " add %1, %1, #4 \n" " ld1 {v2.8b}, [%0] // load A4x1\n" " add %0, %0, #4 \n" " sxtl v4.8h, v4.8b \n" " sxtl v2.8h, v2.8b \n" " smlal v8.4s, v4.4h, v2.h[0] \n" " smlal v9.4s, v4.4h, v2.h[1] \n" " smlal v10.4s, v4.4h, v2.h[2] \n" " smlal v11.4s, v4.4h, v2.h[3] \n" " 6: \n" " cmp %12, #0 \n" " beq 9f \n" " ld1 {v12.4s}, [%12] \n" " // int32 => fp32 \n" " scvtf v8.4s, v8.4s \n" " scvtf v9.4s, v9.4s \n" " scvtf v10.4s, v10.4s \n" " scvtf v11.4s, v11.4s \n" " // fp32 *= scale_tm \n" " fmul v8.4s, v8.4s, v12.s[0] \n" " fmul v9.4s, v9.4s, v12.s[1] \n" " fmul v10.4s, v10.4s, v12.s[2] \n" " fmul v11.4s, v11.4s, v12.s[3] \n" " cmp %13, #0 \n" " beq 7f \n" " ld1 {v14.4s}, [%13] \n" " dup v15.4s, v14.s[0] \n" " fadd v8.4s, v8.4s, v15.4s \n" " dup v15.4s, v14.s[1] \n" " fadd v9.4s, v9.4s, v15.4s \n" " dup v15.4s, v14.s[2] \n" " fadd v10.4s, v10.4s, v15.4s\n" " dup v15.4s, 
v14.s[3] \n" " fadd v11.4s, v11.4s, v15.4s\n" " 7: \n" " // fp32 -> int32 \n" " fcvtas v8.4s, v8.4s \n" " fcvtas v9.4s, v9.4s \n" " fcvtas v10.4s, v10.4s \n" " fcvtas v11.4s, v11.4s \n" " // int32 -> int16 \n" " sqxtn v6.4h, v8.4s \n" " sqxtn2 v6.8h, v9.4s \n" " sqxtn v7.4h, v10.4s \n" " sqxtn2 v7.8h, v11.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v6.8h \n" " sqxtn v9.8b, v7.8h \n" " // save \n" " st1 {v8.s}[0], [%2] \n" " add %x2, %x2, #4 \n" " st1 {v8.s}[1], [%3] \n" " add %x3, %x3, #4 \n" " st1 {v9.s}[0], [%4] \n" " add %x4, %x4, #4 \n" " st1 {v9.s}[1], [%5] \n" " add %x5, %x5, #4 \n" " b 10f \n" " 9: \n" " st1 {v8.4s}, [%x2], #16 \n" " st1 {v9.4s}, [%x3], #16 \n" " st1 {v10.4s}, [%x4], #16 \n" " st1 {v11.4s}, [%x5], #16 \n" " 10: \n" " subs %x11, %x11, #1 \n" " mov %x0, x8 \n" " bne 8b \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc0), // %2 "=r"(pc1), // %3 "=r"(pc2), // %4 "=r"(pc3), // %5 "=r"(k8_even), // %6 "=r"(k8), // %7 "=r"(k4), // %8 "=r"(k2), // %9 "=r"(k1), // %10 "=r"(n4), // %11 "=r"(scales), // %12 "=r"(bias) // %13 : "0"(pa), "1"(pb), "2"(pc0), "3"(pc1), "4"(pc2), "5"(pc3), "6"(k8_even), "7"(k8), "8"(k4), "9"(k2), "10"(k1), "11"(n4), "12"(scales), "13"(bias) : "cc", "memory", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } if (n2 > 0) { asm volatile( " eor v8.8b, v8.8b, v8.8b \n" " eor v9.8b, v9.8b, v9.8b \n" " eor v10.8b, v10.8b, v10.8b \n" " eor v11.8b, v11.8b, v11.8b \n" " eor v12.8b, v12.8b, v12.8b \n" " eor v13.8b, v13.8b, v13.8b \n" " eor v14.8b, v14.8b, v14.8b \n" " eor v15.8b, v15.8b, v15.8b \n" " eor v16.8b, v16.8b, v16.8b \n" " eor v17.8b, v17.8b, v17.8b \n" " eor v18.8b, v18.8b, v18.8b \n" " eor v19.8b, v19.8b, v19.8b \n" " eor v20.8b, v20.8b, v20.8b \n" " eor v21.8b, v21.8b, v21.8b \n" " eor v22.8b, v22.8b, v22.8b \n" " eor v23.8b, v23.8b, 
v23.8b \n" "9: \n" " mov x8, %x0 // PanelA \n" " cmp %w7, #0 \n" " beq 1f // k <= 7 \n" " mov w20, %w7 \n" " cmp %w6, #0 \n" " beq 2f// loop number is even \n" " // start loopkd8_nd2 \n" " subs w20, w20, #1 \n" " ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n" " ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v8.4s, v0.8h \n" " saddlp v12.4s, v1.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " saddlp v9.4s, v0.8h \n" " saddlp v13.4s, v1.8h \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v16.4s, v0.8h \n" " saddlp v20.4s, v1.8h \n" " smull v0.8h, v5.8b, v2.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " saddlp v17.4s, v0.8h \n" " saddlp v21.4s, v1.8h \n" " cmp w20, #0 \n" " beq 3f \n" " 2: \n" " add x15, %1, #16 \n" " add x14, %0, #32 \n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " ld1 {v6.8b, v7.8b}, [x15], #16 \n" " smull v1.8h, v5.8b, v2.8b \n" " ld1 {v24.8b, v25.8b}, [x14], #16 \n" " smlal v0.8h, v6.8b, v24.8b \n" " smlal v1.8h, v7.8b, v24.8b \n" " sadalp v8.4s, v0.8h \n" " sadalp v9.4s, v1.8h \n" " smull v0.8h, v4.8b, v3.8b \n" " smull v1.8h, v5.8b, v3.8b \n" " smlal v0.8h, v6.8b, v25.8b \n" " smlal v1.8h, v7.8b, v25.8b \n" " sadalp v12.4s, v0.8h \n" " sadalp v13.4s, v1.8h \n" " // finish v8v9 v12v13, start proc v16v17,v20v21\n" " ld1 {v28.8b, v29.8b}, [%0], #16\n" " smull v0.8h, v4.8b, v28.8b\n" " smull v1.8h, v5.8b, v28.8b\n" " ld1 {v26.8b, v27.8b}, [x14], #16\n" " smlal v0.8h, v6.8b, v26.8b\n" " smlal v1.8h, v7.8b, v26.8b\n" " sadalp v16.4s, v0.8h\n" " sadalp v17.4s, v1.8h\n" " smull v0.8h, v4.8b, v29.8b\n" " smull v1.8h, v5.8b, v29.8b\n" " smlal v0.8h, v6.8b, v27.8b\n" " smlal v1.8h, v7.8b, v27.8b\n" " sadalp v20.4s, v0.8h\n" " sadalp v21.4s, v1.8h\n" " add %0, %0, #32 \n" " add %1, %1, #16 \n" " subs w20, w20, #2 \n" " bne 
2b \n" " 3: \n" " addp v8.4s, v8.4s, v9.4s \n" " addp v12.4s, v12.4s, v13.4s\n" " addp v16.4s, v16.4s, v17.4s\n" " addp v20.4s, v20.4s, v21.4s\n" " addp v8.4s, v8.4s, v8.4s \n" " addp v12.4s, v12.4s, v12.4s\n" " addp v16.4s, v16.4s, v16.4s\n" " addp v20.4s, v20.4s, v20.4s\n" " // start process kd4 kd2 kd1 cases\n" " 1: \n" " cmp %w8, 0 \n" " beq 4f \n" " // start subkernel_m4n2k4 \n" " ld1 {v4.8b}, [%1], #8 // load B4x2\n" " sxtl v4.8h, v4.8b \n" " mov v6.d[0], v4.d[1] \n" " ld1 {v2.8b}, [%0], #8 // load first A2x4\n" " sxtl v2.8h, v2.8b \n" " mov v3.d[0], v2.d[1] \n" " smull v9.4s, v2.4h, v4.4h \n" " smull v10.4s, v2.4h, v6.4h \n" " addp v9.4s, v9.4s, v10.4s \n" " addp v9.4s, v9.4s, v9.4s \n" " add v8.4s, v8.4s, v9.4s \n" " smull v13.4s, v3.4h, v4.4h \n" " smull v14.4s, v3.4h, v6.4h \n" " addp v13.4s, v13.4s, v14.4s\n" " addp v13.4s, v13.4s, v13.4s\n" " add v12.4s, v12.4s, v13.4s \n" " ld1 {v2.8b}, [%0], #8 // load next A2x4\n" " sxtl v2.8h, v2.8b \n" " mov v3.d[0], v2.d[1] \n" " smull v17.4s, v2.4h, v4.4h \n" " smull v18.4s, v2.4h, v6.4h \n" " addp v17.4s, v17.4s, v18.4s\n" " addp v17.4s, v17.4s, v17.4s\n" " add v16.4s, v16.4s, v17.4s \n" " smull v21.4s, v3.4h, v4.4h \n" " smull v22.4s, v3.4h, v6.4h \n" " addp v21.4s, v21.4s, v22.4s\n" " addp v21.4s, v21.4s, v21.4s\n" " add v20.4s, v20.4s, v21.4s \n" " 4: \n" " cmp %w9, 0 \n" " beq 5f \n" " // start subkernel_m4n2k2 \n" " ld1 {v4.8b}, [%0], #8 //load A4x2\n" " ld1 {v0.8b}, [%1] // load B2x2 \n" " add %1, %1, #4 \n" " // 00 11 22 33 \n" " rev32 v1.4h, v0.4h // 11 00 33 22 \n" " rev64 v2.2s, v0.2s // 22 33 00 11 \n" " rev64 v3.4h, v0.4h // 33 22 11 00 \n" " smull v21.8h, v4.8b, v0.8b \n" " smull v22.8h, v4.8b, v1.8b \n" " smull v23.8h, v4.8b, v2.8b \n" " smull v24.8h, v4.8b, v3.8b \n" " saddlp v21.4s, v21.8h \n" " saddlp v22.4s, v22.8h \n" " saddlp v23.4s, v23.8h \n" " saddlp v24.4s, v24.8h \n" " mov v9.s[0], v21.s[0] \n" " mov v9.s[1], v22.s[0] \n" " add v8.4s, v8.4s, v9.4s\n" " mov v13.s[0], v22.s[1] \n" " mov 
v13.s[1], v21.s[1] \n" " add v12.4s, v12.4s, v13.4s \n" " mov v17.s[0], v23.s[2] \n" " mov v17.s[1], v24.s[2] \n" " add v16.4s, v16.4s, v17.4s \n" " mov v21.s[0], v24.s[3] \n" " mov v21.s[1], v23.s[3] \n" " add v20.4s, v20.4s, v21.4s \n" " 5: \n" " cmp %w10, 0 \n" " beq 6f \n" " // start subkernel_m4n2k1\n" " ld1 {v4.8b}, [%1] // load B1x2\n" " add %1, %1, #2 \n" " ld1 {v2.8b}, [%0] // load A4x1\n" " add %0, %0, #4 \n" " sxtl v4.8h, v4.8b \n" " sxtl v2.8h, v2.8b \n" " smlal v8.4s, v4.4h, v2.h[0] \n" " smlal v12.4s, v4.4h, v2.h[1] \n" " smlal v16.4s, v4.4h, v2.h[2] \n" " smlal v20.4s, v4.4h, v2.h[3] \n" " 6: \n" " cmp %11, #0 \n" " beq 7f \n" " mov v8.d[1], v12.d[0] \n" " mov v16.d[1], v20.d[0] \n" " // v12: 0 1 2 3 \n" " ld1 {v12.4s}, [%11] \n" " zip2 v13.4s, v12.4s, v12.4s \n" " zip1 v12.4s, v12.4s, v12.4s \n" " // v12: 0 0 1 1 \n" " // v13: 2 2 3 3 \n" " // int32 => fp32 \n" " scvtf v8.4s, v8.4s \n" " scvtf v16.4s, v16.4s \n" " // fp32 *= scale_tm \n" " fmul v8.4s, v8.4s, v12.4s \n" " fmul v16.4s, v16.4s, v13.4s\n" " cmp %12, #0 \n" " beq 8f // skip add scales \n" " // fp32 += scales_tm \n" " ld1 {v12.4s}, [%12] \n" " zip2 v13.4s, v12.4s, v12.4s\n" " zip1 v12.4s, v12.4s, v12.4s\n" " fadd v8.4s, v8.4s, v12.4s \n" " fadd v16.4s, v16.4s, v13.4s\n" " 8: \n" " // fp32 -> int32 \n" " fcvtas v8.4s, v8.4s \n" " fcvtas v16.4s, v16.4s \n" " // int32 -> int16 \n" " sqxtn v8.4h, v8.4s \n" " sqxtn v16.4h, v16.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v8.8h \n" " sqxtn v16.8b, v16.8h \n" " // save \n" " st1 {v8.h}[0], [%2] \n" " add %2, %2, #2 \n" " st1 {v8.h}[1], [%3] \n" " add %3, %3, #2 \n" " st1 {v16.h}[0], [%4] \n" " add %4, %4, #2 \n" " st1 {v16.h}[1], [%5] \n" " add %5, %5, #2 \n" " b 10f \n" " 7: \n" " st1 {v8.2s}, [%2], #8 \n" " st1 {v12.2s}, [%3], #8 \n" " st1 {v16.2s}, [%4], #8 \n" " st1 {v20.2s}, [%5], #8 \n" " 10: \n" " mov %0, x8 \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc0), // %2 "=r"(pc1), // %3 "=r"(pc2), // %4 "=r"(pc3), // %5 "=r"(k8_even), // %6 
"=r"(k8), // %7 "=r"(k4), // %8 "=r"(k2), // %9 "=r"(k1), // %10 "=r"(scales), // %11 "=r"(bias) // %12 : "0"(pa), "1"(pb), "2"(pc0), "3"(pc1), "4"(pc2), "5"(pc3), "6"(k8_even), "7"(k8), "8"(k4), "9"(k2), "10"(k1), "11"(scales), "12"(bias) : "cc", "memory", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } if (n1 > 0) { asm volatile( " eor v8.8b, v8.8b, v8.8b \n" " eor v9.8b, v9.8b, v9.8b \n" " eor v10.8b, v10.8b, v10.8b \n" " eor v11.8b, v11.8b, v11.8b \n" " eor v12.8b, v12.8b, v12.8b \n" " eor v13.8b, v13.8b, v13.8b \n" " eor v14.8b, v14.8b, v14.8b \n" " eor v15.8b, v15.8b, v15.8b \n" " eor v16.8b, v16.8b, v16.8b \n" " eor v17.8b, v17.8b, v17.8b \n" " eor v18.8b, v18.8b, v18.8b \n" " eor v19.8b, v19.8b, v19.8b \n" " eor v20.8b, v20.8b, v20.8b \n" " eor v21.8b, v21.8b, v21.8b \n" " eor v22.8b, v22.8b, v22.8b \n" " eor v23.8b, v23.8b, v23.8b \n" "1: \n" " cmp %w7, #0 \n" " beq 10f \n" " mov w20, %w7 \n" " cmp %w6, #0 \n" " beq 11f// loop number is even \n" " // start loopkd8_nd1 \n" " subs w20, w20, #1 \n" " ld1 {v4.8b}, [%1], #8 // load four lines of B\n" " ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v8.4s, v0.8h \n" " saddlp v12.4s, v1.8h \n" " ld1 {v2.8b, v3.8b}, [%0], #16 \n" " smull v0.8h, v4.8b, v2.8b \n" " smull v1.8h, v4.8b, v3.8b \n" " saddlp v16.4s, v0.8h \n" " saddlp v20.4s, v1.8h \n" " cmp w20, #0 \n" " beq 12f \n" " 11: \n" " ld1 {v4.8b, v5.8b}, [%1], #16 \n" " ld1 {v24.8b, v25.8b, v26.8b, v27.8b}, [%0], #32\n" " ld1 {v28.8b, v29.8b, v30.8b, v31.8b}, [%0], #32\n" " smull v0.8h, v24.8b, v4.8b \n" " smlal v0.8h, v28.8b, v5.8b \n" " sadalp v8.4s, v0.8h \n" " smull v1.8h, v25.8b, v4.8b \n" " smlal v1.8h, v29.8b, v5.8b \n" " sadalp v12.4s, v1.8h \n" " smull v0.8h, v26.8b, v4.8b 
\n" " smlal v0.8h, v30.8b, v5.8b \n" " sadalp v16.4s, v0.8h \n" " smull v1.8h, v27.8b, v4.8b \n" " smlal v1.8h, v31.8b, v5.8b \n" " sadalp v20.4s, v1.8h \n" " subs w20, w20, #2 \n" " bne 11b \n" " 12: \n" " addp v8.4s, v8.4s, v8.4s \n" " addp v8.4s, v8.4s, v8.4s \n" " addp v12.4s, v12.4s, v12.4s\n" " addp v12.4s, v12.4s, v12.4s\n" " addp v16.4s, v16.4s, v16.4s\n" " addp v16.4s, v16.4s, v16.4s\n" " addp v20.4s, v20.4s, v20.4s\n" " addp v20.4s, v20.4s, v20.4s\n" " // start process kd4 kd2 kd1 cases\n" " 10: \n" " cmp %w8, #0 \n" " beq 13f \n" " // start subkernel_m4n1k2 \n" " ld1 {v4.8b}, [%1] // load B4x1\n" " add %x1, %x1, #4 \n" " sxtl v4.8h, v4.8b // extend B4x1 to v4 \n" " ld1 {v2.8b, v3.8b}, [%0], #16 // load A4x4\n" " sxtl v2.8h, v2.8b \n" " mov v5.d[0], v2.d[1] \n" " sxtl v3.8h, v3.8b \n" " mov v6.d[0], v3.d[1] // extend A4x4 to v2,v5,v3,v6\n" " smull v9.4s, v2.4h, v4.4h \n" " addp v9.4s, v9.4s, v9.4s \n" " addp v9.4s, v9.4s, v9.4s \n" " add v8.4s, v8.4s, v9.4s \n" " smull v13.4s, v5.4h, v4.4h \n" " addp v13.4s, v13.4s, v13.4s\n" " addp v13.4s, v13.4s, v13.4s\n" " add v12.4s, v12.4s, v13.4s \n" " smull v17.4s, v3.4h, v4.4h \n" " addp v17.4s, v17.4s, v17.4s\n" " addp v17.4s, v17.4s, v17.4s\n" " add v16.4s, v16.4s, v17.4s \n" " smull v21.4s, v6.4h, v4.4h \n" " addp v21.4s, v21.4s, v21.4s\n" " addp v21.4s, v21.4s, v21.4s\n" " add v20.4s, v20.4s, v21.4s \n" " 13: \n" " cmp %w9, #0 \n" " beq 14f \n" " // start subkernel_m4n1k2 \n" " ld1 {v4.8b}, [%0], #8 // load A4x2 \n" " ld1 {v0.8b}, [%1] // load B2x1 \n" " add %1, %1, #2 \n" " mov v0.h[1], v0.h[0] \n" " mov v0.s[1], v0.s[0] \n" " smull v0.8h, v0.8b, v4.8b \n" " saddlp v0.4s, v0.8h \n" " mov v9.s[0], v0.s[0] \n" " add v8.4s, v8.4s, v9.4s \n" " mov v13.s[0], v0.s[1] \n" " add v12.4s, v12.4s, v13.4s \n" " mov v17.s[0], v0.s[2] \n" " add v16.4s, v16.4s, v17.4s \n" " mov v21.s[0], v0.s[3] \n" " add v20.4s, v20.4s, v21.4s \n" " 14: \n" " cmp %w10, #0 \n" " beq 15f \n" " // start subkernel_m4n1k1 \n" " ld1 {v4.8b}, 
[%1] // load B1x1\n" " add %1, %1, #1 \n" " ld1 {v2.8b}, [%0] // load A4x1\n" " add %0, %0, #4 \n" " sxtl v4.8h, v4.8b \n" " sxtl v2.8h, v2.8b \n" " smull v0.4s, v2.4h, v4.h[0]\n" " add v8.4s, v8.4s, v0.4s \n" " mov v13.s[0], v0.s[1] \n" " add v12.4s, v12.4s, v13.4s \n" " mov v17.s[0], v0.s[2] \n" " add v16.4s, v16.4s, v17.4s \n" " mov v21.s[0], v0.s[3] \n" " add v20.4s, v20.4s, v21.4s \n" " 15: \n" // REQUANT " cmp %11, #0 \n" " beq 16f \n" " mov v8.s[1], v12.s[0] \n" " mov v8.s[2], v16.s[0] \n" " mov v8.s[3], v20.s[0] \n" " // v12: s0 s1 s2 s3 \n" " ld1 {v12.4s}, [%11] \n" " // int32 => fp32 \n" " scvtf v8.4s, v8.4s \n" " // fp32 *= scale_tm \n" " fmul v8.4s, v8.4s, v12.4s \n" " cmp %12, #0 \n" " beq 17f \n" " // fp32 += bias_tm \n" " ld1 {v12.4s}, [%12] \n" " fadd v8.4s, v8.4s, v12.4s \n" " 17: \n" " // fp32 -> int32 \n" " fcvtas v8.4s, v8.4s \n" " // int32 -> int16 \n" " sqxtn v8.4h, v8.4s \n" " // int16 -> int8 \n" " sqxtn v8.8b, v8.8h \n" " // save \n" " st1 {v8.b}[0], [%2] \n" " st1 {v8.b}[1], [%3] \n" " st1 {v8.b}[2], [%4] \n" " st1 {v8.b}[3], [%5] \n" " b 2f \n" " // no need to add the last output pointer\n" " 16: \n" " st1 {v8.s}[0], [%2] \n" " st1 {v12.s}[0], [%3] \n" " st1 {v16.s}[0], [%4] \n" " st1 {v20.s}[0], [%5] \n" " 2: \n" " mov x0, #0 \n" : "=r"(pa), // %0 "=r"(pb), // %1 "=r"(pc0), // %2 "=r"(pc1), // %3 "=r"(pc2), // %4 "=r"(pc3), // %5 "=r"(k8_even), // %6 "=r"(k8), // %7 "=r"(k4), // %8 "=r"(k2), // %9 "=r"(k1), // %10 "=r"(scales), // %11 "=r"(bias) // %12 : "0"(pa), "1"(pb), "2"(pc0), "3"(pc1), "4"(pc2), "5"(pc3), "6"(k8_even), "7"(k8), "8"(k4), "9"(k2), "10"(k1), "11"(scales), "12"(bias) : "cc", "memory", "x0", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } } #undef DECOMPOSE_K #undef DECOMPOSE_N static void int8kernel(void* dst, const int8_t* 
sa, const int8_t* sb, int m, int k, int n, int ldc, float* scales, float* bias, const ncnn::Option& opt) { int8_t* pa = (int8_t*)sa; int8_t* pb = (int8_t*)sb; const int nn = (m >> 2) << 2; if (scales == 0) { int32_t* pc = (int32_t*)dst; #if PRINT_MATRIX int32_t* origin = pc; #endif #pragma omp parallel for num_threads(opt.num_threads) for (int i = 0; i < nn; i += 4) { int8kernel_m4((void*)(pc + i * ldc), pa + i * k, pb, m, k, n, ldc, 0, 0); } pa += nn * k; pc += nn * ldc; switch (m - nn) { case 3: int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, 0, 0); pc += 2 * ldc; pa += 2 * k; int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, 0, 0); break; case 2: int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, 0, 0); break; case 1: int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, 0, 0); break; case 0: default: break; } #if PRINT_MATRIX print_int32_matrix("pc", origin, m, n, ldc); #endif } else { int8_t* pc = (int8_t*)dst; #if PRINT_MATRIX print_fp32_vec("scales", scales, m); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int i = 0; i < nn; i += 4) { int8kernel_m4((void*)(pc + i * ldc), pa + i * k, pb, m, k, n, ldc, scales + i, (bias == 0) ? 0 : bias + i); } pa += nn * k; pc += nn * ldc; scales += nn; bias = (bias == 0) ? 0 : bias + nn; switch (m - nn) { case 3: int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, scales, bias); pc += 2 * ldc; pa += 2 * k; scales += 2; bias = (bias == 0) ? 0 : bias + 2; int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, scales, bias); break; case 2: int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, scales, bias); break; case 1: int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, scales, bias); break; case 0: default: break; } } return; } #ifdef PRINT_MATRIX #undef PRINT_MATRIX #endif #endif
GB_unaryop__minv_uint64_int64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__minv_uint64_int64
// op(A') function: GB_tran__minv_uint64_int64

// C type:   uint64_t
// A type:   int64_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 64)

// type of the entries of the input matrix A
#define GB_ATYPE \
    int64_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

// the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator: z = 1/x in unsigned 64-bit integer arithmetic
// (GB_IMINV_UNSIGNED is defined in GB.h -- presumably the integer
// multiplicative inverse for a 64-bit unsigned result; confirm there)
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 64) ;

// casting from the A type (int64_t) to the C type (uint64_t)
#define GB_CASTING(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (cast (aij)) : process one entry, Ax [pA] -> Cx [pC]
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies z = GB_IMINV_UNSIGNED ((uint64_t) Ax [p], 64) to every entry,
// writing the result into Cx [p].  Each iteration is independent, so the
// loop is parallelized with a static OpenMP schedule.
GrB_Info GB_unop__minv_uint64_int64
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    int64_t *Ax,        // input array of anz entries
    int64_t anz,        // number of entries in Ax and Cx
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel itself lives in GB_unaryop_transpose.c; it is
// specialized for this operator via the GB_* macros defined above.
GrB_Info GB_tran__minv_uint64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    // NOTE(review): Rowcounts / Iter / A_slice / naslice describe the
    // slicing of A across threads -- confirm in GB_unaryop_transpose.c
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
test.c
#include <stdint.h> #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" #include "omp.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> int changeA(int a) { return a + 1; } int find(int answer[5], int found) { int row, col; printf("%d\n", found); int num; int a = 5; #pragma omp task shared(a, found) firstprivate(num,row) if(found == 0) { printf("%d\n", found); a = changeA(a); for (row = 0; row < 5; row++) { #pragma omp critical found = 1; if(answer[row] == 1) find(answer, found); } } printf("Main %d \n", a); return 0; } int main() { //#pragma omp parallel { double time1 = omp_get_wtime(); int height = 3; int width = 2; double sigma = 3; double sum=0.0; int found = 0 ; int matrix[] = {0,0,0,1,0}; #pragma omp parallel shared(found) { #pragma omp single { printf("answer %d\n", find(matrix, found)); } } printf("Elapsed time: %0.20lf\n", omp_get_wtime() - time1); } }
keepass_fmt_plug.c
/* KeePass cracker patch for JtR. Hacked together during May of * 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>. * * Support for cracking KeePass databases, which use key file(s), was added by * m3g9tr0n (Spiros Fraganastasis) and Dhiru Kholia in September of 2014. * * Support for all types of keyfile within Keepass 1.x ans Keepass 2.x was * added by Fist0urs <eddy.maaalou at gmail.com> * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_KeePass; #elif FMT_REGISTERS_H john_register_one(&fmt_KeePass); #else #include "sha2.h" #include <string.h> #include "stdint.h" #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "aes.h" #include "twofish.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "KeePass" #define FORMAT_NAME "" #define FORMAT_TAG "$keepass$*" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "SHA256 AES 32/" ARCH_BITS_STR " " SHA2_LIB #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define BINARY_SIZE 0 #define BINARY_ALIGN MEM_ALIGN_NONE #define SALT_SIZE sizeof(struct custom_salt) #if ARCH_ALLOWS_UNALIGNED // Avoid a compiler bug, see #1284 #define SALT_ALIGN 1 #else // salt align of 4 was crashing on sparc due to the long long value. 
#define SALT_ALIGN sizeof(long long) #endif #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests KeePass_tests[] = { {"$keepass$*1*50000*124*60eed105dac456cfc37d89d950ca846e*72ffef7c0bc3698b8eca65184774f6cd91a9356d338e5140e47e319a87f5e46a*8725bdfd3580cf054a1564dc724aaffe*8e58cc08af2462ddffe2ee39735ad14b15e8cb96dc05ef70d8e64d475eca7bf5*1*752*71d7e65fb3e20b288da8cd582b5c2bc3b63162eef6894e5e92eea73f711fe86e7a7285d5ac9d5ffd07798b83673b06f34180b7f5f3d05222ebf909c67e6580c646bcb64ad039fcdc6f33178fe475739a562dc78012f6be3104da9af69e0e12c2c9c5cd7134bb99d5278f2738a40155acbe941ff2f88db18daf772c7b5fc1855ff9e93ceb35a1db2c30cabe97a96c58b07c16912b2e095e530cc8c24041e7d4876b842f2e7c6df41d08da8c5c4f2402dd3241c3367b6e6e06cd0fa369934e78a6aab1479756a15264af09e3c8e1037f07a58f70f4bf634737ff58725414db10d7b2f61a7ed69878bc0de8bb99f3795bf9980d87992848cd9b9abe0fa6205a117ab1dd5165cf11ffa10b765e8723251ea0907bbc5f3eef8cf1f08bb89e193842b40c95922f38c44d0c3197033a5c7c926a33687aa71c482c48381baa4a34a46b8a4f78715f42eccbc8df80ee3b43335d92bdeb3bb0667cf6da83a018e4c0cd5803004bf6c300b9bee029246d16bd817ff235fcc22bb8c729929499afbf90bf787e98479db5ff571d3d727059d34c1f14454ff5f0a1d2d025437c2d8db4a7be7b901c067b929a0028fe8bb74fa96cb84831ccd89138329708d12c76bd4f5f371e43d0a2d234e5db2b3d6d5164e773594ab201dc9498078b48d4303dd8a89bf81c76d1424084ebf8d96107cb2623fb1cb67617257a5c7c6e56a8614271256b9dd80c76b6d668de4ebe17574ad617f5b1133f45a6d8621e127fcc99d8e788c535da9f557d91903b4e388108f02e9539a681d42e61f8e2f8b06654d4dec308690902a5c76f55b3d79b7c9a0ce994494bc60eff79ff41debc3f2684f40fc912f09035aae022148238ba6f5cfb92f54a5fb28cbb417ff01f39cc464e95929fba5e19be0251bef59879303063e6392c3a49032af3d03d5c9027868d5d6a187698dd75dfc295d2789a0e6cf391a380cc625b0a49f3084f45558ac273b0bbe62a8614db194983b2e207cef7deb1fa6a0bd39b0215d72bf646b599f187ee0009b7b458bb4930a1aea55222099446a0250a975447ff52", "openwall"}, 
{"$keepass$*2*6000*222*e54497d3d9be3e310a817a13515225a87773ba71557a88673c34db824550be7b*d405c4f7e3c7b2b142fda44c3d55d3afab1c91a6aca7c81c1ff7e61b3f03be85*7eb45af0af777ecb57f0159b9ffa528b*0af7d9facefb20378e8666389de7586ea72e9527dc78bf5dfe5f1b455060a3e6*9b0d1893678dea77f88bf66e6986adbc5a8095e4a09c7e9744bad42ac49133a7", "password"}, {"$keepass$*1*50000*124*f7465d646bab0a86197fcf2b778ea9c1*ec24a474b0745f9ff1de44ac3e0a274dda83375ecec45eb9ddc40b524fb51df2*f7f17dd2a15c4cf13fb4c8a504298fb3*e7765dba9ed64686a2c0b712de95bd0051a20b331ea0f77133e6afbb9faa1479*1*608*e5802225bf18755620355ad67efa87335532197ce45ee8374a5d23478557414b110426904671c49b266672c02e334c4261d52a9a0723d050329319f8d3b06a6d9507e5b30c78823beea101f52bde5ecdb6b6d0d2627fc254678416b39d2ba43ebce229c0b25f8c530975bc617be602d36e95a6e83c99c7264d5cc994af762460942830ac06b03d30c84c000d01061a938c274d78d383040c8cf5e69e7fbbaf6b46a7061399087f1db2747cd83afdb2b36e6077cecdc3b5c3b3f29f3a1ef537e8c798f8d614f9866a19a53b463aa81632e9aca43ebff9c787ca20a416a4051f16e4ececb84ea853fcc48a988e2d77cb385a2add3b858a18ee73783695a093628a0082d928ffeea39db585a478647e29395fdf2e3e8f54dc5b8277712d8cf5e8a266780944889fb46408b8afb614c3b8e7152b8cc865368d0ae000404234c11c8a77ebc521326683c00967a474cf82336afd1cb8f867db5f6cc7f5c9ae755c0fd0b4c9554ad26bef0b10f0c70978746090034e16922ee9cf38eb251515117cc62da3a62a6fd8a5dab0c10e857b2e2489d2521e1903d6b107c16fd1bf6565fc2953ea3206481ab6c466dba43777076c58ada7cb1883043f4747b2b80731476057598054ea9ec9de1645b4034f6569f579e70a021cc0a490dfa703def725846d0693d7cb02dea430905470db56663953b81b72f7543d6db7713afbcc91919b23cff80290a1053f34516c0b2c7a1f4bec1718994563ae188c2f65e20378537f88be2ebc6c47fbadabbd33414ffa30f115be0abdc89182e0a77d8d5c258d9ec5005415890218eb456fdcb79f1b15031289a0909fc6d8ae48ca6d2d699b6e0cd2e76462", "crackthis"}, 
{"$keepass$*1*50000*124*e144905f9aa746e1b2c382c807125d02*dd08f46898a3e75c458a44f34ec5391d3f3eb62b24dbda3d5e486e36312168cc*376ae8d5e8430d0a18e7bb4a0baddf75*5fa8dfc2f440ad296f1562683d06bf2717ae7e8ed343a279f54292f9fc8229ab*1*608*3ce1e03a1452e44b609ebe7326db4ef133ca25c325cc7cc5795ef92358011e2d32a1cb7cadc6f412b1d0a09f67f1444dfec73ed770507683360962d26b0c2b0384bcf9aba2cf1b3e4b5d7083ceaf5f941a2b99ec68d574eb58fe79e94d90b81c8f1f0ccfd35b16d415e8e203c06138eb6a1144520ef98bcdb33d669d2ab4aef2ab739e6dbc3f2ea5c6eef8410ca1555262181d8379b516551eb9d6a23eeb515bd8ef12735a635b25743c1188642486dd1fa4544138a361bcfc108f689bfb90f81d9808adcbd509f057cdbfd1cd31ee8b542956292f9bcca21fabeacc9ba96b335223103a72f94d9b04bcba9d74fada62e0d5bf2da142e413a373ea3c97ff1d50109532f5d041c5f77bea28cdea00388ab9dd3afc72bc266ff44c34221d751738545056e83d7558cf02ffc6f5a57163526ffff9a7de1c6276d4815a812c165ef0293bb951bcbc2cf389d20e188a6c24d1bc5322ee0bc6972b765fb199b28d6e14c3b795bd5d7d4f0672352dfed4870cf59480bab0f39f2a20ac162e8365b6e3dcb4a7fec1baafcb8c806726a777c7a5832a0d1c12568c2d9cad8dc04b1ce3506dbc1bf9663d625cfccb2d3c1cb6b96eee0f34e019b0145e903feed4683abe2568f2c0007c02c57b43f4ee585f9760d5b04c8581e25421b6b5bb370a5b48965b64584b1ed444ea52101af2b818b71eb0f9ae7942117273a3aff127641e17779580b48168c5575a8d843a87dee1088e0fde62bb2100e5b2e178daa463aeaeb1d4ff0544445aab09a7bdc684bd948f21112004dcc678e9c5f8cf8ba6113244b7c72d544f37cbc6baed6ddc76b9ccba6480abfb79a80dda4cdf7e218f396a749b4e5f", "password"}, /* CMIYC 2013 "pro" hard hash */ {"$keepass$*2*6000*222*a279e37c38b0124559a83fa452a0269d56dc4119a5866d18e76f1f3fd536d64d*7ec7a06bc975ea2ae7c8dcb99e826a308564849b6b25d858cbbc78475af3733f*d477c849bf2278b7a1f626c81e343553*e61db922e9b77a161e9b674ddadfb8c660d61b5f68d97a3b1596ae94cfa9d169*7c80c7db9de77f176e86ba11697152c4c8f182bdb8133ad1bca22e9ec5bc275b", "Sh4RK%nAD0*"}, /* twofish version 1 hash from http://openwall.info/wiki/john/sample-non-hashes#KeePass */ 
{"$keepass$*1*50000*1*1ff21bd79aa8e9c3f439281a4ce6a97b*cfbdb00057ee0c9e889ca9d93b069ab5ae19f78852bc21aae4f60d0d325e0034*c1a7e6138a49a2dcfb3a84afbc1d918b*a704f9d060f0de5a070155d1d5a8727da92f404242cb3aa2b9aa53a145f87474*1*608*c2d3d18e416af56788d1c3e4257da9ce6e5dcad4db012d7422d17b4527bbb2bb994d9db03907ae01cc1565f5fd0729b930c9ee352426c57de5dee7e941e1d6aedeaf2b0e6509819385de9b4dd6a09979b3edfa0959a7186c422031e426f18d295c55ac616aabeec99f89e696be1d585950ef16a94ae610f2449cc3964bb63ec6043ef36c89117bc78e99e5fbf083b48cb84f85a964e8a037018b3afc2cc55fbe7d74cbdb53d5a54bcd202a1d0a342dbf48a8f7a24264cde8d800a506bf134008b1d8d9b8dd80c19511d9f43b3c23b19eb4a7dcf584f80c49961f73dcba3d2d0390a39a683ddcc8771b49cc3c673ea0aa902d075e25bc814608e2e6d1d6218a6379fd677bc5daaa18b6f5a021d2f661338ca8cc3645dc6cddb860af222a5cdb59a5e2a2c1921203344ced4e2154446239f6c1af8c1bace8207e0f519ea9c08db2f5d0bde0416b09ef6c530213e648641ae56c9af9fbdcb0a286cc4de121655697b9eb00c0fd89ed7269c3859eca20e0c7b60be8d2a1323eb915139cf90c55f9cff01a5bdf757e09ee6d64c2de9aec8d3ea42feeb67caf51b9ba1a80b435e271fdb7f9144ca31e41671768b2c5e8adf70245fdf52005de418efbe2a156d19eeb2ed9e97a0ddb133d11bd8655356d9d3edbbdbf9d0db345b2eb2c1f550ce070f5b0f8f8e58a6ffd52ae8089627dc4a0dac4b4846349066bfa0d2f395d2cb3871e57e353d622e0904a9f54a3e4706797d95b34619f792c15ab8efb3ac523becc3023f01aaad169bc08db8d01e2dd22eff8f6b4f7b741d196bc3de466590011e6d5c9703a19c07d96d26fe1ad93d0931454730ee1f3146428a126d1ed02763f827ff4", "twofish"}, /* keyfile test cases*/ 
{"$keepass$*1*6000*0*1a1d38235ccbeae4ca2a9edfbd3b290c*8e1e81b37a6161b6033fbd6dd350aaeaa0712cf2649fe40e3fbbaa4b61684f54*d9517d352aea00c2b7f57f1154b9c0a0*0a8ae9b13347402c242d7cde4d58d01f1e129287eaf62df768856bbb9d0633a1*1*1360*6555a7e9eca9d5a2c9504a5c888846f0a8902fa31e3dc90f8fcc118856d5daabcaaf4316c4d589e11cce5b9a209e9a7ec1db5b848a706c78f7c7dfac4fd9ea86ac15af500518766dbf4525ee7c1b477a8fec4abdd6f4ad36894ec5aee0c9a5662c5091ceb61b3aa99ff3eacd687ed797b0a1e8ceecd5c51456cb1f70dadf0fda190752e4efe4fb101d5fc5d7745ff01d68cb4c0cc32c6003f85c310e43d7d659748bfc260cbb329c4076c2c9948386c74bb967362a98d6490dbe340f5d440b557b105edd5561836fbb6894f4a1d9a5cd0182536a28f60ca268d682065f8f5226e24a07d635a3c4f04760094cee033fb2f7c3a0cbdf7f174d31c827f6911a75ca95b21332bb47ea6359aa2d70ff4b16e8481cd536e0ec4ba90963edda754b6e0e694855e4f266899b3dd2b0f74c3e688caa376b22810945249ac4e1c38e8d1093ce272ed45d26037a1fd6e0cfcdbdf096c8b2795ba736641bafe9938b6eb2b40ea347f9c49952c118d86ec671c065e3c94f0de2409fec2fde318ad7e6dd0189baf4fa0044fc1d2974b9dafb1608f4bca525706e44ca6af09e305ad29f5e4ba0831145713d5d8b6d6d955c4b5ca031e34b4292aee5383179e1e0afe92ee6565e69825c90bb5e79612a4ad4a3babbd4a75b5481ea710c93595781b71532c17730409482e6b59bb9831be4efadadf36eda5bc5fcf0f3541aaba6662807e531a3e28078f5960e50f80e624c5434b545c1232fdd64359f53b90d6635107f4f005ac02110eebdbdda4f2c92addd686059e9d799a55902526f87f78b8844e2000f82e7b5c8ba3a19fe26117c43f69ba26eee75cc385737791ca4554ce935af26c50331963e500605e87ac3602a76669bf6318e797ef01fe1c25e567cc864de11bd00f555fdf188648bf4179658e325be39a4050b7b01553422e5cd1bbaf5e8f75ce34f0e92f1253c880d4e77f484f14817e288f01efbfe1a8f8b90e9d18b86898856bdf3ee6b5754853cb99a746fa0b753f1a49f529a89d9a0c2fbd5365477be829190dbf491bc886f66ae1bfe014a7e23a420f76a4a0d0d5ebcea51dc0021651a6cdbe5c89a7ae8bfdae2e30d404c31790c0aba8791793ce3072adf21e5a3c5b5e4f9cea82ebff5070e13f94300d5688523ba2a142ae8f82f6ef940e69beba1d665ab17a2ae471500fc48ded336b27450f08dfe07fa5e556963f035a01950f43b2f649bf7f552e9ee7154f5ffdec109fd5bdf0e879d044ef4b78e59
0ac769efcdd7dad74228872af966d2e8d976336de1ee4289e933288b5b0b43195df1c248176ac944f5e99918dbc067f93d15e95602c9cb8246f378377785b7ebfee44f81b385a3e1c9c5276e4b477c4841af871e6b0e3f4387c58cea01fe2aff04df0f51ac93757172d7537ee0df51ec931564ed2c8a11a45da8c03644d0bc93a14d9f79555250b9c8245690bc1c72ea7e9104a9f570680f704c1f8759a65e210e1b9a855b46ed6801354175b27fc288a7bc39a2003f4400c124ec41d7f54f67be99f778895d9c3e33623a346021215a369487457e78322dbd71a3d969b3e22dfea987ac93d5c4f8252142824f5a67e54a2b1b78ea928fbb63653e122555f6c76150f2541bdad6524f69964c91e9175406d0b824e175e63c7677d990341ee69c4ca9612a05e3bd2ed304c45cd97051aaf0b63c0d917af8d01723e215bb93f816b51d79e29e4e885b98f8ca8320443503c07e67b4d546f544ffced62ef7298a8ac6175f77c180900f638466cd15d6511d7b16992a8e0674563c02fe7776079ee92739bc142a1e601b3aaee284f6f828656e43e58b93bcfd5f69b6aa8c003788d1ae88f569f64402d64e18cb8ffc2268013fe4da9ba7da557da3e259623168b7fd57cf0e4c8327bae66e02bc12978725022ef4cc03b4021d3a*1*64*3a96fb77fbbbca7336ee699f17be31fde552191128553c6d89bfce4035dc0af0", "choupinette"}, {"$keepass$*2*6000*222*aa511591cb50394d044f31abb2febdb2788c9ee41d78a53f3efe0f83fdd64e81*7ceab79302a794cef818d9426e53a78458f82e72575967c4fb3788d4bc685874*1c5c1c0c475ee2f22bd56e9c75cfd67c*e7bf79115c83a0236260c71c17a816f9bd9288a683eb4b5e0d48666c66e97774*53f26838a293b392bfde1ad21b444b834cf5c02155a1378ac496653b2f3779ec*1*64*98df4f35fe74c031992d81a639305c4520f303fd1ca4bb09b53e33032b44c46a", "kukudanlaplace"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int any_cracked, *cracked; static size_t cracked_size; static struct custom_salt { long long offset; int version; int isinline; int keyfilesize; int have_keyfile; int contentsize; // unsigned char contents[LINE_BUFFER_SIZE]; unsigned char contents[0x30000]; // We need to fix this in some other way, now that LINE_BUFFER_SIZE has been dropped so heavily! 
	/* Remaining members of struct custom_salt (opened above): per-hash
	 * parameters parsed out of a "$keepass$*..." ciphertext line. */
	unsigned char final_randomseed[32];	// seed for the final key hash (16 bytes used for v1, 32 for v2)
	unsigned char enc_iv[16];		// CBC IV for decrypting the contents
	unsigned char keyfile[32];		// SHA-256 of the key file, when one is present
	unsigned char contents_hash[32];	// expected SHA-256 of the decrypted contents (v1)
	unsigned char transf_randomseed[32];	// AES key used for the key-transformation rounds
	unsigned char expected_bytes[32];	// expected plaintext head after decryption (v2)
	uint32_t key_transf_rounds;		// iteration count of the AES key transformation
	int algorithm; // 0 for AES, 1 for Twofish
} *cur_salt;

/*
 * Derive the 32-byte final decryption key for one candidate password.
 * Steps (as implemented below): SHA-256 the password; for v2 without a
 * key file, SHA-256 it a second time; fold in the key-file hash when
 * present; run key_transf_rounds AES encryptions over each 16-byte half
 * of the hash (unrolled by four); then SHA-256 once more and finally
 * SHA-256 together with final_randomseed.
 *
 * NOTE(review): this function mixes its `csp` parameter with the global
 * `cur_salt` (have_keyfile/keyfile reads) — they appear to always be the
 * same object at call time, but confirm before reusing this helper.
 */
static void transform_key(char *masterkey, struct custom_salt *csp, unsigned char *final_key)
{
	// First, hash the masterkey
	SHA256_CTX ctx;
	unsigned char hash[32];
	unsigned char temphash[32];
	int i;
	AES_KEY akey;

	SHA256_Init(&ctx);
	SHA256_Update(&ctx, masterkey, strlen(masterkey));
	SHA256_Final(hash, &ctx);
	// KeePass 2.x without a key file hashes the password hash once more
	if(csp->version == 2 && cur_salt->have_keyfile == 0) {
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, hash, 32);
		SHA256_Final(hash, &ctx);
	}
	memset(&akey, 0, sizeof(AES_KEY));
	if(AES_set_encrypt_key(csp->transf_randomseed, 256, &akey) < 0) {
		fprintf(stderr, "AES_set_encrypt_key failed!\n");
	}
	// composite key = SHA-256(password hash || key-file hash)
	if (cur_salt->have_keyfile) {
		SHA256_CTX composite_ctx;
		SHA256_Init(&composite_ctx);
		SHA256_Update(&composite_ctx, hash, 32);
		memcpy(temphash, cur_salt->keyfile, 32);
		SHA256_Update(&composite_ctx, temphash, 32);
		SHA256_Final(hash, &composite_ctx);
	}
	// Next, encrypt the created hash (both 16-byte halves, unrolled x4)
	i = csp->key_transf_rounds >> 2;
	while (i--) {
		AES_encrypt(hash, hash, &akey);
		AES_encrypt(hash, hash, &akey);
		AES_encrypt(hash, hash, &akey);
		AES_encrypt(hash, hash, &akey);
		AES_encrypt(hash+16, hash+16, &akey);
		AES_encrypt(hash+16, hash+16, &akey);
		AES_encrypt(hash+16, hash+16, &akey);
		AES_encrypt(hash+16, hash+16, &akey);
	}
	// leftover rounds (key_transf_rounds mod 4)
	i = csp->key_transf_rounds & 3;
	while (i--) {
		AES_encrypt(hash, hash, &akey);
		AES_encrypt(hash+16, hash+16, &akey);
	}
	// Finally, hash it again...
	SHA256_Init(&ctx);
	SHA256_Update(&ctx, hash, 32);
	SHA256_Final(hash, &ctx);
	// ...and hash the result together with the randomseed
	SHA256_Init(&ctx);
	if(csp->version == 1) {
		SHA256_Update(&ctx, csp->final_randomseed, 16);
	}
	else {
		SHA256_Update(&ctx, csp->final_randomseed, 32);
	}
	SHA256_Update(&ctx, hash, 32);
	SHA256_Final(final_key, &ctx);
}

/* Allocate per-candidate buffers, scale keys-per-crypt for OpenMP, and
 * initialise the Twofish tables. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = 1;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	any_cracked = 0;
	cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
	cracked = mem_calloc(cracked_size, 1);
	Twofish_initialise();
}

/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}

/* Sanity-check one "$keepass$*..." ciphertext line field by field.
 * Returns 1 when the line is well-formed, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	int version, res, contentsize, extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	// work on a copy; strtokm mutates its input
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* version */
		goto err;
	version = atoi(p);
	if (version != 1 && version != 2)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* rounds */
		goto err;
	if (!isdec(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* offset */
		goto err;
	if (!isdec(p))
	/* TODO: what values are 'valid' here, and can we check something ?
	 */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* final random seed */
		goto err;
	res = hexlenl(p, &extra);
	if (extra || (res != 32 && res != 64))	/* 16 bytes (v1) or 32 bytes (v2) */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* transf random seed */
		goto err;
	if (hexlenl(p, &extra) != 64 || extra)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* env_iv */
		goto err;
	if (hexlenl(p, &extra) != 32 || extra)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* hash or expected bytes*/
		goto err;
	if (hexlenl(p, &extra) != 64 || extra)
		goto err;
	if (version == 1) {
		if ((p = strtokm(NULL, "*")) == NULL)	/* inline flag */
			goto err;
		res = atoi(p);
		if (res != 1)	/* only inline content is supported */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* content size */
			goto err;
		contentsize = atoi(p);
		if ((p = strtokm(NULL, "*")) == NULL)	/* content */
			goto err;
		if (!contentsize || hexlenl(p, &extra) / 2 != contentsize || extra)
			goto err;
		p = strtokm(NULL, "*"); // keyfile handling
		if (p) {
			res = atoi(p);
			if (res == 1) {
				if ((p = strtokm(NULL, "*")) == NULL)	/* keyfile size */
					goto err;
				res = atoi(p);
				if ((p = strtokm(NULL, "*")) == NULL)	/* keyfile hash */
					goto err;
				if (res != 64 || strlen(p) != 64 || !ishexlc(p))
					goto err;
			} else
				goto err;
		}
	} else {
		if ((p = strtokm(NULL, "*")) == NULL)	/* content */
			goto err;
		if (hexlenl(p, &extra) != 64 || extra)
			goto err;
		p = strtokm(NULL, "*"); // keyfile handling
		if (p) {
			res = atoi(p);
			if (res == 1) {
				if ((p = strtokm(NULL, "*")) == NULL)	/* keyfile size */
					goto err;
				res = atoi(p);
				if ((p = strtokm(NULL, "*")) == NULL)	/* keyfile hash */
					goto err;
				if (res != 64 || strlen(p) != 64 || !ishexlc(p))
					goto err;
			} else
				goto err;
		}
	}
	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/*
 * Parse one ciphertext line into a custom_salt.  Returns a pointer to a
 * function-local static, so the caller must copy it before the next call
 * (standard JtR get_salt() convention).
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$keepass$*" */
	p = strtokm(ctcopy, "*");
	cs.version = atoi(p);
	if(cs.version == 1) {
		p = strtokm(NULL, "*");
		cs.key_transf_rounds = atoi(p);
		p = strtokm(NULL, "*");
		// cs.offset = atoll(p);
		// Twofish handling hack! The old "offset" field carries the cipher id.
		cs.algorithm = atoll(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < 16; i++)	/* final random seed (16 bytes for v1) */
			cs.final_randomseed[i] =
				atoi16[ARCH_INDEX(p[i * 2])] * 16 +
				atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		for (i = 0; i < 32; i++)
			cs.transf_randomseed[i] =
				atoi16[ARCH_INDEX(p[i * 2])] * 16 +
				atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		for (i = 0; i < 16; i++)
			cs.enc_iv[i] =
				atoi16[ARCH_INDEX(p[i * 2])] * 16 +
				atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		for (i = 0; i < 32; i++)
			cs.contents_hash[i] =
				atoi16[ARCH_INDEX(p[i * 2])] * 16 +
				atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.isinline = atoi(p);
		if(cs.isinline == 1) {
			p = strtokm(NULL, "*");
			cs.contentsize = atoi(p);
			p = strtokm(NULL, "*");
			for (i = 0; i < cs.contentsize; i++)
				cs.contents[i] =
					atoi16[ARCH_INDEX(p[i * 2])] * 16 +
					atoi16[ARCH_INDEX(p[i * 2 + 1])];
		}
		p = strtokm(NULL, "*");
		if (p) { /* keyfile handling */
			p = strtokm(NULL, "*");
			cs.keyfilesize = atoi(p);
			p = strtokm(NULL, "*");
			for (i = 0; i < 32; i++)	/* SHA-256 of the key file */
				cs.keyfile[i] =
					atoi16[ARCH_INDEX(p[i * 2])] * 16 +
					atoi16[ARCH_INDEX(p[i * 2 + 1])];
			cs.have_keyfile = 1;
		}
	}
	else {	/* version 2 */
		p = strtokm(NULL, "*");
		cs.key_transf_rounds = atoi(p);
		p = strtokm(NULL, "*");
		// cs.offset = atoll(p);
		// Twofish handling hack
		cs.algorithm = atoll(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < 32; i++)	/* final random seed (32 bytes for v2) */
			cs.final_randomseed[i] =
				atoi16[ARCH_INDEX(p[i * 2])] * 16 +
				atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		for (i = 0; i < 32; i++)
			cs.transf_randomseed[i] =
				atoi16[ARCH_INDEX(p[i * 2])] * 16 +
				atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		for (i = 0; i < 16; i++)
			cs.enc_iv[i] =
				atoi16[ARCH_INDEX(p[i * 2])] * 16 +
				atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		for (i = 0; i < 32; i++)
			cs.expected_bytes[i] =
				atoi16[ARCH_INDEX(p[i * 2])] * 16 +
				atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		for (i = 0; i < 32; i++)	/* first encrypted block only */
			cs.contents[i] =
				atoi16[ARCH_INDEX(p[i * 2])] * 16 +
				atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		if (p) { /* keyfile handling */
			p = strtokm(NULL, "*");
			cs.keyfilesize = atoi(p);
			p = strtokm(NULL, "*");
			for (i = 0; i < 32; i++)
				cs.keyfile[i] =
					atoi16[ARCH_INDEX(p[i * 2])] * 16 +
					atoi16[ARCH_INDEX(p[i * 2 + 1])];
			cs.have_keyfile = 1;
		}
	}
	MEM_FREE(keeptr);
	// anything other than the two known cipher ids falls back to AES
	if (cs.algorithm != 0 && cs.algorithm != 1) // offset hijacking!
		cs.algorithm = 0; // AES
	return (void *)&cs;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/*
 * Try all queued candidate passwords against the current salt.
 * Marks cracked[index] and any_cracked on success.
 * NOTE(review): the for-loop is inside the #ifdef _OPENMP guard; without
 * OpenMP only index 0 runs, which matches MAX_KEYS_PER_CRYPT == 1 in
 * non-OpenMP builds — confirm if the scaling logic ever changes.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		unsigned char final_key[32];
		//unsigned char decrypted_content[LINE_BUFFER_SIZE];
		unsigned char decrypted_content[0x30000];
		SHA256_CTX ctx;
		unsigned char iv[16];
		unsigned char out[32];
		int pad_byte;
		int datasize;
		AES_KEY akey;
		Twofish_key tkey;

		// derive and set decryption key
		transform_key(saved_key[index], cur_salt, final_key);
		if (cur_salt->algorithm == 0) {
			/* AES decrypt cur_salt->contents with final_key */
			memcpy(iv, cur_salt->enc_iv, 16);
			memset(&akey, 0, sizeof(AES_KEY));
			if(AES_set_decrypt_key(final_key, 256, &akey) < 0) {
				fprintf(stderr, "AES_set_decrypt_key failed in crypt!\n");
			}
		}
		else if (cur_salt->algorithm == 1) {
			memcpy(iv, cur_salt->enc_iv, 16);
			memset(&tkey, 0, sizeof(Twofish_key));
			Twofish_prepare_key(final_key, 32, &tkey);
		}

		if (cur_salt->version == 1 && cur_salt->algorithm == 0) {
			/* v1 + AES: decrypt everything, strip PKCS#7 padding,
			 * compare the SHA-256 of the plaintext */
			AES_cbc_encrypt(cur_salt->contents, decrypted_content,
					cur_salt->contentsize, &akey, iv, AES_DECRYPT);
			pad_byte = decrypted_content[cur_salt->contentsize-1];
			datasize = cur_salt->contentsize - pad_byte;
			SHA256_Init(&ctx);
			SHA256_Update(&ctx, decrypted_content, datasize);
			SHA256_Final(out, &ctx);
			if(!memcmp(out, cur_salt->contents_hash, 32)) {
				cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
				any_cracked |= 1;
			}
		}
		else if (cur_salt->version == 2 && cur_salt->algorithm == 0) {
			/* v2 + AES: only the first block is checked, against
			 * the stored expected bytes */
			AES_cbc_encrypt(cur_salt->contents, decrypted_content,
					32, &akey, iv, AES_DECRYPT);
			if(!memcmp(decrypted_content, cur_salt->expected_bytes, 32)) {
				cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
				any_cracked |= 1;
			}
		}
		else if (cur_salt->version == 1 && cur_salt->algorithm == 1) {
			/* KeePass 1.x with Twofish */
			int crypto_size;

			crypto_size = Twofish_Decrypt(&tkey, cur_salt->contents,
					decrypted_content, cur_salt->contentsize, iv);
			datasize = crypto_size; // awesome, right?
			if (datasize <= cur_salt->contentsize && datasize > 0) {
				SHA256_Init(&ctx);
				SHA256_Update(&ctx, decrypted_content, datasize);
				SHA256_Final(out, &ctx);
				if(!memcmp(out, cur_salt->contents_hash, 32)) {
					cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
					any_cracked |= 1;
				}
			}
		}
		else {
			// KeePass version 2 with Twofish is TODO. Twofish support under KeePass version 2
			// requires a third-party plugin. See http://keepass.info/plugins.html for details.
			abort();
		}
	}
	return count;
}

static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

static int cmp_exact(char *source, int index)
{
	return cracked[index];
}

/* Store one candidate password, truncated to PLAINTEXT_LENGTH. */
static void KeePass_set_key(char *key, int index)
{
	int saved_len = strlen(key);

	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* First tunable cost: the AES key-transformation iteration count. */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->key_transf_rounds;
}

/*
 * The version shouldn't have a significant impact
 * on performance. Nevertheless, report it as the 2nd
 * "tunable cost".
 */
static unsigned int keepass_version(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->version;
}

/* Format registration: parameters block first, then the method table. */
struct fmt_main fmt_KeePass = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",
			"version",
		},
		{ FORMAT_TAG },
		KeePass_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{
			iteration_count,
			keepass_version,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		KeePass_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
fixed_gaussian_basis.h
#ifndef METHODS_FIXED_GAUSSIAN_BASIS_H #define METHODS_FIXED_GAUSSIAN_BASIS_H #include "quartz_internal/util/auto_generator.h" namespace method { // use your method name to create a subspace for your // implementation of details namespace fgb { namespace details { inline arma::mat overlap_matrix(const arma::mat & points, const arma::cube & covariances) { if (points.n_cols != covariances.n_slices) { throw Error( "Different number of points between the points and covariances"); } if (points.n_rows != covariances.n_rows) { throw Error("Different dimension between the points and covariances"); } arma::mat overlap(points.n_cols, points.n_cols); #pragma omp parallel for for (arma::uword i = 0; i < overlap.n_rows; i++) { for (arma::uword j = i; j < overlap.n_cols; j++) { const arma::vec col_i = points.col(i); const arma::vec col_j = points.col(j); const auto gaussian = math::Gaussian<double>(covariances.slice(i), col_i) * math::Gaussian<double>(covariances.slice(j), col_j); overlap(i, j) = gaussian.integral(); } } overlap = overlap + overlap.t(); overlap.diag() /= 2.0; return overlap; } template<typename Function> auto linear_combination_in_gaussian_basis(const Function & function, const arma::mat & overlap, const arma::mat & points, const arma::cube & covariances) { if (points.n_cols != covariances.n_slices) { throw Error( "Different number of points between the points and covariances"); } if (points.n_rows != covariances.n_rows) { throw Error("Different dimension between the points and covariances"); } if (overlap.n_rows != points.n_cols) { throw Error("Mismatch between the overlap matrix and points"); } const arma::vec test_gaussian_mean = points.col(0); const arma::mat test_gaussian_covariance = covariances.slice(0); const auto test_gaussian = math::Gaussian<double>(test_gaussian_covariance, test_gaussian_mean); const auto test_multiplied = function * test_gaussian; auto integral = test_multiplied.integral(); arma::Col<decltype(integral)> expectations = 
arma::Col<decltype(integral)>(points.n_cols).eval(); #pragma omp parallel for for (arma::uword i = 0; i < expectations.n_elem; i++) { const arma::vec col_i = points.col(i); const auto multiplied = function * math::Gaussian<double>(covariances.slice(i), col_i); expectations(i) = multiplied.integral(); } const arma::Col<decltype(integral)> coefs = arma::inv(overlap) * expectations; return coefs; } inline arma::cube covariances_generator(const arma::uvec & grid, const arma::mat & range, const double widening = 1.0) { if (grid.n_elem != range.n_rows) { throw Error("Different dimension between the grid and range"); } const arma::vec spacing = (range.col(1) - range.col(0)) / (grid - 1) * widening; arma::cube result(spacing.n_elem, spacing.n_elem, arma::prod(grid)); #pragma omp parallel for for (arma::uword i = 0; i < result.n_slices; i++) { result.slice(i) = arma::diagmat(spacing); } return result; } } // namespace details struct State { public: arma::mat points; arma::cube covariances; arma::mat overlap; arma::vec weights; arma::vec masses; // Establish an easy way to construct your State template<typename PhaseSpaceDistribution> State(const PhaseSpaceDistribution & initial, const arma::uvec & grid, const arma::mat & range, const arma::vec & masses, const double widening = 1.0) : points(math::space::points_generate(grid, range)), covariances(details::covariances_generator(grid, range, widening)), overlap(details::overlap_matrix(points, covariances)), weights(details::linear_combination_in_gaussian_basis(initial, overlap, points, covariances)), masses(masses) { if (grid.n_elem % 2 != 0) { throw Error("Odd number of dimension - it is not likely a phase space"); } if (grid.n_rows != range.n_rows) { throw Error("Different dimension between the grid and the range"); } if (grid.n_rows != 2 * masses.n_rows) { throw Error("Different dimension between the grid and the masses"); } } template<typename PhaseSpaceDistribution> State(const PhaseSpaceDistribution & initial, const 
arma::uvec & grid, const arma::mat & range, const double widening = 1.0) : points(math::space::points_generate(grid, range)), covariances(details::covariances_generator(grid, range, widening)), overlap(details::overlap_matrix(points, covariances)), weights(details::linear_combination_in_gaussian_basis(initial, overlap, points, covariances)), masses(arma::ones<arma::vec>(grid.n_elem / 2)) { if (grid.n_elem % 2 != 0) { throw Error("Odd number of dimension - it is not likely a phase space"); } if (grid.n_rows != range.n_rows) { throw Error("Different dimension between the grid and the range"); } if (grid.n_rows != 2 * masses.n_rows) { throw Error("Different dimension between the grid and the masses"); } } inline State(const arma::mat & points, const arma::vec & weights, const arma::mat & overlap, const arma::cube & covariances, const arma::vec & masses) : points(points), covariances(covariances), overlap(overlap), weights(weights), masses(masses) { if (points.n_cols != weights.n_elem) { throw Error("Different number of points and corresponding weights"); } if (points.n_rows != 2 * masses.n_rows) { throw Error("Different dimension between the points and the masses"); } } inline arma::uword dim() const { return points.n_rows / 2; } inline arma::vec mean() const { return this->points * this->weights / arma::sum(this->weights); } inline State normalise() const { return State(this->points, this->weights / arma::sum(this->weights), this->overlap, this->covariances, this->masses); } inline arma::vec positional_expectation() const { arma::uword dim = this->dim(); return this->points.rows(0, dim - 1) * this->weights / arma::sum(this->weights); } inline arma::vec momentum_expectation() const { arma::uword dim = this->dim(); return this->points.rows(dim, 2 * dim - 1) * this->weights / arma::sum(this->weights); } inline arma::mat covariance_expectation() const { arma::mat result(arma::size(this->covariances.slice(0)), arma::fill::zeros); #pragma omp parallel for for (arma::uword 
i = 0; i < this->covariances.n_slices; i++) { result += this->covariances.slice(i) * weights(i); } return (result + this->points * arma::diagmat(this->weights) * this->points.t()) / arma::sum(weights) - this->mean() * this->mean().t(); } inline math::Gaussian<double> packet(const arma::uword i) const { if (this->covariances.n_slices <= i) { throw Error("packet enquiry out of bound"); } math::Gaussian<double> result(this->covariances.slice(i), this->points.col(i)); return result; } State operator+(const State & B) const { if (!arma::approx_equal(this->points, B.points, "abs_diff", 1e-16) || !arma::approx_equal(this->masses, B.masses, "abs_diff", 1e-16) || !arma::approx_equal(this->covariances, B.covariances, "abs_diff", 1e-16)) { throw Error("Different fgb states are being added"); } return State(this->points, this->weights + B.weights, this->overlap, this->covariances, this->masses); } State operator*(const double B) const { return State(this->points, this->weights * B, this->overlap, this->covariances, this->masses); } }; struct Operator { public: math::Polynomial<double> hamiltonian; arma::mat fock; arma::mat propagation_matrix; Operator(const State & state, const math::Polynomial<double> & potential) : hamiltonian(quartz::hamiltonian(potential, state.masses)) { const arma::uword total = state.covariances.n_slices; arma::mat f(total, total, arma::fill::zeros); #pragma omp parallel for for (arma::uword i = 0; i < total; i++) { for (arma::uword j = i + 1; j < total; j++) { const auto gaussian_i = math::GaussianWithPoly(state.packet(i)); const auto gaussian_j = math::GaussianWithPoly(state.packet(j)); const auto moyal = moyal_bracket(gaussian_i, this->hamiltonian, this->hamiltonian.grade() / 2); f(i,j) = (moyal * gaussian_j).integral(); } } this->fock = f - f.t(); this->propagation_matrix = arma::inv(state.overlap) * this->fock; } inline PropagationType propagation_type() const { return Classic; } State operator()(const State & state) const { return 
        State(state.points,
              // one propagation step: new weights = P * old weights
              this->propagation_matrix * state.weights,
              state.overlap,
              state.covariances,
              state.masses);
  }
};

} // namespace fgb
} // namespace method

#endif //METHODS_FIXED_GAUSSIAN_BASIS_H
Sphere.h
#ifndef SPHERE_HEADER #define SPHERE_HEADER #include "basic.h" #include <MiscLib/Vector.h> #include <stdexcept> #include <GfxTL/HyperplaneCoordinateSystem.h> #include <utility> #include "PointCloud.h" #include <ostream> #include <istream> #include <stdio.h> #include <utility> #include <MiscLib/NoShrinkVector.h> #include "LevMarLSWeight.h" #include "LevMarFitting.h" #ifndef DLL_LINKAGE #define DLL_LINKAGE #endif namespace schnabel { struct DLL_LINKAGE InvalidTetrahedonError : public std::runtime_error { InvalidTetrahedonError(); }; class DLL_LINKAGE Sphere { public: enum { RequiredSamples = 2 }; Sphere(); Sphere(const Vec3f &center, float radius); Sphere(const Vec3f &p1, const Vec3f &p2, const Vec3f &p3, const Vec3f &p4); bool Init(const MiscLib::Vector< Vec3f > &samples); bool Init(const Vec3f &p1, const Vec3f &p2, const Vec3f &p3, const Vec3f &p4); bool Init2(const Vec3f &p1, const Vec3f &p2, const Vec3f &n1, const Vec3f &n2); bool Init(bool binary, std::istream *i); void Init(FILE *i); void Init(float *array); inline float Distance(const Vec3f &p) const; inline void Normal(const Vec3f &p, Vec3f *normal) const; inline float DistanceAndNormal(const Vec3f &p, Vec3f *normal) const; inline float SignedDistance(const Vec3f &p) const; void Project(const Vec3f &p, Vec3f *pp) const; const Vec3f &Center() const; void Center(const Vec3f &center) { m_center = center; } float Radius() const; void Radius(float radius) { m_radius = radius; } bool LeastSquaresFit(const PointCloud &pc, MiscLib::Vector< size_t >::const_iterator begin, MiscLib::Vector< size_t >::const_iterator end); template< class IteratorT > bool LeastSquaresFit(IteratorT begin, IteratorT end); bool Fit(const PointCloud &pc, MiscLib::Vector< size_t >::const_iterator begin, MiscLib::Vector< size_t >::const_iterator end) { return LeastSquaresFit(pc, begin, end); } static bool Interpolate(const MiscLib::Vector< Sphere > &spheres, const MiscLib::Vector< float > &weights, Sphere *is); void Serialize(bool binary, 
std::ostream *o) const; static size_t SerializedSize(); void Serialize(FILE *o) const; void Serialize(float* array) const; static size_t SerializedFloatSize(); void Transform(float scale, const Vec3f &translate); inline unsigned int Intersect(const Vec3f &p, const Vec3f &r, float *first, float *second) const; private: template< class WeightT > class LevMarSimpleSphere : public WeightT { public: enum { NumParams = 4 }; typedef float ScalarType; template< class IteratorT > ScalarType Chi(const ScalarType *params, IteratorT begin, IteratorT end, ScalarType *values, ScalarType *temp) const { ScalarType chi = 0; int size = end - begin; #pragma omp parallel for schedule(static) reduction(+:chi) for(int idx = 0; idx < size; ++idx) { float s = begin[idx][0] - params[0]; s *= s; for(unsigned int j = 1; j < 3; ++j) { float ss = begin[idx][j] - params[j]; s += ss * ss; } values[idx] = WeightT::Weigh(std::sqrt(s) - params[3]); chi += values[idx] * values[idx]; } return chi; } template< class IteratorT > void Derivatives(const ScalarType *params, IteratorT begin, IteratorT end, const ScalarType *values, const ScalarType *temp, ScalarType *matrix) const { int size = end - begin; #pragma omp parallel for schedule(static) for(int idx = 0; idx < size; ++idx) { float s[3]; s[0] = begin[idx][0] - params[0]; float sl = s[0] * s[0]; for(unsigned int i = 1; i < 3; ++i) { s[i] = begin[idx][i] - params[i]; sl += s[i] * s[i]; } sl = std::sqrt(sl); matrix[idx * NumParams + 0] = -s[0] / sl; matrix[idx * NumParams + 1] = -s[1] / sl; matrix[idx * NumParams + 2] = -s[2] / sl; matrix[idx * NumParams + 3] = -1; WeightT::template DerivWeigh< NumParams >(sl - params[3], matrix + idx * NumParams); } } void Normalize(ScalarType *) const {} }; template< class WeightT > class LevMarSphere : public WeightT { public: enum { NumParams = 7 }; typedef float ScalarType; // parametrization: params[0] - params[2] = normal // params[3] - params[5] = point // params[6] = 1 / radius template< class IteratorT > 
ScalarType Chi(const ScalarType *params, IteratorT begin, IteratorT end, ScalarType *values, ScalarType *temp) const { ScalarType chi = 0; ScalarType radius = 1 / params[6]; Vec3f center = -radius * Vec3f(params[0], params[1], params[2]) + Vec3f(params[3], params[4], params[5]); int size = end - begin; #pragma omp parallel for schedule(static) reduction(+:chi) for(int idx = 0; idx < size; ++idx) { temp[idx] = (begin[idx] - center).length(); chi += (values[idx] = WeightT::Weigh(temp[idx] - radius)) * values[idx]; } return chi; } template< class IteratorT > void Derivatives(const ScalarType *params, IteratorT begin, IteratorT end, const ScalarType *values, const ScalarType *temp, ScalarType *matrix) const { Vec3f normal(params[0], params[1], params[2]); Vec3f point(params[3], params[4], params[5]); int size = end - begin; #pragma omp parallel for schedule(static) for(int idx = 0; idx < size; ++idx) { ScalarType denominator = -1.f / temp[idx] * params[6]; matrix[idx * NumParams + 0] = (matrix[idx * NumParams + 3] = (point[0] - normal[0] * params[6] - begin[idx][0])) * denominator; matrix[idx * NumParams + 1] = (matrix[idx * NumParams + 4] = (point[1] - normal[1] * params[6] - begin[idx][1])) * denominator; matrix[idx * NumParams + 2] = (matrix[idx * NumParams + 5] = (point[2] - normal[2] * params[6] - begin[idx][2])) * denominator; matrix[idx * NumParams + 3] /= temp[idx]; matrix[idx * NumParams + 4] /= temp[idx]; matrix[idx * NumParams + 5] /= temp[idx]; matrix[idx * NumParams + 6] = (normal[0] * matrix[idx * NumParams + 3] + normal[1] * matrix[idx * NumParams + 4] + normal[2] * matrix[idx * NumParams + 5] + 1) * params[6] * params[6]; WeightT::template DerivWeigh< NumParams >(temp[idx] - 1.f / params[6], matrix + idx * NumParams); } } void Normalize(ScalarType *params) const { ScalarType len = std::sqrt(params[0] * params[0] + params[1] * params[1] + params[2] * params[2]); params[0] /= len; params[1] /= len; params[2] /= len; } }; private: Vec3f m_center; float 
m_radius; }; inline float Sphere::Distance(const Vec3f &p) const { return fabs((m_center - p).length() - m_radius); } inline void Sphere::Normal(const Vec3f &p, Vec3f *normal) const { *normal = p - m_center; normal->normalize(); } inline float Sphere::DistanceAndNormal(const Vec3f &p, Vec3f *normal) const { *normal = p - m_center; float l = normal->length(); if(l > 0) *normal /= l; return fabs(l - m_radius); } inline float Sphere::SignedDistance(const Vec3f &p) const { return (m_center - p).length() - m_radius; } template< class IteratorT > bool Sphere::LeastSquaresFit(IteratorT begin, IteratorT end) { LevMarSimpleSphere< LevMarLSWeight > levMarSphere; float param[4]; for(size_t i = 0; i < 3; ++i) param[i] = m_center[i]; param[3] = m_radius; if(!LevMar(begin, end, levMarSphere, param)) return false; for(size_t i = 0; i < 3; ++i) m_center[i] = param[i]; m_radius = param[3]; return true; } inline unsigned int Sphere::Intersect(const Vec3f &p, const Vec3f &r, float *first, float *second) const { using namespace std; Vec3f kDiff = p - m_center; float fA0 = kDiff.dot(kDiff) - m_radius*m_radius; float fA1, fDiscr, fRoot; if (fA0 <= 0) { // P is inside the sphere fA1 = r.dot(kDiff); fDiscr = fA1*fA1 - fA0; fRoot = sqrt(fDiscr); *first = -fA1 + fRoot; return 1; } // else: P is outside the sphere fA1 = r.dot(kDiff); if (fA1 >= 0) return 0; fDiscr = fA1*fA1 - fA0; if(fDiscr < 0) return 0; else if(fDiscr >= /* zero tolerance eps */ 1e-7f) { fRoot = sqrt(fDiscr); *first = -fA1 - fRoot; *second = -fA1 + fRoot; return 2; } *first = -fA1; return 1; } class DLL_LINKAGE SphereAsSquaresParametrization { public: SphereAsSquaresParametrization() {} SphereAsSquaresParametrization(const Sphere &sphere, const Vec3f &planeNormal); void Init(const Sphere &sphere, const Vec3f &planeNormal); // returns < 0 if point is on lower hemisphere float Parameters(const Vec3f &p, std::pair< float, float > *param) const; bool InSpace(const std::pair< float, float > &param, bool lower, Vec3f *p) const; 
	bool InSpace(const std::pair< float, float > &param, bool lower, Vec3f *p,
		Vec3f *n) const;
	void Transform(const GfxTL::MatrixXX< 3, 3, float > &rot,
		const GfxTL::Vector3Df &trans);
	void HyperplaneCoordinateSystem( Vec3f* hcs0, Vec3f* hcs1, Vec3f* hcs2 ) const;

private:
	// Forward and inverse stages of the hemisphere <-> unit square mapping.
	void Hemisphere2Disk(const Vec3f &p, std::pair< float, float > *inDisk) const;
	void Disk2Square(const std::pair< float, float > &inDisk,
		std::pair< float, float > *inSquare) const;
	void Square2Disk(const std::pair< float, float > &inSquare,
		std::pair< float, float > *inDisk) const;
	void Disk2Hemisphere(const std::pair< float, float > &inDisk, Vec3f *p) const;

private:
	Sphere m_sphere;
	Vec3f m_planeNormal;
	GfxTL::HyperplaneCoordinateSystem< float, 3 > m_hcs;
};

// Convenience wrapper fixing the hemisphere flag to the upper hemisphere
// (lower = false) in every InSpace overload.
class DLL_LINKAGE UpperSphereAsSquaresParametrization
: public SphereAsSquaresParametrization
{
public:
	UpperSphereAsSquaresParametrization() {}
	UpperSphereAsSquaresParametrization(const SphereAsSquaresParametrization &p)
	: SphereAsSquaresParametrization(p) {}
	bool InSpace(const std::pair< float, float > &param, Vec3f *p) const
	{ return SphereAsSquaresParametrization::InSpace(param, false, p); }
	bool InSpace(const std::pair< float, float > &param, Vec3f *p, Vec3f *n) const
	{ return SphereAsSquaresParametrization::InSpace(param, false, p, n); }
	bool InSpace(float u, float v, Vec3f *p) const
	{ return SphereAsSquaresParametrization::InSpace(std::make_pair(u, v), false, p); }
	bool InSpace(float u, float v, Vec3f *p, Vec3f *n) const
	{ return SphereAsSquaresParametrization::InSpace(std::make_pair(u, v), false, p, n); }
};

// Convenience wrapper fixing the hemisphere flag to the lower hemisphere
// (lower = true) in every InSpace overload.
class DLL_LINKAGE LowerSphereAsSquaresParametrization
: public SphereAsSquaresParametrization
{
public:
	LowerSphereAsSquaresParametrization() {}
	LowerSphereAsSquaresParametrization(const SphereAsSquaresParametrization &p)
	: SphereAsSquaresParametrization(p) {}
	bool InSpace(const std::pair< float, float > &param, Vec3f *p) const
	{ return SphereAsSquaresParametrization::InSpace(param, true, p); }
	bool InSpace(const std::pair< float, float > &param, Vec3f *p, Vec3f *n) const
	{ return SphereAsSquaresParametrization::InSpace(param, true, p, n); }
	bool InSpace(float u, float v, Vec3f *p) const
	{ return SphereAsSquaresParametrization::InSpace(std::make_pair(u, v), true, p); }
	bool InSpace(float u, float v, Vec3f *p, Vec3f *n) const
	{ return SphereAsSquaresParametrization::InSpace(std::make_pair(u, v), true, p, n); }
};

} //...ns schnabel

#endif
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods takes a 24 bit image and reduces % the number of colors so it can be displayed on raster device with less % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes a standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. 
RGB space is a 3-dimensional % vector space, and each pixel, Pi, is defined by an ordered triple of % red, green, and blue coordinates, (Ri, Gi, Bi). % % Each primary color component (red, green, or blue) represents an % intensity which varies linearly from 0 to a maximum value, Cmax, which % corresponds to full saturation of that color. Color allocation is % defined over a domain consisting of the cube in RGB space with opposite % vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax = % 255. % % The algorithm maps this domain onto a tree in which each node % represents a cube within that domain. In the following discussion % these cubes are defined by the coordinate of two opposite vertices: % The vertex nearest the origin in RGB space and the vertex farthest from % the origin. % % The tree's root node represents the entire domain, (0,0,0) through % (Cmax,Cmax,Cmax). Each lower level in the tree is generated by % subdividing one node's cube into eight smaller cubes of equal size. % This corresponds to bisecting the parent cube with planes passing % through the midpoints of each edge. % % The basic algorithm operates in three phases: Classification, % Reduction, and Assignment. Classification builds a color description % tree for the image. Reduction collapses the tree until the number it % represents, at most, the number of colors desired in the output image. % Assignment defines the output image's color map and sets each pixel's % color by restorage_class in the reduced tree. Our goal is to minimize % the numerical discrepancies between the original colors and quantized % colors (quantization error). % % Classification begins by initializing a color description tree of % sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color description % tree in the storage_class phase for realistic values of Cmax. 
If % colors components in the input image are quantized to k-bit precision, % so that Cmax= 2k-1, the tree would need k levels below the root node to % allow representing each possible input color in a leaf. This becomes % prohibitive because the tree's total number of nodes is 1 + % sum(i=1, k, 8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each % such node: % % n1: Number of pixels whose color is contained in the RGB cube which % this node represents; % % n2: Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb: Sums of the red, green, and blue component values for all % pixels not classified at a lower depth. The combination of these sums % and n2 will ultimately characterize the mean color of a set of % pixels represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the % quantization error for a node. % % Reduction repeatedly prunes the tree until the number of nodes with n2 % > 0 is less than or equal to the maximum number of colors allowed in % the output image. On any given iteration over the tree, it selects % those nodes whose E count is minimal for pruning and merges their color % statistics upward. 
It uses a pruning threshold, Ep, to govern node % selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors within % the cubic volume which the node represents. This includes n1 - n2 % pixels whose colors should be defined by nodes at a lower level in the % tree. % % Assignment generates the output image from the pruned tree. The output % image consists of two parts: (1) A color map, which is an array of % color descriptions (RGB triples) for each color present in the output % image; (2) A pixel array, which represents each pixel as an index % into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. 
% % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % This method is based on a similar algorithm written by Paul Raveling. % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/histogram.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/string_.h" #include "magick/thread-private.h" /* Define declarations. */ #if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE) #define CacheShift 2 #else #define CacheShift 3 #endif #define ErrorQueueLength 16 #define MaxNodes 266817 #define MaxTreeDepth 8 #define NodesInAList 1920 /* Typdef declarations. 
*/ typedef struct _RealPixelPacket { MagickRealType red, green, blue, opacity; } RealPixelPacket; typedef struct _NodeInfo { struct _NodeInfo *parent, *child[16]; MagickSizeType number_unique; RealPixelPacket total_color; MagickRealType quantize_error; size_t color_number, id, level; } NodeInfo; typedef struct _Nodes { NodeInfo *nodes; struct _Nodes *next; } Nodes; typedef struct _CubeInfo { NodeInfo *root; size_t colors, maximum_colors; ssize_t transparent_index; MagickSizeType transparent_pixels; RealPixelPacket target; MagickRealType distance, pruning_threshold, next_threshold; size_t nodes, free_nodes, color_number; NodeInfo *next_node; Nodes *node_queue; ssize_t *cache; RealPixelPacket error[ErrorQueueLength]; MagickRealType weights[ErrorQueueLength]; QuantizeInfo *quantize_info; MagickBooleanType associate_alpha; ssize_t x, y; size_t depth; MagickOffsetType offset; MagickSizeType span; } CubeInfo; /* Method prototypes. */ static CubeInfo *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t); static NodeInfo *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *); static MagickBooleanType AssignImageColors(Image *,CubeInfo *), ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *), DitherImage(Image *,CubeInfo *), SetGrayscaleImage(Image *); static size_t DefineImageColormap(Image *,CubeInfo *,NodeInfo *); static void ClosestColor(const Image *,CubeInfo *,const NodeInfo *), DestroyCubeInfo(CubeInfo *), PruneLevel(const Image *,CubeInfo *,const NodeInfo *), PruneToCubeDepth(const Image *,CubeInfo *,const NodeInfo *), ReduceImageColors(const Image *,CubeInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireQuantizeInfo() allocates the QuantizeInfo structure. 
% % The format of the AcquireQuantizeInfo method is: % % QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) { QuantizeInfo *quantize_info; quantize_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*quantize_info)); if (quantize_info == (QuantizeInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetQuantizeInfo(quantize_info); if (image_info != (ImageInfo *) NULL) { const char *option; quantize_info->dither=image_info->dither; option=GetImageOption(image_info,"dither"); if (option != (const char *) NULL) quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,option); quantize_info->measure_error=image_info->verbose; } return(quantize_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A s s i g n I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AssignImageColors() generates the output image from the pruned tree. The % output image consists of two parts: (1) A color map, which is an array % of color descriptions (RGB triples) for each color present in the % output image; (2) A pixel array, which represents each pixel as an % index into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. % % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. 
The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % The format of the AssignImageColors() method is: % % MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % */ static inline void AssociateAlphaPixel(const CubeInfo *cube_info, const PixelPacket *pixel,RealPixelPacket *alpha_pixel) { MagickRealType alpha; if ((cube_info->associate_alpha == MagickFalse) || (pixel->opacity == OpaqueOpacity)) { alpha_pixel->red=(MagickRealType) GetPixelRed(pixel); alpha_pixel->green=(MagickRealType) GetPixelGreen(pixel); alpha_pixel->blue=(MagickRealType) GetPixelBlue(pixel); alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel); return; } alpha=(MagickRealType) (QuantumScale*(QuantumRange-GetPixelOpacity(pixel))); alpha_pixel->red=alpha*GetPixelRed(pixel); alpha_pixel->green=alpha*GetPixelGreen(pixel); alpha_pixel->blue=alpha*GetPixelBlue(pixel); alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel); } static inline Quantum ClampToUnsignedQuantum(const MagickRealType value) { if (value <= 0.0) return((Quantum) 0); if (value >= QuantumRange) return((Quantum) QuantumRange); return((Quantum) (value+0.5)); } static inline size_t ColorToNodeId(const CubeInfo *cube_info, const RealPixelPacket *pixel,size_t index) { size_t id; id=(size_t) (((ScaleQuantumToChar(ClampToUnsignedQuantum( GetPixelRed(pixel))) >> index) & 0x01) | ((ScaleQuantumToChar( ClampToUnsignedQuantum(GetPixelGreen(pixel))) >> index) & 0x01) << 1 | ((ScaleQuantumToChar(ClampToUnsignedQuantum(GetPixelBlue(pixel))) >> index) & 0x01) << 2); if (cube_info->associate_alpha != MagickFalse) id|=((ScaleQuantumToChar(ClampToUnsignedQuantum(GetPixelOpacity(pixel))) >> index) & 0x1) << 3; return(id); } static inline MagickBooleanType IsSameColor(const Image *image, const PixelPacket *p,const PixelPacket *q) { if ((GetPixelRed(p) != 
GetPixelRed(q)) ||
      (GetPixelGreen(p) != GetPixelGreen(q)) ||
      (GetPixelBlue(p) != GetPixelBlue(q)))
    return(MagickFalse);
  if ((image->matte != MagickFalse) &&
      (GetPixelOpacity(p) != GetPixelOpacity(q)))
    return(MagickFalse);
  return(MagickTrue);
}

/*
  Build the colormap from the pruned tree, then map every pixel to its
  closest colormap entry (or hand off to DitherImage when dithering is on).
  Returns MagickTrue; colormap allocation failure throws a binary exception.
*/
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
{
#define AssignImageTag  "Assign/Image"

  ssize_t
    y;

  /*
    Allocate image colormap.
  */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace);
  else
    if ((image->colorspace != GRAYColorspace) &&
        (IsRGBColorspace(image->colorspace) == MagickFalse) &&
        (image->colorspace != CMYColorspace))
      (void) TransformImageColorspace((Image *) image,RGBColorspace);
  if (AcquireImageColormap(image,cube_info->colors) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* DefineImageColormap() re-counts image->colors as it fills the map. */
  image->colors=0;
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  (void) DefineImageColormap(image,cube_info,cube_info->root);
  /*
    Create a reduced color image.
  */
  if ((cube_info->quantize_info->dither != MagickFalse) &&
      (cube_info->quantize_info->dither_method != NoDitherMethod))
    (void) DitherImage(image,cube_info);
  else
    {
      CacheView
        *image_view;

      ExceptionInfo
        *exception;

      MagickBooleanType
        status;

      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;              /* per-thread copy: target/distance are mutated */

        register IndexPacket
          *restrict indexes;

        register PixelPacket
          *restrict q;

        register ssize_t
          x;

        ssize_t
          count;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          RealPixelPacket
            pixel;

          register const NodeInfo
            *node_info;

          register ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.
          */
          /* Runs of identical pixels are classified once (count of them). */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
            if (IsSameColor(image,q,q+count) == MagickFalse)
              break;
          AssociateAlphaPixel(&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          /* Seed with a distance larger than any possible color distance. */
          cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*
            (QuantumRange+1.0)+1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(indexes+x+i,index);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRgb(q,image->colormap+index);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelOpacity(q,image->colormap[index].opacity);
              }
            q++;
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_AssignImageColors)
#endif
            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image);
  if ((cube_info->quantize_info->number_colors == 2) &&
      (cube_info->quantize_info->colorspace == GRAYColorspace))
    {
      Quantum
        intensity;

      register PixelPacket
        *restrict q;

      register ssize_t
        i;

      /*
        Monochrome image.
      */
      /* Threshold each colormap entry to black or white at mid intensity. */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        intensity=(Quantum) (PixelIntensity(q) < ((MagickRealType)
          QuantumRange/2.0) ? 0 : QuantumRange);
        SetPixelRed(q,intensity);
        SetPixelGreen(q,intensity);
        SetPixelBlue(q,intensity);
        q++;
      }
    }
  (void) SyncImage(image);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,RGBColorspace);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  + C l a s s i f y I m a g e C o l o r s                                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClassifyImageColors() begins by initializing a color description tree
%  of sufficient depth to represent each possible input color in a leaf.
%  However, it is impractical to generate a fully-formed color
%  description tree in the storage_class phase for realistic values of
%  Cmax.  If colors components in the input image are quantized to k-bit
%  precision, so that Cmax= 2k-1, the tree would need k levels below the
%  root node to allow representing each possible input color in a leaf.
%  This becomes prohibitive because the tree's total number of nodes is
%  1 + sum(i=1,k,8k).
%
%  A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
%  Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
%  Initializes data structures for nodes only as they are needed; (2)
%  Chooses a maximum depth for the tree as a function of the desired
%  number of colors in the output image (currently log2(colormap size)).
%
%  For each pixel in the input image, storage_class scans downward from
%  the root of the color description tree.
At each level of the tree it
%  identifies the single node which represents a cube in RGB space
%  containing the pixel's color.  It updates the following data for each
%  such node:
%
%    n1: Number of pixels whose color is contained in the RGB cube
%      which this node represents;
%
%    n2: Number of pixels whose color is not represented in a node at
%      lower depth in the tree;  initially, n2 = 0 for all nodes except
%      leaves of the tree.
%
%    Sr, Sg, Sb: Sums of the red, green, and blue component values for
%      all pixels not classified at a lower depth.  The combination of
%      these sums and n2 will ultimately characterize the mean color of a
%      set of pixels represented by this node.
%
%    E: the distance squared in RGB space between each pixel contained
%      within a node and the nodes' center.  This represents the quantization
%      error for a node.
%
%  The format of the ClassifyImageColors() method is:
%
%      MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
%        const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o image: the image.
%
*/

/*
  Decide whether the alpha channel participates in classification: it does
  when the image has a matte channel, except for TransparentColorspace
  requests and 2-color gray quantization.
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    associate_alpha;

  associate_alpha=image->matte;
  if (cube_info->quantize_info->colorspace == TransparentColorspace)
    associate_alpha=MagickFalse;
  if ((cube_info->quantize_info->number_colors == 2) &&
      (cube_info->quantize_info->colorspace == GRAYColorspace))
    associate_alpha=MagickFalse;
  cube_info->associate_alpha=associate_alpha;
}

static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag  "Classify/Image"

  CacheView
    *image_view;

  MagickBooleanType
    proceed;

  MagickRealType
    bisect;

  NodeInfo
    *node_info;

  RealPixelPacket
    error,
    mid,
    midpoint,
    pixel;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  /*
    Work in the quantization colorspace when one was requested (except CMYK);
    otherwise normalize to RGB.  The inverse transform is applied on exit.
  */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace);
  else
    if ((image->colorspace != GRAYColorspace) &&
        (image->colorspace != CMYColorspace) &&
        (IsRGBColorspace(image->colorspace) == MagickFalse))
      (void) TransformImageColorspace((Image *) image,RGBColorspace);
  midpoint.red=(MagickRealType) QuantumRange/2.0;
  midpoint.green=(MagickRealType) QuantumRange/2.0;
  midpoint.blue=(MagickRealType) QuantumRange/2.0;
  midpoint.opacity=(MagickRealType) QuantumRange/2.0;
  error.opacity=0.0;
  image_view=AcquireCacheView(image);
  /*
    First pass: descend to the full MaxTreeDepth until the color count
    exceeds cube_info->maximum_colors, at which point the tree is pruned
    and the loop breaks; the remaining rows use the (reduced) tree depth.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(image,cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.  A run of
        identical adjacent pixels is classified once with weight `count'.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        /*
          Track the center of the child cube selected by each id bit.
        */
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              (void) ThrowMagickException(exception,GetMagickModule(),
                ResourceLimitError,"MemoryAllocationFailed","`%s'",
                image->filename);
            /*
              NOTE(review): if GetNodeInfo failed, node_info->child[id] is
              still NULL and the dereference below crashes — confirm and
              consider an early exit after the exception is thrown.
            */
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        node_info->quantize_error+=sqrt((double) (count*error.red*error.red+
          count*error.green*error.green+count*error.blue*error.blue+
          count*error.opacity*error.opacity));
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*pixel.red;
      node_info->total_color.green+=count*QuantumScale*pixel.green;
      node_info->total_color.blue+=count*QuantumScale*pixel.blue;
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*pixel.opacity;
      p+=count;
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        /*
          Too many colors: prune and fall through to the reduced-depth loop.
        */
        PruneToCubeDepth(image,cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Second pass: classify the remaining rows to the current (possibly
    pruned) tree depth cube_info->depth instead of MaxTreeDepth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(image,cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              (void) ThrowMagickException(exception,GetMagickModule(),
                ResourceLimitError,"MemoryAllocationFailed","%s",
                image->filename);
            /*
              NOTE(review): same latent NULL dereference as the first loop
              if the node allocation failed — confirm upstream handling.
            */
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        node_info->quantize_error+=sqrt((double) (count*error.red*error.red+
          count*error.green*error.green+count*error.blue*error.blue+
          count*error.opacity*error.opacity));
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*pixel.red;
      node_info->total_color.green+=count*QuantumScale*pixel.green;
      node_info->total_color.blue+=count*QuantumScale*pixel.blue;
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*pixel.opacity;
      p+=count;
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,RGBColorspace);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e Q u a n t i z e I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
%  or if quantize info is NULL, a new one.
%
%  The format of the CloneQuantizeInfo method is:
%
%      QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
%      quantize info, or if image info is NULL a new one.
%
%    o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  /*
    Allocate and default-initialize; a NULL source yields a fresh default
    QuantizeInfo.
  */
  clone_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone_info));
  if (clone_info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(clone_info);
  if (quantize_info == (QuantizeInfo *) NULL)
    return(clone_info);
  clone_info->number_colors=quantize_info->number_colors;
  clone_info->tree_depth=quantize_info->tree_depth;
  clone_info->dither=quantize_info->dither;
  clone_info->dither_method=quantize_info->dither_method;
  clone_info->colorspace=quantize_info->colorspace;
  clone_info->measure_error=quantize_info->measure_error;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o s e s t C o l o r                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClosestColor() traverses the color cube tree at a particular node and
%  determines which colormap entry best represents the input color.
%
%  The format of the ClosestColor method is:
%
%      void ClosestColor(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      MagickRealType
        pixel;

      register MagickRealType
        alpha,
        beta,
        distance;

      register PixelPacket
        *restrict p;

      register RealPixelPacket
        *restrict q;

      /*
        Determine if this color is "closest".  The squared distance to
        cube_info->target is accumulated channel by channel; each partial
        sum is compared against the best distance so far so the search can
        bail out early once this entry cannot win.
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          /* Weight color channels by their alpha (premultiplied compare). */
          alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
          beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q));
        }
      pixel=alpha*GetPixelRed(p)-beta*GetPixelRed(q);
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*GetPixelGreen(p)-beta*GetPixelGreen(q);
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*GetPixelBlue(p)-beta*GetPixelBlue(q);
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  pixel=alpha-beta;
                  distance+=pixel*pixel;
                  if (distance <= cube_info->distance)
                    {
                      /* New best match: remember it in the cube. */
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o m p r e s s I m a g e C o l o r m a p                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CompressImageColormap() compresses an image colormap by removing any
%  duplicate or unused color entries.
%
%  The format of the CompressImageColormap method is:
%
%      MagickBooleanType CompressImageColormap(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image,&image->exception) == MagickFalse)
    return(MagickFalse);
  /*
    Re-quantize to the image's current color count at full tree depth; this
    coalesces duplicate entries and drops unused ones.
  */
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e I m a g e C o l o r m a p                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineImageColormap() traverses the color cube tree and notes each colormap
%  entry.  A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.  DefineImageColormap() returns the
%  number of colors in the image colormap.
%
%  The format of the DefineImageColormap method is:
%
%      size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
%        NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register MagickRealType
        alpha;

      register PixelPacket
        *restrict q;

      /*
        Colormap entry is defined by the mean color in this cube:
        alpha is 1/number_unique (guarded against a zero divisor) and the
        accumulated total_color sums are scaled back to quantum range.
      */
      q=image->colormap+image->colors;
      alpha=(MagickRealType) ((MagickOffsetType) node_info->number_unique);
      alpha=1.0/(fabs(alpha) <= MagickEpsilon ? 1.0 : alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.red)));
          SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.green)));
          SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.blue)));
          SetPixelOpacity(q,OpaqueOpacity);
        }
      else
        {
          MagickRealType
            opacity;

          opacity=(MagickRealType) (alpha*QuantumRange*
            node_info->total_color.opacity);
          SetPixelOpacity(q,ClampToQuantum(opacity));
          if (q->opacity == OpaqueOpacity)
            {
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.blue)));
            }
          else
            {
              MagickRealType
                gamma;

              /*
                Un-premultiply the color channels by the mean opacity;
                gamma is guarded against division by ~zero coverage.
              */
              gamma=(MagickRealType) (QuantumScale*(QuantumRange-
                (MagickRealType) q->opacity));
              gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (
                alpha*gamma*QuantumRange*node_info->total_color.blue)));
              /*
                Remember the most-populated transparent entry for later use.
              */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y C u b e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyCubeInfo() deallocates memory associated with an image.
%
%  The format of the DestroyCubeInfo method is:
%
%      DestroyCubeInfo(CubeInfo *cube_info)
%
%  A description of each parameter follows:
%
%    o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *nodes;

  /*
    Release color cube tree storage.  The node queue is a singly linked
    list of node arrays; assumes node_queue is non-NULL (the root node is
    always allocated before this is called).
  */
  do
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  } while (cube_info->node_queue != (Nodes *) NULL);
  if (cube_info->cache != (ssize_t *) NULL)
    cube_info->cache=(ssize_t *) RelinquishMagickMemory(cube_info->cache);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y Q u a n t i z e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
%  structure.
%
%  The format of the DestroyQuantizeInfo method is:
%
%      QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickSignature);
  /* Invalidate the signature so stale pointers are caught by the asserts. */
  quantize_info->signature=(~MagickSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i t h e r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DitherImage() distributes the difference between an original image and
%  the corresponding color reduced algorithm to neighboring pixels using
%  serpentine-scan Floyd-Steinberg error diffusion.  DitherImage returns
%  MagickTrue if the image is dithered otherwise MagickFalse.
% % The format of the DitherImage method is: % % MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % */ static RealPixelPacket **DestroyPixelThreadSet(RealPixelPacket **pixels) { register ssize_t i; assert(pixels != (RealPixelPacket **) NULL); for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++) if (pixels[i] != (RealPixelPacket *) NULL) pixels[i]=(RealPixelPacket *) RelinquishMagickMemory(pixels[i]); pixels=(RealPixelPacket **) RelinquishMagickMemory(pixels); return(pixels); } static RealPixelPacket **AcquirePixelThreadSet(const size_t count) { RealPixelPacket **pixels; register ssize_t i; size_t number_threads; number_threads=GetOpenMPMaximumThreads(); pixels=(RealPixelPacket **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (RealPixelPacket **) NULL) return((RealPixelPacket **) NULL); (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=(RealPixelPacket *) AcquireQuantumMemory(count, 2*sizeof(**pixels)); if (pixels[i] == (RealPixelPacket *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static inline ssize_t CacheOffset(CubeInfo *cube_info, const RealPixelPacket *pixel) { #define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift))) #define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift))) #define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift))) #define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift))) ssize_t offset; offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampToUnsignedQuantum(pixel->red))) | GreenShift(ScaleQuantumToChar(ClampToUnsignedQuantum(pixel->green))) | BlueShift(ScaleQuantumToChar(ClampToUnsignedQuantum(pixel->blue)))); if (cube_info->associate_alpha != MagickFalse) offset|=AlphaShift(ScaleQuantumToChar(ClampToUnsignedQuantum( 
pixel->opacity))); return(offset); } static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info) { #define DitherImageTag "Dither/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; RealPixelPacket **pixels; ssize_t y; /* Distribute quantization error using Floyd-Steinberg. */ pixels=AcquirePixelThreadSet(image->columns); if (pixels == (RealPixelPacket **) NULL) return(MagickFalse); exception=(&image->exception); status=MagickTrue; image_view=AcquireCacheView(image); for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); CubeInfo cube; RealPixelPacket *current, *previous; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; size_t index; ssize_t v; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); cube=(*cube_info); current=pixels[id]+(y & 0x01)*image->columns; previous=pixels[id]+((y+1) & 0x01)*image->columns; v=(ssize_t) ((y & 0x01) ? -1 : 1); for (x=0; x < (ssize_t) image->columns; x++) { RealPixelPacket color, pixel; register ssize_t i; ssize_t u; u=(y & 0x01) ? 
(ssize_t) image->columns-1-x : x; AssociateAlphaPixel(&cube,q+u,&pixel); if (x > 0) { pixel.red+=7*current[u-v].red/16; pixel.green+=7*current[u-v].green/16; pixel.blue+=7*current[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=7*current[u-v].opacity/16; } if (y > 0) { if (x < (ssize_t) (image->columns-1)) { pixel.red+=previous[u+v].red/16; pixel.green+=previous[u+v].green/16; pixel.blue+=previous[u+v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=previous[u+v].opacity/16; } pixel.red+=5*previous[u].red/16; pixel.green+=5*previous[u].green/16; pixel.blue+=5*previous[u].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=5*previous[u].opacity/16; if (x > 0) { pixel.red+=3*previous[u-v].red/16; pixel.green+=3*previous[u-v].green/16; pixel.blue+=3*previous[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=3*previous[u-v].opacity/16; } } pixel.red=(MagickRealType) ClampToUnsignedQuantum(pixel.red); pixel.green=(MagickRealType) ClampToUnsignedQuantum(pixel.green); pixel.blue=(MagickRealType) ClampToUnsignedQuantum(pixel.blue); if (cube.associate_alpha != MagickFalse) pixel.opacity=(MagickRealType) ClampToUnsignedQuantum(pixel.opacity); i=CacheOffset(&cube,&pixel); if (cube.cache[i] < 0) { register NodeInfo *node_info; register size_t id; /* Identify the deepest node containing the pixel's color. */ node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+ 1.0)+1.0); ClosestColor(image,&cube,node_info->parent); cube.cache[i]=(ssize_t) cube.color_number; } /* Assign pixel to closest colormap entry. 
*/ index=(size_t) cube.cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(indexes+u,index); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRgb(q+u,image->colormap+index); if (cube.associate_alpha != MagickFalse) SetPixelOpacity(q+u,image->colormap[index].opacity); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; /* Store the error. */ AssociateAlphaPixel(&cube,image->colormap+index,&color); current[u].red=pixel.red-color.red; current[u].green=pixel.green-color.green; current[u].blue=pixel.blue-color.blue; if (cube.associate_alpha != MagickFalse) current[u].opacity=pixel.opacity-color.opacity; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FloydSteinbergDither) #endif proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } image_view=DestroyCacheView(image_view); pixels=DestroyPixelThreadSet(pixels); return(MagickTrue); } static MagickBooleanType RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int); static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info, const size_t level,const unsigned int direction) { if (level == 1) switch (direction) { case WestGravity: { (void) RiemersmaDither(image,image_view,cube_info,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); break; } case EastGravity: { (void) RiemersmaDither(image,image_view,cube_info,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); break; } case NorthGravity: { (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); (void) 
RiemersmaDither(image,image_view,cube_info,NorthGravity); break; } case SouthGravity: { (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); break; } default: break; } else switch (direction) { case WestGravity: { Riemersma(image,image_view,cube_info,level-1,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); Riemersma(image,image_view,cube_info,level-1,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); Riemersma(image,image_view,cube_info,level-1,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); Riemersma(image,image_view,cube_info,level-1,SouthGravity); break; } case EastGravity: { Riemersma(image,image_view,cube_info,level-1,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); Riemersma(image,image_view,cube_info,level-1,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); Riemersma(image,image_view,cube_info,level-1,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); Riemersma(image,image_view,cube_info,level-1,NorthGravity); break; } case NorthGravity: { Riemersma(image,image_view,cube_info,level-1,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); Riemersma(image,image_view,cube_info,level-1,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); Riemersma(image,image_view,cube_info,level-1,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); Riemersma(image,image_view,cube_info,level-1,EastGravity); break; } case SouthGravity: { Riemersma(image,image_view,cube_info,level-1,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); Riemersma(image,image_view,cube_info,level-1,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); 
Riemersma(image,image_view,cube_info,level-1,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); Riemersma(image,image_view,cube_info,level-1,WestGravity); break; } default: break; } } static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view, CubeInfo *cube_info,const unsigned int direction) { #define DitherImageTag "Dither/Image" MagickBooleanType proceed; RealPixelPacket color, pixel; register CubeInfo *p; size_t index; p=cube_info; if ((p->x >= 0) && (p->x < (ssize_t) image->columns) && (p->y >= 0) && (p->y < (ssize_t) image->rows)) { ExceptionInfo *exception; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t i; /* Distribute error. */ exception=(&image->exception); q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickFalse); indexes=GetCacheViewAuthenticIndexQueue(image_view); AssociateAlphaPixel(cube_info,q,&pixel); for (i=0; i < ErrorQueueLength; i++) { pixel.red+=p->weights[i]*p->error[i].red; pixel.green+=p->weights[i]*p->error[i].green; pixel.blue+=p->weights[i]*p->error[i].blue; if (cube_info->associate_alpha != MagickFalse) pixel.opacity+=p->weights[i]*p->error[i].opacity; } pixel.red=(MagickRealType) ClampToUnsignedQuantum(pixel.red); pixel.green=(MagickRealType) ClampToUnsignedQuantum(pixel.green); pixel.blue=(MagickRealType) ClampToUnsignedQuantum(pixel.blue); if (cube_info->associate_alpha != MagickFalse) pixel.opacity=(MagickRealType) ClampToUnsignedQuantum(pixel.opacity); i=CacheOffset(cube_info,&pixel); if (p->cache[i] < 0) { register NodeInfo *node_info; register size_t id; /* Identify the deepest node containing the pixel's color. 
*/ node_info=p->root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(cube_info,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } node_info=node_info->parent; /* Find closest color among siblings and their children. */ p->target=pixel; p->distance=(MagickRealType) (4.0*(QuantumRange+1.0)*((MagickRealType) QuantumRange+1.0)+1.0); ClosestColor(image,p,node_info->parent); p->cache[i]=(ssize_t) p->color_number; } /* Assign pixel to closest colormap entry. */ index=(size_t) (1*p->cache[i]); if (image->storage_class == PseudoClass) *indexes=(IndexPacket) index; if (cube_info->quantize_info->measure_error == MagickFalse) { SetPixelRgb(q,image->colormap+index); if (cube_info->associate_alpha != MagickFalse) SetPixelOpacity(q,image->colormap[index].opacity); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) return(MagickFalse); /* Propagate the error as the last entry of the error queue. */ (void) CopyMagickMemory(p->error,p->error+1,(ErrorQueueLength-1)* sizeof(p->error[0])); AssociateAlphaPixel(cube_info,image->colormap+index,&color); p->error[ErrorQueueLength-1].red=pixel.red-color.red; p->error[ErrorQueueLength-1].green=pixel.green-color.green; p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue; if (cube_info->associate_alpha != MagickFalse) p->error[ErrorQueueLength-1].opacity=pixel.opacity-color.opacity; proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span); if (proceed == MagickFalse) return(MagickFalse); p->offset++; } switch (direction) { case WestGravity: p->x--; break; case EastGravity: p->x++; break; case NorthGravity: p->y--; break; case SouthGravity: p->y++; break; } return(MagickTrue); } static inline ssize_t MagickMax(const ssize_t x,const ssize_t y) { if (x > y) return(x); return(y); } static inline ssize_t MagickMin(const ssize_t x,const ssize_t y) { if (x < y) return(x); return(y); } static MagickBooleanType DitherImage(Image 
*image,CubeInfo *cube_info) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t depth; if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod) return(FloydSteinbergDither(image,cube_info)); /* Distribute quantization error along a Hilbert curve. */ (void) ResetMagickMemory(cube_info->error,0,ErrorQueueLength* sizeof(*cube_info->error)); cube_info->x=0; cube_info->y=0; i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows); for (depth=1; i != 0; depth++) i>>=1; if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t) image->rows)) depth++; cube_info->offset=0; cube_info->span=(MagickSizeType) image->columns*image->rows; image_view=AcquireCacheView(image); if (depth > 1) Riemersma(image,image_view,cube_info,depth-1,NorthGravity); status=RiemersmaDither(image,image_view,cube_info,ForgetGravity); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t C u b e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetCubeInfo() initialize the Cube data structure. % % The format of the GetCubeInfo method is: % % CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info, % const size_t depth,const size_t maximum_colors) % % A description of each parameter follows. % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o depth: Normally, this integer value is zero or one. A zero or % one tells Quantize to choose a optimal tree depth of Log4(number_colors). % A tree of this depth generally allows the best representation of the % reference image with the least amount of memory and the fastest % computational speed. In some cases, such as an image with low color % dispersion (a few number of colors), a value other than % Log4(number_colors) is required. To expand the color tree completely, % use a value of 8. 
%
%    o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  MagickRealType
    sum,
    weight;

  register ssize_t
    i;

  size_t
    length;

  /*
    Initialize tree to describe color cube_info.
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) ResetMagickMemory(cube_info,0,sizeof(*cube_info));
  /*
    Clamp the requested depth to [2, MaxTreeDepth]; a depth below 2 cannot
    represent distinct colors and deeper trees are not supported.
  */
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    return((CubeInfo *) NULL);  /* NOTE(review): cube_info leaks here -- confirm and release */
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither == MagickFalse)
    return(cube_info);
  /*
    Initialize dither resources.
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->cache=(ssize_t *) AcquireQuantumMemory(length,
    sizeof(*cube_info->cache));
  if (cube_info->cache == (ssize_t *) NULL)
    return((CubeInfo *) NULL);  /* NOTE(review): cube_info and node queue leak here too */
  /*
    Initialize color cache:  -1 marks an empty cache slot.
  */
  for (i=0; i < (ssize_t) length; i++)
    cube_info->cache[i]=(-1);
  /*
    Distribute weights along a curve of exponential decay.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[ErrorQueueLength-i-1]=1.0/weight;
    weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
  }
  /*
    Normalize the weighting factors.
  */
  weight=0.0;
  for (i=0; i < ErrorQueueLength; i++)
    weight+=cube_info->weights[i];
  sum=0.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]/=weight;
    sum+=cube_info->weights[i];
  }
  /*
    Fold any rounding residue into the first weight so the factors sum to 1.
  */
  cube_info->weights[0]+=1.0-sum;
  return(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t N o d e I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNodeInfo() allocates memory for a new node in the color cube tree and
%  presets all fields to zero.
%
%  The format of the GetNodeInfo method is:
%
%      NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
%        const size_t level,NodeInfo *parent)
%
%  A description of each parameter follows.
%
%    o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
%    o id: Specifies the child number of the node.
%
%    o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        return((NodeInfo *) NULL);  /* NOTE(review): 'nodes' leaks on this path */
      /*
        Push the fresh node array onto the head of the queue list.
      */
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  /*
    Hand out the next pre-allocated node and zero its fields.
  */
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) ResetMagickMemory(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  G e t I m a g e Q u a n t i z e E r r o r                                  %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantizeError() measures the difference between the original
%  and quantized images.  This difference is the total quantization error.
%  The error is computed by summing over all pixels in an image the distance
%  squared in RGB space between each reference pixel value and its quantized
%  value.  These values are computed:
%
%    o mean_error_per_pixel:  This value is the mean error for any single
%      pixel in the image.
%
%    o normalized_mean_square_error:  This value is the normalized mean
%      quantization error for any single pixel in the image.  This distance
%      measure is normalized to a range between 0 and 1.  It is independent
%      of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error:  This value is the normalized
%      maximum quantization error for any single pixel in the image.  This
%      distance measure is normalized to a range between 0 and 1.  It is
%      independent of the range of red, green, and blue values in your image.
%
%  The format of the GetImageQuantizeError method is:
%
%      MagickBooleanType GetImageQuantizeError(Image *image)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    *indexes;

  MagickRealType
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  size_t
    index;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception);
  (void) ResetMagickMemory(&image->error,0,sizeof(image->error));
  /*
    A DirectClass image has no colormap, hence no quantization error.
  */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  area=3.0*image->columns*image->rows;  /* 3 channels per pixel */
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=1UL*GetPixelIndex(indexes+x);
      if (image->matte != MagickFalse)
        {
          /*
            Weight each side of the comparison by its alpha coverage.
          */
          alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
          beta=(MagickRealType) (QuantumScale*(QuantumRange-
            image->colormap[index].opacity));
        }
      /*
        Accumulate per-channel distance, squared distance, and maximum.
      */
      distance=fabs(alpha*GetPixelRed(p)-beta*image->colormap[index].red);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs(alpha*GetPixelGreen(p)-beta*image->colormap[index].green);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs(alpha*GetPixelBlue(p)-beta*image->colormap[index].blue);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t Q u a n t i z e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetQuantizeInfo() initializes the QuantizeInfo structure.
%
%  The format of the GetQuantizeInfo method is:
%
%      GetQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  /*
    Zero the structure, then set the non-zero defaults.
  */
  (void) ResetMagickMemory(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither=MagickTrue;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P o s t e r i z e I m a g e C h a n n e l                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PosterizeImage() reduces the image to a limited number of colors for a
%  "poster" effect.
%
%  The format of the PosterizeImage method is:
%
%      MagickBooleanType PosterizeImage(Image *image,const size_t levels,
%        const MagickBooleanType dither)
%      MagickBooleanType PosterizeImageChannel(Image *image,
%        const ChannelType channel,const size_t levels,
%        const MagickBooleanType dither)
%
%  A description of each parameter follows:
%
%    o image: Specifies a pointer to an Image structure.
%
%    o levels: Number of color levels allowed in each channel.  Very low values
%      (2, 3, or 4) have the most visible effect.
%
%    o dither: Set this integer value to something other than zero to dither
%      the mapped image.
%
*/

static inline ssize_t MagickRound(MagickRealType x)
{
  /*
    Round the fraction to nearest integer.
  */
  if (x >= 0.0)
    return((ssize_t) (x+0.5));
  return((ssize_t) (x-0.5));
}

MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const MagickBooleanType dither)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: posterize the default channels.
  */
  status=PosterizeImageChannel(image,DefaultChannels,levels,dither);
  return(status);
}

MagickExport MagickBooleanType PosterizeImageChannel(Image *image,
  const ChannelType channel,const size_t levels,const MagickBooleanType dither)
{
#define PosterizeImageTag  "Posterize/Image"
#define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \
  QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* NOTE(review): 'progress' and 'status' are uninitialized when this first
     pragma runs; they are unused in the colormap loop, but the shared()
     clause is misleading -- confirm intent. */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=PosterizePixel(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=PosterizePixel(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=PosterizePixel(image->colormap[i].blue);
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=PosterizePixel(image->colormap[i].opacity);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,PosterizePixel(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,PosterizePixel(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,PosterizePixel(GetPixelBlue(q)));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte == MagickTrue))
        SetPixelOpacity(q,PosterizePixel(GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,PosterizePixel(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_PosterizeImageChannel)
#endif
        proceed=SetImageProgress(image,PosterizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Reduce to at most levels^3 colors with a full-depth quantize pass.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither=dither;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e C h i l d                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneChild() deletes the given node and merges its statistics into its
%  parent.
%
%  The format of the PruneSubtree method is:
%
%      PruneChild(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(image,cube_info,node_info->child[i]);
  /*
    Merge color statistics into parent.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.opacity+=node_info->total_color.opacity;
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e L e v e l                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneLevel() deletes all nodes at the bottom level of the color tree merging
%  their color statistics into their parent node.
%
%  The format of the PruneLevel method is:
%
%      PruneLevel(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneLevel(image,cube_info,node_info->child[i]);
  if (node_info->level == cube_info->depth)
    PruneChild(image,cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  P r u n e T o C u b e D e p t h                                            %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneToCubeDepth() deletes any nodes at a depth greater than
%  cube_info->depth while merging their color statistics into their parent
%  node.
%
%  The format of the PruneToCubeDepth method is:
%
%      PruneToCubeDepth(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    number_children;

  /*
    Walk every populated child first so pruning proceeds from the leaves
    upward, then remove this node if it lies deeper than the cube depth.
  */
  if (cube_info->associate_alpha == MagickFalse)
    number_children=8UL;
  else
    number_children=16UL;
  for (id=0; id < (ssize_t) number_children; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneToCubeDepth(image,cube_info,node_info->child[id]);
  if (node_info->level > cube_info->depth)
    PruneChild(image,cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  Q u a n t i z e I m a g e                                                  %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImage() analyzes the colors within a reference image and chooses a
%  fixed number of colors to represent the image.  The goal of the algorithm
%  is to minimize the color difference between the input and output image while
%  minimizing the processing time.
%
%  The format of the QuantizeImage method is:
%
%      MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
%        Image *image)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
*/
static MagickBooleanType DirectToColormapImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    number_colors;

  ssize_t
    y;

  /*
    Give every pixel its own colormap entry; only sensible when the pixel
    count fits in a colormap (the caller checks this).
  */
  status=MagickTrue;
  number_colors=(size_t) (image->columns*image->rows);
  if (AcquireImageColormap(image,number_colors) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->colors != number_colors)
    return(MagickFalse);
  i=0;
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      proceed;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Copy the pixel into colormap slot i and index the pixel to it.
      */
      image->colormap[i].red=GetPixelRed(q);
      image->colormap[i].green=GetPixelGreen(q);
      image->colormap[i].blue=GetPixelBlue(q);
      image->colormap[i].opacity=GetPixelOpacity(q);
      SetPixelIndex(indexes+x,i);
      i++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  /*
    Fast paths: trivially small images and gray images avoid the full
    color-cube classification.
  */
  if ((image->columns*image->rows) <= maximum_colors)
    (void) DirectToColormapImage(image,&image->exception);
  if ((IsGrayImage(image,&image->exception) != MagickFalse) &&
      (image->matte == MagickFalse))
    (void) SetGrayscaleImage(image);
  if ((image->storage_class == PseudoClass) &&
      (image->colors <= maximum_colors))
    return(MagickTrue);
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if ((quantize_info->dither != MagickFalse) && (depth > 2))
        depth--;
      if ((image->matte != MagickFalse) && (depth > 5))
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image.
      */
      ReduceImageColors(image,cube_info);
      status=AssignImageColors(image,cube_info);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u a n t i z e I m a g e s                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImages() analyzes the colors within a set of reference images and
%  chooses a fixed number of colors to represent the set.  The goal of the
%  algorithm is to minimize the color difference between the input and output
%  images while minimizing the processing time.
%
%  The format of the QuantizeImages method is:
%
%      MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
%        Image *images)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: Specifies a pointer to a list of Image structures.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  register ssize_t
    i;

  size_t
    depth,
    maximum_colors,
    number_images;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images);
      return(status);
    }
  status=MagickFalse;
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /*
        Match QuantizeImage(): only reclaim a level for dithering when the
        tree is deeper than the minimum useful depth of 2 (previously the
        guard was missing here, inconsistently with the single-image path).
      */
      if ((quantize_info->dither != MagickFalse) && (depth > 2))
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(&images->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  /*
    Pass 1: classify the colors of every image in the list into one cube.
  */
  number_images=GetImageListLength(images);
  image=images;
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,&image->exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Pass 2: reduce the shared cube, then assign its colormap to every
        image in the sequence.
      */
      ReduceImageColors(images,cube_info);
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e d u c e                                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Reduce() traverses the color cube tree and prunes any node whose
%  quantization error falls below a particular threshold.
%
%  The format of the Reduce method is:
%
%      Reduce(const Image *image,CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    number_children;

  /*
    Depth-first: visit every populated child before considering this node,
    so subtrees are pruned from the leaves upward.
  */
  if (cube_info->associate_alpha == MagickFalse)
    number_children=8UL;
  else
    number_children=16UL;
  for (id=0; id < (ssize_t) number_children; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      Reduce(image,cube_info,node_info->child[id]);
  if (node_info->quantize_error <= cube_info->pruning_threshold)
    {
      /*
        Error at or below the pruning threshold:  merge into the parent.
      */
      PruneChild(image,cube_info,node_info);
      return;
    }
  /*
    Node survives this pass:  count it if it defines an output color, and
    track the smallest surviving error as the next pruning threshold.
  */
  if (node_info->number_unique > 0)
    cube_info->colors++;
  if (node_info->quantize_error < cube_info->next_threshold)
    cube_info->next_threshold=node_info->quantize_error;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e d u c e I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReduceImageColors() repeatedly prunes the tree until the number of nodes
%  with n2 > 0 is less than or equal to the maximum number of colors allowed
%  in the output image.  On any given iteration over the tree, it selects
%  those nodes whose E value is minimal for pruning and merges their
%  color statistics upward.  It uses a pruning threshold, Ep, to govern
%  node selection as follows:
%
%    Ep = 0
%    while number of nodes with (n2 > 0) > required maximum number of colors
%      prune all nodes such that E <= Ep
%      Set Ep to minimum E in remaining nodes
%
%  This has the effect of minimizing any quantization error when merging
%  two nodes together.
%
%  When a node to be pruned has offspring, the pruning procedure invokes
%  itself recursively in order to prune the tree from the leaves upward.
%  n2,  Sr, Sg,  and  Sb in a node being pruned are always added to the
%  corresponding data in that node's parent.  This retains the pruned
%  node's color characteristics for later averaging.
% % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors % within the cubic volume which the node represents. This includes n1 - % n2 pixels whose colors should be defined by nodes at a lower level in % the tree. % % The format of the ReduceImageColors method is: % % ReduceImageColors(const Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % */ static void ReduceImageColors(const Image *image,CubeInfo *cube_info) { #define ReduceImageTag "Reduce/Image" MagickBooleanType proceed; MagickOffsetType offset; size_t span; cube_info->next_threshold=0.0; for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; ) { cube_info->pruning_threshold=cube_info->next_threshold; cube_info->next_threshold=cube_info->root->quantize_error-1; cube_info->colors=0; Reduce(image,cube_info,cube_info->root); offset=(MagickOffsetType) span-cube_info->colors; proceed=SetImageProgress(image,ReduceImageTag,offset,span- cube_info->maximum_colors+1); if (proceed == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImage() replaces the colors of an image with the closest color from % a reference image. 
%
%  The format of the RemapImage method is:
%
%      MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
%        Image *image,const Image *remap_image)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
%    o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image)
{
  CubeInfo
    *cube;

  MagickBooleanType
    result;

  /*
    Initialize color cube.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickSignature);
  cube=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Classify the reference image's colors, then assign them to the target.
  */
  result=ClassifyImageColors(cube,remap_image,&image->exception);
  if (result != MagickFalse)
    {
      cube->quantize_info->number_colors=cube->colors;
      result=AssignImageColors(image,cube);
    }
  DestroyCubeInfo(cube);
  return(result);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m a p I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemapImages() replaces the colors of a sequence of images with the
%  closest color from a reference image.
%
%  The format of the RemapImage method is:
%
%      MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
%        Image *images,Image *remap_image)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: the image sequence.
%
%    o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        Create a global colormap for an image sequence.
      */
      status=QuantizeImages(quantize_info,images);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Assign the reference colormap to every image in the sequence.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      image=images;
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t G r a y s c a l e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
%  The format of the SetGrayscaleImage method is:
%
%      MagickBooleanType SetGrayscaleImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: The image.
% */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { PixelPacket *color_1, *color_2; ssize_t intensity; color_1=(PixelPacket *) x; color_2=(PixelPacket *) y; intensity=PixelIntensityToQuantum(color_1)-(ssize_t) PixelIntensityToQuantum(color_2); return((int) intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickBooleanType SetGrayscaleImage(Image *image) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; PixelPacket *colormap; register ssize_t i; ssize_t *colormap_index, j, y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->type != GrayscaleType) (void) TransformImageColorspace(image,GRAYColorspace); colormap_index=(ssize_t *) AcquireQuantumMemory(MaxMap+1, sizeof(*colormap_index)); if (colormap_index == (ssize_t *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { ExceptionInfo *exception; for (i=0; i <= (ssize_t) MaxMap; i++) colormap_index[i]=(-1); if (AcquireImageColormap(image,MaxMap+1) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); image->colors=0; status=MagickTrue; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register const PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { register size_t intensity; intensity=ScaleQuantumToMap(GetPixelRed(q)); if (colormap_index[intensity] < 0) 
{ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=GetPixelRed(q); image->colormap[image->colors].green=GetPixelGreen(q); image->colormap[image->colors].blue=GetPixelBlue(q); image->colors++; } } SetPixelIndex(indexes+x,colormap_index[intensity]); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].opacity=(unsigned short) i; qsort((void *) image->colormap,image->colors,sizeof(PixelPacket), IntensityCompare); colormap=(PixelPacket *) AcquireQuantumMemory(image->colors, sizeof(*colormap)); if (colormap == (PixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse) { j++; colormap[j]=image->colormap[i]; } colormap_index[(ssize_t) image->colormap[i].opacity]=j; } image->colors=(size_t) (j+1); image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap); image->colormap=colormap; status=MagickTrue; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register const PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) 
SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex( indexes+x))]); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); image->type=GrayscaleType; if (IsMonochromeImage(image,&image->exception) != MagickFalse) image->type=BilevelType; return(status); }
GB_binop__bxnor_uint8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// This translation unit instantiates every kernel flavor (eWiseAdd, eWiseMult,
// col/row scale, dense accum, bind1st/bind2nd, and their transposed variants)
// for the BXNOR operator on uint8_t.  The actual loop bodies live in the
// shared *_template.c files, specialized via the GB_* macros defined below.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bxnor_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_08__bxnor_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__bxnor_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_04__bxnor_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bxnor_uint8)
// A*D function (colscale):         GB (_AxD__bxnor_uint8)
// D*A function (rowscale):         GB (_DxB__bxnor_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__bxnor_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__bxnor_uint8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bxnor_uint8)
// C=scalar+B                       GB (_bind1st__bxnor_uint8)
// C=scalar+B'                      GB (_bind1st_tran__bxnor_uint8)
// C=A+scalar                       GB (_bind2nd__bxnor_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__bxnor_uint8)

// C type:   uint8_t
// A type:   uint8_t
// A pattern? 0
// B type:   uint8_t
// B pattern? 0

// BinaryOp: cij = ~((aij) ^ (bij))

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// (GBX is the typed load that also handles iso-valued matrices)
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: bitwise XNOR
#define GB_BINOP(z,x,y,i,j) \
    z = ~((x) ^ (y)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BXNOR || GxB_NO_UINT8 || GxB_NO_BXNOR_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// BXNOR is not one of these, so this kernel is not generated (hence "#if 0").

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bxnor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bxnor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bxnor_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable; kept as generated
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__bxnor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__bxnor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bxnor_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only used by eWiseUnion (values for entries present in
    // only one of A or B)
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bxnor_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bxnor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (BXNOR is commutative, so this branch is taken.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bxnor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bxnor_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bxnor_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b if B is bitmap
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // GBB: skip entries absent from a bitmap matrix
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = ~((x) ^ (bij)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bxnor_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = ~((aij) ^ (y)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ~((x) ^ (aij)) ;                  \
}

GrB_Info GB (_bind1st_tran__bxnor_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent kernels
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ~((aij) ^ (y)) ;                  \
}

GrB_Info GB (_bind2nd_tran__bxnor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
cancellation_for_sections.c
// RUN: %libomp-compile && env OMP_CANCELLATION=true %libomp-run // Clang had a bug until version 4.0.1 which resulted in a hang. // UNSUPPORTED: abt, clang-3, clang-4.0.0 // Regression test for a bug in cancellation to cover effect of `#pragma omp cancel` // in a loop construct, on sections construct. // Pass condition: Cancellation status from `for` does not persist // to `sections`. #include <stdio.h> #include <omp.h> int result[2] = {0, 0}; void cq416850_for_sections() { unsigned i; // 1) loop #pragma omp for for (i = 0; i < 1; i++) { result[0] = 1; #pragma omp cancel for result[0] = 2; } // printf("thread %d: result[0] = %d, result[1] = %d \n", omp_get_thread_num(), result[0], result[1]); // 2) sections #pragma omp sections { #pragma omp section { result[1] = 1; #pragma omp cancellation point sections result[1] = 2; } } } int main(void) { if(!omp_get_cancellation()) { printf("Cancellation not enabled!\n"); return 2; } #pragma omp parallel num_threads(4) { cq416850_for_sections(); } if (result[0] != 1 || result[1] != 2) { printf("Incorrect values. " "result[0] = %d (expected 1), " "result[1] = %d (expected 2).\n", result[0], result[1]); printf("FAILED\n"); return 1; } printf("PASSED\n"); return 0; }
convolutiondepthwise_3x3.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Depthwise 3x3 convolution, stride 1.  One output channel per group; each
// group convolves its own input channel with its own 3x3 kernel.
// Processes two output rows per iteration so the middle input rows (r1, r2)
// are reused across both accumulations.
// NOTE(review): despite the _sse suffix this is scalar code — presumably the
// intrinsics variant lives elsewhere; confirm before renaming.
static void convdw3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // groups are independent -> parallelize across them
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // bias may be absent (null Mat converts to null pointer)
        const float bias0 = bias ? bias[g] : 0.f;

        const float* kernel0 = kernel + g * 9;

        float* outptr = out;            // output row i
        float* outptr2 = outptr + outw; // output row i+1

        const float* img0 = bottom_blob.channel(g);

        // four consecutive input rows feed the two output rows
        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w * 2;
        const float* r3 = img0 + w * 3;

        // 3x3 kernel rows
        const float* k0 = kernel0;
        const float* k1 = kernel0 + 3;
        const float* k2 = kernel0 + 6;

        int i = 0;

        // main loop: two output rows at a time
        for (; i + 1 < outh; i += 2)
        {
            int remain = outw;

            for (; remain > 0; remain--)
            {
                // row i: window over r0..r2
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                // row i+1: window over r1..r3 (r1, r2 reused)
                float sum2 = bias0;
                sum2 += r1[0] * k0[0];
                sum2 += r1[1] * k0[1];
                sum2 += r1[2] * k0[2];
                sum2 += r2[0] * k1[0];
                sum2 += r2[1] * k1[1];
                sum2 += r2[2] * k1[2];
                sum2 += r3[0] * k2[0];
                sum2 += r3[1] * k2[1];
                sum2 += r3[2] * k2[2];

                *outptr = sum;
                *outptr2 = sum2;

                // slide the 3x3 window one column right
                r0++;
                r1++;
                r2++;
                r3++;
                outptr++;
                outptr2++;
            }

            // advance past the 2-column border plus one full row
            // (we consumed two output rows, so skip one extra input row each)
            r0 += 2 + w;
            r1 += 2 + w;
            r2 += 2 + w;
            r3 += 2 + w;

            outptr += outw;
            outptr2 += outw;
        }

        // tail: odd last output row, processed one row at a time
        for (; i < outh; i++)
        {
            int remain = outw;

            for (; remain > 0; remain--)
            {
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                *outptr = sum;

                r0++;
                r1++;
                r2++;
                outptr++;
            }

            // skip the 2-column right border to reach the next row
            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
    }
}

// Depthwise 3x3 convolution, stride 2.  Same layout as the stride-1 variant,
// but the window advances two input columns per output column and rows are
// processed one at a time.
static void convdw3x3s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    // pointer advance from the end of one output row to the start of the
    // next input row pair: remaining columns of this row plus one full row
    const int tailstep = w - 2 * outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        const float bias0 = bias ? bias[g] : 0.f;

        const float* kernel0 = kernel + g * 9;

        float* outptr = out;

        const float* img0 = bottom_blob.channel(g);

        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w * 2;

        const float* k0 = kernel0;
        const float* k1 = kernel0 + 3;
        const float* k2 = kernel0 + 6;

        int i = 0;

        for (; i < outh; i++)
        {
            int remain = outw;

            for (; remain > 0; remain--)
            {
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                *outptr = sum;

                // stride 2: skip every other input column
                r0 += 2;
                r1 += 2;
                r2 += 2;
                outptr++;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
Example_standalone.2.c
/* * @@name: standalone.2c * @@type: C * @@compilable: yes * @@linkable: no * @@expect: success * @@version: omp_3.1 */ void standalone_ok() { int a = 1; #pragma omp parallel { if (a != 0) { #pragma omp flush(a) } if (a != 0) { #pragma omp barrier } if (a != 0) { #pragma omp taskwait } if (a != 0) { #pragma omp taskyield } } }
ops.h
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #pragma once #ifndef OPS_H_ #define OPS_H_ #include <op_boilerplate.h> #include <array/DataTypeUtils.h> #include <helpers/shape.h> #include <vector> #include <Environment.h> #include <loops/summarystatsreduce.h> #define MIN 1e-12 #define MAX_FLOAT 1e37 #define MIN_FLOAT 1e-37 #define MAX_INT 2147483647 #define MIN_CUTFOFF -3.79297773665f #define FLOAT_MIN_NORMAL 1.17549435e-38 #define EPS 1e-5 #define AFFINITY close #define DOUBLE_PI_T T(2.0 * 3.14159265358979323846) #define no_op_exec_special static const bool requiresSpecial = false; static void execSpecial(T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_accumulation static const bool requiresSpecialAccumulation = false; static void execSpecial(T *x, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){} #ifdef __CUDACC__ #include <helpers/sharedmem.h> #define no_op_exec_special_cuda static __device__ void execSpecialCuda(T *dx, Nd4jLong *xShapeBuffer,T *result, Nd4jLong *resultShapeBuffer,T *extraParams, int *allocationPointer, T *reductionPointer, 
UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_accumulation_cuda static inline __device__ void execSpecialCuda(T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {} #else // hacky fix for isnan/being being out of scope //#ifdef IOS //#define isinf(x) 0 // this isn't right. But std::isinf fails //#define isnan(x) 0 //#else //#define isnan std::isnan //#define isinf std::isinf //#endif #define no_op_exec_special_cuda #define no_op_exec_special_accumulation_cuda #endif #define SELU_ALPHA 1.6732632423543772848170429916717 #define SELU_LAMBDA 1.0507009873554804934193349852946 #ifdef _OPENMP #pragma omp declare reduction(maxT : float,double,float16 : \ omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\ initializer (omp_priv=-MAX_FLOAT) #pragma omp declare reduction(minT : float,double,float16 : \ omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\ initializer (omp_priv=MAX_FLOAT) #pragma omp declare reduction(sumT : float,double,float16 : \ omp_out = omp_in + omp_out)\ initializer (omp_priv=0.0f) #endif namespace functions { namespace indexreduce { template<typename T> struct IndexValue { T value; Nd4jLong index; }; } namespace summarystats { template <typename T> class SummaryStatsData; } } namespace simdOps { template<typename T> class Add { public: op_def static T op(T d1, T d2) { return d1 + d2; } op_def static T op(T d1, T d2, T *params) { return d1 + d2; } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return d1 + params[0]; } op_def static T startingValue() { return static_cast<T>(0.f); } }; template<typename T> class Subtract { public: op_def static T op(T d1, T d2) { return d1 - d2; } op_def static T op(T d1, T d2, T *params) { return d1 - d2; } op_def static T op(T d1) { return d1; } // op 
for MetaOps op_def static T op(T d1, T *params) { return d1 - params[0]; } }; template<typename T> class SquaredSubtract { public: op_def static T op(T d1, T d2) { return nd4j::math::nd4j_pow<T>(d1 - d2, static_cast<T>(2.f)); } op_def static T op(T d1, T d2, T *params) { return nd4j::math::nd4j_pow<T>(d1 - d2, static_cast<T>(2.f)); } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return nd4j::math::nd4j_pow<T>(d1 - params[0], static_cast<T>(2.f)); } }; template<typename T> class ReverseSubtract { public: op_def static T op(T d1, T d2) { return d2 - d1; } op_def static T op(T d1, T d2, T *params) { return d2 - d1; } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return params[0] - d1; } }; template<typename T> class LogPoisonLossFull { public: op_def static T op(T z, T c) { return (nd4j::math::nd4j_exp<T>(c) - z * c + (z * nd4j::math::nd4j_log<T>(z) - z + static_cast<T>(0.5f) * nd4j::math::nd4j_log<T>(DOUBLE_PI_T * z))); } op_def static T op(T z, T c, T *params) { return (nd4j::math::nd4j_exp<T>(c) - z * c + (z * nd4j::math::nd4j_log<T>(z) - z + static_cast<T>(0.5f) * nd4j::math::nd4j_log<T>(DOUBLE_PI_T * z))); } op_def static T op(T z) { return (z * nd4j::math::nd4j_log<T>(z) - z + static_cast<T>(0.5f) * nd4j::math::nd4j_log<T>(DOUBLE_PI_T * z)); } // op for MetaOps op_def static T op(T z, T *params) { return (nd4j::math::nd4j_exp<T>(params[0]) - z * params[0] + (z * nd4j::math::nd4j_log<T>(z) - z + static_cast<T>(0.5f) * nd4j::math::nd4j_log<T>(DOUBLE_PI_T * z))); } }; template<typename T> class LogPoisonLoss { public: op_def static T op(T z, T c) { return (nd4j::math::nd4j_exp<T>(c) - z * c); } op_def static T op(T z, T c, T *params) { return (nd4j::math::nd4j_exp<T>(c) - z * c); } op_def static T op(T z) { return (z); } // op for MetaOps op_def static T op(T z, T *params) { return (nd4j::math::nd4j_exp<T>(params[0]) - z * params[0]); } }; template<typename T> class 
Multiply { public: op_def static T op(T d1, T d2) { return d1 * d2; } op_def static T op(T d1, T d2, T *params) { return d1 * d2; } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return d1 * params[0]; } op_def static T startingValue() { return static_cast<T>(1.f); } }; template<typename T> class Divide { public: op_def static T op(T d1, T d2) { return d1 / d2; } op_def static T op(T d1, T d2, T *params) { return d1 / d2; } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return d1 / params[0]; } op_def static T startingValue() { return static_cast<T>(1.f); } }; template<typename T> class SafeDivide { public: op_def static T op(T d1, T d2) { if(d2 == static_cast<T>(0.f)) return static_cast<T>(0.f); return d1 / d2; } op_def static T op(T d1, T d2, T *params) { if(d2 == static_cast<T>(0.f)) return static_cast<T>(0.f); return d1 / d2; } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { if(params[0] == static_cast<T>(0.f)) return static_cast<T>(0.f); return d1 / params[0]; } }; template<typename T> class FloorDiv { public: op_def static T op(T d1, T d2) { return nd4j::math::nd4j_floor<T>(d1 / d2); } op_def static T op(T d1, T d2, T *params) { return nd4j::math::nd4j_floor<T>(d1 / d2); } op_def static T op(T d1) { return nd4j::math::nd4j_floor<T>(d1); } // op for MetaOps op_def static T op(T d1, T *params) { return nd4j::math::nd4j_floor<T>(d1 / params[0]); } }; template<typename T> class TruncateDiv { public: op_def static T op(T d1, T d2) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(d2); return static_cast<T>(i1 / i2); } op_def static T op(T d1, T d2, T *params) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(d2); return static_cast<T>(i1 / i2); } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { auto i1 = static_cast<int>(d1); auto i2 = 
static_cast<int>(params[0]); return static_cast<T>(i1 / i2); } }; template<typename T> class Remainder { public: op_def static T op(T d1, T d2) { return nd4j::math::nd4j_remainder(d1, d2); } op_def static T op(T d1, T d2, T *params) { return nd4j::math::nd4j_remainder(d1, d2); } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return nd4j::math::nd4j_remainder(d1, params[0]); } }; template<typename T> class FMod { public: op_def static T op(T d1, T d2) { return nd4j::math::nd4j_fmod(d1, d2); } op_def static T op(T d1, T d2, T *params) { return nd4j::math::nd4j_fmod(d1, d2); } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return nd4j::math::nd4j_fmod(d1, params[0]); } }; template<typename T> class FloorMod { public: op_def static T op(T d1, T d2) { T m = nd4j::math::nd4j_fmod(d1, d2);; return (d1 < static_cast<T>(0.0f)) == (d2 < static_cast<T>(0.0f)) ? m : nd4j::math::nd4j_fmod<T>(m + d2, d2); } op_def static T op(T d1, T d2, T *params) { T m = nd4j::math::nd4j_fmod(d1, d2); return (d1 < static_cast<T>(0.0f)) == (d2 < static_cast<T>(0.0f)) ? m : nd4j::math::nd4j_fmod<T>(m + d2, d2); } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { T m = nd4j::math::nd4j_fmod(d1, params[0]); return (d1 < static_cast<T>(0.0f)) == (params[0] < static_cast<T>(0.0f)) ? 
m : nd4j::math::nd4j_fmod<T>(m + params[0], params[0]); } }; template<typename T> class ReverseDivide { public: op_def static T op(T d1, T d2) { return d2 / d1; } op_def static T op(T d1, T d2, T *params) { return d2 / d1; } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return params[0] / d1; } }; template<typename T> class Copy { public: op_def static T op(T d1, T d2) { return d2; } op_def static T op(T d1, T d2, T *params) { return d2; } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return params[0]; } }; template<typename T> class Copy2 { public: op_def static T op(T d1, T d2) { return d2; } op_def static T op(T d1, T d2, T *params) { return d2; } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return params[0]; } }; template<typename T> class Axpy { public: op_def static T op(T d1, T d2) { return d2 + d1; } op_def static T op(T d1, T d2, T *params) { T alpha = params[0]; return alpha * d1 + d2; } op_def static T op(T d1) { return d1; } }; template<typename T> class And { public: op_def static T op(T d1, T d2) { return d2 + d1; } op_def static T op(T d1, T d2, T *params) { T comp = params[0]; return d1 != comp && d2 != comp ? static_cast<T>(1.0f) : static_cast<T>(0.0f); } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return static_cast<T>(119.0f); } }; template<typename T> class Or { public: op_def static T op(T d1, T d2) { return d2 + d1; } op_def static T op(T d1, T d2, T *params) { T comp = params[0]; return d1 != comp || d2 != comp ? 
static_cast<T>(1.0f) : static_cast<T>(0.0f); } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return static_cast<T>(119.0f); } }; template<typename T> class Xor { public: op_def static T op(T d1, T d2) { return d2 + d1; } op_def static T op(T d1, T d2, T *params) { T comp = params[0]; return ((d1 == comp && d2 != comp)||(d1 != comp && d2 == comp)) ? static_cast<T>(1.0f) : static_cast<T>(0.0f); } op_def static T op(T d1) { return d1; } }; template<typename T> class Not { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { T comp = params[0]; return d1 == comp ? static_cast<T>(1.0f) : static_cast<T>(0.0f); } }; template<typename T> class LogicalNot { public: op_def static T op(T d1, T d2) { return !((int) d1 && (int) d2); } op_def static T op(T d1, T d2, T *params) { return (T) !((int) d1 && (int) d2); } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return static_cast<T>(119.0f); } }; template<typename T> class LogicalXor { public: op_def static T op(T d1, T d2) { int i1 = (int) d1; int i2 = (int) d2; return (i1 | i2) &~ (i1 & i2); } op_def static T op(T d1, T d2, T *params) { int i1 = (int) d1; int i2 = (int) d2; return (i1 | i2) &~ (i1 & i2); } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return static_cast<T>(119.0f); } }; template<typename T> class LogicalAnd { public: op_def static T op(T d1, T d2) { return (int) d1 & (int) d2; } op_def static T op(T d1, T d2, T *params) { return (int) d1 & (int) d2; } op_def static T op(T d1) { return d1; } // op for MetaOps op_def static T op(T d1, T *params) { return static_cast<T>(119.0f); } }; template<typename T> class LogicalOr { public: op_def static T op(T d1, T d2) { return (int) d1 | (int) d2; } op_def static T op(T d1, T d2, T *params) { return (int) d1 | (int) d2; } op_def static T op(T d1) { return d1; } // op for MetaOps op_def 
static T op(T d1, T *params) { return static_cast<T>(119.0f); } }; template<typename T> class SetValOrLess { public: op_def static T op(T d1, T d2, T *params) { if (d2 < d1) { return d1; } return d2; } }; template<typename T> class Mod { public: /* // just a optional note, feel free to remove later op_def static half op(half d1, half d2, half *params) { return __float2half(simdOps::Mod<float>::op(__half2float(d1), __half2float(d2), nullptr)); } */ op_def static T op(T d1, T d2) { return (int)d1 % (int)d2; } op_def static T op(T d1, T d2, T *params) { return (int)d1 % (int)d2; } // op for MetaOp op_def static T op(T d1, T *params) { return (int)d1 % (int)params[0]; } }; template<typename T> class ReverseMod { public: op_def static T op(T d1, T d2) { return (int)d2 % (int)d1; } op_def static T op(T d1, T d2, T *params) { return (int)d2 % (int)d1; } // op for MetaOp op_def static T op(T d1, T *params) { return (int)params[0] % (int)d1; } }; /** * Whether 2 elements in an array * are epsilion equal */ template<typename T> class Epsilon { public: op_def static T op(T d1, T d2, T *params) { T diff = d1 - d2; T absDiff = nd4j::math::nd4j_abs<T>(diff); if (absDiff <= static_cast<T>(MIN)) return static_cast<T>(1.0f); return static_cast<T>(0.0f); } op_def static T op(T d1, T *params) { return d1; } }; template<typename T> class EqualTo { public: op_def static T op(T d1, T d2) { return d1 == d2; } op_def static T op(T d1, T d2, T *params) { return d1 == d2; } op_def static T op(T d1, T *params) { return d1; } }; template<typename T> class NotEqualTo { public: op_def static T op(T d1, T d2) { return d1 != d2; } op_def static T op(T d1, T d2, T *params) { return d1 != d2; } op_def static T op(T d1, T *params) { return d1; } }; template<typename T> class GreaterThanOrEqual { public: op_def static T op(T d1, T d2) { return d1 >= d2; } op_def static T op(T d1, T d2, T *params) { return d1 >= d2; } // FIXME: this signature clashes with MetaOp stuff op_def static T op(T d1, T 
*params) { return d1; } }; template<typename T> class GreaterThan { public: op_def static T op(T d1, T d2) { return d1 > d2; } op_def static T op(T d1, T d2, T *params) { return d1 > d2; } // FIXME: this signature clashes with MetaOp stuff op_def static T op(T d1, T *params) { return d1; } }; template<typename T> class LessThan { public: op_def static T op(T d1, T d2) { return d1 < d2; } op_def static T op(T d1, T d2, T *params) { return d1 < d2; } op_def static T op(T d1, T *params) { return d1; } }; template<typename T> class LessThanOrEqual { public: op_def static T op(T d1, T d2) { return d1 <= d2; } op_def static T op(T d1, T d2, T *params) { return d1 <= d2; } op_def static T op(T d1, T *params) { return d1; } }; template<typename T> class Abs { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_abs<T>(d1); } }; template<typename T> class Ceiling { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_ceil<T>(d1); } }; template<typename T> class Cosine { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_cos<T>(d1); } }; template<typename T> class Exp { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_exp<T>(d1); } }; template<typename T> class HardTanhDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return ((d1 >= static_cast<T>(-1.0f) && d1 <= static_cast<T>(1.0f)) ? 
static_cast<T>(1.0f) : static_cast<T>(0.0f)); } }; template<typename T> class HardTanh { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { if (d1 < static_cast<T>(-1.0f)) return static_cast<T>(-1.0f); else if (d1 > static_cast<T>(1.0f)) return static_cast<T>(1.0f); else return d1; } }; template<typename T> class Floor { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_floor<T>(d1); } }; template<typename T> class Log { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_log<T>(d1); } }; template<typename T> class Log1p { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_log<T>(1+d1); } }; template<typename T> class LogX { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_log<T>(d1) / nd4j::math::nd4j_log<T>(params[0]) ; } }; template<typename T> class StabilizeFP16 { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { if (d1 <= static_cast<T>(0.f)) return static_cast<T>(0.001f); else return d1; } }; template<typename T> class SpecialDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return d1 * (static_cast<T>(1.0f) - d1); } }; template<typename T> class Neg { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return -d1; } }; template<typename T> class Erf { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_erf<T>(d1); } }; template<typename T> class Erfc { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_erfc<T>(d1); } }; template<typename T> class Reciprocal { public: no_op_exec_special no_op_exec_special_cuda // op_def static T 
op(T d1) { // return (T(1.0f) / d1); // } // op for MetaOps op_def static T op(T d1, T *params) { return (static_cast<T>(1.0f)/d1); } }; template<typename T> class Sqr { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.f)); } op_def static T op(T d1) { return nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.0f)); } }; template<typename T> class RelativeError { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_re<T>(d1, params[0]); } op_def static T op(T d1, T d2) { return nd4j::math::nd4j_re<T>(d1, d2); } op_def static T op(T d1, T d2, T *params) { return nd4j::math::nd4j_re<T>(d1, d2); } op_def static T op(T d1) { return static_cast<T>(0.0f); } }; template<typename T> class BinaryRelativeError { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { T d2 = params[0]; T threshold = params[1]; return nd4j::math::nd4j_re<T>(d1, d2) > threshold ? static_cast<T>(1.0f) : static_cast<T>(0.0f); } op_def static T op(T d1, T d2, T *params) { T threshold = params[0]; return nd4j::math::nd4j_re<T>(d1, d2) > threshold ? static_cast<T>(1.0f) : static_cast<T>(0.0f); } op_def static T op(T d1) { return static_cast<T>(0.0f); } }; template<typename T> class BinaryMinimumAbsoluteRelativeError { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { T d2 = params[0]; T thresholdRelative = params[1]; T thresholdAbsolute = params[2]; return nd4j::math::nd4j_re<T>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<T>(d1 - d2) < thresholdAbsolute ? static_cast<T>(0.0f) : static_cast<T>(1.0f)) : static_cast<T>(0.0f); } op_def static T op(T d1, T d2, T *params) { T thresholdRelative = params[0]; T thresholdAbsolute = params[1]; return nd4j::math::nd4j_re<T>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<T>(d1 - d2) < thresholdAbsolute ? 
static_cast<T>(0.0f) : static_cast<T>(1.0f)) : static_cast<T>(0.0f); } op_def static T op(T d1) { return static_cast<T>(0.0f); } }; template<typename T> class Pow { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_pow<T>(d1, params[0]); } op_def static T op(T d1, T d2) { return nd4j::math::nd4j_pow<T>(d1, d2); } op_def static T op(T d1, T d2, T *params) { return nd4j::math::nd4j_pow<T>(d1, d2); } op_def static T op(T d1) { return d1; } }; template<typename T> class PowDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return params[0] * nd4j::math::nd4j_pow<T>(d1, params[0] - static_cast<T>(1.f)); } op_def static T op(T d1, T d2) { return d2 * nd4j::math::nd4j_pow<T>(d1, d2 - static_cast<T>(1.f)); } op_def static T op(T d1, T d2, T *params) { return d2 * nd4j::math::nd4j_pow<T>(d1, d2 - static_cast<T>(1.f)); } op_def static T op(T d1) { return d1; } }; template<typename T> class Round { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_round<T>(d1); } }; template<typename T> class IsNan { public: no_op_exec_special no_op_exec_special_cuda no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_isnan(d1) ? 
static_cast<T>(1.0f) : static_cast<T>(0.0f); } op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); } op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; } }; template<typename T> class Expm1 { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_exp(d1) - static_cast<T>(1.0f); } }; template<typename T> class IsInf { public: no_op_exec_special no_op_exec_special_cuda no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_isinf<T>(d1) ? static_cast<T>(1.0f) : static_cast<T>(0.0f); } op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); } op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; } }; template<typename T> class IsInfOrNan{ public: no_op_exec_special no_op_exec_special_cuda no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_isfin<T>(d1) ? 
static_cast<T>(0.0f) : static_cast<T>(1.0f); } op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); } op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; } }; template<typename T> class IsFinite { public: no_op_exec_special no_op_exec_special_cuda no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_isfin<T>(d1) ? static_cast<T>(1.0f) : static_cast<T>(0.0f); } op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); } op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; } }; template<typename T> class ClipByValue { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { if (d1 > params[1]) return params[1]; else if (d1 < params[0]) return params[0]; else return d1; } }; template<typename T> class Swish { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return d1 * nd4j::math::nd4j_sigmoid<T>(d1); } }; template<typename T> class SwishDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { T ex = nd4j::math::nd4j_pow<T>(static_cast<T>(M_E), d1); return (ex * (d1 + ex + static_cast<T>(1.f))) / nd4j::math::nd4j_pow<T>((ex + static_cast<T>(1.f)) , static_cast<T>(2.0f)); } }; template<typename T> class LogSigmoid { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_log(nd4j::math::nd4j_sigmoid<T>(d1)); } }; template<typename T> class LogSigmoidDerivative { public: 
no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { T ex = nd4j::math::nd4j_pow<T>(M_E, d1); return static_cast<T>(1.f) / (ex + static_cast<T>(1.f)); } }; template<typename T> class Sigmoid { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_sigmoid<T>(d1); } }; template<typename T> class SigmoidDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_sigmoidderivative<T>(d1); } }; template<typename T> class HardSigmoid { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_min<T>(static_cast<T>(1.0f), nd4j::math::nd4j_max<T>(static_cast<T>(0.0f), (static_cast<T>(0.2f)) * d1 + static_cast<T>(0.5f))); } }; template<typename T> class HardSigmoidDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return d1 < static_cast<T>(-2.5f) || d1 > static_cast<T>(2.5f) ? 
static_cast<T>(0.0f) : static_cast<T>(0.2f); } }; /** * Scale to be between a min and max */ template<typename T> class SetRange { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { T min = params[0]; T max = params[1]; if (d1 >= min && d1 <= max) return d1; if (min == static_cast<T>(0.0f) && max == static_cast<T>(1.0f)) { auto val = static_cast<T>(1.0f) / (static_cast<T>(1.0f) + nd4j::math::nd4j_exp<T>(-d1)); return (nd4j::math::nd4j_floor<T>(val * (max - min)) + min); } auto ret = (nd4j::math::nd4j_floor<T>(d1 * (max - min)) + min); return ret; } }; template<typename T> class Sin { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_sin<T>(d1); } }; template<typename T> class Square { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return d1 * d1; } }; template<typename T> class Sqrt { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_sqrt<T>(d1); } }; template<typename T> class RSqrt { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return static_cast<T>(1.0f) / nd4j::math::nd4j_sqrt<T>(d1); } }; template<typename T> class Rint { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_rint<T>(d1); } }; template<typename T> class SoftPlus { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::softplus<T>(d1); } }; template<typename T> class Sign { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return (d1 > static_cast<T>(0.0f)) - (d1 < static_cast<T>(0.0f)); } }; template<typename T> class TimesOneMinus { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return d1 * (static_cast<T>(1.0f) - d1); } }; template<typename T> class 
RationalTanh { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { // keep 2/3 as runtime variable, to match precision auto dis = (static_cast<T>(2.0f) / static_cast<T>(3.0f)) * d1; auto tanh = nd4j::math::nd4j_sgn<T>(dis) * (static_cast<T>(1.0f) - (static_cast<T>(1.0f) / (static_cast<T>(1.0f) + nd4j::math::nd4j_abs<T>(dis) + nd4j::math::nd4j_pow<T>(dis, static_cast<T>(2.0f)) + static_cast<T>(1.41645f) * nd4j::math::nd4j_pow<T>(dis, static_cast<T>(4.0f)) ))); return static_cast<T>(1.7159f) * tanh; } }; template<typename T> class RationalTanhDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { auto dis = (static_cast<T>(2.0f) / static_cast<T>(3.0f)) * d1; auto a = static_cast<T>(1.0f) + nd4j::math::nd4j_abs<T>(dis) + nd4j::math::nd4j_pow<T>(dis, static_cast<T>(2.)) + static_cast<T>(1.41645f) * nd4j::math::nd4j_pow<T>(dis, static_cast<T>(4.f)); auto tDeriv = (static_cast<T>(1.0f) + nd4j::math::nd4j_sign<T>(dis) * (static_cast<T>(2.0f) * dis + static_cast<T>(4.0f) * static_cast<T>(1.41645f) * nd4j::math::nd4j_pow<T>(dis, static_cast<T>(3.f)))) / (a * a); return static_cast<T>(1.7159f) * (static_cast<T>(2.0f) / static_cast<T>(3.0f)) * tDeriv; } }; template<typename T> class Tanh { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_tanh<T>(d1); } }; template<typename T> class RectifiedTanh { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_max<T>(static_cast<T>(0.0f), nd4j::math::nd4j_tanh<T>(d1)); } }; template<typename T> class RectifiedTanhDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return d1 > static_cast<T>(0.0f) ? 
nd4j::math::nd4j_tanhderivative<T>(d1) : static_cast<T>(0.0f); } }; template<typename T> class ATanh { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_atanh<T>(d1); } }; template<typename T> class TanhDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_tanhderivative<T>(d1); } }; template<typename T> class Cube { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return d1 * d1 * d1; } }; template<typename T> class CubeDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return 3 * d1 * d1; } }; template<typename T> class ACos { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_acos<T>(d1); } }; template<typename T> class ASinh { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_asinh<T>(d1); } }; template<typename T> class ASinhDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return static_cast<T>(1.f) / (nd4j::math::nd4j_sqrt(nd4j::math::nd4j_pow(d1, static_cast<T>(2.f)) + static_cast<T>(1.f))); } }; template<typename T> class ACosh { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_acosh<T>(d1); } }; template<typename T> class ACoshDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return static_cast<T>(1.f) / (nd4j::math::nd4j_sqrt(d1 - static_cast<T>(1.f)) * nd4j::math::nd4j_sqrt(d1 + static_cast<T>(1.f))); } }; template<typename T> class Ones { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return static_cast<T>(1.0f); } }; template<typename T> class SoftSign { public: no_op_exec_special 
no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_softsign<T>(d1); } }; template<typename T> class SoftSignDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_softsignderivative<T>(d1); } }; template<typename T> class MatchCondition { public: no_op_exec_special no_op_exec_special_cuda no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); } op_def static T merge(T old, T opOutput, T *extraParams) { return old + opOutput; } op_def static T update(T old, T opOutput, T *extraParams) { return old + opOutput; } // this op return 1.0 if condition met, 0.0 otherwise op_def static T op(T d1, T *extraParams) { T compare = extraParams[0]; T eps = extraParams[1]; auto mode = static_cast<int>(extraParams[2]); //nd4j_printf("value: %f; comp: %f; eps: %f; mode: %i;\n", d1, compare, eps, mode); switch (mode) { case 0: // equals return nd4j::math::nd4j_abs<T>(d1 - compare) <= eps ? 1.0f : 0.0f; case 1: // not equals return nd4j::math::nd4j_abs<T>(d1 - compare) > eps ? 1.0f : 0.0f; case 2: // less_than return d1 < compare ? 1.0f : 0.0f; case 3: // greater_than return d1 > compare ? 1.0f : 0.0f; case 4: // less_or_equals_than return d1 <= compare ? 1.0f : 0.0f; case 5: // greater_or_equals_than return d1 >= compare ? 1.0f : 0.0f; case 6: // abs_less_than return nd4j::math::nd4j_abs<T>(d1) < compare ? 1.0f : 0.0f; case 7: // abs_greater_than return nd4j::math::nd4j_abs<T>(d1) > compare ? 1.0f : 0.0f; case 8: // is inf return nd4j::math::nd4j_isinf(d1) ? 1.0f : 0.0f; case 9: // is nan return nd4j::math::nd4j_isnan(d1) ? 1.0f : 0.0f; case 10: return (d1 == compare) ? 1.0f : 0.0f; case 11: return (d1 != compare) ? 1.0f : 0.0f; case 12: // abs_greater_or_equals_than return nd4j::math::nd4j_abs<T>(d1) >= compare ? 
1.0f : 0.0f; case 13: // abs_less_or_equals_than return nd4j::math::nd4j_abs<T>(d1) <= compare ? 1.0f : 0.0f; default: printf("Undefined match condition: [%i]\n", mode); } return d1; } op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; } }; template<typename T> class ELU { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_elu<T>(d1); } }; template<typename T> class ELUDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_eluderivative<T>(d1); } }; template<typename T> class RELU { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return d1 < params[0] ? params[0] : d1; } }; template<typename T> class RELU6 { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { T relu = d1 < params[0] ? params[0] : d1; return relu < static_cast<T>(6.f) ? relu : static_cast<T>(6.f); } }; template<typename T> class LeakyRELU { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_leakyrelu<T>(d1, params[0]); } }; template<typename T> class SELU { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return d1 > static_cast<T>(0.0f) ? static_cast<T>(SELU_LAMBDA) * d1 : static_cast<T>(SELU_LAMBDA) * (static_cast<T>(SELU_ALPHA) * nd4j::math::nd4j_exp<T>(d1) - static_cast<T>(SELU_ALPHA)); } }; template<typename T> class SELUDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return d1 > static_cast<T>(0.0f) ? 
static_cast<T>(SELU_LAMBDA) : static_cast<T>(SELU_ALPHA) * static_cast<T>(SELU_LAMBDA) * nd4j::math::nd4j_exp<T>(d1); } }; template<typename T> class LeakyRELUDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { if (d1 >= static_cast<T>(0.0f)) return static_cast<T>(1.0f); else return params[0]; } }; template<typename T> class ASin { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_asin<T>(d1); } }; template<typename T> class Sinh { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_sinh<T>(d1); } }; template<typename T> class SinhDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_cosh<T>(d1); } }; template<typename T> class Cosh { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_cosh<T>(d1); } }; template<typename T> class Tan { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_tan<T>(d1); } }; template<typename T> class TanDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return static_cast<T>(1.0f) / nd4j::math::nd4j_pow<T>(nd4j::math::nd4j_cos<T>(d1), static_cast<T>(2.0f)); } }; template<typename T> class ATan { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return nd4j::math::nd4j_atan(d1); } }; template<typename T> class Atan2 { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T d2) { return nd4j::math::nd4j_atan2<T>(d2, d1); } op_def static T op(T d1, T d2, T *params) { return nd4j::math::nd4j_atan2<T>(d2, d1); } // op for MetaOps op_def static T op(T d1, T *params) { return nd4j::math::nd4j_atan2<T>(params[0], d1); } }; template<typename T> class Identity { public: 
no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return d1; } }; template<typename T> class Stabilize { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { T k = params[0]; if (d1 * k > static_cast<T>(- MIN_CUTFOFF)) return static_cast<T>(- MIN_CUTFOFF) / k; else if (d1 * k < static_cast<T>(MIN_CUTFOFF)) return static_cast<T>(MIN_CUTFOFF) / k; return d1; } }; template<typename T> class Step { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return (d1 > params[0] ? static_cast<T>(1.0f) : static_cast<T>(0.0f)); } }; template<typename T> class OneMinus { public: no_op_exec_special no_op_exec_special_cuda op_def static T op(T d1, T *params) { return static_cast<T>(1.0f) - d1; } }; template<typename T> class Sum { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); } op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T op(T d1, T *extraParams) { return d1; } op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return reduction; } }; template<typename T> class ShannonEntropy { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); } op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T op(T d1, T *extraParams) { return nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.0f)) * nd4j::math::nd4j_log<T>(nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.0f))); } op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return -reduction; } }; template<typename T> class LogEntropy { public: 
no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); } op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T op(T d1, T *extraParams) { return d1 * nd4j::math::nd4j_log<T>(d1); } op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { //entropy is -sum(p(x) * log(p(x))); log entropy is log of this return nd4j::math::nd4j_log<T>(-reduction); } }; template<typename T> class Entropy { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); } op_def static T merge(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T update(T old, T opOutput, T *extraParams) { return opOutput + old; } op_def static T op(T d1, T *extraParams) { return d1 * nd4j::math::nd4j_log<T>(d1); } op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return -reduction; //entropy is -sum(p(x) * log(p(x))) } }; template<typename T> class ASum { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static T startingValue(const T *input) { return static_cast<T>(0.0f); } op_def static T merge(T old, T opOutput, T *extraParams) { return nd4j::math::nd4j_abs<T>(opOutput) + nd4j::math::nd4j_abs<T>(old); } op_def static T update(T old, T opOutput, T *extraParams) { return nd4j::math::nd4j_abs<T>(opOutput) + nd4j::math::nd4j_abs<T>(old); } op_def static T op(T d1, T *extraParams) { return nd4j::math::nd4j_abs<T>(d1); } op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) { return nd4j::math::nd4j_abs<T>(reduction); } }; template<typename T> class CountNonZero { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static T startingValue(const T *input) { return 
// NOTE(review): this span opens inside the tail of a reduction-op class whose
// header lies above the visible region. Judging from its op() (non-zero -> 1,
// zero -> 0, combined by sum), it is presumably a "count non-zero" reduction —
// confirm against the class header. The tail is reproduced unchanged.
static_cast<T>(0.0f); }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    // per-element indicator: 0 for a zero input, 1 otherwise
    op_def static T op(T d1, T *extraParams) {
        return d1 == static_cast<T>(0.0f) ? static_cast<T>(0.0f) : static_cast<T>(1.0f);
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return reduction;
    }
};

/**
 * Reduction op: counts zero-valued elements of the buffer.
 * Each element contributes 1 if it equals zero, 0 otherwise; partial results
 * combine by summation.
 */
template<typename T>
class CountZero {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) {
        return static_cast<T>(0.0f);
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    // per-element indicator: 1 for a zero input, 0 otherwise
    op_def static T op(T d1, T *extraParams) {
        return d1 == static_cast<T>(0.0f) ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return reduction;
    }
};

/**
 * Reduction op: product of all elements (identity element is 1).
 */
template<typename T>
class Prod {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) {
        return static_cast<T>(1.0f);
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return opOutput * old;
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return opOutput * old;
    }

    op_def static T op(T d1, T *extraParams) {
        return d1;
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return reduction;
    }
};

/**
 * Reduction op: logical "any". Raw values are summed, then the sum is
 * collapsed to 1 if positive and 0 otherwise (so any positive element
 * yields 1).
 */
template<typename T>
class Any {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) {
        return static_cast<T>(0.0f);
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    op_def static T op(T d1, T *extraParams) {
        return d1;
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return reduction > static_cast<T>(0.0f) ? static_cast<T>(1.0f) : static_cast<T>(0.0f) ;
    }
};

/**
 * Reduction op: logical "all". Values are multiplied, so any zero element
 * forces the product (and thus the final 1/0 collapse) to 0.
 */
template<typename T>
class All {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) {
        return static_cast<T>(1.0f);
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return opOutput * old;
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return opOutput * old;
    }

    op_def static T op(T d1, T *extraParams) {
        return d1;
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return reduction > static_cast<T>(0.0f) ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
    }
};

/**
 * Reduction op: arithmetic mean — sum of elements divided by element count.
 */
template<typename T>
class Mean {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) {
        return static_cast<T>(0.0f);
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    op_def static T op(T d1, T *extraParams) {
        return d1;
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        // NOTE(review): n (Nd4jLong) is narrowed to int here; this would
        // overflow for buffers longer than INT_MAX — confirm whether such
        // lengths are reachable through this path.
        return reduction / (int) n;
    }
};

/**
 * Reduction op: mean of absolute values.
 */
template<typename T>
class AMean {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) {
        return static_cast<T>(0.0f);
    }

    // abs() here is defensive: partials are already non-negative since op()
    // emits absolute values
    op_def static T merge(T old, T opOutput, T *extraParams) {
        return nd4j::math::nd4j_abs<T>(opOutput) + nd4j::math::nd4j_abs<T>(old);
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return nd4j::math::nd4j_abs<T>(opOutput) + nd4j::math::nd4j_abs<T>(old);
    }

    op_def static T op(T d1, T *extraParams) {
        return nd4j::math::nd4j_abs<T>(d1);
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return nd4j::math::nd4j_abs<T>(reduction) / static_cast<T>(n);
    }
};

/**
 * Reduction op: maximum element. Seeds the accumulator with the first
 * element rather than a synthetic -infinity.
 */
template<typename T>
class Max {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) {
        return input[0];
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return nd4j::math::nd4j_max<T>(old, opOutput);
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return nd4j::math::nd4j_max<T>(opOutput, old);
    }

    op_def static T op(T d1, T d2, T *params) {
        return nd4j::math::nd4j_max<T>(d1, d2);
    }

    op_def static T op(T d1, T d2) {
        return nd4j::math::nd4j_max<T>(d1, d2);
    }

    // FIXME: this signature overlaps with MetaOp
    op_def static T op(T d1, T *extraParams) {
        return d1;
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return reduction;
    }
};

/**
 * Reduction op: element with the greatest absolute value.
 * Note the pairwise op(d1, d2) returns the signed winner, not its
 * absolute value.
 */
template<typename T>
class AMax {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) {
        return input[0];
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return nd4j::math::nd4j_max<T>(nd4j::math::nd4j_abs<T>(old), nd4j::math::nd4j_abs<T>(opOutput));
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return nd4j::math::nd4j_max<T>(nd4j::math::nd4j_abs<T>(opOutput), nd4j::math::nd4j_abs<T>(old));
    }

    op_def static T op(T d1, T d2, T *params) {
        return nd4j::math::nd4j_max<T>(nd4j::math::nd4j_abs<T>(d1), nd4j::math::nd4j_abs<T>(d2));
    }

    op_def static T op(T d1, T d2) {
        return nd4j::math::nd4j_abs<T>(d1) > nd4j::math::nd4j_abs<T>(d2) ? d1 : d2;
    }

    // FIXME: this signature overlaps with MetaOp
    op_def static T op(T d1, T *extraParams) {
        return nd4j::math::nd4j_abs<T>(d1);
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return nd4j::math::nd4j_abs<T>(reduction);
    }
};

/**
 * Reduction op: element with the smallest absolute value.
 * Mirror of AMax with min in place of max.
 */
template<typename T>
class AMin {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) {
        return input[0];
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return nd4j::math::nd4j_min<T>(nd4j::math::nd4j_abs<T>(old), nd4j::math::nd4j_abs<T>(opOutput));
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return nd4j::math::nd4j_min<T>(nd4j::math::nd4j_abs<T>(opOutput), nd4j::math::nd4j_abs<T>(old));
    }

    op_def static T op(T d1, T d2, T *params) {
        return nd4j::math::nd4j_min(nd4j::math::nd4j_abs<T>(d1), nd4j::math::nd4j_abs<T>(d2));
    }

    op_def static T op(T d1, T d2) {
        return nd4j::math::nd4j_abs<T>(d1) < nd4j::math::nd4j_abs<T>(d2) ? d1 : d2;
    }

    // FIXME: this signature overlaps with MetaOp
    op_def static T op(T d1, T *extraParams) {
        return nd4j::math::nd4j_abs<T>(d1);
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return nd4j::math::nd4j_abs<T>(reduction);
    }
};

/**
 * Reduction op: minimum element. Seeds the accumulator with the first
 * element rather than a synthetic +infinity.
 */
template<typename T>
class Min {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) {
        return input[0];
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return nd4j::math::nd4j_min<T>(old, opOutput);
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return nd4j::math::nd4j_min<T>(opOutput, old);
    }

    op_def static T op(T d1, T d2, T *params) {
        return nd4j::math::nd4j_min(d1, d2);
    }

    op_def static T op(T d1, T d2) {
        return nd4j::math::nd4j_min(d1, d2);
    }

    // FIXME: this signature overlaps with MetaOp
    op_def static T op(T d1, T *extraParams) {
        return d1;
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return reduction;
    }
};
/**
 * Reduction op: L1 norm — sum of absolute values.
 */
template<typename T>
class Norm1 {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) {
        return static_cast<T>(0.0f);
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    op_def static T op(T d1, T *extraParams) {
        return nd4j::math::nd4j_abs<T>(d1);
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return reduction;
    }
};

/**
 * Reduction op: L2 norm — square root of the sum of squares.
 */
template<typename T>
class Norm2 {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) {
        return static_cast<T>(0.0f);
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return nd4j::math::nd4j_sqrt<T>(reduction);
    }

    op_def static T op(T d1, T *extraParams) {
        return d1 * d1;
    }
};

/**
 * Reduction op: sum of squares (L2 norm without the final square root).
 */
template<typename T>
class SquaredNorm {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) {
        return static_cast<T>(0.0f);
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    op_def static T op(T d1, T *extraParams) {
        return d1 * d1;
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return reduction;
    }
};

/**
 * Reduction op: Frobenius norm — sqrt of the sum of |x|^2.
 * Numerically identical to Norm2 for real inputs; kept as a separate op.
 */
template<typename T>
class NormFrobenius {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) {
        return static_cast<T>(0.0f);
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    op_def static T op(T d1, T *extraParams) {
        T v = nd4j::math::nd4j_abs(d1);
        return v * v;
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return nd4j::math::nd4j_sqrt<T>(reduction);
    }
};

/**
 * Reduction op: p-norm — (sum |x|^p)^(1/p), with p supplied in extraParams[0].
 */
template<typename T>
class NormP {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) {
        return static_cast<T>(0.0f);
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    op_def static T op(T d1, T *extraParams) {
        return nd4j::math::nd4j_pow(nd4j::math::nd4j_abs(d1), extraParams[0]);
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return nd4j::math::nd4j_pow(reduction, static_cast<T>(1.0f) / extraParams[0]);
    }
};

/**
 * Reduction op: max norm (infinity norm) — maximum absolute value.
 */
template<typename T>
class NormMax {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) {
        return static_cast<T>(0.0f);
    }

    // NOTE(review): merge() SUMS partials while update() takes the max — for a
    // max-norm the merge of two partial maxima should also be a max; summing
    // overstates the result whenever more than one partial is non-zero.
    // Confirm against callers before changing.
    op_def static T merge(T old, T opOutput, T *extraParams) {
        return opOutput + old;
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return nd4j::math::nd4j_max<T>(nd4j::math::nd4j_abs<T>(old), nd4j::math::nd4j_abs<T>(opOutput));
    }

    op_def static T op(T d1, T *extraParams) {
        return d1;
    }

    // NOTE(review): both arguments to nd4j_max are the same expression; this
    // reduces to nd4j_abs(reduction).
    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return nd4j::math::nd4j_max<T>(nd4j::math::nd4j_abs<T>(reduction), nd4j::math::nd4j_abs<T>(reduction));
    }
};

/**
 * Reduction op: sample variance. Expects the precomputed mean in
 * extraParams[0]; accumulates squared deviations and divides by (n - 1).
 */
template<typename T>
class Variance {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) {
        return static_cast<T>(0.0f);
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return old + opOutput;
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return old + opOutput;
    }

    op_def static T op(T d1, T *extraParams) {
        T mean = extraParams[0];
        T ret = d1 - mean;
        return ret * ret;
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        // T bias = extraParams[1];
        // return (reduction - (nd4j::math::nd4j_pow<T>(bias, static_cast<T>(2.0f)) / static_cast<T>(n))) / (n - 1)
        return reduction / static_cast<T>(n - 1);
    }
};

/**
 * Standard deviation of a buffer — square root of the sample Variance op's
 * result (mean in extraParams[0], divisor n - 1).
 */
template<typename T>
class StandardDeviation {
public:
    no_op_exec_special_accumulation
    no_op_exec_special_accumulation_cuda

    op_def static T startingValue(const T *input) {
        return static_cast<T>(0.0f);
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return old + opOutput;
    }

    op_def static T update(T old, T opOutput, T *extraParams) {
        return old + opOutput;
    }

    op_def static T op(T d1, T *extraParams) {
        T mean = extraParams[0];
        T ret = d1 - mean;
        return ret * ret;
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        T ret = Variance<T>::postProcess(reduction, n, extraParams);
        T sqrtRet = nd4j::math::nd4j_sqrt<T>(ret);
        return sqrtRet;
    }
};

/**
 * Pairwise reduction: cosine similarity. Accumulates the dot product as the
 * main reduction while the two squared norms accumulate in extraParams[0..1];
 * postProcess divides by the product of the norms.
 */
template<typename T>
class CosineSimilarity {
public:
    static const int extraParamsLen = 2;

    op_def static T *generateExtraParams() {
        //T *extraParams = new T[2];
        return nullptr;
    }

    op_def static void finalizeExtraParams(T *extraParams) {
        //delete[] extraParams;
    }

    op_def static T startingValue(T *input) {
        return static_cast<T>(0.0f);
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return reduction / (nd4j::math::nd4j_sqrt<T>(extraParams[0]) * nd4j::math::nd4j_sqrt<T>(extraParams[1]));
    }

    op_def static T op(T d1, T d2, T *extraParams) {
        extraParams[0] += d1 * d1;
        extraParams[1] += d2 * d2;
        return (d1 * d2);
    }

    op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {
        extraParamsTotal[0] += extraParamsLocal[0];
        extraParamsTotal[1] += extraParamsLocal[1];
    }

#ifdef __CUDACC__
    // device-side variant: norm accumulators are shared across threads, so
    // they must be updated atomically
    static _CUDA_D inline T opAtomic(T d1, T d2, T *extraParams) {
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],static_cast<T>(d1 * d1));
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1],static_cast<T>(d2 * d2));
        return (d1 * d2);
    }
#endif

    op_def static T update(T old, T opOutput, T *extraParams) {
        return old + opOutput;
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return update(old, opOutput, extraParams);
    }
};

/**
 * Pairwise reduction: Jaccard distance, 1 - sum(min)/sum(max). The numerator
 * and denominator accumulate in extraParams[0..1]; the main reduction value
 * is unused (always 0).
 */
template<typename T>
class JaccardDistance {
public:
    static const int extraParamsLen = 2;

    op_def static T *generateExtraParams() {
        //T *extraParams = new T[2];
        return nullptr;
    }

    op_def static void finalizeExtraParams(T *extraParams) {
        //delete[] extraParams;
    }

    op_def static T startingValue(T *input) {
        return static_cast<T>(0.0f);
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        // num / denom
        return (static_cast<T>(1.0f)) - (extraParams[0] / extraParams[1]);
    }

    op_def static T num(T d1, T d2) {
        return nd4j::math::nd4j_min<T>(d1, d2);
    }

    op_def static T denom(T d1, T d2) {
        return nd4j::math::nd4j_max<T>(d1, d2);
    }

    op_def static T op(T d1, T d2, T *extraParams) {
        extraParams[0] += num(d1, d2);
        extraParams[1] += denom(d1, d2);
        return static_cast<T>(0.0f);
    }

    op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {
        extraParamsTotal[0] += extraParamsLocal[0];
        extraParamsTotal[1] += extraParamsLocal[1];
    }

#ifdef __CUDACC__
    __device__
    static inline T opAtomic(T d1, T d2, T *extraParams) {
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],num(d1, d2));
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], denom(d1, d2));
        return static_cast<T>(0.0f);
    }
#endif

    op_def static T update(T old, T opOutput, T *extraParams) {
        return old + opOutput;
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return update(old, opOutput, extraParams);
    }
};

/**
 * Pairwise reduction: Hamming distance normalized by length — fraction of
 * positions where the two inputs differ (exact equality test).
 */
template<typename T>
class SimpleHammingDistance {
public:
    static const int extraParamsLen = 0;

    op_def static T *generateExtraParams() {
        //T *extraParams = new T[2];
        return nullptr;
    }

    op_def static void finalizeExtraParams(T *extraParams) {
        //delete[] extraParams;
    }

    op_def static T startingValue(T *input) {
        return static_cast<T>(0.0f);
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return static_cast<T>(reduction / n);
    }

    op_def static T op(T d1, T d2, T *extraParams) {
        return (d1 == d2) ? static_cast<T>(0.0f) : static_cast<T>(1.0f);
    }

    op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {

    }

#ifdef __CUDACC__
    __device__
    static inline T opAtomic(T d1, T d2, T *extraParams) {
        return op(d1, d2, extraParams);
    }
#endif

    op_def static T update(T old, T opOutput, T *extraParams) {
        return old + opOutput;
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return update(old, opOutput, extraParams);
    }
};

/**
 * Pairwise reduction: cosine distance, 1 - cosine similarity. Squared norms
 * accumulate in extraParams[0..1] (via |x|*|x|); the main reduction is the
 * dot product.
 */
template<typename T>
class CosineDistance {
public:
    static const int extraParamsLen = 2;

    op_def static T *generateExtraParams() {
        //T *extraParams = new T[2];
        return nullptr;
    }

    op_def static void finalizeExtraParams(T *extraParams) {
        //delete[] extraParams;
    }

    op_def static T startingValue(T *input) {
        return static_cast<T>(0.0f);
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
        return (static_cast<T>(1.0f)) - (reduction / (nd4j::math::nd4j_sqrt<T>(extraParams[0]) * nd4j::math::nd4j_sqrt<T>(extraParams[1])));
    }

    op_def static T op(T d1, T d2, T *extraParams) {
        extraParams[0] += nd4j::math::nd4j_abs<T>(d1) * nd4j::math::nd4j_abs<T>(d1);
        extraParams[1] += nd4j::math::nd4j_abs<T>(d2) * nd4j::math::nd4j_abs<T>(d2);
        return (d1 * d2);
    }

    op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {
        extraParamsTotal[0] += extraParamsLocal[0];
        extraParamsTotal[1] += extraParamsLocal[1];
    }

#ifdef __CUDACC__
    static _CUDA_D inline T opAtomic(T d1, T d2, T *extraParams) {
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0], nd4j::math::nd4j_abs<T>(d1) * nd4j::math::nd4j_abs<T>(d1));
        nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], nd4j::math::nd4j_abs<T>(d2) * nd4j::math::nd4j_abs<T>(d2));
        return (d1 * d2);
    }
#endif

    op_def static T update(T old, T opOutput, T *extraParams) {
        return old + opOutput;
    }

    op_def static T merge(T old, T opOutput, T *extraParams) {
        return update(old, opOutput, extraParams);
    }
};


/**
 * Dot product between 2 arrays
 */
template<typename T>
class Dot {
public:
    static const int extraParamsLen = 0;

    op_def static T * generateExtraParams() {
        return nullptr;
    }

    op_def static void finalizeExtraParams(T *extraParamsRef) {
        //no-op
        //delete[] * extraParamsRef;
    }

    op_def static T startingValue(T *input) {
        return static_cast<T>(0.0f);
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParamsRef) {
        return reduction;
    }

    op_def static T op(T d1, T d2, T *extraParamsRef) {
        return d1 * d2;
    }

#ifdef __CUDACC__
    __device__
    static inline T opAtomic(T d1, T d2, T *extraParamsRef) {
        return op(d1, d2, extraParamsRef);
    }
#endif

    op_def static T update(T old, T opOutput, T *extraParamsRef) {
        return opOutput + old;
    }

    op_def static T merge(T old, T opOutput, T *extraParamsRef) {
        return update(old, opOutput, extraParamsRef);
    }

    op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {}
};


/**
 * Op to check equality within arrays
 *
 * Per-element result is 0 when the pair is "equal within eps", 1 otherwise;
 * the reduction therefore counts mismatches.
 */
template<typename T>
class EqualsWithEps {
public:
    static const int extraParamsLen = 0;

    op_def static T * generateExtraParams() {
        return nullptr;
    }

    op_def static void finalizeExtraParams(T *extraParamsRef) {
        //no-op
    }

    op_def static T startingValue(T *input) {
        return static_cast<T>(0.0f);
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParamsRef) {
        return reduction;
    }

    op_def static T op(T d1, T d2, T *extraParamsRef) {
        // NOTE(review): eps is read from extraParamsRef[2] even though
        // extraParamsLen is declared 0 — callers must supply a buffer with at
        // least 3 entries; confirm the calling convention.
        T eps = extraParamsRef[2];
        T diff = nd4j::math::nd4j_abs<T>(d1 - d2);

        // works well except in the range of very large numbers
        if (diff <= eps)
            return static_cast<T>(0.f);

        // Knuth approach
        // works well except in the range of very small numbers
        if (diff <= nd4j::math::nd4j_max(nd4j::math::nd4j_abs(d1), nd4j::math::nd4j_abs(d2)) * eps)
            return static_cast<T>(0.f);

        return static_cast<T>(1.f);
    }

#ifdef __CUDACC__
    __device__
    static inline T opAtomic(T d1, T d2, T *extraParamsRef) {
        return op(d1, d2, extraParamsRef);
    }
#endif

    op_def static T update(T old, T opOutput, T *extraParamsRef) {
        return opOutput + old;
    }

    op_def static T merge(T old, T opOutput, T *extraParamsRef) {
        return update(old, opOutput, extraParamsRef);
    }

    op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {}
};

/**
 * Pairwise reduction: Euclidean (L2) distance — sqrt of the summed squared
 * differences.
 */
template<typename T>
class EuclideanDistance {
public:
    static const int extraParamsLen = 0;

    op_def static T * generateExtraParams() {
        return nullptr;
    }

    op_def static void finalizeExtraParams(T *extraParamsRef) {
        //no-op
    }

    op_def static T startingValue(T *input) {
        return static_cast<T>(0.0f);
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParamsRef) {
        return nd4j::math::nd4j_sqrt<T>(reduction);
    }

    op_def static T op(T d1, T d2, T *extraParamsRef) {
        T ret = d1 - d2;
        return ret * ret;
    }

#ifdef __CUDACC__
    __device__
    static inline T opAtomic(T d1, T d2, T *extraParamsRef) {
        return op(d1, d2, extraParamsRef);
    }
#endif

    op_def static T update(T old, T opOutput, T *extraParamsRef) {
        return opOutput + old;
    }

    op_def static T merge(T old, T opOutput, T *extraParamsRef) {
        return update(old, opOutput, extraParamsRef);
    }

    op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {}
};

/**
 * Pairwise reduction: Manhattan (L1) distance — sum of absolute differences.
 */
template<typename T>
class ManhattanDistance {
public:
    static const int extraParamsLen = 0;

    op_def static T * generateExtraParams() {
        return nullptr;
    }

    op_def static void finalizeExtraParams(T *extraParamsRef) {
        //no-op
    }

    op_def static T startingValue(T *input) {
        return static_cast<T>(0.0f);
    }

    op_def static T postProcess(T reduction, Nd4jLong n, T *extraParamsRef) {
        return reduction;
    }

    op_def static T op(T d1, T d2, T *extraParamsRef) {
        return nd4j::math::nd4j_abs<T>(d1 - d2);
    }

    op_def static T update(T old, T opOutput, T *extraParamsRef) {
        return old + opOutput;
    }

    op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {

    }

#ifdef __CUDACC__
    __device__
    static inline T opAtomic(T d1, T d2, T *extraParamsRef) {
        return op(d1, d2, extraParamsRef);
    }
#endif

#ifndef __clang__
#pragma omp declare simd uniform(extraParamsRef)
#endif
    op_def static T merge(T old, T opOutput, T *extraParamsRef) {
        return update(old, opOutput, extraParamsRef);
    }
};

/**
 * Index-reduction: position of the element with the largest absolute value.
 * Accumulator value is kept as |value|.
 */
template<typename T>
class IndexAbsoluteMax {
public:
#ifdef __CUDACC__
    __host__ __device__
#endif
    // NOTE(review): unlike the other Index* ops (which return val unchanged),
    // this applies nd4j_abs<T> to the whole IndexValue<T> — confirm an
    // IndexValue overload/conversion exists for nd4j_abs.
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> val, T *extraParams) {
        return nd4j::math::nd4j_abs<T>(val);
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> update(
            functions::indexreduce::IndexValue<T> old,
            functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
        opOutput.value = nd4j::math::nd4j_abs<T>(opOutput.value);
        old.value = nd4j::math::nd4j_abs<T>(old.value);
        if (opOutput.value > old.value)
            return opOutput;
#ifdef __CUDACC__
        // workaround for cuda race condition at merge phase
        else if (opOutput.value == old.value && opOutput.index < old.index)
            return opOutput;
#elif defined(__GNUC__)
#endif
        return old;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // NOTE(review): returns f2 when f1's |value| is GREATER — the apparent
    // inversion of the comparison; same pattern appears in all Index* merges
    // here, so confirm against the reducer's calling convention before
    // "fixing".
    static inline functions::indexreduce::IndexValue<T> merge(
            functions::indexreduce::IndexValue<T> f1,
            functions::indexreduce::IndexValue<T> f2, T *extraParams) {
        if (nd4j::math::nd4j_abs<T>(f1.value) > nd4j::math::nd4j_abs<T>(f2.value))
            return f2;
        return f1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> postProcess(
            functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
            T *dx, int incx, T *extraParams, T *result) {
        return reduction;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline T startingValue(T *input) {
        return MIN_FLOAT;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
        functions::indexreduce::IndexValue<T> local;
        local.value = startingValue(input);
        local.index = 0;
        return local;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1,
            functions::indexreduce::IndexValue<T> d2, T *extraParams) {
        return d1;
    }
};

/**
 * Index-reduction: first (lowest) index whose value satisfies the
 * MatchCondition encoded in extraParams. Index -1 marks "not found yet".
 */
template<typename T>
class FirstIndex {
public:
#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> val, T *extraParams) {
        return val;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static functions::indexreduce::IndexValue<T> update(
            functions::indexreduce::IndexValue<T> old,
            functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {

#ifdef __CUDACC__
        if (opOutput.index < 0)
            return old;
#endif

        T res = simdOps::MatchCondition<T>::op(opOutput.value, extraParams);

        //printf("res: %f; oldIdx: %i; newIdx: %i\n", res, old.index, opOutput.index);

        if (res == static_cast<T>(0.0f))
            return old;

        if (old.index < 0)
            return opOutput;

        if (old.index > opOutput.index)
            return opOutput;

        return old;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline T startingValue(T *input) {
        return - nd4j::DataTypeUtils::max<T>();
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
        functions::indexreduce::IndexValue<T> local;
        local.value = startingValue(input);
        local.index = -1;
        return local;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1,
            functions::indexreduce::IndexValue<T> d2, T *extraParams) {
        return d1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // picks the candidate with the lower index (keeps the FIRST match)
    static inline functions::indexreduce::IndexValue<T> merge(
            functions::indexreduce::IndexValue<T> f1,
            functions::indexreduce::IndexValue<T> f2, T *extraParams) {
        if (f1.index > f2.index)
            return f2;
        return f1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> postProcess(
            functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
            T *dx, int incx, T *extraParams, T *result) {
        return reduction;
    }
};
/**
 * Index-reduction: last (highest) index whose value satisfies the
 * MatchCondition encoded in extraParams. Index -1 marks "not found yet".
 * Mirror of FirstIndex with the index comparisons reversed.
 */
template<typename T>
class LastIndex {
public:
#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> val, T *extraParams) {
        return val;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static functions::indexreduce::IndexValue<T> update(
            functions::indexreduce::IndexValue<T> old,
            functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {

#ifdef __CUDACC__
        if (opOutput.index < 0)
            return old;
#endif

        T res = simdOps::MatchCondition<T>::op(opOutput.value, extraParams);

        if (res == static_cast<T>(0.0f))
            return old;

        if (old.index < 0)
            return opOutput;

        if (old.index < opOutput.index)
            return opOutput;

        return old;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline T startingValue(T *input) {
        return -nd4j::DataTypeUtils::max<T>();
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
        functions::indexreduce::IndexValue<T> local;
        local.value = startingValue(input);
        local.index = -1;
        return local;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1,
            functions::indexreduce::IndexValue<T> d2, T *extraParams) {
        return d1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // picks the candidate with the higher index (keeps the LAST match)
    static inline functions::indexreduce::IndexValue<T> merge(
            functions::indexreduce::IndexValue<T> f1,
            functions::indexreduce::IndexValue<T> f2, T *extraParams) {
        if (f1.index < f2.index)
            return f2;
        return f1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> postProcess(
            functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
            T *dx, int incx, T *extraParams, T *result) {
        return reduction;
    }
};

/**
 * Index-reduction: argmax — position of the maximum element.
 */
template<typename T>
class IndexMax {
public:
#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> val, T *extraParams) {
        return val;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static functions::indexreduce::IndexValue<T> update(
            functions::indexreduce::IndexValue<T> old,
            functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
        if (opOutput.value > old.value) {
            return opOutput;
        }
#ifdef __CUDACC__
        // workaround for cuda race condition at merge phase
        else if (opOutput.value == old.value && opOutput.index < old.index)
            return opOutput;
#elif defined(__GNUC__)
#endif
        return old;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // NOTE(review): returns f2 when f1.value is GREATER — apparent inversion,
    // shared by every Index* merge in this file; confirm the reducer's calling
    // convention before changing.
    static inline functions::indexreduce::IndexValue<T> merge(
            functions::indexreduce::IndexValue<T> f1,
            functions::indexreduce::IndexValue<T> f2, T *extraParams) {
        if (f1.value > f2.value)
            return f2;
        return f1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> postProcess(
            functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
            T *dx, int incx, T *extraParams, T *result) {
        return reduction;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline T startingValue(T *input) {
        return -nd4j::DataTypeUtils::max<T>();
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
        functions::indexreduce::IndexValue<T> local;
        local.value = startingValue(input);
        local.index = 0;
        return local;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1,
            functions::indexreduce::IndexValue<T> d2, T *extraParams) {
        return d1;
    }
};

/**
 * Index-reduction: position of the element with the smallest absolute value.
 */
template<typename T>
class IndexAbsoluteMin {
public:
#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(
            functions::indexreduce::IndexValue<T> val, T *extraParams) {
        return val;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline T startingValue(T *input) {
        return nd4j::DataTypeUtils::max<T>();
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
        functions::indexreduce::IndexValue<T> local;
        local.value = startingValue(input);
        local.index = 0;
        return local;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> update(
            functions::indexreduce::IndexValue<T> old,
            functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
        opOutput.value = nd4j::math::nd4j_abs<T>(opOutput.value);
        old.value = nd4j::math::nd4j_abs<T>(old.value);
        if (opOutput.value < old.value)
            return opOutput;
#ifdef __CUDACC__
        // workaround for cuda race condition at merge phase
        else if (opOutput.value == old.value && opOutput.index < old.index)
            return opOutput;
#elif defined(__GNUC__)
#endif
        return old;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> merge(
            functions::indexreduce::IndexValue<T> f1,
            functions::indexreduce::IndexValue<T> f2, T *extraParams) {
        if (nd4j::math::nd4j_abs<T>(f1.value) < nd4j::math::nd4j_abs<T>(f2.value))
            return f2;
        return f1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> postProcess(
            functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
            T *dx, int incx, T *extraParams, T *result) {
        return reduction;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1,
            functions::indexreduce::IndexValue<T> d2, T *extraParams) {
        return d1;
    }
};

/**
 * Index-reduction: argmin — position of the minimum element.
 */
template<typename T>
class IndexMin {
public:
#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(
            functions::indexreduce::IndexValue<T> val, T *extraParams) {
        return val;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline T startingValue(T *input) {
        return nd4j::DataTypeUtils::max<T>();
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
        functions::indexreduce::IndexValue<T> local;
        local.value = startingValue(input);
        local.index = 0;
        return local;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> update(
            functions::indexreduce::IndexValue<T> old,
            functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
        if (opOutput.value < old.value)
            return opOutput;
#ifdef __CUDACC__
        // workaround for cuda race condition at merge phase
        else if (opOutput.value == old.value && opOutput.index < old.index)
            return opOutput;
#elif defined(__GNUC__)
#endif
        return old;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> merge(
            functions::indexreduce::IndexValue<T> f1,
            functions::indexreduce::IndexValue<T> f2, T *extraParams) {
        if (f1.value < f2.value)
            return f2;
        return f1;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> postProcess(
            functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
            T *dx, int incx, T *extraParams, T *result) {
        return reduction;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1,
            functions::indexreduce::IndexValue<T> d2, T *extraParams) {
        return d1;
    }
};

/**
 * Summary-stats op: variance, optionally bias-corrected. Falls back to the
 * plain (biased) variance if the corrected value comes out negative.
 */
template<typename T>
class SummaryStatsVariance {
public:
    static _CUDA_HD inline T getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<T> val) {
        if (biasCorrected) {
            T ret = val.varianceBiasCorrected();
            if (ret < static_cast<T>(0.0f))
                return val.variance();
            return ret;
        }
        return val.variance();
    }

    static _CUDA_HD inline functions::summarystats::SummaryStatsData<T> op(functions::summarystats::SummaryStatsData<T> d1,T *extraParams) {
        return d1;
    }
};

/**
 * Summary-stats op: standard deviation — square root of the (optionally
 * bias-corrected) variance, with the same negative-value fallback.
 */
template<typename T>
class SummaryStatsStandardDeviation {
public:
    static _CUDA_HD inline T getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<T> val) {
        if (biasCorrected) {
            T ret = val.varianceBiasCorrected();
            if (ret < static_cast<T>(0.0f))
                return nd4j::math::nd4j_sqrt(val.variance());
            else
                return nd4j::math::nd4j_sqrt(ret);
        }
        return nd4j::math::nd4j_sqrt(val.variance());
    }

    static _CUDA_HD inline functions::summarystats::SummaryStatsData<T> op(functions::summarystats::SummaryStatsData<T> d1,T *extraParams) {
        return d1;
    }
};

/**
 * Transform op: dropout — zeroes an element with probability controlled by
 * params[0], otherwise passes it through unchanged.
 */
template<typename T>
class DropOut {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    inline _CUDA_D static T op(T d1, T *params) {
        T prob = params[0];

#ifdef __CUDACC__
        // pseudo-random draw derived from clock64() and the thread id (no RNG
        // state available here)
        T length = params[1];
        T tid = gridDim.x * blockDim.x + threadIdx.x;
        T rnd = nd4j::math::nd4j_abs<T>(nd4j::math::nd4j_cos<T>(static_cast<T>(clock64()) * static_cast<T>(tid) + static_cast<T>(length) * static_cast<T>(tid)));
#else
        // NOTE(review): rand() / RAND_MAX is INTEGER division — rnd is 0 for
        // every draw except rand() == RAND_MAX, so with prob > 0 nearly every
        // element is zeroed. Presumably static_cast<T>(rand()) / RAND_MAX was
        // intended; confirm before changing behavior.
        T rnd = static_cast<T>(rand() / RAND_MAX);
#endif
        return rnd >= prob ? static_cast<T>(0.0f) : d1;
    }
};

/**
 * Transform op: inverted dropout — like DropOut, but survivors are scaled by
 * 1/prob so the expected activation magnitude is preserved.
 */
template<typename T>
class DropOutInverted {
public:
    no_op_exec_special
    no_op_exec_special_cuda

#ifdef __CUDACC__
    __device__
#endif
    inline static T op(T d1, T *params) {
        T prob = params[0];

#ifdef __CUDACC__
        T length = params[1];
        T tid = gridDim.x * blockDim.x + threadIdx.x;
        T rnd = nd4j::math::nd4j_abs<T>(nd4j::math::nd4j_cos<T>(static_cast<T>(clock64()) * static_cast<T>(tid) + static_cast<T>(length) * static_cast<T>(tid)));
#else
        // NOTE(review): same integer-division issue as DropOut above.
        T rnd = static_cast<T>(rand() / RAND_MAX);
#endif
        return rnd >= prob ? static_cast<T>(0.0f) : d1 / prob;
    }
};

/**
 * Transform op: replaces NaN values with the replacement given in params[0].
 */
template<typename T>
class ReplaceNans {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) {
        T replacement = params[0];
        return nd4j::math::nd4j_isnan(d1) ? replacement : d1 ;
    }
};

// this op is used for conditional pairwise transforms only
/**
 * Pairwise transform: returns d2 (the replacement) when d1 satisfies the
 * comparison selected by mode = params[3] against compare = params[0]
 * (eps = params[2] for the equality modes), otherwise returns d1 unchanged.
 */
template<typename T>
class CompareAndReplace{
public:
    no_op_exec_special
    no_op_exec_special_cuda

    // op definition for PairWise Transform
    op_def static T op(T d1, T d2, T *params) {
        T compare = params[0];
        T eps = params[2];
        int mode = (int) params[3];
        if (mode == 0) // equals
            if (nd4j::math::nd4j_abs<T>(d1 - compare) <= eps)
                return d2;
            else
                return d1;
        else if (mode == 1) // not equals eps
            if (nd4j::math::nd4j_abs<T>(d1 - compare) > eps)
                return d2;
            else
                return d1;
        else if (mode == 2) // less_than eps
            if (d1 < compare)
                return d2;
            else
                return d1;
        else if (mode ==3) // greater_than
            if (d1 > compare)
                return d2;
            else
                return d1;
        else if (mode == 4) // less_or_equals_than
            if (d1 <= compare)
                return d2;
            else
                return d1;
        else if (mode == 5) // greater_or_equals_than
            if (d1 >= compare)
                return d2;
            else
                return d1;
        else if (mode == 6) // abs_less_than
            if (nd4j::math::nd4j_abs<T>(d1) < compare)
                return d2;
            else
                return d1;
        else if (mode == 7) // abs_greater_than
            if (nd4j::math::nd4j_abs<T>(d1) > compare)
                return d2;
            else
                return d1;
        else if (mode == 8) // is inf
            if (nd4j::math::nd4j_isinf(d1))
                return d2;
            else
                return d1;
        else if (mode == 9) // is nan
            if (nd4j::math::nd4j_isnan(d1))
                return d2;
            else
                return d1;
        else if (mode == 10)
            if (d1 == compare)
                return d2;
            else
                return d1;
        else if (mode == 11)
            if (d1 != compare)
                return d2;
            else
                return d1;
        else if (mode == 12) // abs_greater_or_equals_than
            if (nd4j::math::nd4j_abs<T>(d1) >= compare)
                return d2;
            else
                return d1;
        else if (mode == 13) // abs_less_or_equals_than
            if (nd4j::math::nd4j_abs<T>(d1) <= compare)
                return d2;
            else
                return d1;
        else
            printf("Undefined boolean operation: [%i]\n", mode);
        return d1;
    }
};

/**
 * Transform op (unary + pairwise): "compare and set".
 * Unary form: returns set = params[1] when d1 satisfies the comparison
 * selected by mode = params[3] against compare = params[0] (eps = params[2]).
 * Pairwise form: returns d2 when d2 satisfies the comparison (modes 0-11).
 */
template<typename T>
class CompareAndSet {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    // op definition for Transform
    op_def static T op(T d1, T *params) {
        T compare = params[0];
        T set = params[1];
        T eps = params[2];

        // with mode == 0 we do set if d1 equals to compare, and with mode == 1 - we go otherwise
        int mode = (int) params[3];
        if (mode == 0) // equals
            if (nd4j::math::nd4j_abs<T>(d1 - compare) <= eps)
                return set;
            else
                return d1;
            //return nd4j::math::nd4j_abs<T>(d1 - compare) <= eps ? set : d1;
        else if (mode == 1) // not equals
            if (nd4j::math::nd4j_abs<T>(d1 - compare) > eps)
                return set;
            else
                return d1;
            //return nd4j::math::nd4j_abs<T>(d1 - compare) > eps ? set : d1;
        else if (mode == 2) // less_than
            if (d1 < compare)
                return set;
            else
                return d1;
        else if (mode ==3) // greater_than
            if (d1 > compare)
                return set;
            else
                return d1;
        else if (mode == 4) // less_or_equals_than
            if (d1 <= compare)
                return set;
            else
                return d1;
        else if (mode == 5) // greater_or_equals_than
            if (d1 >= compare)
                return set;
            else
                return d1;
        else if (mode == 6) // abs_less_than
            if (nd4j::math::nd4j_abs<T>(d1) < compare)
                return set;
            else
                return d1;
        else if (mode == 7) // abs_greater_than
            if (nd4j::math::nd4j_abs<T>(d1) > compare)
                return set;
            else
                return d1;
        else if (mode == 8) // is inf
            if (nd4j::math::nd4j_isinf(d1))
                return set;
            else
                return d1;
        else if (mode == 9) // is nan
            if (nd4j::math::nd4j_isnan(d1))
                return set;
            else
                return d1;
        else if (mode == 10)
            if (d1 == compare)
                return set;
            else
                return d1;
        else if (mode == 11)
            if (d1 != compare)
                return set;
            else
                return d1;
        else if (mode == 12) // abs_greater_or_equals_than
            if (nd4j::math::nd4j_abs<T>(d1) >= compare)
                return set;
            else
                return d1;
        else if (mode == 13) // abs_less_or_equals_than
            if (nd4j::math::nd4j_abs<T>(d1) <= compare)
                return set;
            else
                return d1;
        else
            printf("Undefined boolean operation: [%i]\n", mode);
        return d1;
    }

    // op definition for PairWise Transform
    op_def static T op(T d1, T d2, T *params) {
        T compare = params[0];
        T eps = params[2];
        int mode = (int) params[3];
        if (mode == 0) // equals
            if (nd4j::math::nd4j_abs<T>(d2 - compare) <= eps)
                return d2;
            else
                return d1;
        else if (mode == 1) // not equals
            if (nd4j::math::nd4j_abs<T>(d2 - compare) > eps)
                return d2;
            else
                return d1;
        else if (mode == 2) // less_than
            if (d2 < compare)
                return d2;
            else
                return d1;
        else if (mode ==3) // greater_than
            if (d2 > compare)
                return d2;
            else
                return d1;
        else if (mode == 4) // less_or_equals_than
            if (d2 <= compare)
                return d2;
            else
                return d1;
        else if (mode == 5) // greater_or_equals_than
            if (d2 >= compare)
                return d2;
            else
                return d1;
        else if (mode == 6) // abs_less_than
            if (nd4j::math::nd4j_abs<T>(d2) < compare)
                return d2;
            else
                return d1;
        else if (mode == 7) // abs_greater_than
            if (nd4j::math::nd4j_abs<T>(d2) > compare)
                return d2;
            else
                return d1;
        else if (mode == 8) // is inf
            if (nd4j::math::nd4j_isinf(d2))
                return d2;
            else
                return d1;
        else if (mode == 9) // is nan
            if (nd4j::math::nd4j_isnan(d2))
                return d2;
            else
                return d1;
        else if (mode == 10)
            if (d2 == compare)
                return d2;
            else
                return d1;
        else if (mode == 11)
            if (d2 != compare)
                return d2;
            else
                return d1;
        else if (mode == 12) // abs_greater_or_equals_than
            // NOTE(review): modes 12 and 13 test d1 here while every other
            // mode in this pairwise overload tests d2 — likely a copy-paste
            // slip from CompareAndReplace; confirm intended semantics.
            if (nd4j::math::nd4j_abs<T>(d1) >= compare)
                return d2;
            else
                return d1;
        else if (mode == 13) // abs_less_or_equals_than
            if (nd4j::math::nd4j_abs<T>(d1) <= compare)
                return d2;
            else
                return d1;
        else
            printf("Undefined boolean operation: [%i]\n", mode);
        return d1;
    }
};
} // closes the enclosing namespace (opened above this chunk; presumably simdOps)

#endif
deconvolution_packnto1.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Deconvolution (transposed convolution), RISC-V Vector path.
// Input layout is "packn" (packn channels interleaved per spatial element,
// packn = vector-register bytes / 4, i.e. one float lane per element);
// output is plain 1-channel-per-plane layout, hence "packnto1".
//
// The kernel is computed gather-style: for each output pixel (i, j) it walks
// the kernel taps and accumulates the contributing input pixels, i.e. the
// transpose of a strided convolution. weight_data_packnto1 is assumed to be
// pre-reordered as [outch][channels][maxk][packn] — TODO confirm against the
// packing code, which is outside this file.
static void deconvolution_packnto1_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packnto1, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    const int packn = csrr_vlenb() / 4;
    const word_type vl = vsetvl_e32m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // effective kernel footprint once dilation is applied
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // scalar part of the accumulator starts at the bias
                float sum = 0.f;

                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                // vector accumulator: one partial sum per packed input lane
                vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);

                // weights for output channel p
                const float* kptr = (const float*)weight_data_packnto1 + maxk * channels * p * packn;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // map the output row back to the input row this tap reads;
                        // skip taps that fall between strides or off the top
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            // same mapping for the column
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            const float* sptr = m.row(sy) + sx * packn;

                            int k = y * kernel_w + x;

                            // fused multiply-accumulate over the packn lanes
                            vfloat32m1_t _val = vle32_v_f32m1(sptr, vl);
                            vfloat32m1_t _w = vle32_v_f32m1(kptr + k * packn, vl);
                            _sum = vfmacc_vv_f32m1(_sum, _val, _w, vl);
                        }
                    }

                    kptr += maxk * packn;
                }

                // horizontal reduction of the vector accumulator, seeded with
                // the scalar bias sum
                sum = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl));

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = sum;
            }

            outptr += outw;
        }
    }
}
SpatialReplicationPadding.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialReplicationPadding.c"
#else

/* Replication ("edge") padding for one batch element: each output pixel copies
 * the nearest input pixel, clamping coordinates into the valid input range.
 * Negative pads crop; the iStart/oStart offsets line the two frames up. */
static void THNN_(SpatialReplicationPadding_updateOutput_frame)(
  real *input_p, real *output_p,
  long nslices,
  long iwidth, long iheight,
  long owidth, long oheight,
  int pad_l, int pad_r,
  int pad_t, int pad_b)
{
  int iStartX = fmax(0, -pad_l);
  int iStartY = fmax(0, -pad_t);
  int oStartX = fmax(0, pad_l);
  int oStartY = fmax(0, pad_t);

  long k, ip_x, ip_y;
#pragma omp parallel for private(k, ip_x, ip_y)
  for (k = 0; k < nslices; k++)
  {
    long i, j;
    for (i = 0; i < oheight; i++) {
      for (j = 0; j < owidth; j++) {
        /* clamp the output column into [pad_l, iwidth + pad_l) ... */
        if (j < pad_l) {
          ip_x = pad_l;
        } else if (j >= pad_l && j < iwidth + pad_l) {
          ip_x = j;
        } else {
          ip_x = iwidth + pad_l - 1;
        }
        /* ... then translate into input coordinates */
        ip_x = ip_x - oStartX + iStartX;

        /* same clamping for the row */
        if (i < pad_t) {
          ip_y = pad_t;
        } else if (i >= pad_t && i < iheight + pad_t) {
          ip_y = i;
        } else {
          ip_y = iheight + pad_t - 1;
        }
        ip_y = ip_y - oStartY + iStartY;

        real *dest_p = output_p + k*owidth*oheight + i * owidth + j;
        real *src_p = input_p + k*iwidth*iheight + ip_y * iwidth + ip_x;
        *dest_p = *src_p;
      }
    }
  }
}

/* Forward pass. Accepts a 3D (C,H,W) or 4D (N,C,H,W) tensor; resizes `output`
 * to the padded size and fills it. Raises via THArgCheck on bad shapes. */
void THNN_(SpatialReplicationPadding_updateOutput)(THNNState *state,
                                                   THTensor *input,
                                                   THTensor *output,
                                                   int pad_l, int pad_r,
                                                   int pad_t, int pad_b)
{
  int dimw = 2;
  int dimh = 1;
  int dimslices = 0;
  long nbatch = 1;
  long nslices;
  long iheight;
  long iwidth;
  long oheight;
  long owidth;
  real *input_data;
  real *output_data;

  THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input,
                "3D or 4D (batch mode) tensor expected for input, but got: %s");

  if (input->nDimension == 4)
  {
    nbatch = input->size[0];
    dimw++;
    dimh++;
    dimslices++;
  }

  /* sizes */
  nslices = input->size[dimslices];
  iheight = input->size[dimh];
  iwidth = input->size[dimw];
  oheight = iheight + pad_t + pad_b;
  owidth  = iwidth + pad_l + pad_r;

  /* BUGFIX: was `owidth >= 1 || oheight >= 1`, which accepted a zero or
   * negative output height (large negative pads) as long as the width was
   * positive, and vice versa; the resize below needs BOTH to be >= 1. */
  THArgCheck(owidth >= 1 && oheight >= 1 , 2,
             "input (H: %d, W: %d)is too small."
             " Calculated output H: %d W: %d",
             iheight, iwidth, oheight, owidth);

  /* get contiguous input */
  input = THTensor_(newContiguous)(input);

  /* resize output */
  if (input->nDimension == 3)
  {
    THTensor_(resize3d)(output, nslices, oheight, owidth);
    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);

    THNN_(SpatialReplicationPadding_updateOutput_frame)(input_data, output_data,
                                                        nslices,
                                                        iwidth, iheight,
                                                        owidth, oheight,
                                                        pad_l, pad_r,
                                                        pad_t, pad_b);
  }
  else
  {
    long p;

    THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth);
    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);

#pragma omp parallel for private(p)
    for (p = 0; p < nbatch; p++)
    {
      THNN_(SpatialReplicationPadding_updateOutput_frame)(
        input_data+p*nslices*iwidth*iheight,
        output_data+p*nslices*owidth*oheight,
        nslices,
        iwidth, iheight,
        owidth, oheight,
        pad_l, pad_r,
        pad_t, pad_b);
    }
  }

  /* cleanup */
  THTensor_(free)(input);
}

/* Backward pass for one batch element: scatter-add each output gradient into
 * the input pixel it was replicated from (same index mapping as the forward
 * frame, with src/dest swapped and `+=` instead of `=`). */
static void THNN_(SpatialReplicationPadding_updateGradInput_frame)(
  real *ginput_p, real *goutput_p,
  long nslices,
  long iwidth, long iheight,
  long owidth, long oheight,
  int pad_l, int pad_r,
  int pad_t, int pad_b)
{
  int iStartX = fmax(0, -pad_l);
  int iStartY = fmax(0, -pad_t);
  int oStartX = fmax(0, pad_l);
  int oStartY = fmax(0, pad_t);

  long k, ip_x, ip_y;
#pragma omp parallel for private(k, ip_x, ip_y)
  for (k = 0; k < nslices; k++)
  {
    long i, j;
    for (i = 0; i < oheight; i++) {
      for (j = 0; j < owidth; j++) {
        if (j < pad_l) {
          ip_x = pad_l;
        } else if (j >= pad_l && j < iwidth + pad_l) {
          ip_x = j;
        } else {
          ip_x = iwidth + pad_l - 1;
        }
        ip_x = ip_x - oStartX + iStartX;

        if (i < pad_t) {
          ip_y = pad_t;
        } else if (i >= pad_t && i < iheight + pad_t) {
          ip_y = i;
        } else {
          ip_y = iheight + pad_t - 1;
        }
        ip_y = ip_y - oStartY + iStartY;

        real *src_p = goutput_p + k*owidth*oheight + i * owidth + j;
        real *dest_p = ginput_p + k*iwidth*iheight + ip_y * iwidth + ip_x;
        /* accumulate: several output pixels may map to one input pixel */
        *dest_p += *src_p;
      }
    }
  }
}

/* Backward pass. Validates gradOutput against the expected padded size,
 * zero-fills gradInput (resized like input) and scatter-adds the gradients. */
void THNN_(SpatialReplicationPadding_updateGradInput)(THNNState *state,
                                                      THTensor *input,
                                                      THTensor *gradOutput,
                                                      THTensor *gradInput,
                                                      int pad_l, int pad_r,
                                                      int pad_t, int pad_b)
{
  int dimw = 2;
  int dimh = 1;
  int dimslices = 0;
  long nbatch = 1;
  long nslices;
  long iheight;
  long iwidth;
  long oheight;
  long owidth;

  if (input->nDimension == 4)
  {
    nbatch = input->size[0];
    dimw++;
    dimh++;
    dimslices++;
  }

  /* sizes */
  nslices = input->size[dimslices];
  iheight = input->size[dimh];
  iwidth = input->size[dimw];
  oheight = iheight + pad_t + pad_b;
  owidth  = iwidth + pad_l + pad_r;

  THArgCheck(owidth == THTensor_(size)(gradOutput, dimw), 3,
             "gradOutput width unexpected. Expected: %d, Got: %d",
             owidth, THTensor_(size)(gradOutput, dimw));
  THArgCheck(oheight == THTensor_(size)(gradOutput, dimh), 3,
             "gradOutput height unexpected. Expected: %d, Got: %d",
             oheight, THTensor_(size)(gradOutput, dimh));

  /* get contiguous gradOutput */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  /* backprop */
  if (input->nDimension == 3) {
    THNN_(SpatialReplicationPadding_updateGradInput_frame)(
      THTensor_(data)(gradInput),
      THTensor_(data)(gradOutput),
      nslices,
      iwidth, iheight,
      owidth, oheight,
      pad_l, pad_r,
      pad_t, pad_b);
  } else {
    long p;
#pragma omp parallel for private(p)
    for (p = 0; p < nbatch; p++) {
      THNN_(SpatialReplicationPadding_updateGradInput_frame)(
        THTensor_(data)(gradInput) + p * nslices * iheight * iwidth,
        THTensor_(data)(gradOutput) + p * nslices * oheight * owidth,
        nslices,
        iwidth, iheight,
        owidth, oheight,
        pad_l, pad_r,
        pad_t, pad_b);
    }
  }

  /* cleanup */
  THTensor_(free)(gradOutput);
}

#endif
DRB028-privatemissing-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* tmp should be annotated as private to avoid race condition. Data race pairs: tmp@65:5 vs. tmp@66:12 tmp@65:5 vs. tmp@65:5 */ #include <stdlib.h> #include <stdio.h> int main(int argc, char* argv[]) { int i; int tmp; int len=100; int a[100]; #pragma omp parallel for private(i ) for (i=0;i<len;i++) a[i]=i; #pragma omp parallel for private(i ,tmp ) for (i=0;i<len;i++) { tmp =a[i]+i; a[i] = tmp; } printf("a[50]=%d\n", a[50]); return 0; }
sections.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>

// OMPT barrier-event test: two threads run a `sections` construct; the
// FileCheck directives below verify that each thread reports the implicit
// barrier at the end of the sections region and at the end of the parallel
// region. The directive comments are consumed by FileCheck — do not edit
// or reorder them.
int main()
{
  int x = 0;
  #pragma omp parallel num_threads(2)
  {
    // implicit barrier after sections with nowait but with lastprivates
    // implicit barrier at end of sections
    #pragma omp sections
    {
      #pragma omp section
      {
        #pragma omp atomic
        x++;
      }
      #pragma omp section
      {
        #pragma omp atomic
        x++;
      }
    }
  }

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region_wait'

  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]

  // master thread implicit barrier at sections end
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}

  // master thread implicit barrier at parallel end
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}

  // worker thread implicit barrier at sections end
  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}

  // worker thread implicit barrier at parallel end
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]

  return 0;
}
6676.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) { int t10; int t8; int t6; int t4; int t2; for (t2 = 0; t2 <= tmax - 1; t2 += 1) { for (t4 = 0; t4 <= ny - 1; t4 += 1) ey[0][t4] = _fict_[t2]; #pragma omp parallel for for (t4 = 1; t4 <= nx - 1; t4 += 16) for (t6 = t4; t6 <= (t4 + 15 < nx - 1 ? t4 + 15 : nx - 1); t6 += 1) for (t8 = 0; t8 <= ny - 1; t8 += 16) for (t10 = t8; t10 <= (ny - 1 < t8 + 15 ? ny - 1 : t8 + 15); t10 += 1) ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]); #pragma omp parallel for for (t4 = 0; t4 <= nx - 1; t4 += 16) for (t6 = t4; t6 <= (t4 + 15 < nx - 1 ? t4 + 15 : nx - 1); t6 += 1) for (t8 = 1; t8 <= ny - 1; t8 += 16) for (t10 = t8; t10 <= (ny - 1 < t8 + 15 ? ny - 1 : t8 + 15); t10 += 1) ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]); #pragma omp parallel for for (t4 = 0; t4 <= nx - 2; t4 += 16) for (t6 = t4; t6 <= (t4 + 15 < nx - 2 ? t4 + 15 : nx - 2); t6 += 1) for (t8 = 0; t8 <= ny - 2; t8 += 16) for (t10 = t8; t10 <= (ny - 2 < t8 + 15 ? ny - 2 : t8 + 15); t10 += 1) hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]); } }
ast-dump-openmp-begin-declare-variant_1.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s
// expected-no-diagnostics

int also_before(void) {
  return 0;
}

#pragma omp begin declare variant match(device={kind(gpu)})
int also_after(void) {
  return 1;
}
int also_before(void) {
  return 2;
}
#pragma omp end declare variant

#pragma omp begin declare variant match(device={kind(fpga)})

This text is never parsed!

#pragma omp end declare variant


int also_after(void) {
  return 0;
}

int test(void) {
  // Should return 0.
  return also_after() + also_before();
}

// Make sure:
//  - we do not see the ast nodes for the gpu kind
//  - we do not choke on the text in the kind(fpga) guarded scopes

// CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1>
// CHECK-NEXT: |   `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10>
// CHECK-NEXT: |     `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_4:0x[a-z0-9]*]] <line:25:1, line:27:1> line:25:5 used also_after 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_5:0x[a-z0-9]*]] <col:22, line:27:1>
// CHECK-NEXT: |   `-ReturnStmt [[ADDR_6:0x[a-z0-9]*]] <line:26:3, col:10>
// CHECK-NEXT: |     `-IntegerLiteral [[ADDR_7:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: `-FunctionDecl [[ADDR_8:0x[a-z0-9]*]] <line:29:1, line:32:1> line:29:5 test 'int ({{.*}})'
// CHECK-NEXT:   `-CompoundStmt [[ADDR_9:0x[a-z0-9]*]] <col:16, line:32:1>
// CHECK-NEXT:     `-ReturnStmt [[ADDR_10:0x[a-z0-9]*]] <line:31:3, col:37>
// CHECK-NEXT:       `-BinaryOperator [[ADDR_11:0x[a-z0-9]*]] <col:10, col:37> 'int' '+'
// CHECK-NEXT:         |-CallExpr [[ADDR_12:0x[a-z0-9]*]] <col:10, col:21> 'int'
// CHECK-NEXT:         | `-ImplicitCastExpr [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT:         |   `-DeclRefExpr [[ADDR_14:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_4]] 'also_after' 'int ({{.*}})'
// CHECK-NEXT:         `-CallExpr [[ADDR_15:0x[a-z0-9]*]] <col:25, col:37> 'int'
// CHECK-NEXT:           `-ImplicitCastExpr [[ADDR_16:0x[a-z0-9]*]] <col:25> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT:             `-DeclRefExpr [[ADDR_17:0x[a-z0-9]*]] <col:25> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'

// NOTE(review): the CHECK patterns above bake in exact line:column positions
// (line:5, line:25, line:29, ...), so this file's layout is load-bearing —
// do not insert or delete lines above `test`.
hotspot99.c
/** * LICENSE TERMS Copyright (c)2008-2010 University of Virginia All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted without royalty fees or other restrictions, provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the University of Virginia, the Dept. of Computer Science, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF VIRGINIA OR THE SOFTWARE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. If you use this software or a modified version of it, please cite the most relevant among the following papers: 1) S. Che, M. Boyer, J. Meng, D. Tarjan, J. W. Sheaffer, Sang-Ha Lee and K. Skadron. "Rodinia: A Benchmark Suite for Heterogeneous Computing". IEEE International Symposium on Workload Characterization, Oct 2009. 2) J. Meng and K. Skadron. 
"Performance Modeling and Automatic Ghost Zone Optimization for Iterative Stencil Loops on GPUs." In Proceedings of the 23rd Annual ACM International Conference on Supercomputing (ICS), June 2009. 3) L.G. Szafaryn, K. Skadron and J. Saucerman. "Experiences Accelerating MATLAB Systems Biology Applications." in Workshop on Biomedicine in Computing (BiC) at the International Symposium on Computer Architecture (ISCA), June 2009. 4) M. Boyer, D. Tarjan, S. T. Acton, and K. Skadron. "Accelerating Leukocyte Tracking using CUDA: A Case Study in Leveraging Manycore Coprocessors." In Proceedings of the International Parallel and Distributed Processing Symposium (IPDPS), May 2009. 5) S. Che, M. Boyer, J. Meng, D. Tarjan, J. W. Sheaffer, and K. Skadron. "A Performance Study of General Purpose Applications on Graphics Processors using CUDA" Journal of Parallel and Distributed Computing, Elsevier, June 2008. 6) S. Che, J. Li, J. W. Sheaffer, K. Skadron, and J. Lach. "Accelerating Compute Intensive Applications with GPUs and FPGAs" In Proceedings of the IEEE Symposium on Application Specific Processors (SASP), June 2008. * */ /** * This file was converted into C99 form by Mehdi Amini * 05 june 2011 */ #include <timing.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #include <omp.h> #include <time.h> #include <sys/time.h> #define STR_SIZE 256 /* maximum power density possible (say 300W for a 10mm x 10mm chip) */ #define MAX_PD (3.0e6) /* required precision in degrees */ #define PRECISION 0.001 #define SPEC_HEAT_SI 1.75e6 #define K_SI 100 /* capacitance fitting factor */ #define FACTOR_CHIP 0.5 //#define OPEN //#define NUM_THREAD 4 /* chip parameters */ double t_chip = 0.0005; double chip_height = 0.016; double chip_width = 0.016; /* ambient temperature, assuming no package at all */ double amb_temp = 80.0; int num_omp_threads; /* Single iteration of the transient solver in the grid model. 
* advances the solution of the discretized difference equations * by one time step */ void single_iteration(int row, int col, double result[row][col], double temp[row][col], double power[row][col], double Cap, double Rx, double Ry, double Rz, double step) { double delta; int r, c; //printf("num_omp_threads: %d\n", num_omp_threads); #ifdef PGI_ACC #pragma acc region { #endif #ifdef OPEN omp_set_num_threads(num_omp_threads); #pragma omp parallel for shared(power, temp,result) private(r, c, delta) firstprivate(row, col) schedule(static) #endif for (r = 0; r < row; r++) { for (c = 0; c < col; c++) { /* Corner 1 */ if((r == 0) && (c == 0)) { delta = (step / Cap) * (power[0][0] + (temp[0][1] - temp[0][0]) / Rx + (temp[0][col] - temp[0][0]) / Ry + (amb_temp - temp[0][0]) / Rz); } /* Corner 2 */ else if((r == 0) && (c == col - 1)) { delta = (step / Cap) * (power[0][c] + (temp[0][c - 1] - temp[0][c]) / Rx + (temp[1][c] - temp[0][c]) / Ry + (amb_temp - temp[0][c]) / Rz); } /* Corner 3 */ else if((r == row - 1) && (c == col - 1)) { delta = (step / Cap) * (power[r][c] + (temp[r][c - 1] - temp[r][c]) / Rx + (temp[r - 1][c] - temp[r][c]) / Ry + (amb_temp - temp[r][c]) / Rz); } /* Corner 4 */ else if((r == row - 1) && (c == 0)) { delta = (step / Cap) * (power[r][0] + (temp[r][1] - temp[r][0]) / Rx + (temp[r - 1][0] - temp[r][0]) / Ry + (amb_temp - temp[r][0]) / Rz); } /* Edge 1 */ else if(r == 0) { delta = (step / Cap) * (power[0][c] + (temp[0][c + 1] + temp[0][c - 1] - 2.0 * temp[0][c]) / Rx + (temp[1][c] - temp[0][c]) / Ry + (amb_temp - temp[0][c]) / Rz); } /* Edge 2 */ else if(c == col - 1) { delta = (step / Cap) * (power[r][c] + (temp[r + 1][c] + temp[r - 1][c] - 2.0 * temp[r][c]) / Ry + (temp[r][c - 1] - temp[r][c]) / Rx + (amb_temp - temp[r][c]) / Rz); } /* Edge 3 */ else if(r == row - 1) { delta = (step / Cap) * (power[r][c] + (temp[r][c + 1] + temp[r][c - 1] - 2.0 * temp[r][c]) / Rx + (temp[r - 1][c] - temp[r][c]) / Ry + (amb_temp - temp[r][c]) / Rz); } /* Edge 4 */ else 
if(c == 0) { delta = (step / Cap) * (power[r][0] + (temp[r+1][0] + temp[r-1][0] - 2.0 * temp[r][0]) / Ry + (temp[r+1][0] - temp[r][0]) / Rx + (amb_temp - temp[r][0]) / Rz); } /* Inside the chip */ else { delta = (step / Cap) * (power[r][c] + (temp[r + 1][c] + temp[r-1][c] - 2.0 * temp[r][c]) / Ry + (temp[r][c + 1] + temp[r][c - 1] - 2.0 * temp[r][c]) / Rx + (amb_temp - temp[r][c]) / Rz); } /* Update Temperatures */ result[r][c] = temp[r][c] + delta; } } #ifdef OPEN omp_set_num_threads(num_omp_threads); #pragma omp parallel for shared(result, temp) private(r, c) schedule(static) #endif for (r = 0; r < row; r++) { for (c = 0; c < col; c++) { temp[r][c] = result[r][c]; } } #ifdef PGI_ACC } #endif } /* Transient solver driver routine: simply converts the heat * transfer differential equations to difference equations * and solves the difference equations by iterating */ void compute_tran_temp(int row, int col, double result[row][col], int num_iterations, double temp[row][col], double power[row][col]) { #ifdef VERBOSE int i = 0; #endif double grid_height = chip_height / row; double grid_width = chip_width / col; double Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height; double Rx = grid_width / (2.0 * K_SI * t_chip * grid_height); double Ry = grid_height / (2.0 * K_SI * t_chip * grid_width); double Rz = t_chip / (K_SI * grid_height * grid_width); double max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI); double step = PRECISION / max_slope; double t; #ifdef VERBOSE fprintf(stdout, "total iterations: %d s\tstep size: %g s\n", num_iterations, step); fprintf(stdout, "Rx: %g\tRy: %g\tRz: %g\tCap: %g\n", Rx, Ry, Rz, Cap); #endif for (int i = 0; i < num_iterations; i++) { #ifdef VERBOSE fprintf(stdout, "iteration %d\n", i++); #endif single_iteration(row, col, result, temp, power, Cap, Rx, Ry, Rz, step); } #ifdef VERBOSE fprintf(stdout, "iteration %d\n", i++); #endif } void fatal(char *s) { fprintf(stderr, "error: %s\n", s); exit(1); } void 
read_input(int grid_rows, int grid_cols, double vect[grid_rows][grid_cols], char *file) { int i, j, index; FILE *fp; char str[STR_SIZE]; double val; fp = fopen(file, "r"); if(!fp) fatal("file could not be opened for reading"); for (i = 0; i < grid_rows; i++) { for (j = 0; j < grid_cols; j++) { char *s = fgets(str, STR_SIZE, fp); if(feof(fp)) fatal("not enough lines in file"); if((sscanf(str, "%lf", &val) != 1)) fatal("invalid file format"); vect[i][j] = val; } } fclose(fp); } void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <grid_rows> <grid_cols> <sim_time> <no. of threads><temp_file> <power_file>\n", argv[0]); fprintf(stderr, "\t<grid_rows> - number of rows in the grid (positive integer)\n"); fprintf(stderr, "\t<grid_cols> - number of columns in the grid (positive integer)\n"); fprintf(stderr, "\t<sim_time> - number of iterations\n"); fprintf(stderr, "\t<no. of threads> - number of threads\n"); fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n"); fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n"); exit(1); } int main(int argc, char **argv) { int grid_rows, grid_cols, sim_time, i,j; //double *temp, *power, *result; char *tfile, *pfile; /* check validity of inputs */ if(argc != 7) usage(argc, argv); if((grid_rows = atoi(argv[1])) <= 0 || (grid_cols = atoi(argv[2])) <= 0 || (sim_time = atoi(argv[3])) <= 0 || (num_omp_threads = atoi(argv[4])) <= 0) usage(argc, argv); /* allocate memory for the temperature and power arrays */ double temp[grid_rows][grid_cols]; double power[grid_rows][grid_cols]; double result[grid_rows][grid_cols]; memset(temp,0,sizeof(temp)); memset(power,0,sizeof(temp)); memset(result,0,sizeof(temp)); /* read initial temperatures and input power */ tfile = argv[5]; pfile = argv[6]; read_input(grid_rows, grid_cols, temp, tfile); read_input(grid_rows, grid_cols, power, pfile); /* Start timer. 
*/ timer_start(); /* Cheat the compiler to limit the scope of optimisation */ if(argv[5]==0) { memset(temp,0,sizeof(temp)); memset(power,0,sizeof(temp)); memset(result,0,sizeof(temp)); } // Main computation compute_tran_temp(grid_rows, grid_cols, result, sim_time, temp, power); /* Cheat the compiler to limit the scope of optimisation */ if(argv[5]==0) { for(i=0; i < grid_rows; i++) { for(j=0; j < grid_cols; j++) { fprintf(stdout, "%d\t%g\n",(i*grid_cols)+j , temp[i][j]); } } } /* Stop and print timer. */ timer_stop_display(); /*** ***/ /* output results */ #ifdef VERBOSE fprintf(stdout, "Final Temperatures:\n"); #endif #ifdef OUTPUT for(i=0; i < grid_rows; i++) for(j=0; j < grid_cols; j++) { fprintf(stdout, "%d\t%g\n",(i*grid_cols)+j , temp[i][j]); } #endif /* cleanup */ // free(temp); // free(power); return 0; }
compare.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP AAA RRRR EEEEE % % C O O MM MM P P A A R R E % % C O O M M M PPPP AAAAA RRRR EEE % % C O O M M P A A R R E % % CCCC OOO M M P A A R R EEEEE % % % % % % MagickCore Image Comparison Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/compare.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/statistic.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p a r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompareImages() compares one or more pixel channels of an image to a % reconstructed image and returns the difference image. % % The format of the CompareImages method is: % % Image *CompareImages(const Image *image,const Image *reconstruct_image, % const MetricType metric,double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. 
% */ static size_t GetImageChannels(const Image *image) { register ssize_t i; size_t channels; channels=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) != 0) channels++; } return(channels == 0 ? (size_t) 1 : channels); } MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image, const MetricType metric,double *distortion,ExceptionInfo *exception) { CacheView *highlight_view, *image_view, *reconstruct_view; const char *artifact; double fuzz; Image *clone_image, *difference_image, *highlight_image; MagickBooleanType status; PixelInfo highlight, lowlight, masklight; RectangleInfo geometry; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); assert(distortion != (double *) NULL); *distortion=0.0; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=GetImageDistortion(image,reconstruct_image,metric,distortion, exception); if (status == MagickFalse) return((Image *) NULL); columns=MagickMax(image->columns,reconstruct_image->columns); rows=MagickMax(image->rows,reconstruct_image->rows); SetGeometry(image,&geometry); geometry.width=columns; geometry.height=rows; clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); (void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception); difference_image=ExtentImage(clone_image,&geometry,exception); clone_image=DestroyImage(clone_image); if (difference_image == (Image *) NULL) return((Image *) NULL); (void) 
SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception); highlight_image=CloneImage(image,columns,rows,MagickTrue,exception); if (highlight_image == (Image *) NULL) { difference_image=DestroyImage(difference_image); return((Image *) NULL); } status=SetImageStorageClass(highlight_image,DirectClass,exception); if (status == MagickFalse) { difference_image=DestroyImage(difference_image); highlight_image=DestroyImage(highlight_image); return((Image *) NULL); } (void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception); (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception); (void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception); artifact=GetImageArtifact(image,"compare:highlight-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception); (void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception); artifact=GetImageArtifact(image,"compare:lowlight-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception); (void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception); artifact=GetImageArtifact(image,"compare:masklight-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception); /* Generate difference image. 
*/ status=MagickTrue; fuzz=GetFuzzyColorDistance(image,reconstruct_image); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); highlight_view=AcquireAuthenticCacheView(highlight_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,highlight_image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p, *magick_restrict q; register Quantum *magick_restrict r; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) || (r == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; MagickStatusType difference; register ssize_t i; if ((GetPixelReadMask(image,p) == 0) || (GetPixelReadMask(reconstruct_image,q) == 0)) { SetPixelViaPixelInfo(highlight_image,&masklight,r); p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); r+=GetPixelChannels(highlight_image); continue; } difference=MagickFalse; Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q); if ((distance*distance) > fuzz) { difference=MagickTrue; break; } } if 
(difference == MagickFalse) SetPixelViaPixelInfo(highlight_image,&lowlight,r); else SetPixelViaPixelInfo(highlight_image,&highlight,r); p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); r+=GetPixelChannels(highlight_image); } sync=SyncCacheViewAuthenticPixels(highlight_view,exception); if (sync == MagickFalse) status=MagickFalse; } highlight_view=DestroyCacheView(highlight_view); reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); (void) CompositeImage(difference_image,highlight_image,image->compose, MagickTrue,0,0,exception); (void) SetImageAlphaChannel(difference_image,OffAlphaChannel,exception); highlight_image=DestroyImage(highlight_image); if (status == MagickFalse) difference_image=DestroyImage(difference_image); return(difference_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D i s t o r t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDistortion() compares one or more pixel channels of an image to a % reconstructed image and returns the specified distortion metric. % % The format of the GetImageDistortion method is: % % MagickBooleanType GetImageDistortion(const Image *image, % const Image *reconstruct_image,const MetricType metric, % double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. 
% */ static MagickBooleanType GetAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double fuzz; MagickBooleanType status; size_t columns, rows; ssize_t y; /* Compute the absolute difference in pixels between two images. */ status=MagickTrue; fuzz=GetFuzzyColorDistance(image,reconstruct_image); rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t j, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; MagickBooleanType difference; register ssize_t i; if (GetPixelWriteMask(image,p) == 0) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } difference=MagickFalse; Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || 
((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q); if ((distance*distance) > fuzz) { channel_distortion[i]++; difference=MagickTrue; } } if (difference != MagickFalse) channel_distortion[CompositePixelChannel]++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetAbsoluteError) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static MagickBooleanType GetFuzzDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) == 0) || 
(GetPixelReadMask(reconstruct_image,q) == 0)) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); channel_distortion[i]+=distance*distance; channel_distortion[CompositePixelChannel]+=distance*distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetFuzzDistortion) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=(double) GetImageChannels(image); distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]); return(status); } static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if 
defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) == 0) || (GetPixelReadMask(reconstruct_image,q) == 0)) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=QuantumScale*fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); channel_distortion[i]+=distance; channel_distortion[CompositePixelChannel]+=distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanAbsoluteError) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= 
MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=(double) GetImageChannels(image); return(status); } static MagickBooleanType GetMeanErrorPerPixel(Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; double area, maximum_error, mean_error; size_t columns, rows; ssize_t y; status=MagickTrue; area=0.0; maximum_error=0.0; mean_error=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) == 0) || (GetPixelReadMask(reconstruct_image,q) == 0)) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q)); distortion[i]+=distance; distortion[CompositePixelChannel]+=distance; mean_error+=distance*distance; if (distance > 
maximum_error) maximum_error=distance; area++; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=distortion[CompositePixelChannel]/area; image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error/area; image->error.normalized_maximum_error=QuantumScale*maximum_error; return(status); } static MagickBooleanType GetMeanSquaredDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) == 0) || (GetPixelReadMask(reconstruct_image,q) == 0)) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); 
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); channel_distortion[i]+=distance*distance; channel_distortion[CompositePixelChannel]+=distance*distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanSquaredError) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=GetImageChannels(image); return(status); } static MagickBooleanType GetNormalizedCrossCorrelationDistortion( const Image *image,const Image *reconstruct_image,double *distortion, ExceptionInfo *exception) { #define SimilarityImageTag "Similarity/Image" CacheView *image_view, *reconstruct_view; ChannelStatistics *image_statistics, *reconstruct_statistics; double area; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; size_t columns, rows; ssize_t y; /* Normalize to account for variation due to lighting and exposure condition. 
*/ image_statistics=GetImageStatistics(image,exception); reconstruct_statistics=GetImageStatistics(reconstruct_image,exception); if ((image_statistics == (ChannelStatistics *) NULL) || (reconstruct_statistics == (ChannelStatistics *) NULL)) { if (image_statistics != (ChannelStatistics *) NULL) image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); if (reconstruct_statistics != (ChannelStatistics *) NULL) reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); return(MagickFalse); } status=MagickTrue; progress=0; for (i=0; i <= MaxPixelChannels; i++) distortion[i]=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { if ((GetPixelReadMask(image,p) == 0) || (GetPixelReadMask(reconstruct_image,q) == 0)) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } } area=PerceptibleReciprocal(area); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; if 
((GetPixelReadMask(image,p) == 0) || (GetPixelReadMask(reconstruct_image,q) == 0)) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*(image->alpha_trait != UndefinedPixelTrait ? GetPixelAlpha(image,p) : OpaqueAlpha); Da=QuantumScale*(reconstruct_image->alpha_trait != UndefinedPixelTrait ? GetPixelAlpha(reconstruct_image,q) : OpaqueAlpha); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) { distortion[i]+=area*QuantumScale*(p[i]- image_statistics[channel].mean)*(GetPixelChannel( reconstruct_image,channel,q)- reconstruct_statistics[channel].mean); } else { distortion[i]+=area*QuantumScale*(Sa*p[i]- image_statistics[channel].mean)*(Da*GetPixelChannel( reconstruct_image,channel,q)- reconstruct_statistics[channel].mean); } } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SimilarityImageTag,progress++,rows); if (proceed == MagickFalse) { status=MagickFalse; break; } } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); /* Divide by the standard deviation. 
*/ distortion[CompositePixelChannel]=0.0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma; PixelChannel channel=GetPixelChannelChannel(image,i); gamma=image_statistics[channel].standard_deviation* reconstruct_statistics[channel].standard_deviation; gamma=PerceptibleReciprocal(gamma); distortion[i]=QuantumRange*gamma*distortion[i]; distortion[CompositePixelChannel]+=distortion[i]*distortion[i]; } distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/ GetImageChannels(image)); /* Free resources. */ reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); return(status); } static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t j, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if 
((GetPixelReadMask(image,p) == 0) || (GetPixelReadMask(reconstruct_image,q) == 0)) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=QuantumScale*fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); if (distance > channel_distortion[i]) channel_distortion[i]=distance; if (distance > channel_distortion[CompositePixelChannel]) channel_distortion[CompositePixelChannel]=distance; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPeakAbsoluteError) #endif for (j=0; j <= MaxPixelChannels; j++) if (channel_distortion[j] > distortion[j]) distortion[j]=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; register ssize_t i; status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception); for (i=0; i <= MaxPixelChannels; i++) if (fabs(distortion[i]) < MagickEpsilon) distortion[i]=INFINITY; else distortion[i]=20.0*MagickLog10(1.0/sqrt(distortion[i])); return(status); } static MagickBooleanType 
GetPerceptualHashDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { ChannelPerceptualHash *channel_phash, *reconstruct_phash; const char *artifact; MagickBooleanType normalize; ssize_t channel; /* Compute perceptual hash in the sRGB colorspace. */ channel_phash=GetImagePerceptualHash(image,exception); if (channel_phash == (ChannelPerceptualHash *) NULL) return(MagickFalse); reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception); if (reconstruct_phash == (ChannelPerceptualHash *) NULL) { channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory( channel_phash); return(MagickFalse); } artifact=GetImageArtifact(image,"phash:normalize"); normalize=(artifact == (const char *) NULL) || (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (channel=0; channel < MaxPixelChannels; channel++) { double difference; register ssize_t i; difference=0.0; for (i=0; i < MaximumNumberOfImageMoments; i++) { double alpha, beta; register ssize_t j; for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++) { alpha=channel_phash[channel].phash[j][i]; beta=reconstruct_phash[channel].phash[j][i]; if (normalize == MagickFalse) difference+=(beta-alpha)*(beta-alpha); else difference=sqrt((beta-alpha)*(beta-alpha)/ channel_phash[0].number_channels); } } distortion[channel]+=difference; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPerceptualHashDistortion) #endif distortion[CompositePixelChannel]+=difference; } /* Free resources. 
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
  return(MagickTrue);
}

/*
  GetRootMeanSquaredDistortion(): MSE distortion followed by a per-channel
  square root; note the loop bound is '<=' so the CompositePixelChannel slot
  at index MaxPixelChannels is rooted as well.
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (i=0; i <= MaxPixelChannels; i++)
    distortion[i]=sqrt(distortion[i]);
  return(status);
}

/*
  GetImageDistortion(): compute the requested distortion metric between
  'image' and 'reconstruct_image'; the composite-channel value is returned
  through *distortion and also recorded as the "distortion" image property.
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  /* one slot per pixel channel plus the composite slot */
  length=MaxPixelChannels+1;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  *distortion=channel_distortion[CompositePixelChannel];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e D i s t o r t i o n s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDistortions() compares the pixel channels of an image to a
%  reconstructed image and returns the specified distortion metric for each
%  channel.
%
%  The format of the GetImageDistortions method is:
%
%      double *GetImageDistortions(const Image *image,
%        const Image *reconstruct_image,const MetricType metric,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o reconstruct_image: the reconstruct image.
%
%    o metric: the metric.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport double *GetImageDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  /* one slot per pixel channel plus the composite slot; caller frees */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      /* was GetRootMeanSquaredDistortion(): a copy-paste slip that made the
         perceptual-hash metric silently report RMSE; dispatch now matches
         GetImageDistortion() */
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e s E q u a l                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImagesEqual() compare the pixels of two images and returns immediately
%  if any pixel is not identical.
%
%  The format of the IsImagesEqual method is:
%
%      MagickBooleanType IsImagesEqual(const Image *image,
%        const Image *reconstruct_image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o reconstruct_image: the reconstruct image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  /* iterate over the larger of the two geometries; virtual pixel views keep
     out-of-bounds accesses defined when the images differ in size */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      /* masked pixels are skipped, not compared */
      if (GetPixelWriteMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        /* any per-channel difference >= MagickEpsilon fails the comparison */
        if (distance >= MagickEpsilon)
          break;
      }
      /* early 'break' in the channel loop propagates outward */
      if (i < (ssize_t) GetPixelChannels(image))
        break;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (x < (ssize_t) columns)
      break;
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* y reaches 'rows' only if no mismatch (and no pixel fetch failure) */
  return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C o l o r M e t r i c                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColorMetric() measures the difference between colors at each pixel
%  location of two images.  A value other than 0 means the colors match
%  exactly.  Otherwise an error measure is computed by summing over all
%  pixels in an image the distance squared in RGB space between each image
%  pixel and its corresponding pixel in the reconstruct image.  The error
%  measure is assigned to these image members:
%
%    o mean_error_per_pixel:  The mean error for any single pixel in
%      the image.
%
%    o normalized_mean_error:  The normalized mean quantization error for
%      any single pixel in the image.  This distance measure is normalized to
%      a range between 0 and 1.  It is independent of the range of red, green,
%      and blue values in the image.
%
%    o normalized_maximum_error:  The normalized maximum quantization
%      error for any single pixel in the image.  This distance measure is
%      normalized to a range between 0 and 1.  It is independent of the range
%      of red, green, and blue values in your image.
%
%  A small normalized mean square error, accessed as
%  image->normalized_mean_error, suggests the images are very similar in
%  spatial layout and color.
%
%  The format of the SetImageColorMetric method is:
%
%      MagickBooleanType SetImageColorMetric(Image *image,
%        const Image *reconstruct_image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o reconstruct_image: the reconstruct image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  /* compare over the union of both geometries via virtual pixel views */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      /* masked pixels contribute nothing (not even to 'area') */
      if (GetPixelWriteMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits=GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        /* area counts every authentic compared channel sample */
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* NOTE(review): no guard against area == 0.0 (e.g. every pixel masked or a
     pixel fetch failed on row 0); the divisions below would then yield
     NaN/inf -- confirm whether a reciprocal clamp is wanted here */
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S i m i l a r i t y I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SimilarityImage() compares the reference image of the image and returns the
%  best match offset.  In addition, it returns a similarity image such that an
%  exact match location is completely white and if none of the pixels match,
%  black, otherwise some gray level in-between.
%
%  The format of the SimilarityImageImage method is:
%
%      Image *SimilarityImage(const Image *image,const Image *reference,
%        const MetricType metric,const double similarity_threshold,
%        RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o reference: find an area of the image that closely resembles this image.
%
%    o metric: the metric.
%
%    o similarity_threshold: minimum distortion for (sub)image match.
%
%    o offset: the best match offset of the reference image within the image.
%
%    o similarity: the computed similarity between the images.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetSimilarityMetric(): crop a reference-sized window of 'image' at
  (x_offset,y_offset) and return its distortion against 'reference'.
  Returns 0.0 on crop or distortion failure (indistinguishable from a
  perfect zero-distortion match by design of the caller's inversion step).
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  similarity_image=CropImage(image,&geometry,exception);
  if (similarity_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  status=GetImageDistortion(similarity_image,reference,metric,&distortion,
    exception);
  similarity_image=DestroyImage(similarity_image);
  if (status == MagickFalse)
    return(0.0);
  return(distortion);
}

MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  /* one similarity pixel per valid placement of 'reference' within 'image' */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(similarity_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
    exception);
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    shared(progress,status,similarity_metric) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    /* stop early once any thread found a match below the threshold */
    if (*similarity_metric <= similarity_threshold)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
      /* NCC-style metrics report correlation (1.0 is best); invert so lower
         is always better ('similarity' is thread-private here) */
      if ((metric == NormalizedCrossCorrelationErrorMetric) ||
          (metric == UndefinedErrorMetric))
        similarity=1.0-similarity;
      /* the best-match state (offset, *similarity_metric) is shared across
         threads; the critical section must guard this read-modify-write.
         (It previously bound to the private inversion 'if' above, leaving
         this update racy.) */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      if (similarity < *similarity_metric)
        {
          offset->x=x;
          offset->y=y;
          *similarity_metric=similarity;
        }
      /* rescale perceptual-hash distortion into [0,1] for the pixel value */
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      if (GetPixelWriteMask(similarity_image,q) == 0)
        {
          SetPixelBackgoundColor(similarity_image,q);
          q+=GetPixelChannels(similarity_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (similarity_traits == UndefinedPixelTrait) ||
            ((similarity_traits & UpdatePixelTrait) == 0))
          continue;
        /* white = perfect match (similarity 0), black = no match */
        SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
          QuantumRange*similarity),q);
      }
      q+=GetPixelChannels(similarity_image);
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SimilarityImage)
#endif
        proceed=SetImageProgress(image,SimilarityImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
GB_unop__cosh_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__cosh_fc64_fc64)
// op(A') function:  GB (_unop_tran__cosh_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = ccosh (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = ccosh (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = ccosh (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_COSH || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__cosh_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: apply the operator to all anz entries
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = ccosh (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = ccosh (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__cosh_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose kernel uses the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
add.c
#include "main.h"

/*
 * Merge-add two COO matrices, single threaded.  The merge relies on both
 * element lists being sorted by (i, j); entries with equal coordinates are
 * summed, unmatched entries are copied through (union semantics).
 * Returns rv with rv.error set on dimension/type mismatch.
 * NOTE(review): the type check reads elems[0] of both operands -- out of
 * bounds if either matrix has length 0; confirm callers guarantee nonempty.
 */
mat_rv addition_coo_nothreading(coo matrix1, coo matrix2)
{
    mat_rv rv;
    if(matrix1.rows != matrix2.rows || matrix1.cols != matrix2.cols){
        rv.error = ERR_WRONG_DIM;
        return rv;
    }
    if(matrix1.elems[0].type != matrix2.elems[0].type){
        rv.error = ERR_TYPE_MISSMATCH;
        return rv;
    }
    struct timespec start, end;
    get_utc_time(&start);
    coo result;
    result.rows = matrix1.rows;
    result.cols = matrix1.cols;
    result.type = matrix1.type;
    result.length = 0;
    /* growable output buffer, doubled on demand */
    int size = MALLOCINIT;
    if(!(result.elems = (coo_elem*)malloc(size*sizeof(coo_elem)))){
        fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
        exit(EXIT_FAILURE);
    }
    int matrix1_i = 0;
    int matrix2_i = 0;
    /* classic two-cursor merge while both lists still have entries */
    while(matrix1_i < matrix1.length && matrix2_i < matrix2.length){
        coo_elem next;
        next.type = matrix1.elems[0].type;
        if(matrix1.elems[matrix1_i].i == matrix2.elems[matrix2_i].i){
            if(matrix1.elems[matrix1_i].j == matrix2.elems[matrix2_i].j){
                /* same (i,j): sum the values with the operand value type */
                if(next.type == MAT_INT)
                    next.val.i = matrix1.elems[matrix1_i].val.i + matrix2.elems[matrix2_i].val.i;
                else
                    next.val.f = matrix1.elems[matrix1_i].val.f + matrix2.elems[matrix2_i].val.f;
                next.i = matrix1.elems[matrix1_i].i;
                next.j = matrix1.elems[matrix1_i].j;
                ++matrix1_i;
                ++matrix2_i;
            }
            else if(matrix1.elems[matrix1_i].j > matrix2.elems[matrix2_i].j)
                next = matrix2.elems[matrix2_i++];
            else
                next = matrix1.elems[matrix1_i++];
        }
        else if(matrix1.elems[matrix1_i].i > matrix2.elems[matrix2_i].i)
            next = matrix2.elems[matrix2_i++];
        else
            next = matrix1.elems[matrix1_i++];
        if(size == result.length){
            size *= 2;
            if(!(result.elems = realloc(result.elems, size*sizeof(coo_elem)))){
                fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
                exit(EXIT_FAILURE);
            }
        }
        result.elems[result.length++] = next;
    }
    /* drain whichever operand still has entries */
    while(matrix1_i < matrix1.length){
        if(size == result.length){
            size *= 2;
            if(!(result.elems = realloc(result.elems, size*sizeof(coo_elem)))){
                fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
                exit(EXIT_FAILURE);
            }
        }
        result.elems[result.length++] = matrix1.elems[matrix1_i++];
    }
    while(matrix2_i < matrix2.length){
        if(size == result.length){
            size *= 2;
            if(!(result.elems = realloc(result.elems, size*sizeof(coo_elem)))){
                fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
                exit(EXIT_FAILURE);
            }
        }
        result.elems[result.length++] = matrix2.elems[matrix2_i++];
    }
    get_utc_time(&end);
    rv = coo_to_mat_nothreading(result);
    rv.t_process = time_delta(end, start);
    free_coo(result);
    return rv;
}

/*
 * Merge-add two COO matrices using OpenMP; rows are merged independently
 * into per-row worst-case (cols-sized) buffers, then concatenated.
 */
mat_rv addition_coo(coo matrix1, coo matrix2, int thread_count)
{
    mat_rv rv;
    if(matrix1.rows != matrix2.rows || matrix1.cols != matrix2.cols){
        rv.error = ERR_WRONG_DIM;
        return rv;
    }
    if(matrix1.elems[0].type != matrix2.elems[0].type){
        rv.error = ERR_TYPE_MISSMATCH;
        return rv;
    }
    struct timespec start, end;
    get_utc_time(&start);
    coo result;
    result.rows = matrix1.rows;
    result.cols = matrix1.cols;
    result.type = matrix1.type;
    result.length = 0;
    //local storage elems
    coo_elem **local_elems;
    int *local_elems_len;
    if(!(local_elems = (coo_elem**)malloc(matrix1.rows * sizeof(coo_elem *)))){
        fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
        exit(EXIT_FAILURE);
    }
    if(!(local_elems_len = (int*)calloc(matrix1.rows, sizeof(int)))){
        fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
        exit(EXIT_FAILURE);
    }
    //allocate for worst case such that additional calls to change allocation is not needed
    //by each thread
    for(int i = 0; i < matrix1.rows; ++i){
        if(!(local_elems[i] = (coo_elem*)malloc(matrix1.cols * sizeof(coo_elem)))){
            fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
            exit(EXIT_FAILURE);
        }
    }
    //row1 corresponds to matrix1
    //row2 corresponds to matrix2
    /* row index tables: entry = first element offset of that row (-1 if
       empty), len = number of elements in the row */
    int *row1_entries;
    int *row1_lens;
    int *row2_entries;
    int *row2_lens;
    if(!(row1_entries = (int*)malloc(matrix1.rows * sizeof(int)))){
        fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
        exit(EXIT_FAILURE);
    }
    if(!(row1_lens =
        (int*)calloc(matrix1.rows, sizeof(int)))){
        fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
        exit(EXIT_FAILURE);
    }
    if(!(row2_entries = (int*)malloc(matrix2.rows * sizeof(int)))){
        fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
        exit(EXIT_FAILURE);
    }
    if(!(row2_lens = (int*)calloc(matrix2.rows, sizeof(int)))){
        fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
        exit(EXIT_FAILURE);
    }
    for(int i = 0; i < matrix1.rows; ++i){
        row1_entries[i] = -1;
        row2_entries[i] = -1;
    }
    //Time constants for building makes threading redundant
    for(int i = 0; i < matrix1.length; ++i){
        if(row1_entries[matrix1.elems[i].i] == -1)
            row1_entries[matrix1.elems[i].i] = i;
        row1_lens[matrix1.elems[i].i]++;
    }
    for(int i = 0; i < matrix2.length; ++i){
        if(row2_entries[matrix2.elems[i].i] == -1)
            row2_entries[matrix2.elems[i].i] = i;
        row2_lens[matrix2.elems[i].i]++;
    }
    int i;
    /* each iteration touches only row i's buffers, so rows are independent */
    #pragma omp parallel for num_threads(thread_count) private(i) shared(matrix1, matrix2, local_elems)
    for(i = 0; i < matrix1.rows; ++i){
        if(row1_lens[i] == 0 && row2_lens[i] == 0)
            continue;
        int row1_i = 0, row2_i = 0;
        /* two-cursor union merge of row i (sorted by column j) */
        while(row1_i < row1_lens[i] && row2_i < row2_lens[i]){
            if(matrix1.elems[row1_entries[i] + row1_i].j == matrix2.elems[row2_entries[i] + row2_i].j){
                /* matching column: sum */
                if(result.type == MAT_INT)
                    local_elems[i][local_elems_len[i]].val.i = matrix1.elems[row1_entries[i] + row1_i].val.i + matrix2.elems[row2_entries[i] + row2_i].val.i;
                else
                    local_elems[i][local_elems_len[i]].val.f = matrix1.elems[row1_entries[i] + row1_i].val.f + matrix2.elems[row2_entries[i] + row2_i].val.f;
                local_elems[i][local_elems_len[i]].j = matrix1.elems[row1_entries[i] + row1_i].j;
                ++row2_i;
                ++row1_i;
            }
            else if(matrix1.elems[row1_entries[i] + row1_i].j > matrix2.elems[row2_entries[i] + row2_i].j){
                /* matrix2's column comes first: copy it through */
                if(result.type == MAT_INT)
                    local_elems[i][local_elems_len[i]].val.i = matrix2.elems[row2_entries[i] + row2_i].val.i;
                else
                    local_elems[i][local_elems_len[i]].val.f = matrix2.elems[row2_entries[i] + row2_i].val.f;
                local_elems[i][local_elems_len[i]].j = matrix2.elems[row2_entries[i] + row2_i].j;
                ++row2_i;
            }
            else{
                /* matrix1's column comes first: copy it through */
                if(result.type == MAT_INT)
                    local_elems[i][local_elems_len[i]].val.i = matrix1.elems[row1_entries[i] + row1_i].val.i;
                else
                    local_elems[i][local_elems_len[i]].val.f = matrix1.elems[row1_entries[i] + row1_i].val.f;
                local_elems[i][local_elems_len[i]].j = matrix1.elems[row1_entries[i] + row1_i].j;
                ++row1_i;
            }
            local_elems[i][local_elems_len[i]].i = i;
            local_elems[i][local_elems_len[i]].type = result.type;
            ++local_elems_len[i];
        }
        /* drain leftovers from matrix1's row */
        while(row1_i < row1_lens[i]){
            if(result.type == MAT_INT)
                local_elems[i][local_elems_len[i]].val.i = matrix1.elems[row1_entries[i] + row1_i].val.i;
            else
                local_elems[i][local_elems_len[i]].val.f = matrix1.elems[row1_entries[i] + row1_i].val.f;
            local_elems[i][local_elems_len[i]].j = matrix1.elems[row1_entries[i] + row1_i].j;
            local_elems[i][local_elems_len[i]].i = i;
            local_elems[i][local_elems_len[i]].type = result.type;
            ++local_elems_len[i];
            ++row1_i;
        }
        /* drain leftovers from matrix2's row */
        while(row2_i < row2_lens[i]){
            if(result.type == MAT_INT)
                local_elems[i][local_elems_len[i]].val.i = matrix2.elems[row2_entries[i] + row2_i].val.i;
            else
                local_elems[i][local_elems_len[i]].val.f = matrix2.elems[row2_entries[i] + row2_i].val.f;
            local_elems[i][local_elems_len[i]].j = matrix2.elems[row2_entries[i] + row2_i].j;
            local_elems[i][local_elems_len[i]].i = i;
            local_elems[i][local_elems_len[i]].type = result.type;
            ++local_elems_len[i];
            ++row2_i;
        }
    }
    free(row1_entries);
    free(row1_lens);
    free(row2_entries);
    free(row2_lens);
    /* serial phase: concatenate the per-row buffers into one array */
    for(i = 0; i < result.rows; ++i)
        result.length += local_elems_len[i];
    if(!(result.elems = malloc(result.length * sizeof(coo_elem)))){
        fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
        exit(EXIT_FAILURE);
    }
    int index = 0;
    for(i = 0; i < result.rows; ++i){
        memcpy(&result.elems[index], local_elems[i], local_elems_len[i] * sizeof(coo_elem));
        index += local_elems_len[i];
        free(local_elems[i]);
    }
    free(local_elems);
    free(local_elems_len);
    get_utc_time(&end);
    rv = coo_to_mat(result, thread_count);
    rv.t_process = time_delta(end, start);
    free_coo(result);
    return rv;
}

/*
 * Merge-add two CSR matrices, single threaded.  Row extents come from the
 * ia arrays; within a row, ja columns are merged in sorted order.
 */
mat_rv addition_csr_nothreading(csr matrix1, csr matrix2)
{
    mat_rv rv;
    if(matrix1.rows != matrix2.rows || matrix1.cols != matrix2.cols){
        rv.error = ERR_WRONG_DIM;
        return rv;
    }
    if(matrix1.type != matrix2.type){
        rv.error = ERR_TYPE_MISSMATCH;
        return rv;
    }
    struct timespec start, end;
    get_utc_time(&start);
    csr result;
    result.rows = matrix1.rows;
    result.cols = matrix1.cols;
    result.type = matrix1.type;
    result.num_vals = 0;
    //check worst case between all rows * cols or all elems
    int nnz_size = matrix1.cols*matrix1.rows;
    if(matrix1.num_vals + matrix2.num_vals < nnz_size)
        nnz_size = matrix1.num_vals + matrix2.num_vals;
    if(result.type == MAT_INT){
        if(!(result.nnz.i = (int*)malloc(nnz_size * sizeof(int)))){
            fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
            exit(EXIT_FAILURE);
        }
    }
    else{
        if(!(result.nnz.f = (long double*)malloc(nnz_size * sizeof(long double)))){
            fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
            exit(EXIT_FAILURE);
        }
    }
    if(!(result.ja = (int*)malloc(nnz_size * sizeof(int)))){
        fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
        exit(EXIT_FAILURE);
    }
    if(!(result.ia = (int*)malloc((result.rows + 1) * sizeof(int)))){
        fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n");
        exit(EXIT_FAILURE);
    }
    result.ia[0] = 0;
    for(int i = 0; i < result.rows; ++i){
        /* ia[i+1] doubles as the running write cursor for row i */
        result.ia[i + 1] = result.ia[i];
        int mat1_i = matrix1.ia[i];
        int mat2_i = matrix2.ia[i];
        while(mat1_i < matrix1.ia[i + 1] && mat2_i < matrix2.ia[i + 1]){
            if(matrix1.ja[mat1_i] == matrix2.ja[mat2_i]){
                /* matching column: sum the two values */
                if(result.type == MAT_INT)
                    result.nnz.i[result.ia[i + 1]] = matrix1.nnz.i[mat1_i] + matrix2.nnz.i[mat2_i];
                else
                    result.nnz.f[result.ia[i + 1]] = matrix1.nnz.f[mat1_i] + matrix2.nnz.f[mat2_i];
                result.ja[result.ia[i + 1]] = matrix1.ja[mat1_i];
                ++result.ia[i + 1];
                ++mat1_i;
                ++mat2_i;
            }
            else
if(matrix1.ja[mat1_i] > matrix2.ja[mat2_i]) ++mat2_i; else ++mat1_i; } while(mat1_i < matrix1.ia[i + 1]){ if(result.type == MAT_INT) result.nnz.i[result.ia[i + 1]] = matrix1.nnz.i[mat1_i]; else result.nnz.f[result.ia[i + 1]] = matrix1.nnz.f[mat1_i]; result.ja[result.ia[i + 1]] = matrix1.ja[mat1_i]; ++result.ia[i + 1]; ++mat1_i; } while(mat2_i < matrix2.ia[i + 1]){ if(result.type == MAT_INT) result.nnz.i[result.ia[i + 1]] = matrix2.nnz.i[mat2_i]; else result.nnz.f[result.ia[i + 1]] = matrix2.nnz.f[mat2_i]; result.ja[result.ia[i + 1]] = matrix2.ja[mat2_i]; ++result.ia[i + 1]; ++mat2_i; } } result.num_vals = result.ia[result.rows]; get_utc_time(&end); rv = csr_to_mat_nothreading(result); rv.t_process = time_delta(end, start); free_csr(result); return rv; } mat_rv addition_csr(csr matrix1, csr matrix2, int thread_count) { mat_rv rv; if(matrix1.rows != matrix2.rows || matrix1.cols != matrix2.cols){ rv.error = ERR_WRONG_DIM; return rv; } if(matrix1.type != matrix2.type){ rv.error = ERR_TYPE_MISSMATCH; return rv; } struct timespec start, end; get_utc_time(&start); csr result; result.rows = matrix1.rows; result.cols = matrix1.cols; result.type = matrix1.type; result.num_vals = 0; union{ int **i; long double **f; } local_nnzs; int **local_jas; //allocate for worst case if(result.type == MAT_INT){ if(!(local_nnzs.i = (int**)malloc(result.rows*sizeof(int *)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } for(int i = 0; i < result.rows; ++i){ if(!(local_nnzs.i[i] = (int*)malloc(result.cols*sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } } else{ if(!(local_nnzs.f = (long double**)malloc(result.rows*sizeof(long double *)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } for(int i = 0; i < result.rows; ++i){ if(!(local_nnzs.f[i] = (long double*)malloc(result.cols*sizeof(long double)))){ 
fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } } if(!(local_jas = (int**)malloc(result.rows*sizeof(int *)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } for(int i = 0; i < result.rows; ++i){ if(!(local_jas[i] = (int*)malloc(result.cols*sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } //using result.ia to store length of each row then iterate through at the end if(!(result.ia = (int*)calloc(result.rows + 1, sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } int i; #pragma omp parallel num_threads(thread_count) shared(matrix1, matrix2, result, local_nnzs, local_jas) { //store type on the local stack MAT_TYPE local_type = result.type; #pragma omp for private(i) for(i = 0; i < result.rows; ++i){ int mat1_i = matrix1.ia[i]; int mat2_i = matrix2.ia[i]; while(mat1_i < matrix1.ia[i + 1] && mat2_i < matrix2.ia[i + 1]){ if(matrix1.ja[mat1_i] == matrix2.ja[mat2_i]){ if(local_type == MAT_INT) local_nnzs.i[i][result.ia[i + 1]] = matrix1.nnz.i[mat1_i] + matrix2.nnz.i[mat2_i]; else local_nnzs.f[i][result.ia[i + 1]] = matrix1.nnz.f[mat1_i] + matrix2.nnz.f[mat2_i]; local_jas[i][result.ia[i + 1]] = matrix1.ja[mat1_i]; result.ia[i + 1]++; mat1_i++; mat2_i++; } else if(matrix1.ja[mat1_i] > matrix2.ja[mat2_i]) mat2_i++; else mat1_i++; } while(mat1_i < matrix1.ia[i + 1]){ if(local_type == MAT_INT) local_nnzs.i[i][result.ia[i + 1]] = matrix1.nnz.i[mat1_i]; else local_nnzs.f[i][result.ia[i + 1]] = matrix1.nnz.f[mat1_i]; local_jas[i][result.ia[i + 1]] = matrix1.ja[mat1_i]; ++result.ia[i + 1]; ++mat1_i; } while(mat2_i < matrix2.ia[i + 1]){ if(local_type == MAT_INT) local_nnzs.i[i][result.ia[i + 1]] = matrix2.nnz.i[mat2_i]; else local_nnzs.f[i][result.ia[i + 1]] = matrix2.nnz.f[mat2_i]; local_jas[i][result.ia[i + 1]] = matrix2.ja[mat2_i]; 
++result.ia[i + 1]; ++mat2_i; } } } for(int i = 1; i < result.rows + 1; ++i) result.num_vals += result.ia[i]; if(result.type == MAT_INT){ if(!(result.nnz.i = (int*)malloc(result.num_vals * sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } else{ if(!(result.nnz.f = (long double*)malloc(result.num_vals * sizeof(long double)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } if(!(result.ja = (int*)malloc(result.num_vals * sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } for(int i = 0; i < result.rows; ++i){ if(result.ia[i + 1] > 0){ if(result.type == MAT_INT){ memcpy(&result.nnz.i[result.ia[i]],local_nnzs.i[i], result.ia[i + 1] * sizeof(int)); free(local_nnzs.i[i]); } else{ memcpy(&result.nnz.f[result.ia[i]],local_nnzs.f[i], result.ia[i + 1] * sizeof(long double)); free(local_nnzs.f[i]); } memcpy(&result.ja[result.ia[i]], local_jas[i], result.ia[i + 1] * sizeof(int)); free(local_jas[i]); } result.ia[i + 1] += result.ia[i]; } free(local_jas); if(result.type == MAT_INT) free(local_nnzs.i); else free(local_nnzs.f); get_utc_time(&end); rv = csr_to_mat(result, thread_count); rv.t_process = time_delta(end, start); free_csr(result); return rv; } mat_rv addition_csc_nothreading(csc matrix1, csc matrix2) { mat_rv rv; if(matrix1.rows != matrix2.rows || matrix1.cols != matrix2.cols){ rv.error = ERR_WRONG_DIM; return rv; } if(matrix1.type != matrix2.type){ rv.error = ERR_TYPE_MISSMATCH; return rv; } struct timespec start, end; get_utc_time(&start); csc result; result.cols = matrix1.cols; result.rows = matrix1.rows; result.type = matrix1.type; result.num_vals = 0; //check worst case between all cols * rows or all elems int nnz_size = matrix1.rows*matrix1.cols; if(matrix1.num_vals + matrix2.num_vals < nnz_size) nnz_size = matrix1.num_vals + matrix2.num_vals; if(result.type == MAT_INT){ 
if(!(result.nnz.i = (int*)malloc(nnz_size * sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } else{ if(!(result.nnz.f = (long double*)malloc(nnz_size * sizeof(long double)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } if(!(result.ja = (int*)malloc(nnz_size * sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } if(!(result.ia = (int*)malloc((result.cols + 1) * sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } result.ia[0] = 0; for(int i = 0; i < result.cols; ++i){ result.ia[i + 1] = result.ia[i]; int mat1_i = matrix1.ia[i]; int mat2_i = matrix2.ia[i]; while(mat1_i < matrix1.ia[i + 1] && mat2_i < matrix2.ia[i + 1]){ if(matrix1.ja[mat1_i] == matrix2.ja[mat2_i]){ if(result.type == MAT_INT) result.nnz.i[result.ia[i + 1]] = matrix1.nnz.i[mat1_i] + matrix2.nnz.i[mat2_i]; else result.nnz.f[result.ia[i + 1]] = matrix1.nnz.f[mat1_i] + matrix2.nnz.f[mat2_i]; result.ja[result.ia[i + 1]] = matrix1.ja[mat1_i]; ++result.ia[i + 1]; ++mat1_i; ++mat2_i; } else if(matrix1.ja[mat1_i] > matrix2.ja[mat2_i]) ++mat2_i; else ++mat1_i; } while(mat1_i < matrix1.ia[i + 1]){ if(result.type == MAT_INT) result.nnz.i[result.ia[i + 1]] = matrix1.nnz.i[mat1_i]; else result.nnz.f[result.ia[i + 1]] = matrix1.nnz.f[mat1_i]; result.ja[result.ia[i + 1]] = matrix1.ja[mat1_i]; ++result.ia[i + 1]; ++mat1_i; } while(mat2_i < matrix2.ia[i + 1]){ if(result.type == MAT_INT) result.nnz.i[result.ia[i + 1]] = matrix2.nnz.i[mat2_i]; else result.nnz.f[result.ia[i + 1]] = matrix2.nnz.f[mat2_i]; result.ja[result.ia[i + 1]] = matrix2.ja[mat2_i]; ++result.ia[i + 1]; ++mat2_i; } } result.num_vals = result.ia[result.cols]; get_utc_time(&end); rv = csc_to_mat_nothreading(result); rv.t_process = time_delta(end, start); free_csc(result); return rv; } mat_rv 
addition_csc(csc matrix1, csc matrix2, int thread_count) { mat_rv rv; if(matrix1.rows != matrix2.rows || matrix1.cols != matrix2.cols){ rv.error = ERR_WRONG_DIM; return rv; } if(matrix1.type != matrix2.type){ rv.error = ERR_TYPE_MISSMATCH; return rv; } struct timespec start, end; get_utc_time(&start); csc result; result.cols = matrix1.cols; result.rows = matrix1.rows; result.type = matrix1.type; result.num_vals = 0; union{ int **i; long double **f; } local_nnzs; int **local_jas; //allocate for worst case if(result.type == MAT_INT){ if(!(local_nnzs.i = (int**)malloc(result.cols*sizeof(int *)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } for(int i = 0; i < result.cols; ++i){ if(!(local_nnzs.i[i] = (int*)malloc(result.rows*sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } } else{ if(!(local_nnzs.f = (long double**)malloc(result.cols*sizeof(long double *)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } for(int i = 0; i < result.cols; ++i){ if(!(local_nnzs.f[i] = (long double*)malloc(result.rows*sizeof(long double)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } } if(!(local_jas = (int**)malloc(result.cols*sizeof(int *)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } for(int i = 0; i < result.cols; ++i){ if(!(local_jas[i] = (int*)malloc(result.rows*sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } //using result.ia to store length of each row then iterate through at the end if(!(result.ia = (int*)calloc(result.cols + 1, sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } int i; #pragma omp parallel num_threads(thread_count) shared(matrix1, 
matrix2, result, local_nnzs, local_jas) { //store type on the local stack MAT_TYPE local_type = result.type; #pragma omp for private(i) for(i = 0; i < result.cols; ++i){ int mat1_i = matrix1.ia[i]; int mat2_i = matrix2.ia[i]; while(mat1_i < matrix1.ia[i + 1] && mat2_i < matrix2.ia[i + 1]){ if(matrix1.ja[mat1_i] == matrix2.ja[mat2_i]){ if(local_type == MAT_INT) local_nnzs.i[i][result.ia[i + 1]] = matrix1.nnz.i[mat1_i] + matrix2.nnz.i[mat2_i]; else local_nnzs.f[i][result.ia[i + 1]] = matrix1.nnz.f[mat1_i] + matrix2.nnz.f[mat2_i]; local_jas[i][result.ia[i + 1]] = matrix1.ja[mat1_i]; result.ia[i + 1]++; mat1_i++; mat2_i++; } else if(matrix1.ja[mat1_i] > matrix2.ja[mat2_i]) mat2_i++; else mat1_i++; } while(mat1_i < matrix1.ia[i + 1]){ if(local_type == MAT_INT) local_nnzs.i[i][result.ia[i + 1]] = matrix1.nnz.i[mat1_i]; else local_nnzs.f[i][result.ia[i + 1]] = matrix1.nnz.f[mat1_i]; local_jas[i][result.ia[i + 1]] = matrix1.ja[mat1_i]; ++result.ia[i + 1]; ++mat1_i; } while(mat2_i < matrix2.ia[i + 1]){ if(local_type == MAT_INT) local_nnzs.i[i][result.ia[i + 1]] = matrix2.nnz.i[mat2_i]; else local_nnzs.f[i][result.ia[i + 1]] = matrix2.nnz.f[mat2_i]; local_jas[i][result.ia[i + 1]] = matrix2.ja[mat2_i]; ++result.ia[i + 1]; ++mat2_i; } } } for(int i = 1; i < result.cols + 1; ++i) result.num_vals += result.ia[i]; if(result.type == MAT_INT){ if(!(result.nnz.i = (int*)malloc(result.num_vals * sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } else{ if(!(result.nnz.f = (long double*)malloc(result.num_vals * sizeof(long double)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } if(!(result.ja = (int*)malloc(result.num_vals * sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } for(int i = 0; i < result.cols; ++i){ if(result.ia[i + 1] > 0){ if(result.type == MAT_INT){ 
memcpy(&result.nnz.i[result.ia[i]],local_nnzs.i[i], result.ia[i + 1] * sizeof(int)); free(local_nnzs.i[i]); } else{ memcpy(&result.nnz.f[result.ia[i]],local_nnzs.f[i], result.ia[i + 1] * sizeof(long double)); free(local_nnzs.f[i]); } memcpy(&result.ja[result.ia[i]], local_jas[i], result.ia[i + 1] * sizeof(int)); free(local_jas[i]); } result.ia[i + 1] += result.ia[i]; } free(local_jas); if(result.type == MAT_INT) free(local_nnzs.i); else free(local_nnzs.f); get_utc_time(&end); rv = csc_to_mat(result, thread_count); rv.t_process = time_delta(end, start); free_csc(result); return rv; } mat_rv addition(OPERATIONARGS *args) { mat_rv rv; //default = COO if (args->format == FORM_DEFAULT) args->format = COO; switch(args->format){ case COO:{ struct timespec delta1, delta2; struct timespec fileio1, fileio2; coo matrix1 = read_coo(args->file1, &delta1, &fileio1); coo matrix2 = read_coo(args->file2, &delta2, &fileio2); struct timespec construct = time_sum(delta1, delta2); if(args->nothreading) rv = addition_coo_nothreading(matrix1, matrix2); else rv = addition_coo(matrix1, matrix2, args->num_threads); rv.t_construct = time_sum(rv.t_construct, construct); rv.t_fileio = time_sum(fileio1, fileio2); free_coo(matrix1); free_coo(matrix2); return rv; break; } case CSR:{ struct timespec delta1, delta2; struct timespec fileio1, fileio2; csr matrix1 = read_csr(args->file1, &delta1, &fileio1); csr matrix2 = read_csr(args->file2, &delta2, &fileio2); struct timespec construct = time_sum(delta1, delta2); if(args->nothreading) rv = addition_csr_nothreading(matrix1, matrix2); else rv = addition_csr(matrix1, matrix2, args->num_threads); rv.t_construct = time_sum(rv.t_construct, construct); rv.t_fileio = time_sum(fileio1, fileio2); free_csr(matrix1); free_csr(matrix2); return rv; break; } case CSC:{ struct timespec delta1, delta2; struct timespec fileio1, fileio2; csc matrix1 = read_csc(args->file1, &delta1, &fileio1); csc matrix2 = read_csc(args->file2, &delta2, &fileio2); struct timespec 
construct = time_sum(delta1, delta2); if(args->nothreading) rv = addition_csc_nothreading(matrix1, matrix2); else rv = addition_csc(matrix1, matrix2, args->num_threads); rv.t_construct = time_sum(rv.t_construct, construct); rv.t_fileio = time_sum(fileio1, fileio2); free_csc(matrix1); free_csc(matrix2); return rv; break; } default: fprintf(stderr, "format not implemented\n"); exit(EXIT_FAILURE); break; } //execution should never reach here rv.error = ERR_NOT_SET; return rv; }
clang-313307.c
#include <stdio.h>
#include <omp.h>

/* Offload smoke test: run a combined `target parallel` region, capture the
 * team/thread counts observed on the device, and fail unless exactly one
 * team of 256 threads was launched. */
int main(void)
{
    int nteams_a;
    int nthreads_a;

#pragma omp target parallel map(nteams_a, nthreads_a)
    {
        nteams_a = omp_get_num_teams();
        nthreads_a = omp_get_num_threads();
    }

    printf("hello %d %d\n", nteams_a, nthreads_a);

    if (nteams_a == 1 && nthreads_a == 256)
        return 0;
    return 1;
}
morn_list.c
/* Copyright (C) 2019-2020 JingWeiZhangHuai <jingweizhanghuai@163.com>
   Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License.
   You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
   on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and limitations under the License. */

#include "morn_ptc.h"

/* Private state behind an MList: `data`/`num` mirror the public fields
 * (with `num` being the allocated capacity), `memory` backs elements
 * written by value, and `defrag_size` accumulates overwritten bytes
 * until a defragment pass is worthwhile. */
struct HandleListCreate
{
    MList *list;
    MChain *property;
    int64_t reserve[8];
    int writeable;

    int num;
    void **data;
    MMemory *memory;
    int defrag_size;
    int read_order;
};

/* Handle destructor: release owned chain/memory/pointer-array, then free
 * the header allocated in ListCreate (phandle sits one MList* before list). */
void endListCreate(struct HandleListCreate *handle)
{
    mException((handle->list == NULL),EXIT,"invalid list");
    if(handle->property!=NULL) mChainRelease(handle->property);
    if(handle->memory !=NULL) mMemoryRelease(handle->memory);
    if(handle->data != NULL) mFree(handle->data);
    memset(handle->list,0,sizeof(MList));
    mFree(((MList **)(handle->list))-1);
}
#define HASH_ListCreate 0xfa6c59f

/* Create a list of `num` element pointers; `data`, when given, seeds the
 * pointer array (shallow copy).  num<0 is treated as 0. */
MList *ListCreate(int num,void **data)
{
    MList **phandle = (MList **)mMalloc(sizeof(MList *)+sizeof(MList));
    MList *list = (MList *)(phandle+1);
    memset(list,0,sizeof(MList));
    *phandle=mHandleCreate();
    MHandle *hdl=mHandle(list,ListCreate);
    struct HandleListCreate *handle = (struct HandleListCreate *)(hdl->handle);
    handle->list = list;

    if(num<0) num = 0;
    handle->num = num;
    list->num = num;
    if(num>0)
    {
        handle->data = (void **)mMalloc(num*sizeof(void *));
        if(!INVALID_POINTER(data)) memcpy(handle->data,data,num*sizeof(void *));
        else                       memset(handle->data, 0,num*sizeof(void *));
    }
    else mException((!INVALID_POINTER(data)),EXIT,"invalid input");

    mPropertyFunction(list,"device",mornMemoryDevice,NULL);

    list->data = handle->data;
    return list;
}

/* Release a list created by ListCreate (runs endListCreate via the handle). */
void mListRelease(MList *list)
{
    mHandleRelease(list);
}

/* Grow `list` to `n` element slots (n<0 means num+1), reusing the existing
 * buffer when capacity allows, otherwise reallocating with headroom.
 * `data`, when non-NULL, supplies the (n - old num) appended pointers. */
void m_ListAppend(MList *list,void **data,int n)
{
    mException(INVALID_POINTER(list),EXIT,"invalid input source list");
    if(n<0) n=list->num+1;
    else mException(n<list->num,EXIT,"invalid list append number");
    struct HandleListCreate *handle= (struct HandleListCreate *)(ObjHandle(list,0)->handle);
    if(n<=handle->num)
    {
        if((list->data!= handle->data)&&(list->num>0)) memcpy(handle->data,list->data,list->num*sizeof(void *));
        /* NOTE(review): appended data is copied to handle->data, not
         * handle->data+list->num as in the grow path below -- looks like it
         * overwrites existing entries; confirm intended behavior. */
        if(data!=NULL) memcpy(handle->data,data,(n-list->num)*sizeof(void *));
        list->data = handle->data;
        list->num = n;
        return;
    }
    // printf("aaaaaaaaaaaaaa\n");
    /* grow by at least 128 slots or half the current size */
    int num = list->num + MAX(MAX(128,n-list->num),(list->num)>>1);
    void **list_data = (void **)mMalloc(num*sizeof(void *));
    if(list->num>0) memcpy(list_data,list->data,(list->num)*sizeof(void *));
    memset(list_data+list->num,0,(num-list->num)*sizeof(void *));
    if(data!=NULL) memcpy(list_data+list->num,data,(n-list->num)*sizeof(void *));
    if(handle->data != NULL) mFree(handle->data);
    handle->data = list_data;
    handle->num = num;
    list->data = handle->data;
    list->num = n;
}

/* Append `num` elements of `size` bytes each, backed by the list's own
 * memory pool; `data`, when non-NULL, is copied element-by-element. */
void mListPlace(MList *list,void *data,int num,int size)
{
    if(num<=0) return;
    mException((size<=0),EXIT,"invalid input list element size");
    int list_num = list->num;
    mListAppend(list,list_num+num);
    struct HandleListCreate *handle = (struct HandleListCreate *)(ObjHandle(list,0)->handle);
    void **idx = list->data+list_num;
    if(handle->memory == NULL) handle->memory = mMemoryCreate(1,size*num,MORN_HOST);
    else mMemoryAppend(handle->memory,size*num);
    mMemoryIndex(handle->memory,num,size,&idx,1);
    // printf("list_num=%d\n",list_num);
    // printf("idx0=%p,list->data[0]=%p\n",idx[0],list->data[0]);
    if(data==NULL) return;
    char *p=(char *)data;
    for(int i=0;i<num;i++) {memcpy(list->data[list_num+i],p,size);p+=size;}
}

// void mListOperate(MList *list,void (*func)(void *,void *),void *para)
// {
//     for(int i=0;i<list->num;i++) func(list->data[i],para);
// }

// struct HandleListWrite
// {
//     int defrag_size;
// };
// void endListWrite(void *info) {}
// #define HASH_ListWrite 0x40aea976

/* Write `size` bytes of `data` into slot `n` (n<0 or n==num appends;
 * size<0 means data is a NUL-terminated string).  Overwrites trigger a
 * defragment of the backing memory once 16 KiB of slack accumulates.
 * Returns the stored element's address. */
void *mListWrite(MList *list,int n,void *data,int size)
{
    mException(INVALID_POINTER(list),EXIT,"invalid input source list");
    mException((n>list->num),EXIT,"invalid write location %d(with list->num is %d)",n,list->num);
    if(size<0)
    {
        mException((INVALID_POINTER(data)),EXIT,"invalid data to write,which is %p",data);
        size = strlen((char *)data)+1;
    }
    struct HandleListCreate *handle0 = (struct HandleListCreate *)(ObjHandle(list,0)->handle);
    if(n<0) n = list->num;

    if(handle0->memory == NULL) handle0->memory = mMemoryCreate(DFLT,DFLT,MORN_HOST);
    void *ptr = mMemoryWrite(handle0->memory,data,size);

    int flag = (n==list->num);
    if(!flag) flag=(list->data[n]==NULL);
    if(flag)
    {
        if(n<handle0->num) list->num = n+1;
        else mListAppend(list,DFLT);
        list->data[n] = ptr;
    }
    else
    {
        list->data[n] = ptr;
        handle0->defrag_size += size;
        if(handle0->defrag_size>16384)
        {
            mListElementOperate(list,MemoryCollect,handle0->memory);
            MemoryDefrag(handle0->memory);
            handle0->defrag_size=0;
        }
    }
    return list->data[n];
}

// struct HandleListRead
// {
//     int read_order;
// };
// void endListRead(void *info) {}
// #define HASH_ListRead 0x537cc305

/* Read slot `n` (n<0 continues from the last sequential read); when `data`
 * is non-NULL the element is copied out (size<=0 treats it as a string).
 * Returns the element pointer, or NULL past the end. */
void *mListRead(MList *list,int n,void *data,int size)
{
    mException(INVALID_POINTER(list),EXIT,"invalid input");
    struct HandleListCreate *handle0 = (struct HandleListCreate *)(ObjHandle(list,0)->handle);
    // MHandle *hdl=mHandle(list,ListRead);
    // struct HandleListRead *handle = (struct HandleListRead *)(hdl->handle);
    // if(hdl->valid == 0) handle->read_order = -1;
    // hdl->valid = 1;
    if(n<0) n = handle0->read_order;
    handle0->read_order = n+1;
    if(n>=list->num) return NULL;
    if(data!=NULL)
    {
        if(size>0) memcpy( data, list->data[n],size);
        else     strcpy((char *)data,(char *)list->data[n]);
    }
    return list->data[n];
}

/* Empty the list (keeps capacity; clears the backing memory pool). */
void mListClear(MList *list)
{
    list->num=0;
    struct HandleListCreate *handle0 = (struct HandleListCreate *)(ObjHandle(list,0)->handle);
    if(handle0->memory!=NULL) mMemoryClear(handle0->memory);
}

/* Shuffle the element pointers in place (random swaps via mRand). */
void mListReorder(MList *list)
{
    mException(INVALID_POINTER(list),EXIT,"invalid input source list");
    void **data = list->data;
    int list_num = list->num;
    void *buff;
    int i;
    for(i=0;i<list_num;i++)
    {
        int j = mRand(0,list_num);
        buff = data[i];
        data[i] = data[j];
        data[j] = buff;
    }
}

/* Copy src into dst: shallow pointer copy when src has no backing memory,
 * otherwise deep-copy the element storage via mMemoryCopy. */
void mListCopy(MList *src,MList *dst)
{
    mListAppend(dst,src->num);
    struct HandleListCreate *src_handle = (struct HandleListCreate *)(ObjHandle(src,0)->handle);
    if(src_handle->memory == NULL)
    {
        memcpy(dst->data,src->data,src->num*sizeof(void *));
        return;
    }
    struct HandleListCreate *dst_handle = (struct HandleListCreate *)(ObjHandle(dst,0)->handle);
    if(dst_handle->memory == NULL) dst_handle->memory = mMemoryCreate(DFLT,DFLT,MORN_HOST);
    /* NOTE(review): the 4th argument passes &(src->data) where &(dst->data)
     * would be expected -- verify against mMemoryCopy's contract. */
    mMemoryCopy(src_handle->memory,&(src->data),dst_handle->memory,&(src->data),1,&(src->num));
}

/* Merge list1 and list2 into dst (dst may alias either input).  The source
 * lists are emptied and their buffers/memory released. */
void mListMerge(MList *list1,MList *list2,MList *dst)
{
    if(list1->num+list2->num==0) {mListClear(dst); return;}
    mListAppend(dst,list1->num+list2->num);
    struct HandleListCreate *handle1 =(struct HandleListCreate *)(ObjHandle(list1,0)->handle);
    struct HandleListCreate *handle2 =(struct HandleListCreate *)(ObjHandle(list2,0)->handle);
    struct HandleListCreate *dst_handle=(struct HandleListCreate *)(ObjHandle( dst,0)->handle);

    int num1 = list1->num;
    int num2 = list2->num;

    /* NOTE(review): mFree(listX->data) frees a buffer that the list's own
     * handle may still reference (handle->data) -- potential double free in
     * endListCreate; confirm ownership rules. */
    if(dst==list1)
    {
        if(num2>0)
        {
            memcpy(dst->data+num1,list2->data,num2*sizeof(void *));
            mFree(list2->data);list2->data = NULL;list2->num = 0;
        }
    }
    else if(dst==list2)
    {
        if(num1>0)
        {
            memcpy(dst->data+num2,list1->data,num1*sizeof(void *));
            mFree(list1->data);list1->data = NULL;list1->num = 0;
        }
    }
    else
    {
        if(num1>0)
        {
            memcpy(dst->data     ,list1->data,num1*sizeof(void *));
            mFree(list1->data);list1->data = NULL;list1->num = 0;
        }
        if(num2>0)
        {
            memcpy(dst->data+num1,list2->data,num2*sizeof(void *));
            mFree(list2->data);list2->data = NULL;list2->num = 0;
        }
    }

    if(dst_handle->memory==NULL) dst_handle->memory = mMemoryCreate(DFLT,DFLT,MORN_HOST);
    else mMemoryRedefine(dst_handle->memory,num1+num2,DFLT,DFLT);
    mMemoryMerge(handle1->memory,handle2->memory,dst_handle->memory);
    mMemoryRelease(handle1->memory);handle1->memory = NULL;
    mMemoryRelease(handle2->memory);handle2->memory = NULL;
}

/* Remove slot n, shifting later element pointers down by one. */
void mListElementDelete(MList *list,int n)
{
    mException(INVALID_POINTER(list),EXIT,"invalid input");
    mException((n>=list->num),EXIT,"invalid input");
    memmove(list->data+n,list->data+n+1,(list->num-n-1)*sizeof(void *));
    list->num-=1;
}

/* Insert `data` (of `size` bytes) at slot n: append via mListWrite, then
 * rotate the new element into position.  Returns its stored address. */
void *mListElementInsert(MList *list,int n,void *data,int size)
{
    mListWrite(list,DFLT,data,size);
    void *buff = list->data[list->num-1];
    memmove(list->data+n+1,list->data+n,(list->num-n-1)*sizeof(void *));
    list->data[n] = buff;
    return buff;
}

/* Apply func(element, para) to every element, in order. */
void mListElementOperate(MList *list,void *function,void *para)
{
    void (*func)(void *,void *) = function;
    mException(INVALID_POINTER(list)||(func==NULL),EXIT,"invalid input");
    int i;
    // #pragma omp parallel for
    for(i=0;i<list->num;i++) func(list->data[i],para);
}

/* Keep only elements for which func(element, para) is nonzero (stable). */
void mListElementScreen(MList *list,void *function,void *para)
{
    int (*func)(void *,void *) = function;
    mException(INVALID_POINTER(list)||(func==NULL),EXIT,"invalid input");
    int n=0;
    for(int i=0;i<list->num;i++)
    {
        if(func(list->data[i],para))
        {
            list->data[n] = list->data[i];
            n=n+1;
        }
    }
    list->num = n;
}

/* Pairwise selection: func(a, b, &flag_a, &flag_b, para) may veto either
 * element; vetoed elements are dropped, survivors are compacted in order. */
void mListElementSelect(MList *list,void *function,void *para)
{
    void (*func)(void *,void *,int *,int *,void *) = function;
    mException(INVALID_POINTER(list)||(func==NULL),EXIT,"invalid input");
    int n=0;
    for(int i=0;i<list->num;i++)
    {
        if(list->data[i]==NULL) continue;
        int flag1=1;
        for(int j=i+1;j<list->num;j++)
        {
            if(list->data[j] == NULL) continue;
            int flag2=1;
            func(list->data[i],list->data[j],&flag1,&flag2,para);
            if(flag2==0) list->data[j]=NULL;
            if(flag1==0) break;
        }
        if(flag1==1)
        {
            list->data[n]=list->data[i];
            n=n+1;
        }
    }
    list->num = n;
}

/*
void mListSelect(MList *list,void (*func)(void *,void *,int *,int *,void *),void *para)
{
    mException(INVALID_POINTER(list)||(func==NULL),EXIT,"invalid input");
    void **data = list->data;
    int *flag=mMalloc((list->num+2)*sizeof(int));flag=flag+1;
    memset(flag,DFLT,list->num*sizeof(int));
    flag[-1]=list->num;flag[list->num]=-1;
    int flag1,flag2;
    while(1)
    {
        int ok=1;
        for(int i=flag[i];i<list->num;i++)
        {
            if(flag[i]<0) continue;
            for(int j=flag[i]+1;j<list->num;j++)
            {
                if(j==i) continue;
                if((flag[j]>=0)&&(flag[j]<list->num)) continue;
                func(data[i],data[j],&flag1,&flag2,para);
                if(flag1==0) {flag[i] = j;ok=0;break;}
                if(flag2==0) {flag[j] = i;ok=0;continue;}
            }
            if(flag[i]>=0) continue;
            flag[i]=list->num;
        }
        if(ok) break;
    }
    int n=0;
    for(int i=0;i<list->num;i++) if(flag[i]==list->num) {list->data[n]=data[i];n++;}
    list->num = n;
    mFree(flag-1);
}
*/

/* Union-find style clustering: func(a, b, para)==1 marks two elements as
 * belonging to the same class.  Writes a 0-based group id per element into
 * `group` and returns the number of groups. */
int mListCluster(MList *list,int *group,void *function,void *para)
{
    int (*func)(void *,void *,void *) = function;
    mException((INVALID_POINTER(list))||(group==NULL)||(func==NULL),EXIT,"invalid input");
    char *valid = (char *)mMalloc(list->num * sizeof(char));
    memset(valid,0   ,list->num*sizeof(char));
    memset(group,DFLT,list->num*sizeof(int));
    int i,j,k;
    int n=0;
    for(i=0;i<list->num;i++)
    {
        for(j=0;j<i;j++)
        {
            if(group[i]==group[j]) continue;
            if(func(list->data[i],list->data[j],para)==1) /* same class */
            {
                if(group[i] == DFLT) group[i] = group[j];
                else
                {
                    /* merge group j into group i */
                    valid[group[j]] = 0;
                    int g = group[j];
                    for(k=0;k<i;k++) if(group[k] == g) group[k] = group[i];
                }
            }
        }
        if(group[i] == DFLT)
        {
            group[i] = n;
            valid[n] = 1;
            n = n+1;
        }
    }
    /* renumber surviving groups densely */
    int *c = (int *)mMalloc(n *sizeof(int));
    int num = 0;
    for(i=0;i<n;i++) { if(valid[i] != 0) {c[i] = num;num +=1;} }
    mFree(valid);
    for(i=0;i<list->num;i++) group[i] = c[group[i]];
    mFree(c);
    return num;
}

/* Cached scratch state for mListClassify (group/valid arrays + result sheet). */
struct HandleListClassify
{
    int *group;
    char *valid;
    MSheet *sheet;
    int list_num;
};
void endListClassify(struct HandleListClassify *handle)
{
    if(handle->group!=NULL) mFree(handle->group);
    if(handle->valid!=NULL) mFree(handle->valid);
    if(handle->sheet!=NULL) mSheetRelease(handle->sheet);
}
#define HASH_ListClassify 0x24c19acf

/* Same clustering as mListCluster, but returns the elements grouped into
 * the rows of a (handle-cached) MSheet instead of a group-id array. */
MSheet *mListClassify(MList *list,void *function,void *para)
{
    int (*func)(void *,void *,void *) = function;
    mException((INVALID_POINTER(list))||(func==NULL),EXIT,"invalid input");
    MHandle *hdl = mHandle(list,ListClassify);
    struct HandleListClassify *handle = (struct HandleListClassify *)(hdl->handle);
    if((hdl->valid == 0)||(handle->list_num<list->num))
    {
        if(handle->list_num<list->num)
        {
            if(handle->group!=NULL) {mFree(handle->group);handle->group=NULL;}
            if(handle->valid!=NULL) {mFree(handle->valid);handle->valid=NULL;}
        }
        if(handle->group==NULL) handle->group = (int *)mMalloc(list->num*sizeof(int ));
        if(handle->valid==NULL) handle->valid = (char *)mMalloc(list->num*sizeof(char));
        handle->list_num = list->num;
        if(handle->sheet == NULL) handle->sheet = mSheetCreate();
        hdl->valid = 1;
    }
    char *valid = handle->valid;
    int *group = handle->group;
    memset(valid,0   ,list->num*sizeof(char));
    memset(group,DFLT,list->num*sizeof(int));
    int i,j,k;
    int n=0;
    for(i=0;i<list->num;i++)
    {
        for(j=0;j<i;j++)
        {
            if(group[i]==group[j]) continue;
            if(func(list->data[i],list->data[j],para)==1)
            {
                if(group[i] == DFLT) group[i] = group[j];
                else
                {
                    valid[group[j]] = 0;
                    int g = group[j];
                    for(k=0;k<i;k++) if(group[k] == g) group[k] = group[i];
                }
            }
        }
        if(group[i] == DFLT)
        {
            group[i] = n;
            valid[n] = 1;
            n = n+1;
        }
    }
    int *c = (int *)mMalloc(n *sizeof(int));
    int num = 0;
    for(i=0;i<n;i++) { if(valid[i] != 0) {c[i] = num;num +=1;} }
    MSheet *sheet = handle->sheet;
    mSheetClear(sheet);
    mSheetRowAppend(sheet,num);
    for(i=0;i<list->num;i++)
    {
        int g = c[group[i]];
        int n = sheet->col[g];
        mSheetColAppend(sheet,g,n+1);
        sheet->data[g][n]=list->data[i];
    }
    mFree(c);
    return sheet;
}

/* Recursive quicksort over the pointer array with a median-of-three pivot
 * held in `buff`; partition leaves a hole that `buff` fills at the end.
 * Order of statements is load-bearing -- do not restructure casually. */
void _ListSort(void **list_data,int n,int (*func)(void *,void *,void *),void *para)
{
    void *buff;
    if(func(list_data[n-1],list_data[0],para)<0) {buff=list_data[n-1];list_data[n-1]=list_data[0];list_data[0]=buff;}
    if(n==2) return;
    /* pick the median of [0], [1], [n-1] as pivot in buff */
    if(func(list_data[ 1],list_data[0],para)<0) {buff=list_data[ 0];list_data[ 0]=list_data[1];}
    else if(func(list_data[n-1],list_data[1],para)<0) {buff=list_data[n-1];list_data[n-1]=list_data[1];}
    else buff=list_data[ 1];
    if(n==3) {list_data[1]=buff;return;}
    int i=1;int j=n-2;
    while(1)
    {
        while(func(list_data[j],buff,para)>=0) {j=j-1;if(j==i) goto ListSort_next;}
        list_data[i] = list_data[j];
        i=i+1;if(i==j) goto ListSort_next;
        while(func(list_data[i],buff,para)<=0) {i=i+1;if(i==j) goto ListSort_next;}
        list_data[j] = list_data[i];
        j=j-1;if(i==j) goto ListSort_next;
    }
ListSort_next:
    list_data[i]=buff;
    if(    i >1) _ListSort(list_data    ,    i ,func,para);
    if(n-i-1>1) _ListSort(list_data+i+1,n-i-1,func,para);
}

/* Public sort entry point: func(a, b, para) is a strcmp-style comparator. */
void mListSort(MList *list,void *function,void *para)
{
    int (*func)(void *,void *,void *) = function;
    mException((INVALID_POINTER(list))||(func==NULL),EXIT,"invalid input");
    if(list->num<=1)return;
    _ListSort(list->data,list->num,func,para);
}

/* Cached index buffer for m_ListMatch. */
struct HandleListMatch
{
    int list_num;
    int *idx;
};
void endListMatch(struct HandleListMatch *handle)
{
    if(handle->idx!=NULL) mFree(handle->idx);
}
#define HASH_ListMatch 0x39871020

/* Nearest-neighbour match: for each src element, find the dst element with
 * the smallest func() distance; entries beyond `thresh` map to DFLT.
 * Returns a handle-owned index array of src->num entries. */
int *m_ListMatch(MList *src,MList *dst,float thresh,void *function,void *para)
{
    float (*func)(void *,void *,void *) = function;
    mException((INVALID_POINTER(src)||INVALID_POINTER(dst)),EXIT,"invalid input");
    MHandle *hdl = mHandle(src,ListMatch);
    struct HandleListMatch *handle = (struct HandleListMatch *)(hdl->handle);
    if((hdl->valid==0)||(src->num>handle->list_num))
    {
        int list_num = MAX(src->num,handle->list_num);
        if(list_num>handle->list_num)
        {
            if(handle->idx !=NULL) mFree(handle->idx);
            handle->idx = mMalloc(list_num*sizeof(int));
            handle->list_num = list_num;
        }
        hdl->valid = 1;
    }
    if(dst->num==0) {memset(handle->idx,DFLT,src->num*sizeof(int));return handle->idx;}
    for(int i=0;i<src->num;i++)
    {
        float d_min = func(src->data[i],dst->data[0],para);int idx = 0;
        for(int j=1;j<dst->num;j++)
        {
            float d = func(src->data[i],dst->data[j],para);
            if(d<d_min){d_min=d;idx=j;}
        }
        handle->idx[i]=(d_min<thresh)?idx:DFLT;
    }
    return (handle->idx);
}

/* Stack view over an MList: `order` is the index of the current top (-1 when
 * empty); updated with the library's atomic helpers. */
struct HandleStack
{
    volatile int order;
};
void endStack(void *info) {}
#define HASH_Stack 0x8c4d4c73

/* Push `size` bytes of `data`; returns the stored address, or NULL when the
 * stack is at capacity (stack->num slots). */
void *mStackWrite(MList *stack,void *data,int size)
{
    mException(INVALID_POINTER(stack),EXIT,"invalid stack");
    MHandle *hdl=mHandle(stack,Stack);
    struct HandleStack *handle = (struct HandleStack *)(hdl->handle);
    if(hdl->valid == 0) handle->order = -1;
    hdl->valid = 1;
    if(handle->order==stack->num-1) return NULL;
    mAtomicAdd(&(handle->order),1);
    return mListWrite(stack,handle->order,data,size);
}

/* Pop the top element (copying it out when `data` is non-NULL; size<=0
 * treats it as a string).  Returns NULL when the stack is empty/unused. */
void *mStackRead(MList *stack,void *data,int size)
{
    mException(INVALID_POINTER(stack),EXIT,"invalid stack");
    MHandle *hdl=mHandle(stack,Stack);
    struct HandleStack *handle = (struct HandleStack *)(hdl->handle);
    if(hdl->valid == 0) return NULL;
    if(handle->order <0) return NULL;
    int order = mAtomicSub(&(handle->order),1);
    void *p=stack->data[order];
    if(data!=NULL)
    {
        if(size<=0) strcpy((char *)data,(char *)p);
        else memcpy(data,p,size);
    }
    return p;
}

/* Number of elements currently on the stack. */
int mStackSize(MList *stack)
{
    mException(INVALID_POINTER(stack),EXIT,"invalid stack");
    MHandle *hdl=mHandle(stack,Stack);
    struct HandleStack *handle = (struct HandleStack *)(hdl->handle);
    if(hdl->valid == 0) handle->order = -1;
    hdl->valid = 1;
    return (handle->order+1);
}

/* ---- unfinished drafts below: ring-buffer queue, hash list and multi-reader
 * buffer.  All commented out by the original author; several contain syntax
 * errors (e.g. `ID=<0`, undeclared `buff`/`state`) and will not compile as-is. */

// struct HandleQueue
// {
//     volatile int read_order;
//     volatile int write_order;
//     volatile int flag;
// };
// void endQueue(void *info) {}
// #define HASH_Queue 0xd98b43dc
// int mQueueSize(MList *queue)
// {
//     mException(INVALID_POINTER(queue),EXIT,"invalid queue");
//     MHandle *hdl=mHandle(queue,Queue);
//     struct HandleQueue *handle = (struct HandleQueue *)(hdl->handle);
//     if(handle->flag>0) return queue->num;
//     if(handle->flag<0) return 0;
//     int n = handle->write_order - handle->read_order;
//     return ((n>0)?n:(queue->num+n));
// }
// void *mQueueWrite(MList *queue,void *data,int size)
// {
//     mException(INVALID_POINTER(queue),EXIT,"invalid queue");
//     mException(queue->num<=0,EXIT,"invalid queue");
//     MHandle *hdl=mHandle(queue,Queue);
//     struct HandleQueue *handle = (struct HandleQueue *)(hdl->handle);
//     if(hdl->valid == 0) {handle->read_order=0;handle->write_order=0;}
//     hdl->valid = 1;
//     if(handle->flag>0) return NULL;
//     int order=mAtomicAdd(&(handle->write_order),1);
//     if(order>=queue->num) order=order-queue->num;
//     void *p = mListWrite(queue,order,data,size);
//     mAtomicCompare(&(handle->write_order),queue->num,0);
//     handle->flag =(handle->write_order == handle->read_order)?1:0;
//     return p;
// }
// void *mQueueRead(MList *queue,void *data,int size)
// {
//     mException(INVALID_POINTER(queue),EXIT,"invalid queue");
//     mException(queue->num<=0,EXIT,"invalid queue");
//     MHandle *hdl=mHandle(queue,Queue);
//     struct HandleQueue *handle = (struct HandleQueue *)(hdl->handle);
//     if(hdl->valid == 0) return NULL;
//     if(handle->flag<0) return NULL;
//     int order = mAtomicAdd(&(handle->read_order),1);
//     void *p = queue->data[order];
//     mAtomicCompare(&(handle->read_order),queue->num,0);
//     handle->flag =(handle->write_order == handle->read_order)?-1:0;
//     if(data!=NULL)
//     {
//         if(size<=0) strcpy((char *)data,(char *)p);
//         else memcpy(data,p,size);
//     }
//     return p;
// }

// struct HashElement
// {
//     int hash;
//     void *data;
// };
// struct HandleHashList
// {
//     int num;
// };
// void mHashList(MList *list,void *data,int size)
// {
//     if(list->size <
/*
struct HandleBuffer
{
    int proc_num;
    int *order;
    unsigned char *state;
};
void endBuffer(void *info)
{
    struct HandleBuffer *handle = info;
    if(handle->state != NULL) mFree(handle->state);
}
#define HASH_Buffer 0xcb4df739
int BufferRead(MList *buffer,int ID,struct HandleBuffer *handle)
{
    int proc_num = handle->proc_num;
    int order = handle->order[ID];
    if(((ID >0)&&(handle->order[ID-1]==order))||((ID==0)&&(handle->order[proc_num-1]==order))) return DFLT;
    int state = handle->state[order];
    if((state&1 == 1)||(order<0))
    {
        order = order + 1;
        if(order == buffer->num)
        {
            if(handle->order[handle->proc_num-1]<0) return DFLT;
            order = 0;
        }
        handle->state[handle->order[ID]] = 0;
        handle->order[ID] = order;
        return BufferRead(buffer,ID,handle);
    }
    return order;
}
void *mBufferSet(MList *buffer,int volume,int proc_num)
{
    mException(INVALID_POINTER(buffer),EXIT,"invalid buffer");
    if(volume>0)
    {
        if(buffer->num>volume) buff->num = volume;
        else mListAppend(buff,volume);
    }
    mException(buffer->num<=1,EXIT,"invalid buffer");
    mException((proc_num<=0),EXIT,"invalid proc_num");
    MHandle *hdl;ObjectHandle(buffer,Buffer,hdl);
    struct HandleBuffer *handle = hdl->handle;
    if(hdl->valid == 0)
    {
        handle->order = mMalloc(proc_num*sizeof(int));
        memset(handle->order,-1,proc_num*sizeof(int));
        handle->proc_num = proc_num;
        handle->state = mMalloc(buffer->num*sizeof(unsigned char));
        memset(handle->state,0,buffer->num*sizeof(unsigned char));
    }
    hdl->valid = 1;
}
void *mBufferWrite(MList *buffer,int ID,void *data,int size)
{
    mException(INVALID_POINTER(buffer),EXIT,"invalid buffer");
    MHandle *hdl;ObjectHandle(buffer,Buffer,hdl);
    struct HandleBuffer *handle = hdl->handle;
    mException((hdl->valid == 0),EXIT,"invalid buffer");
    int proc_num = handle->proc_num;
    mException((ID>=proc_num)||(ID=<0),EXIT,"invalid ID");
    int order = handle->order[ID];
    if((handle->state[order]&2!=0)||(order<0))
    {
        order = order+1;
        if(order==buffer->num) order=0;
        if((ID==0)&&(state[order]!=0)) return NULL;
        if((ID >0)&&(state[order]!=4)) return NULL;
        handle->state[handle->order] = 4;
        handle->order[ID] = order;
    }
    void *p = mListWrite(buffer,order,data,size);
    handle->state[order] = (handle->state[order])|2;
    return p;
}
void mBufferRead(MList *buffer,int ID,void *data,int size)
{
    mException(INVALID_POINTER(buffer),EXIT,"invalid buffer");
    MHandle *hdl;ObjectHandle(buffer,Buffer,hdl);
    struct HandleBuffer *handle = hdl->handle;
    mException((hdl->valid == 0),EXIT,"invalid buffer");
    int proc_num = handle->proc_num;
    mException((ID>=proc_num)||(ID=<0),EXIT,"invalid ID");
    int order = handle->order[ID];
    if((handle->state[order]&1!=0)||(order<0))
    {
        order = order+1;
        if(order==buffer->num)
        {
            if(handle->order[proc_num-1]< 0) return NULL;
            order=0;
        }
        if(ID>0) if(handle->order[ID -1]==order) return NULL;
        else if(proc_num>1) if(handle->order[proc_num-1]==order) return NULL;
        handle->state[handle->order] = 0;
        handle->order = order;
    }
    void *p = mListRead(buffer,order,data,size);
*/
gemm.c
// This file is generated from test alphabets program by code generator in alphaz // To compile this code, use -lm option for math library. // Includes #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <math.h> #include <string.h> #include <limits.h> #include <float.h> #include <omp.h> #include <immintrin.h> #include <malloc.h> // Common Macros #define max(x, y) ((x)>(y) ? (x) : (y)) #define MAX(x, y) ((x)>(y) ? (x) : (y)) #define min(x, y) ((x)>(y) ? (y) : (x)) #define MIN(x, y) ((x)>(y) ? (y) : (x)) #define CEILD(n,d) (int)ceil(((float)(n))/((float)(d))) #define ceild(n,d) (int)ceil(((float)(n))/((float)(d))) #define FLOORD(n,d) (int)floor(((float)(n))/((float)(d))) #define floord(n,d) (int)floor(((float)(n))/((float)(d))) #define CDIV(x,y) CEILD((x),(y)) #define div(x,y) CDIV((x),(y)) #define FDIV(x,y) FLOORD((x),(y)) #define LB_SHIFT(b,s) ((int)ceild(b,s) * s) #define MOD(i,j) ((i)%(j)) #define mallocCheck(v,s,d) if ((v) == NULL) { printf("Failed to allocate memory for %s : size=%lu\n", "sizeof(d)*(s)", sizeof(d)*(s)); exit(-1); } // Reduction Operators #define RADD(x,y) ((x)+=(y)) #define RMUL(x,y) ((x)*=(y)) #define RMAX(x,y) ((x)=MAX((x),(y))) #define RMIN(x,y) ((x)=MIN((x),(y))) // Common functions for min and max //functions for integer max inline int __max_int(int x, int y){ return ((x)>(y) ? (x) : (y)); } inline short __max_short(short x, short y){ return ((x)>(y) ? (x) : (y)); } inline long __max_long(long x, long y){ return ((x)>(y) ? (x) : (y)); } inline unsigned int __max_unsigned_int(unsigned int x, unsigned int y){ return ((x)>(y) ? (x) : (y)); } inline unsigned short __max_unsigned_short(unsigned short x, unsigned short y){ return ((x)>(y) ? (x) : (y)); } //function for float max inline float __max_float(float x, float y){ return ((x)>(y) ? (x) : (y)); } //function for integer min inline int __min_int(int x, int y){ return ((x)>(y) ? (y) : (x)); } inline short __min_short(short x, short y){ return ((x)>(y) ? 
(y) : (x)); } inline long __min_long(long x, long y){ return ((x)>(y) ? (y) : (x)); } inline unsigned int __min_unsigned_int(unsigned int x, unsigned int y){ return ((x)>(y) ? (y) : (x)); } inline unsigned short __min_unsigned_short(unsigned short x, unsigned short y){ return ((x)>(y) ? (y) : (x)); } inline unsigned long __min_unsigned_long(unsigned long x, unsigned long y){ return ((x)>(y) ? (y) : (x)); } inline float __min_float(float x, float y){ return ((x)>(y) ? (y) : (x)); } //Memory Macros #define A(i,j) A[(i) * (Q) + j] #define B(i,j) B[(i) * (R) + j] #define Cout(i,j) Cout[(i) * (R) + j] #define Acc(i,j) Acc[(i) * (R) + j] void gemm(long P, long Q, long R, long ts1_l1, long ts2_l1, long ts3_l1, float* alpha, float* beta, float* A, float* B, float* Cout){ ///Parameter checking if (!((P >= 2 && Q >= 2 && R >= 2 && ts1_l1 > 0 && ts2_l1 > 0 && ts3_l1 > 0))) { printf("The value of parameters are not valid.\n"); exit(-1); } //Memory Allocation float* Acc = (float*)malloc(sizeof(float)*((P) * (R))); mallocCheck(Acc, ((P) * (R)), float); #define S1(i,j,k) Acc(i,k) = (Acc(i,k))+((A(i,j))*(B(j,k))) #define S2(i,j,k) Acc(i,k) = (A(i,j))*(B(j,k)) #define S0(i,j,i2) Cout(i,i2) = ((*alpha)*(Acc(i,i2)))+((*beta)*(Cout(i,i2))) { //Domain //{i,j,k|P>=2 && R>=2 && i>=0 && P>=i+1 && k>=0 && R>=k+1 && Q>=j+1 && j>=1 && Q>=2} //{i,j,k|j==0 && P>=2 && Q>=2 && R>=2 && i>=0 && P>=i+1 && k>=0 && R>=k+1} //{i,j,i2|j==Q-1 && i>=0 && P>=i+1 && Q>=2 && R>=i2+1 && P>=2 && i2>=0 && R>=2} int ti1_l1,ti2_l1,ti3_l1,start_l1_d0,end_l1_d0,time_l1_d0,c1,c2,c3; if ((Q >= 3)) { { { start_l1_d0 = INT_MAX; end_l1_d0 = INT_MIN; ti1_l1 = (ceild((-ts1_l1+1),(ts1_l1))) * (ts1_l1); ti2_l1 = (ceild((-ts2_l1+1),(ts2_l1))) * (ts2_l1); ti3_l1 = (ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1); start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti3_l1 = R-1; start_l1_d0 = 
min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti2_l1 = Q-1; ti3_l1 = (ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1); start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti3_l1 = R-1; start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti1_l1 = P-1; ti2_l1 = (ceild((-ts2_l1+1),(ts2_l1))) * (ts2_l1); ti3_l1 = (ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1); start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti3_l1 = R-1; start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti2_l1 = Q-1; ti3_l1 = (ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1); start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti3_l1 = R-1; start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); for(time_l1_d0=start_l1_d0;time_l1_d0 <= end_l1_d0;time_l1_d0+=1) { #pragma omp parallel for private(c1,c2,c3,ti1_l1,ti2_l1,ti3_l1) schedule(static ,1) for(ti1_l1=(ceild((-ts1_l1+1),(ts1_l1))) * (ts1_l1);ti1_l1 <= P-1;ti1_l1+=ts1_l1) { for(ti2_l1=(ceild((-ts2_l1+1),(ts2_l1))) * (ts2_l1);ti2_l1 <= Q-1;ti2_l1+=ts2_l1) { ti3_l1 = (time_l1_d0 + ((ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1)) * (-1)) * (ts3_l1); if (((ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1) <= ti3_l1 && ti3_l1 <= R-1)) { //guard that 
isolates selected statements for generic point loops if ((0 < ti2_l1 && ti2_l1+ts2_l1 < Q)) { //full-tile guard if ((0 <= ti1_l1 && ti1_l1+ts1_l1 <= P && 0 <= ti2_l1-1 && ti2_l1+ts2_l1 <= Q-1 && 0 <= ti3_l1 && ti3_l1+ts3_l1 <= R)) { for(c1=ti1_l1;c1 <= ti1_l1+ts1_l1-1;c1+=1) { for(c2=ti2_l1;c2 <= ti2_l1+ts2_l1-1;c2+=1) { //#pragma ivdep //#pragma vector always for(c3=ti3_l1;c3 <= ti3_l1+ts3_l1-1;c3+=1) { S1((c1),(c2),(c3)); } } } } else { for(c1=max(ti1_l1,0);c1 <= min(ti1_l1+ts1_l1-1,P-1);c1+=1) { for(c2=max(ti2_l1,1);c2 <= min(ti2_l1+ts2_l1-1,Q-2);c2+=1) { //#pragma ivdep //#pragma vector always for(c3=max(ti3_l1,0);c3 <= min(ti3_l1+ts3_l1-1,R-1);c3+=1) { S1((c1),(c2),(c3)); } } } } } else { { for(c1=max(ti1_l1,0);c1 <= min(ti1_l1+ts1_l1-1,P-1);c1+=1) { for(c2=max(ti2_l1,0);c2 <= min(ti2_l1+ts2_l1-1,0);c2+=1) { for(c3=max(ti3_l1,0);c3 <= min(ti3_l1+ts3_l1-1,R-1);c3+=1) { S2((c1),(0),(c3)); } } for(c2=max(ti2_l1,1);c2 <= min(ti2_l1+ts2_l1-1,Q-2);c2+=1) { //#pragma ivdep //#pragma vector always for(c3=max(ti3_l1,0);c3 <= min(ti3_l1+ts3_l1-1,R-1);c3+=1) { S1((c1),(c2),(c3)); } } for(c2=max(ti2_l1,Q-1);c2 <= min(ti2_l1+ts2_l1-1,Q-1);c2+=1) { for(c3=max(ti3_l1,0);c3 <= min(ti3_l1+ts3_l1-1,R-1);c3+=1) { S1((c1),(Q-1),(c3)); S0((c1),(Q-1),(c3)); } } } } } } } } } } } } if (Q == 2) { { { start_l1_d0 = INT_MAX; end_l1_d0 = INT_MIN; ti1_l1 = (ceild((-ts1_l1+1),(ts1_l1))) * (ts1_l1); ti2_l1 = (ceild((-ts2_l1+1),(ts2_l1))) * (ts2_l1); ti3_l1 = (ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1); start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti3_l1 = R-1; start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti2_l1 = 1; ti3_l1 = (ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1); start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + 
(ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti3_l1 = R-1; start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti1_l1 = P-1; ti2_l1 = (ceild((-ts2_l1+1),(ts2_l1))) * (ts2_l1); ti3_l1 = (ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1); start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti3_l1 = R-1; start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti2_l1 = 1; ti3_l1 = (ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1); start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti3_l1 = R-1; start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); for(time_l1_d0=start_l1_d0;time_l1_d0 <= end_l1_d0;time_l1_d0+=1) { #pragma omp parallel for private(c1,c2,c3,ti1_l1,ti2_l1,ti3_l1) schedule(static ,1) for(ti1_l1=(ceild((-ts1_l1+1),(ts1_l1))) * (ts1_l1);ti1_l1 <= P-1;ti1_l1+=ts1_l1) { for(ti2_l1=(ceild((-ts2_l1+1),(ts2_l1))) * (ts2_l1);ti2_l1 <= 1;ti2_l1+=ts2_l1) { ti3_l1 = (time_l1_d0 + ((ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1)) * (-1)) * (ts3_l1); if (((ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1) <= ti3_l1 && ti3_l1 <= R-1)) { //guard that isolates selected statements for generic point loops if (0 < ti2_l1) { //full-tile guard if (0 <= ti1_l1 && ti1_l1+ts1_l1 <= P && 0 <= ti2_l1-1 && ti2_l1+ts2_l1 <= 2 && 0 <= ti3_l1 && ti3_l1+ts3_l1 <= R) { for(c1=ti1_l1;c1 <= ti1_l1+ts1_l1-1;c1+=1) { for(c2=ti2_l1;c2 <= ti2_l1+ts2_l1-1;c2+=1) { 
for(c3=ti3_l1;c3 <= ti3_l1+ts3_l1-1;c3+=1) { S1((c1),(1),(c3)); S0((c1),(1),(c3)); } } } } else { for(c1=max(ti1_l1,0);c1 <= min(ti1_l1+ts1_l1-1,P-1);c1+=1) { for(c2=max(ti2_l1,1);c2 <= min(ti2_l1+ts2_l1-1,1);c2+=1) { for(c3=max(ti3_l1,0);c3 <= min(ti3_l1+ts3_l1-1,R-1);c3+=1) { S1((c1),(1),(c3)); S0((c1),(1),(c3)); } } } } } else { { for(c1=max(ti1_l1,0);c1 <= min(ti1_l1+ts1_l1-1,P-1);c1+=1) { for(c2=max(ti2_l1,0);c2 <= min(ti2_l1+ts2_l1-1,0);c2+=1) { for(c3=max(ti3_l1,0);c3 <= min(ti3_l1+ts3_l1-1,R-1);c3+=1) { S2((c1),(0),(c3)); } } for(c2=max(ti2_l1,1);c2 <= min(ti2_l1+ts2_l1-1,1);c2+=1) { for(c3=max(ti3_l1,0);c3 <= min(ti3_l1+ts3_l1-1,R-1);c3+=1) { S1((c1),(1),(c3)); S0((c1),(1),(c3)); } } } } } } } } } } } } } #undef S1 #undef S2 #undef S0 //Memory Free free(Acc); } //Memory Macros #undef A #undef B #undef Cout #undef Acc //Common Macro undefs #undef max #undef MAX #undef min #undef MIN #undef CEILD #undef ceild #undef FLOORD #undef floord #undef CDIV #undef FDIV #undef LB_SHIFT #undef MOD #undef RADD #undef RMUL #undef RMAX #undef RMIN
gbdt.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_BOOSTING_GBDT_H_ #define LIGHTGBM_BOOSTING_GBDT_H_ #include <LightGBM/boosting.h> #include <LightGBM/objective_function.h> #include <LightGBM/prediction_early_stop.h> #include <LightGBM/cuda/vector_cudahost.h> #include <LightGBM/utils/json11.h> #include <LightGBM/utils/threading.h> #include <string> #include <algorithm> #include <cstdio> #include <fstream> #include <map> #include <memory> #include <mutex> #include <unordered_map> #include <utility> #include <vector> #include "score_updater.hpp" namespace LightGBM { using json11::Json; /*! * \brief GBDT algorithm implementation. including Training, prediction, bagging. */ class GBDT : public GBDTBase { public: /*! * \brief Constructor */ GBDT(); /*! * \brief Destructor */ ~GBDT(); /*! * \brief Initialization logic * \param gbdt_config Config for boosting * \param train_data Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void Init(const Config* gbdt_config, const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Merge model from other boosting object. 
Will insert to the front of current boosting object * \param other */ void MergeFrom(const Boosting* other) override { auto other_gbdt = reinterpret_cast<const GBDT*>(other); // tmp move to other vector auto original_models = std::move(models_); models_ = std::vector<std::unique_ptr<Tree>>(); // push model from other first for (const auto& tree : other_gbdt->models_) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; // push model in current object for (const auto& tree : original_models) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; } void ShuffleModels(int start_iter, int end_iter) override { int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_; start_iter = std::max(0, start_iter); if (end_iter <= 0) { end_iter = total_iter; } end_iter = std::min(total_iter, end_iter); auto original_models = std::move(models_); std::vector<int> indices(total_iter); for (int i = 0; i < total_iter; ++i) { indices[i] = i; } Random tmp_rand(17); for (int i = start_iter; i < end_iter - 1; ++i) { int j = tmp_rand.NextShort(i + 1, end_iter); std::swap(indices[i], indices[j]); } models_ = std::vector<std::unique_ptr<Tree>>(); for (int i = 0; i < total_iter; ++i) { for (int j = 0; j < num_tree_per_iteration_; ++j) { int tree_idx = indices[i] * num_tree_per_iteration_ + j; auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get()))); models_.push_back(std::move(new_tree)); } } } /*! 
* \brief Reset the training data * \param train_data New Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Reset Boosting Config * \param gbdt_config Config for boosting */ void ResetConfig(const Config* gbdt_config) override; /*! * \brief Adding a validation dataset * \param valid_data Validation dataset * \param valid_metrics Metrics for validation dataset */ void AddValidDataset(const Dataset* valid_data, const std::vector<const Metric*>& valid_metrics) override; /*! * \brief Perform a full training procedure * \param snapshot_freq frequency of snapshot * \param model_output_path path of model file */ void Train(int snapshot_freq, const std::string& model_output_path) override; void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override; /*! * \brief Training logic * \param gradients nullptr for using default objective, otherwise use self-defined boosting * \param hessians nullptr for using default objective, otherwise use self-defined boosting * \return True if cannot train any more */ bool TrainOneIter(const score_t* gradients, const score_t* hessians) override; /*! * \brief Rollback one iteration */ void RollbackOneIter() override; /*! * \brief Get current iteration */ int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; } /*! * \brief Can use early stopping for prediction or not * \return True if cannot use early stopping for prediction */ bool NeedAccuratePrediction() const override { if (objective_function_ == nullptr) { return true; } else { return objective_function_->NeedAccuratePrediction(); } } /*! 
* \brief Get evaluation result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return evaluation result */ std::vector<double> GetEvalAt(int data_idx) const override; /*! * \brief Get current training score * \param out_len length of returned score * \return training score */ const double* GetTrainingScore(int64_t* out_len) override; /*! * \brief Get size of prediction at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return The size of prediction */ int64_t GetNumPredictAt(int data_idx) const override { CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size())); data_size_t num_data = train_data_->num_data(); if (data_idx > 0) { num_data = valid_score_updater_[data_idx - 1]->num_data(); } return num_data * num_class_; } /*! * \brief Get prediction result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \param result used to store prediction result, should allocate memory before call this function * \param out_len length of returned score */ void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override; /*! 
* \brief Get number of prediction for one data * \param start_iteration Start index of the iteration to predict * \param num_iteration number of used iterations * \param is_pred_leaf True if predicting leaf index * \param is_pred_contrib True if predicting feature contribution * \return number of prediction */ inline int NumPredictOneRow(int start_iteration, int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override { int num_pred_in_one_row = num_class_; if (is_pred_leaf) { int max_iteration = GetCurrentIteration(); start_iteration = std::max(start_iteration, 0); start_iteration = std::min(start_iteration, max_iteration); if (num_iteration > 0) { num_pred_in_one_row *= static_cast<int>(std::min(max_iteration - start_iteration, num_iteration)); } else { num_pred_in_one_row *= (max_iteration - start_iteration); } } else if (is_pred_contrib) { num_pred_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline } return num_pred_in_one_row; } void PredictRaw(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictRawByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void Predict(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void PredictLeafIndex(const double* features, double* output) const override; void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override; void PredictContrib(const double* features, double* output) const override; void PredictContribByMap(const std::unordered_map<int, double>& features, std::vector<std::unordered_map<int, double>>* output) const override; /*! 
* \brief Dump model to json format string * \param start_iteration The model will be saved start from * \param num_iteration Number of iterations that want to dump, -1 means dump all * \param feature_importance_type Type of feature importance, 0: split, 1: gain * \return Json format string of model */ std::string DumpModel(int start_iteration, int num_iteration, int feature_importance_type) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \return if-else format codes of model */ std::string ModelToIfElse(int num_iteration) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToIfElse(int num_iteration, const char* filename) const override; /*! * \brief Save model to file * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \param feature_importance_type Type of feature importance, 0: split, 1: gain * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToFile(int start_iteration, int num_iterations, int feature_importance_type, const char* filename) const override; /*! * \brief Save model to string * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \param feature_importance_type Type of feature importance, 0: split, 1: gain * \return Non-empty string if succeeded */ std::string SaveModelToString(int start_iteration, int num_iterations, int feature_importance_type) const override; /*! * \brief Restore from a serialized buffer */ bool LoadModelFromString(const char* buffer, size_t len) override; /*! 
* \brief Calculate feature importances * \param num_iteration Number of model that want to use for feature importance, -1 means use all * \param importance_type: 0 for split, 1 for gain * \return vector of feature_importance */ std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override; /*! * \brief Calculate upper bound value * \return upper bound value */ double GetUpperBoundValue() const override; /*! * \brief Calculate lower bound value * \return lower bound value */ double GetLowerBoundValue() const override; /*! * \brief Get max feature index of this model * \return Max feature index of this model */ inline int MaxFeatureIdx() const override { return max_feature_idx_; } /*! * \brief Get feature names of this model * \return Feature names of this model */ inline std::vector<std::string> FeatureNames() const override { return feature_names_; } /*! * \brief Get index of label column * \return index of label column */ inline int LabelIdx() const override { return label_idx_; } /*! * \brief Get number of weak sub-models * \return Number of weak sub-models */ inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); } /*! * \brief Get number of tree per iteration * \return number of tree per iteration */ inline int NumModelPerIteration() const override { return num_tree_per_iteration_; } /*! 
* \brief Get number of classes * \return Number of classes */ inline int NumberOfClasses() const override { return num_class_; } inline void InitPredict(int start_iteration, int num_iteration, bool is_pred_contrib) override { num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; start_iteration = std::max(start_iteration, 0); start_iteration = std::min(start_iteration, num_iteration_for_pred_); if (num_iteration > 0) { num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_ - start_iteration); } else { num_iteration_for_pred_ = num_iteration_for_pred_ - start_iteration; } start_iteration_for_pred_ = start_iteration; if (is_pred_contrib) { #pragma omp parallel for schedule(static) for (int i = 0; i < static_cast<int>(models_.size()); ++i) { models_[i]->RecomputeMaxDepth(); } } } inline double GetLeafValue(int tree_idx, int leaf_idx) const override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); return models_[tree_idx]->LeafOutput(leaf_idx); } inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); models_[tree_idx]->SetLeafOutput(leaf_idx, val); } /*! * \brief Get Type name of this boosting object */ const char* SubModelName() const override { return "tree"; } /*! * \brief Get the trees contained in this boosting class. Used for MOJO writing. */ inline const std::vector<std::unique_ptr<Tree>>& GetTrees() const override { return models_; } bool IsLinear() const override { return linear_tree_; } protected: virtual bool GetIsConstHessian(const ObjectiveFunction* objective_function) { if (objective_function != nullptr) { return objective_function->IsConstantHessian(); } else { return false; } } /*! 
* \brief Print eval result and check early stopping */ virtual bool EvalAndCheckEarlyStopping(); /*! * \brief reset config for bagging */ void ResetBaggingConfig(const Config* config, bool is_change_dataset); /*! * \brief Implement bagging logic * \param iter Current interation */ virtual void Bagging(int iter); virtual data_size_t BaggingHelper(data_size_t start, data_size_t cnt, data_size_t* buffer); data_size_t BalancedBaggingHelper(data_size_t start, data_size_t cnt, data_size_t* buffer); /*! * \brief calculate the object function */ virtual void Boosting(); /*! * \brief updating score after tree was trained * \param tree Trained tree of this iteration * \param cur_tree_id Current tree for multiclass training */ virtual void UpdateScore(const Tree* tree, const int cur_tree_id); /*! * \brief eval results for one metric */ virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const; /*! * \brief Print metric result of current iteration * \param iter Current iteration * \return best_msg if met early_stopping */ std::string OutputMetric(int iter); double BoostFromAverage(int class_id, bool update_scorer); /*! \brief current iteration */ int iter_; /*! \brief Pointer to training data */ const Dataset* train_data_; /*! \brief Config of gbdt */ std::unique_ptr<Config> config_; /*! \brief Tree learner, will use this class to learn trees */ std::unique_ptr<TreeLearner> tree_learner_; /*! \brief Objective function */ const ObjectiveFunction* objective_function_; /*! \brief Store and update training data's score */ std::unique_ptr<ScoreUpdater> train_score_updater_; /*! \brief Metrics for training data */ std::vector<const Metric*> training_metrics_; /*! \brief Store and update validation data's scores */ std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_; /*! \brief Metric for validation data */ std::vector<std::vector<const Metric*>> valid_metrics_; /*! 
\brief Number of rounds for early stopping */ int early_stopping_round_; /*! \brief Only use first metric for early stopping */ bool es_first_metric_only_; /*! \brief Best iteration(s) for early stopping */ std::vector<std::vector<int>> best_iter_; /*! \brief Best score(s) for early stopping */ std::vector<std::vector<double>> best_score_; /*! \brief output message of best iteration */ std::vector<std::vector<std::string>> best_msg_; /*! \brief Trained models(trees) */ std::vector<std::unique_ptr<Tree>> models_; /*! \brief Max feature index of training data*/ int max_feature_idx_; #ifdef USE_CUDA /*! \brief First order derivative of training data */ std::vector<score_t, CHAllocator<score_t>> gradients_; /*! \brief Second order derivative of training data */ std::vector<score_t, CHAllocator<score_t>> hessians_; #else /*! \brief First order derivative of training data */ std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> gradients_; /*! \brief Second order derivative of training data */ std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> hessians_; #endif /*! \brief Store the indices of in-bag data */ std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_data_indices_; /*! \brief Number of in-bag data */ data_size_t bag_data_cnt_; /*! \brief Number of training data */ data_size_t num_data_; /*! \brief Number of trees per iterations */ int num_tree_per_iteration_; /*! \brief Number of class */ int num_class_; /*! \brief Index of label column */ data_size_t label_idx_; /*! \brief number of used model */ int num_iteration_for_pred_; /*! \brief Start iteration of used model */ int start_iteration_for_pred_; /*! \brief Shrinkage rate for one iteration */ double shrinkage_rate_; /*! \brief Number of loaded initial models */ int num_init_iteration_; /*! 
\brief Feature names */ std::vector<std::string> feature_names_; std::vector<std::string> feature_infos_; std::unique_ptr<Dataset> tmp_subset_; bool is_use_subset_; std::vector<bool> class_need_train_; bool is_constant_hessian_; std::unique_ptr<ObjectiveFunction> loaded_objective_; bool average_output_; bool need_re_bagging_; bool balanced_bagging_; std::string loaded_parameter_; std::vector<int8_t> monotone_constraints_; const int bagging_rand_block_ = 1024; std::vector<Random> bagging_rands_; ParallelPartitionRunner<data_size_t, false> bagging_runner_; Json forced_splits_json_; bool linear_tree_; }; } // namespace LightGBM #endif // LightGBM_BOOSTING_GBDT_H_
GB_binop__le_uint8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// This translation unit instantiates every kernel for the LE (less-or-equal)
// binary operator on uint8_t inputs with a bool result.  The actual loop
// bodies live in the included "*_template.c" / "*_meta.c" files; the macros
// below plug this operator and its types into those shared templates.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__le_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_01__le_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__le_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_03__le_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__le_uint8)
// A*D function (colscale):         GB (_AxD__le_uint8)
// D*A function (rowscale):         GB (_DxB__le_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__le_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__le_uint8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__le_uint8)
// C=scalar+B                       GB (_bind1st__le_uint8)
// C=scalar+B'                      GB (_bind1st_tran__le_uint8)
// C=A+scalar                       GB (_bind2nd__le_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__le_uint8)

// C type:   bool
// A type:   uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij <= bij)

#define GB_ATYPE uint8_t

#define GB_BTYPE uint8_t

#define GB_CTYPE bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE 1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE 0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE 0

// aij = Ax [pA] (GBX also handles the iso-valued case)
#define GB_GETA(aij,Ax,pA,A_iso) uint8_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) uint8_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: z = (x <= y)
#define GB_BINOP(z,x,y,i,j) z = (x <= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP 0

// op is second
#define GB_OP_IS_SECOND 0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE (GxB_NO_LE || GxB_NO_UINT8 || GxB_NO_LE_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LE is not one of these, so the generator emits no dense-accum kernel here.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the generator disabled the template for this operator, so this kernel
    // is a stub that reports success without doing any work
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__le_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the generator disabled the template for this operator (stub; see above)
    #if 0
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__le_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces are declared here and freed by GB_FREE_WORK below; the
    // template slices M, A, and B into per-task chunks as needed
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__le_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (LE's flip was handled by the generator: flip(le) is ge.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__le_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__le_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b if B is bitmap
    int64_t bnz,                // number of entries to scan
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__le_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) { uint8_t aij = GBX (Ax, pA, false) ; Cx [pC] = (x <= aij) ; }

GrB_Info GB (_bind1st_tran__le_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (same type here; kept for generator symmetry)
    #undef  GB_ATYPE
    #define GB_ATYPE uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) { uint8_t aij = GBX (Ax, pA, false) ; Cx [pC] = (aij <= y) ; }

GrB_Info GB (_bind2nd_tran__le_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__rminus_fp64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// This translation unit instantiates every kernel for the RMINUS (reverse
// minus) binary operator on double: z = rminus(x,y) = (y - x).  The loop
// bodies live in the included "*_template.c" / "*_meta.c" files; the macros
// below plug this operator and its types into those shared templates.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__rminus_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_01__rminus_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__rminus_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_03__rminus_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__rminus_fp64)
// A*D function (colscale):         GB (_AxD__rminus_fp64)
// D*A function (rowscale):         GB (_DxB__rminus_fp64)
// C+=B function (dense accum):     GB (_Cdense_accumB__rminus_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__rminus_fp64)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__rminus_fp64)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__rminus_fp64)
// C=scalar+B                       GB (_bind1st__rminus_fp64)
// C=scalar+B'                      GB (_bind1st_tran__rminus_fp64)
// C=A+scalar                       GB (_bind2nd__rminus_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__rminus_fp64)

// C type:   double
// A type:   double
// B,b type: double
// BinaryOp: cij = (bij - aij)

#define GB_ATYPE double

#define GB_BTYPE double

#define GB_CTYPE double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE 1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE 1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE 1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) double aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) double bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: rminus is "reverse minus", z = (y - x)
#define GB_BINOP(z, x, y, i, j) z = (y - x) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP 0

// op is second
#define GB_OP_IS_SECOND 0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE (GxB_NO_RMINUS || GxB_NO_FP64 || GxB_NO_RMINUS_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__rminus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__rminus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rminus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rminus_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // not reached (the block above always returns); kept by the generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rminus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rminus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rminus_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces are declared here and freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__rminus_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rminus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__rminus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rminus_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__rminus_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b if B is bitmap
    int64_t anz,                // NOTE: this is B's entry count despite the name
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        double bij = Bx [p] ;
        Cx [p] = (bij - x) ;    // rminus(x,bij) = bij - x
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__rminus_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        double aij = Ax [p] ;
        Cx [p] = (y - aij) ;    // rminus(aij,y) = y - aij
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) { double aij = Ax [pA] ; Cx [pC] = (aij - x) ; }

GrB_Info GB (_bind1st_tran__rminus_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (same type here; kept for generator symmetry)
    #undef  GB_ATYPE
    #define GB_ATYPE double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) { double aij = Ax [pA] ; Cx [pC] = (y - aij) ; }

GrB_Info GB (_bind2nd_tran__rminus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
syr2k.limlam-par.c
/** * This version is stamped on May 10, 2016 * * Contact: * Louis-Noel Pouchet <pouchet.ohio-state.edu> * Tomofumi Yuki <tomofumi.yuki.fr> * * Web address: http://polybench.sourceforge.net */ /* syr2k.c: this file is part of PolyBench/C */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ #include "syr2k.h" /* Array initialization. */ static void init_array(int n, int m, DATA_TYPE *alpha, DATA_TYPE *beta, DATA_TYPE POLYBENCH_2D(C,N,N,n,n), DATA_TYPE POLYBENCH_2D(A,N,M,n,m), DATA_TYPE POLYBENCH_2D(B,N,M,n,m)) { int i, j; *alpha = 1.5; *beta = 1.2; for (i = 0; i < n; i++) for (j = 0; j < m; j++) { A[i][j] = (DATA_TYPE) ((i*j+1)%n) / n; B[i][j] = (DATA_TYPE) ((i*j+2)%m) / m; } for (i = 0; i < n; i++) for (j = 0; j < n; j++) { C[i][j] = (DATA_TYPE) ((i*j+3)%n) / m; } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n)) { int i, j; POLYBENCH_DUMP_START; POLYBENCH_DUMP_BEGIN("C"); for (i = 0; i < n; i++) for (j = 0; j < n; j++) { if ((i * n + j) % 20 == 0) fprintf (POLYBENCH_DUMP_TARGET, "\n"); fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, C[i][j]); } POLYBENCH_DUMP_END("C"); POLYBENCH_DUMP_FINISH; } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_syr2k(int n, int m, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE POLYBENCH_2D(C,N,N,n,n), DATA_TYPE POLYBENCH_2D(A,N,M,n,m), DATA_TYPE POLYBENCH_2D(B,N,M,n,m)) { int i, j, k; //BLAS PARAMS //UPLO = 'L' //TRANS = 'N' //A is NxM //B is NxM //C is NxN #pragma scop #pragma omp parallel for private(i,j,k) for (i = 0; i < _PB_N; i++) { #pragma omp parallel for private(j,k) for (j = 0; j <= i; j++) { C[i][j] *= beta; for (k = 0; k < _PB_M; k++) { C[i][j] += A[j][k]*alpha*B[i][k] + B[j][k]*alpha*A[i][k]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; double footprint = 8*(n*n + 2*n*m); // HAVERFORD added code double FP_ops = 3.0 * m * (n + 1) * n; // HAVERFORD added code #ifdef POLYBENCH_GFLOPS polybench_set_program_flops(FP_ops); // HAVERFORD addition #endif #if defined POLYFORD_VERBOSE printf("Starting %s, n=%8d, m=%8d, Footprint %8.4g M, Source FP ops=%8.4g G\n", __FILE__, n, m, footprint / (1024 * 1024), FP_ops/1000000000.0); #endif /* Variable declaration/allocation. */ DATA_TYPE alpha; DATA_TYPE beta; POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE,N,M,n,m); POLYBENCH_2D_ARRAY_DECL(B,DATA_TYPE,N,M,n,m); /* Initialize array(s). */ init_array (n, m, &alpha, &beta, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_syr2k (n, m, alpha, beta, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(C))); /* Be clean. */ POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
bfs_replicated_csc.c
/* Copyright (C) 2010 The Trustees of Indiana University. */ /* */ /* Use, modification and distribution is subject to the Boost Software */ /* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */ /* http://www.boost.org/LICENSE_1_0.txt) */ /* */ /* Authors: Jeremiah Willcock */ /* Andrew Lumsdaine */ #define _GNU_SOURCE #include "common.h" #include "oned_csc.h" #include "onesided.h" #include <mpi.h> #include <stdint.h> #include <inttypes.h> #include <stdlib.h> #include <stddef.h> #include <string.h> #include <limits.h> #include <assert.h> static oned_csc_graph g; static unsigned long* g_in_queue; static unsigned long* g_in_queue_summary; static unsigned long* g_out_queue; static unsigned long* g_out_queue_summary; static unsigned long* g_visited; static void allocate_memory(void) { int32_t maxlocalverts = g.max_nlocalverts; // int64_t maxlocalverts = g.max_nlocalverts; // int64_t local_queue_summary_size = (maxlocalverts + ULONG_BITS * ULONG_BITS - 1) / ULONG_BITS / ULONG_BITS; int32_t local_queue_summary_size = (maxlocalverts + ULONG_BITS * ULONG_BITS - 1) / ULONG_BITS / ULONG_BITS; //int64_t local_queue_size = local_queue_summary_size * ULONG_BITS; int32_t local_queue_size = local_queue_summary_size * ULONG_BITS; // int64_t global_queue_summary_size = MUL_SIZE(local_queue_summary_size); int32_t global_queue_summary_size = MUL_SIZE(local_queue_summary_size); int32_t global_queue_size = MUL_SIZE(local_queue_size); //int64_t global_queue_size = MUL_SIZE(local_queue_size); g_in_queue = (unsigned long*)xmalloc(global_queue_size * sizeof(unsigned long)); g_in_queue_summary = (unsigned long*)xmalloc(global_queue_summary_size * sizeof(unsigned long)); g_out_queue = (unsigned long*)xmalloc(local_queue_size * sizeof(unsigned long)); g_out_queue_summary = (unsigned long*)xmalloc(local_queue_summary_size * sizeof(unsigned long)); g_visited = (unsigned long*)xmalloc(local_queue_size * sizeof(unsigned long)); } static void deallocate_memory(void) { 
free(g_in_queue); g_in_queue = NULL; free(g_in_queue_summary); g_in_queue_summary = NULL; free(g_out_queue); g_out_queue = NULL; free(g_out_queue_summary); g_out_queue_summary = NULL; free(g_visited); g_visited = NULL; } void make_graph_data_structure(const tuple_graph* const tg) { convert_graph_to_oned_csc(tg, &g); allocate_memory(); /* Make sure all of the space is available */ deallocate_memory(); } void free_graph_data_structure(void) { free_oned_csc_graph(&g); /* deallocate_memory(); */ } int bfs_writes_depth_map(void) { return 0; } /* This version is the traditional level-synchronized BFS using two queues. A * bitmap is used to indicate which vertices have been visited. Messages are * sent and processed asynchronously throughout the code to hopefully overlap * communication with computation. */ //void run_bfs(int64_t root, int64_t* pred) void run_bfs(int32_t root, int32_t* pred) { allocate_memory(); const ptrdiff_t nlocalverts = g.nlocalverts; /* const int64_t nglobalverts = g.nglobalverts; */ const size_t* const restrict rowstarts = g.rowstarts; const int32_t* const restrict column = g.column; //const int64_t* const restrict column = g.column; /* Set up the visited bitmap. 
*/ int lg_local_queue_size = g.lg_local_queue_size; int32_t local_queue_size = INT32_C(1) << lg_local_queue_size; // int64_t local_queue_size = INT64_C(1) << lg_local_queue_size; int32_t local_queue_summary_size = local_queue_size / ULONG_BITS;// int64_t local_queue_summary_size = local_queue_size / ULONG_BITS; int32_t global_queue_summary_size = MUL_SIZE(local_queue_summary_size); // int64_t global_queue_summary_size = MUL_SIZE(local_queue_summary_size); int32_t global_queue_size = MUL_SIZE(local_queue_size);//int64_t global_queue_size = MUL_SIZE(local_queue_size); #if 0 // int64_t* restrict column_swizzled = (int64_t*)xmalloc(nlocaledges * sizeof(int64_t)); int32_t* restrict column_swizzled = (int32_t*)xmalloc(nlocaledges * sizeof(int32_t)); { size_t i; for (i = 0; i < nlocaledges; ++i) { int32_t c = column[i]; // int64_t c = column[i]; column_swizzled[i] = SWIZZLE_VERTEX(c); } } #endif unsigned long* restrict in_queue = g_in_queue; memset(in_queue, 0, global_queue_size * sizeof(unsigned long)); unsigned long* restrict in_queue_summary = g_in_queue_summary; memset(in_queue_summary, 0, global_queue_summary_size * sizeof(unsigned long)); unsigned long* restrict out_queue = g_out_queue; unsigned long* restrict out_queue_summary = g_out_queue_summary; unsigned long* restrict visited = g_visited; memset(visited, 0, local_queue_size * sizeof(unsigned long)); // #define SET_IN(v) do {int64_t vs = SWIZZLE_VERTEX(v); size_t word_idx = vs / ULONG_BITS; int bit_idx = vs % ULONG_BITS; unsigned long mask = (1UL << bit_idx); #define SET_IN(v) do {int32_t vs = SWIZZLE_VERTEX(v); size_t word_idx = vs / ULONG_BITS; int bit_idx = vs % ULONG_BITS; unsigned long mask = (1UL << bit_idx); in_queue_summary[word_idx / ULONG_BITS] |= (1UL << (word_idx % ULONG_BITS)); in_queue[word_idx] |= mask;} while (0) #define TEST_IN(vs) (((in_queue_summary[vs / ULONG_BITS / ULONG_BITS] & (1UL << ((vs / ULONG_BITS) % ULONG_BITS))) != 0) && ((in_queue[vs / ULONG_BITS] & (1UL << (vs % ULONG_BITS))) != 
0)) #define TEST_VISITED_LOCAL(v) ((visited[(v) / ULONG_BITS] & (1UL << ((v) % ULONG_BITS))) != 0) #define TAS_VISITED_LOCAL(v) (((__sync_fetch_and_or(&visited[(v) / ULONG_BITS], (1UL << ((v) % ULONG_BITS))) & (1UL << ((v) % ULONG_BITS))) != 0) ? 1 : (__sync_fetch_and_or(&out_queue[(v) / ULONG_BITS], (1UL << ((v) % ULONG_BITS))), 0)) // #define SET_VISITED_LOCAL(v) do {size_t word_idx = (v) / ULONG_BITS; int bit_idx = (v) % ULONG_BITS; unsigned long mask = (1UL << bit_idx); __sync_fetch_and_or(&visited[word_idx], mask); __sync_fetch_and_or(&out_queue[word_idx], mask);} while (0) #define SET_VISITED_LOCAL(v) do {size_t word_idx = (v) / ULONG_BITS; int bit_idx = (v) % ULONG_BITS; unsigned long mask = (1UL << bit_idx); visited[word_idx] |= mask; out_queue[word_idx] |= mask;} while (0) SET_IN(root); {ptrdiff_t i; _Pragma("omp parallel for schedule(static)") for (i = 0; i < nlocalverts; ++i) pred[i] = -1;} if (VERTEX_OWNER(root) == rank) { pred[VERTEX_LOCAL(root)] = root; SET_VISITED_LOCAL(VERTEX_LOCAL(root)); } uint16_t cur_level = 0; while (1) { ++cur_level; #if 0 if (rank == 0) fprintf(stderr, "BFS level %" PRIu16 "\n", cur_level); #endif memset(out_queue, 0, local_queue_size * sizeof(unsigned long)); // memset(out_queue_summary, 0, local_queue_summary_size * sizeof(unsigned long)); ptrdiff_t i, ii_summary; #if 0 #pragma omp parallel for schedule(static) for (i = 0; i < global_queue_summary_size; ++i) { unsigned long val = 0UL; int j; unsigned long mask = 1UL; for (j = 0; j < ULONG_BITS; ++j, mask <<= 1) { if (in_queue[i * ULONG_BITS + j]) val |= mask; } in_queue_summary[i] = val; } #endif unsigned long not_done = 0; #pragma omp parallel for schedule(static) reduction(|:not_done) for (ii_summary = 0; ii_summary < global_queue_summary_size; ++ii_summary) { uint32_t val_summary = in_queue_summary[ii_summary]; // uint64_t val_summary = in_queue_summary[ii_summary]; if (val_summary == 0) continue; int ii_offset; ptrdiff_t ii; for (ii_offset = 0; ii_offset < ULONG_BITS; 
++ii_offset) {
        /* Skip summary bits that are clear: no queued vertices in that in-queue word. */
        // if ((val_summary & (UINT64_C(1) << ii_offset)) == 0) continue;
        if ((val_summary & (UINT64_C(1) << ii_offset)) == 0) continue;
        ii = ii_summary * ULONG_BITS + ii_offset;
        uint32_t val = in_queue[ii]; // uint64_t val = in_queue[ii];
        if (val == 0) continue;
        /* Scan the CSR rows covered by this in-queue word; each set bit of `val`
         * marks a frontier vertex whose edges must be relaxed this level. */
        size_t i, i_end = rowstarts[ii + 1];
        for (i = rowstarts[ii]; i < i_end; ++i) {
          int32_t c = column[i];//int64_t c = column[i];
          int32_t v0_local = c / ULONG_BITS; // int64_t v0_local = c / ULONG_BITS;
          /* If the source endpoint is in the frontier and the local endpoint has not
           * been claimed yet (TAS_VISITED_LOCAL atomically tests-and-sets it and also
           * enqueues it in out_queue), record the predecessor. */
          //if ((val & (UINT64_C(1) << (c % ULONG_BITS))) != 0 /* TEST_IN(v1_swizzled) */ && !TAS_VISITED_LOCAL(v0_local))
          if ((val & (UINT32_C(1) << (c % ULONG_BITS))) != 0 /* TEST_IN(v1_swizzled) */ && !TAS_VISITED_LOCAL(v0_local)) {
            assert (pred[v0_local] == -1);
            /* Reconstruct the swizzled global id of the frontier vertex; the exact
             * bit layout is defined by SWIZZLE/UNSWIZZLE_VERTEX (not visible here). */
            int32_t v1_swizzled = (int32_t)ii * ULONG_BITS + c % ULONG_BITS; // int64_t v1_swizzled = (int64_t)ii * ULONG_BITS + c % ULONG_BITS;
            pred[v0_local] = UNSWIZZLE_VERTEX(v1_swizzled);
            not_done |= 1;
          }
        }
      }
    }
#if 1
    /* Rebuild the out-queue summary (one bit per nonzero out_queue word) and fold
     * the newly discovered vertices into the local visited bitmap. */
#pragma omp parallel for schedule(static)
    for (i = 0; i < local_queue_summary_size; ++i) {
      unsigned long val = 0UL;
      int j;
      unsigned long mask = 1UL;
      for (j = 0; j < ULONG_BITS; ++j, mask <<= 1) {
        unsigned long full_val = out_queue[i * ULONG_BITS + j];
        visited[i * ULONG_BITS + j] |= full_val;
        if (full_val) val |= mask;
      }
      out_queue_summary[i] = val;
      // not_done |= val;
    }
#endif
    /* Global termination test: OR the per-rank "made progress" flags; stop when no
     * rank discovered a new vertex this level. */
    MPI_Allreduce(MPI_IN_PLACE, &not_done, 1, MPI_UNSIGNED_LONG, MPI_BOR, MPI_COMM_WORLD);
    if (not_done == 0) break;
    /* Replicate every rank's out queue (and its summary) so each rank holds the full
     * next-level frontier as its in queue. */
    MPI_Allgather(out_queue, local_queue_size, MPI_UNSIGNED_LONG, in_queue, local_queue_size, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
    MPI_Allgather(out_queue_summary, local_queue_summary_size, MPI_UNSIGNED_LONG, in_queue_summary, local_queue_summary_size, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
  }
  deallocate_memory();
}

/* For each of `count` global vertex ids, report the owning rank and the local index
 * on that rank (used when validating/gathering the predecessor array). */
//void get_vertex_distribution_for_pred(size_t count, const int64_t* vertex_p, int* owner_p, size_t* local_p)
void get_vertex_distribution_for_pred(size_t count, const int32_t* vertex_p, int* owner_p, size_t* local_p) {
  const int32_t* restrict vertex = vertex_p; //const int64_t* restrict vertex = vertex_p;
  int* restrict owner = owner_p;
  size_t* restrict local = local_p;
  ptrdiff_t i;
#pragma omp parallel for
  for (i = 0; i < (ptrdiff_t)count; ++i) {
    owner[i] = VERTEX_OWNER(vertex[i]);
    local[i] = VERTEX_LOCAL(vertex[i]);
  }
}

/* Inverse of the mapping above: (rank, local index) -> global vertex id. */
//int64_t vertex_to_global_for_pred(int v_rank, size_t v_local)
int32_t vertex_to_global_for_pred(int v_rank, size_t v_local) {
  return VERTEX_TO_GLOBAL(v_rank, v_local);
}

/* Number of vertices owned by this rank (size of the local pred array). */
size_t get_nlocalverts_for_pred(void) {
  return g.nlocalverts;
}
/* ======================== par_rap.c ======================== */
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_parcsr_ls.h" #include "_hypre_utilities.h" #include "hypre_hopscotch_hash.h" /*-------------------------------------------------------------------------- * hypre_BoomerAMGBuildCoarseOperator *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildCoarseOperator( hypre_ParCSRMatrix *RT, hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *P, hypre_ParCSRMatrix **RAP_ptr ) { hypre_BoomerAMGBuildCoarseOperatorKT( RT, A, P, 0, RAP_ptr); return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGBuildCoarseOperatorKT( hypre_ParCSRMatrix *RT, hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *P, HYPRE_Int keepTranspose, hypre_ParCSRMatrix **RAP_ptr ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RAP] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *RT_diag = hypre_ParCSRMatrixDiag(RT); hypre_CSRMatrix *RT_offd = hypre_ParCSRMatrixOffd(RT); HYPRE_Int num_cols_diag_RT = hypre_CSRMatrixNumCols(RT_diag); HYPRE_Int num_cols_offd_RT = hypre_CSRMatrixNumCols(RT_offd); HYPRE_Int num_rows_offd_RT = hypre_CSRMatrixNumRows(RT_offd); hypre_ParCSRCommPkg *comm_pkg_RT = hypre_ParCSRMatrixCommPkg(RT); HYPRE_Int num_recvs_RT = 0; HYPRE_Int num_sends_RT = 0; HYPRE_Int *send_map_starts_RT; HYPRE_Int *send_map_elmts_RT; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i 
= hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); hypre_CSRMatrix *P_diag = hypre_ParCSRMatrixDiag(P); HYPRE_Real *P_diag_data = hypre_CSRMatrixData(P_diag); HYPRE_Int *P_diag_i = hypre_CSRMatrixI(P_diag); HYPRE_Int *P_diag_j = hypre_CSRMatrixJ(P_diag); hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P); HYPRE_BigInt *col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P); HYPRE_Real *P_offd_data = hypre_CSRMatrixData(P_offd); HYPRE_Int *P_offd_i = hypre_CSRMatrixI(P_offd); HYPRE_Int *P_offd_j = hypre_CSRMatrixJ(P_offd); HYPRE_BigInt first_col_diag_P = hypre_ParCSRMatrixFirstColDiag(P); HYPRE_BigInt last_col_diag_P; HYPRE_Int num_cols_diag_P = hypre_CSRMatrixNumCols(P_diag); HYPRE_Int num_cols_offd_P = hypre_CSRMatrixNumCols(P_offd); HYPRE_BigInt *coarse_partitioning = hypre_ParCSRMatrixColStarts(P); HYPRE_BigInt *RT_partitioning = hypre_ParCSRMatrixColStarts(RT); hypre_ParCSRMatrix *RAP; HYPRE_BigInt *col_map_offd_RAP = NULL; HYPRE_BigInt *new_col_map_offd_RAP = NULL; hypre_CSRMatrix *RAP_int = NULL; HYPRE_Real *RAP_int_data; HYPRE_Int *RAP_int_i; HYPRE_BigInt *RAP_int_j; hypre_CSRMatrix *RAP_ext; HYPRE_Real *RAP_ext_data = NULL; HYPRE_Int *RAP_ext_i = NULL; HYPRE_BigInt *RAP_ext_j = NULL; hypre_CSRMatrix *RAP_diag; HYPRE_Real *RAP_diag_data; HYPRE_Int *RAP_diag_i; HYPRE_Int *RAP_diag_j; hypre_CSRMatrix *RAP_offd; HYPRE_Real *RAP_offd_data = NULL; HYPRE_Int *RAP_offd_i = NULL; HYPRE_Int *RAP_offd_j = NULL; HYPRE_Int RAP_size; HYPRE_Int RAP_ext_size; HYPRE_Int RAP_diag_size; HYPRE_Int RAP_offd_size; HYPRE_Int P_ext_diag_size; HYPRE_Int P_ext_offd_size; HYPRE_BigInt first_col_diag_RAP; HYPRE_BigInt last_col_diag_RAP; HYPRE_Int num_cols_offd_RAP = 0; hypre_CSRMatrix *R_diag; HYPRE_Real *R_diag_data; HYPRE_Int *R_diag_i; HYPRE_Int *R_diag_j; hypre_CSRMatrix *R_offd; HYPRE_Real *R_offd_data; HYPRE_Int *R_offd_i; HYPRE_Int 
*R_offd_j; HYPRE_Real *RA_diag_data_array = NULL; HYPRE_Int *RA_diag_j_array = NULL; HYPRE_Real *RA_offd_data_array = NULL; HYPRE_Int *RA_offd_j_array = NULL; hypre_CSRMatrix *Ps_ext; HYPRE_Real *Ps_ext_data; HYPRE_Int *Ps_ext_i; HYPRE_BigInt *Ps_ext_j; HYPRE_Real *P_ext_diag_data = NULL; HYPRE_Int *P_ext_diag_i = NULL; HYPRE_Int *P_ext_diag_j = NULL; HYPRE_Real *P_ext_offd_data = NULL; HYPRE_Int *P_ext_offd_i = NULL; HYPRE_Int *P_ext_offd_j = NULL; HYPRE_BigInt *P_big_offd_j = NULL; HYPRE_BigInt *col_map_offd_Pext; HYPRE_Int *map_P_to_Pext = NULL; HYPRE_Int *map_P_to_RAP = NULL; HYPRE_Int *map_Pext_to_RAP = NULL; HYPRE_Int *P_marker; HYPRE_Int **P_mark_array; HYPRE_Int **A_mark_array; HYPRE_Int *A_marker; HYPRE_BigInt *temp; HYPRE_BigInt n_coarse, n_coarse_RT; HYPRE_Int square = 1; HYPRE_Int num_cols_offd_Pext = 0; HYPRE_Int ic, i, j, k; HYPRE_Int i1, i2, i3, ii, ns, ne, size, rest; HYPRE_Int cnt = 0; /*value; */ HYPRE_Int jj1, jj2, jj3, jcol; HYPRE_Int *jj_count, *jj_cnt_diag, *jj_cnt_offd; HYPRE_Int jj_counter, jj_count_diag, jj_count_offd; HYPRE_Int jj_row_begining, jj_row_begin_diag, jj_row_begin_offd; HYPRE_Int start_indexing = 0; /* start indexing for RAP_data at 0 */ HYPRE_Int num_nz_cols_A; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Real r_entry; HYPRE_Real r_a_product; HYPRE_Real r_a_p_product; HYPRE_Real zero = 0.0; HYPRE_Int *prefix_sum_workspace; /*----------------------------------------------------------------------- * Copy ParCSRMatrix RT into CSRMatrix R so that we have row-wise access * to restriction . 
*-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm,&num_procs); num_threads = hypre_NumThreads(); if (comm_pkg_RT) { num_recvs_RT = hypre_ParCSRCommPkgNumRecvs(comm_pkg_RT); num_sends_RT = hypre_ParCSRCommPkgNumSends(comm_pkg_RT); send_map_starts_RT =hypre_ParCSRCommPkgSendMapStarts(comm_pkg_RT); send_map_elmts_RT = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_RT); } else if (num_procs > 1) { hypre_MatvecCommPkgCreate(RT); comm_pkg_RT = hypre_ParCSRMatrixCommPkg(RT); num_recvs_RT = hypre_ParCSRCommPkgNumRecvs(comm_pkg_RT); num_sends_RT = hypre_ParCSRCommPkgNumSends(comm_pkg_RT); send_map_starts_RT =hypre_ParCSRCommPkgSendMapStarts(comm_pkg_RT); send_map_elmts_RT = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_RT); } hypre_CSRMatrixTranspose(RT_diag,&R_diag,1); if (num_cols_offd_RT) { hypre_CSRMatrixTranspose(RT_offd,&R_offd,1); R_offd_data = hypre_CSRMatrixData(R_offd); R_offd_i = hypre_CSRMatrixI(R_offd); R_offd_j = hypre_CSRMatrixJ(R_offd); } /*----------------------------------------------------------------------- * Access the CSR vectors for R. Also get sizes of fine and * coarse grids. *-----------------------------------------------------------------------*/ R_diag_data = hypre_CSRMatrixData(R_diag); R_diag_i = hypre_CSRMatrixI(R_diag); R_diag_j = hypre_CSRMatrixJ(R_diag); n_coarse = hypre_ParCSRMatrixGlobalNumCols(P); num_nz_cols_A = num_cols_diag_A + num_cols_offd_A; n_coarse_RT = hypre_ParCSRMatrixGlobalNumCols(RT); if (n_coarse != n_coarse_RT) square = 0; /*----------------------------------------------------------------------- * Generate Ps_ext, i.e. 
portion of P that is stored on neighbor procs * and needed locally for triple matrix product *-----------------------------------------------------------------------*/ #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_UnorderedIntMap send_map_elmts_RT_inverse_map; HYPRE_Int *send_map_elmts_starts_RT_aggregated = NULL; HYPRE_Int *send_map_elmts_RT_aggregated = NULL; HYPRE_Int send_map_elmts_RT_inverse_map_initialized = num_sends_RT > 0 && send_map_starts_RT[num_sends_RT] - send_map_starts_RT[0] > 0; if (send_map_elmts_RT_inverse_map_initialized) { hypre_UnorderedIntSet send_map_elmts_set; hypre_UnorderedIntSetCreate(&send_map_elmts_set, 2*(send_map_starts_RT[num_sends_RT] - send_map_starts_RT[0]), 16*hypre_NumThreads()); #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = send_map_starts_RT[0]; i < send_map_starts_RT[num_sends_RT]; i++) { HYPRE_Int key = send_map_elmts_RT[i]; hypre_UnorderedIntSetPut(&send_map_elmts_set, key); } HYPRE_Int send_map_elmts_unique_size; HYPRE_Int *send_map_elmts_unique = hypre_UnorderedIntSetCopyToArray(&send_map_elmts_set, &send_map_elmts_unique_size); hypre_UnorderedIntSetDestroy(&send_map_elmts_set); hypre_UnorderedIntMapCreate(&send_map_elmts_RT_inverse_map, 2*send_map_elmts_unique_size, 16*hypre_NumThreads()); #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = 0; i < send_map_elmts_unique_size; i++) { hypre_UnorderedIntMapPutIfAbsent(&send_map_elmts_RT_inverse_map, send_map_elmts_unique[i], i); } hypre_TFree(send_map_elmts_unique, HYPRE_MEMORY_HOST); send_map_elmts_starts_RT_aggregated = hypre_TAlloc(HYPRE_Int, send_map_elmts_unique_size + 1, HYPRE_MEMORY_HOST); send_map_elmts_RT_aggregated = hypre_TAlloc(HYPRE_Int, send_map_starts_RT[num_sends_RT], HYPRE_MEMORY_HOST); #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = 0; i < send_map_elmts_unique_size; i++) { send_map_elmts_starts_RT_aggregated[i] = 0; } #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = send_map_starts_RT[0]; i < send_map_starts_RT[num_sends_RT]; i++) { HYPRE_Int 
idx = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, send_map_elmts_RT[i]); #pragma omp atomic send_map_elmts_starts_RT_aggregated[idx]++; } for (i = 0; i < send_map_elmts_unique_size - 1; i++) { send_map_elmts_starts_RT_aggregated[i + 1] += send_map_elmts_starts_RT_aggregated[i]; } send_map_elmts_starts_RT_aggregated[send_map_elmts_unique_size] = send_map_starts_RT[num_sends_RT]; #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = send_map_starts_RT[num_sends_RT] - 1; i >= send_map_starts_RT[0]; i--) { HYPRE_Int idx = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, send_map_elmts_RT[i]); HYPRE_Int offset = hypre_fetch_and_add(send_map_elmts_starts_RT_aggregated + idx, -1) - 1; send_map_elmts_RT_aggregated[offset] = i; } } #endif /* HYPRE_CONCURRENT_HOPSCOTCH */ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] -= hypre_MPI_Wtime(); #endif if (num_procs > 1) { Ps_ext = hypre_ParCSRMatrixExtractBExt(P,A,1); Ps_ext_data = hypre_CSRMatrixData(Ps_ext); Ps_ext_i = hypre_CSRMatrixI(Ps_ext); Ps_ext_j = hypre_CSRMatrixBigJ(Ps_ext); } P_ext_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST); P_ext_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST); P_ext_diag_i[0] = 0; P_ext_offd_i[0] = 0; P_ext_diag_size = 0; P_ext_offd_size = 0; last_col_diag_P = first_col_diag_P + (HYPRE_BigInt) num_cols_diag_P - 1; /*HYPRE_Int prefix_sum_workspace[2*(num_threads + 1)];*/ prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(num_threads + 1), HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j) #endif /* This threading causes problem, maybe the prefix_sum in combination with BigInt? 
*/ { HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_A); HYPRE_Int P_ext_diag_size_private = 0; HYPRE_Int P_ext_offd_size_private = 0; for (i = i_begin; i < i_end; i++) { for (j=Ps_ext_i[i]; j < Ps_ext_i[i+1]; j++) if (Ps_ext_j[j] < first_col_diag_P || Ps_ext_j[j] > last_col_diag_P) P_ext_offd_size_private++; else P_ext_diag_size_private++; } hypre_prefix_sum_pair(&P_ext_diag_size_private, &P_ext_diag_size, &P_ext_offd_size_private, &P_ext_offd_size, prefix_sum_workspace); #ifdef HYPRE_USING_OPENMP #pragma omp master #endif { if (P_ext_diag_size) { P_ext_diag_j = hypre_CTAlloc(HYPRE_Int, P_ext_diag_size, HYPRE_MEMORY_HOST); P_ext_diag_data = hypre_CTAlloc(HYPRE_Real, P_ext_diag_size, HYPRE_MEMORY_HOST); } if (P_ext_offd_size) { P_ext_offd_j = hypre_CTAlloc(HYPRE_Int, P_ext_offd_size, HYPRE_MEMORY_HOST); P_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, P_ext_offd_size, HYPRE_MEMORY_HOST); P_ext_offd_data = hypre_CTAlloc(HYPRE_Real, P_ext_offd_size, HYPRE_MEMORY_HOST); //temp = hypre_CTAlloc(HYPRE_BigInt, P_ext_offd_size+num_cols_offd_P, HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i = i_begin; i < i_end; i++) { for (j=Ps_ext_i[i]; j < Ps_ext_i[i+1]; j++) { HYPRE_BigInt value = Ps_ext_j[j]; if (value < first_col_diag_P || value > last_col_diag_P) { //Ps_ext_j[P_ext_offd_size_private] = value; //temp[P_ext_offd_size_private] = value; P_big_offd_j[P_ext_offd_size_private] = value; P_ext_offd_data[P_ext_offd_size_private++] = Ps_ext_data[j]; } else { P_ext_diag_j[P_ext_diag_size_private] = (HYPRE_Int)(Ps_ext_j[j] - first_col_diag_P); P_ext_diag_data[P_ext_diag_size_private++] = Ps_ext_data[j]; } } P_ext_diag_i[i+1] = P_ext_diag_size_private; P_ext_offd_i[i+1] = P_ext_offd_size_private; } } /* omp parallel */ hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_CSRMatrixDestroy(Ps_ext); Ps_ext = NULL; } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (P_ext_offd_size || 
num_cols_offd_P) { hypre_UnorderedBigIntSet found_set; hypre_UnorderedBigIntSetCreate(&found_set, P_ext_offd_size + num_cols_offd_P, 16*hypre_NumThreads()); #pragma omp parallel private(i) { #pragma omp for HYPRE_SMP_SCHEDULE for (i = 0; i < P_ext_offd_size; i++) { //hypre_UnorderedBigIntSetPut(&found_set, Ps_ext_j[i]); hypre_UnorderedBigIntSetPut(&found_set, P_big_offd_j[i]); } #pragma omp for HYPRE_SMP_SCHEDULE for (i = 0; i < num_cols_offd_P; i++) { hypre_UnorderedBigIntSetPut(&found_set, col_map_offd_P[i]); } } /* omp parallel */ /* Warning on getting temp right !!!!! */ temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_Pext); hypre_UnorderedBigIntSetDestroy(&found_set); hypre_UnorderedBigIntMap col_map_offd_Pext_inverse; hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_Pext, &col_map_offd_Pext, &col_map_offd_Pext_inverse); #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i=0 ; i < P_ext_offd_size; i++) //Ps_ext_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_Pext_inverse, Ps_ext_j[i]); P_ext_offd_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_Pext_inverse, P_big_offd_j[i]); if (num_cols_offd_Pext) hypre_UnorderedBigIntMapDestroy(&col_map_offd_Pext_inverse); } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ if (P_ext_offd_size || num_cols_offd_P) { temp = hypre_CTAlloc(HYPRE_BigInt, P_ext_offd_size+num_cols_offd_P, HYPRE_MEMORY_HOST); for (i=0; i < P_ext_offd_size; i++) //Ps_ext_j[i] = temp[i]; //temp[i] = Ps_ext_j[i]; temp[i] = P_big_offd_j[i]; cnt = P_ext_offd_size; for (i=0; i < num_cols_offd_P; i++) temp[cnt++] = col_map_offd_P[i]; } if (cnt) { hypre_BigQsort0(temp, 0, cnt-1); num_cols_offd_Pext = 1; HYPRE_BigInt value = temp[0]; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_Pext++] = value; } } } if (num_cols_offd_Pext) col_map_offd_Pext = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_Pext, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_Pext; i++) col_map_offd_Pext[i] = temp[i]; if 
(P_ext_offd_size || num_cols_offd_P) hypre_TFree(temp, HYPRE_MEMORY_HOST); /*if (P_ext_offd_size) P_ext_offd_j = hypre_CTAlloc(HYPRE_Int, P_ext_offd_size, HYPRE_MEMORY_HOST);*/ for (i=0 ; i < P_ext_offd_size; i++) P_ext_offd_j[i] = hypre_BigBinarySearch(col_map_offd_Pext, //Ps_ext_j[i], P_big_offd_j[i], num_cols_offd_Pext); #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ if (P_ext_offd_size) hypre_TFree(P_big_offd_j, HYPRE_MEMORY_HOST); /*if (num_procs > 1) { hypre_CSRMatrixDestroy(Ps_ext); Ps_ext = NULL; }*/ if (num_cols_offd_P) { map_P_to_Pext = hypre_CTAlloc(HYPRE_Int, num_cols_offd_P, HYPRE_MEMORY_HOST); cnt = 0; for (i=0; i < num_cols_offd_Pext; i++) if (col_map_offd_Pext[i] == col_map_offd_P[cnt]) { map_P_to_Pext[cnt++] = i; if (cnt == num_cols_offd_P) break; } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] += hypre_MPI_Wtime(); #endif /*----------------------------------------------------------------------- * First Pass: Determine size of RAP_int and set up RAP_int_i if there * are more than one processor and nonzero elements in R_offd *-----------------------------------------------------------------------*/ P_mark_array = hypre_CTAlloc(HYPRE_Int *, num_threads, HYPRE_MEMORY_HOST); A_mark_array = hypre_CTAlloc(HYPRE_Int *, num_threads, HYPRE_MEMORY_HOST); if (num_cols_offd_RT) { jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker) HYPRE_SMP_SCHEDULE #endif for (ii = 0; ii < num_threads; ii++) { size = num_cols_offd_RT/num_threads; rest = num_cols_offd_RT - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } /*----------------------------------------------------------------------- * Allocate marker arrays. 
*-----------------------------------------------------------------------*/ if (num_cols_offd_Pext || num_cols_diag_P) { P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P+num_cols_offd_Pext, HYPRE_MEMORY_HOST); P_marker = P_mark_array[ii]; } A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A, HYPRE_MEMORY_HOST); A_marker = A_mark_array[ii]; /*----------------------------------------------------------------------- * Initialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; for (ic = 0; ic < num_cols_diag_P+num_cols_offd_Pext; ic++) { P_marker[ic] = -1; } for (i = 0; i < num_nz_cols_A; i++) { A_marker[i] = -1; } /*----------------------------------------------------------------------- * Loop over exterior c-points *-----------------------------------------------------------------------*/ for (ic = ns; ic < ne; ic++) { jj_row_begining = jj_counter; /*-------------------------------------------------------------------- * Loop over entries in row ic of R_offd. *--------------------------------------------------------------------*/ for (jj1 = R_offd_i[ic]; jj1 < R_offd_i[ic+1]; jj1++) { i1 = R_offd_j[jj1]; /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++) { i2 = A_offd_j[jj2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (A_marker[i2] != ic) { /*----------------------------------------------------------- * Mark i2 as visited. 
*-----------------------------------------------------------*/ A_marker[i2] = ic; /*----------------------------------------------------------- * Loop over entries in row i2 of P_ext. *-----------------------------------------------------------*/ for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++) { i3 = P_ext_diag_j[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; jj_counter++; } } for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++) { i3 = P_ext_offd_j[jj3] + num_cols_diag_P; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; jj_counter++; } } } } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++) { i2 = A_diag_j[jj2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (A_marker[i2+num_cols_offd_A] != ic) { /*----------------------------------------------------------- * Mark i2 as visited. *-----------------------------------------------------------*/ A_marker[i2+num_cols_offd_A] = ic; /*----------------------------------------------------------- * Loop over entries in row i2 of P_diag. 
*-----------------------------------------------------------*/ for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++) { i3 = P_diag_j[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; jj_counter++; } } /*----------------------------------------------------------- * Loop over entries in row i2 of P_offd. *-----------------------------------------------------------*/ for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++) { i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; jj_counter++; } } } } } } jj_count[ii] = jj_counter; } /*----------------------------------------------------------------------- * Allocate RAP_int_data and RAP_int_j arrays. *-----------------------------------------------------------------------*/ for (i = 0; i < num_threads-1; i++) jj_count[i+1] += jj_count[i]; RAP_size = jj_count[num_threads-1]; RAP_int_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_RT+1, HYPRE_MEMORY_HOST); RAP_int_data = hypre_CTAlloc(HYPRE_Real, RAP_size, HYPRE_MEMORY_HOST); RAP_int_j = hypre_CTAlloc(HYPRE_BigInt, RAP_size, HYPRE_MEMORY_HOST); RAP_int_i[num_cols_offd_RT] = RAP_size; /*----------------------------------------------------------------------- * Second Pass: Fill in RAP_int_data and RAP_int_j. 
*-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE #endif for (ii = 0; ii < num_threads; ii++) { size = num_cols_offd_RT/num_threads; rest = num_cols_offd_RT - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } /*----------------------------------------------------------------------- * Initialize some stuff. *-----------------------------------------------------------------------*/ if (num_cols_offd_Pext || num_cols_diag_P) P_marker = P_mark_array[ii]; A_marker = A_mark_array[ii]; jj_counter = start_indexing; if (ii > 0) jj_counter = jj_count[ii-1]; for (ic = 0; ic < num_cols_diag_P+num_cols_offd_Pext; ic++) { P_marker[ic] = -1; } for (i = 0; i < num_nz_cols_A; i++) { A_marker[i] = -1; } /*----------------------------------------------------------------------- * Loop over exterior c-points. *-----------------------------------------------------------------------*/ for (ic = ns; ic < ne; ic++) { jj_row_begining = jj_counter; RAP_int_i[ic] = jj_counter; /*-------------------------------------------------------------------- * Loop over entries in row ic of R_offd. *--------------------------------------------------------------------*/ for (jj1 = R_offd_i[ic]; jj1 < R_offd_i[ic+1]; jj1++) { i1 = R_offd_j[jj1]; r_entry = R_offd_data[jj1]; /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++) { i2 = A_offd_j[jj2]; r_a_product = r_entry * A_offd_data[jj2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. 
New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (A_marker[i2] != ic) { /*----------------------------------------------------------- * Mark i2 as visited. *-----------------------------------------------------------*/ A_marker[i2] = ic; /*----------------------------------------------------------- * Loop over entries in row i2 of P_ext. *-----------------------------------------------------------*/ for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++) { i3 = P_ext_diag_j[jj3]; r_a_p_product = r_a_product * P_ext_diag_data[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; RAP_int_data[jj_counter] = r_a_p_product; RAP_int_j[jj_counter] = (HYPRE_BigInt)i3 + first_col_diag_P; jj_counter++; } else { RAP_int_data[P_marker[i3]] += r_a_p_product; } } for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++) { i3 = P_ext_offd_j[jj3] + num_cols_diag_P; r_a_p_product = r_a_product * P_ext_offd_data[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; RAP_int_data[jj_counter] = r_a_p_product; RAP_int_j[jj_counter] = col_map_offd_Pext[i3-num_cols_diag_P]; jj_counter++; } else { RAP_int_data[P_marker[i3]] += r_a_p_product; } } } /*-------------------------------------------------------------- * If i2 is previously visited ( A_marker[12]=ic ) it yields * no new entries in RAP and can just add new contributions. 
*--------------------------------------------------------------*/ else { for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++) { i3 = P_ext_diag_j[jj3]; r_a_p_product = r_a_product * P_ext_diag_data[jj3]; RAP_int_data[P_marker[i3]] += r_a_p_product; } for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++) { i3 = P_ext_offd_j[jj3] + num_cols_diag_P; r_a_p_product = r_a_product * P_ext_offd_data[jj3]; RAP_int_data[P_marker[i3]] += r_a_p_product; } } } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++) { i2 = A_diag_j[jj2]; r_a_product = r_entry * A_diag_data[jj2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (A_marker[i2+num_cols_offd_A] != ic) { /*----------------------------------------------------------- * Mark i2 as visited. *-----------------------------------------------------------*/ A_marker[i2+num_cols_offd_A] = ic; /*----------------------------------------------------------- * Loop over entries in row i2 of P_diag. *-----------------------------------------------------------*/ for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++) { i3 = P_diag_j[jj3]; r_a_p_product = r_a_product * P_diag_data[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. 
*--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; RAP_int_data[jj_counter] = r_a_p_product; RAP_int_j[jj_counter] = (HYPRE_BigInt)i3 + first_col_diag_P; jj_counter++; } else { RAP_int_data[P_marker[i3]] += r_a_p_product; } } for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++) { i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P; r_a_p_product = r_a_product * P_offd_data[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; RAP_int_data[jj_counter] = r_a_p_product; RAP_int_j[jj_counter] = col_map_offd_Pext[i3-num_cols_diag_P]; jj_counter++; } else { RAP_int_data[P_marker[i3]] += r_a_p_product; } } } /*-------------------------------------------------------------- * If i2 is previously visited ( A_marker[12]=ic ) it yields * no new entries in RAP and can just add new contributions. 
*--------------------------------------------------------------*/ else { for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++) { i3 = P_diag_j[jj3]; r_a_p_product = r_a_product * P_diag_data[jj3]; RAP_int_data[P_marker[i3]] += r_a_p_product; } for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++) { i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P; r_a_p_product = r_a_product * P_offd_data[jj3]; RAP_int_data[P_marker[i3]] += r_a_p_product; } } } } } if (num_cols_offd_Pext || num_cols_diag_P) hypre_TFree(P_mark_array[ii], HYPRE_MEMORY_HOST); hypre_TFree(A_mark_array[ii], HYPRE_MEMORY_HOST); } RAP_int = hypre_CSRMatrixCreate(num_cols_offd_RT,num_rows_offd_RT,RAP_size); hypre_CSRMatrixMemoryLocation(RAP_int) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(RAP_int) = RAP_int_i; hypre_CSRMatrixBigJ(RAP_int) = RAP_int_j; hypre_CSRMatrixData(RAP_int) = RAP_int_data; hypre_TFree(jj_count, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] -= hypre_MPI_Wtime(); #endif RAP_ext_size = 0; if (num_sends_RT || num_recvs_RT) { void *request; hypre_ExchangeExternalRowsInit(RAP_int, comm_pkg_RT, &request); RAP_ext = hypre_ExchangeExternalRowsWait(request); RAP_ext_i = hypre_CSRMatrixI(RAP_ext); RAP_ext_j = hypre_CSRMatrixBigJ(RAP_ext); RAP_ext_data = hypre_CSRMatrixData(RAP_ext); RAP_ext_size = RAP_ext_i[hypre_CSRMatrixNumRows(RAP_ext)]; } if (num_cols_offd_RT) { hypre_CSRMatrixDestroy(RAP_int); RAP_int = NULL; } RAP_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_diag_RT+1, HYPRE_MEMORY_SHARED); RAP_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_diag_RT+1, HYPRE_MEMORY_SHARED); first_col_diag_RAP = first_col_diag_P; last_col_diag_RAP = first_col_diag_P + num_cols_diag_P - 1; /*----------------------------------------------------------------------- * check for new nonzero columns in RAP_offd generated through RAP_ext 
*-----------------------------------------------------------------------*/ #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_UnorderedBigIntMap col_map_offd_RAP_inverse; if (RAP_ext_size || num_cols_offd_Pext) { hypre_UnorderedBigIntSet found_set; hypre_UnorderedBigIntSetCreate(&found_set, 2*(RAP_ext_size + num_cols_offd_Pext), 16*hypre_NumThreads()); cnt = 0; #pragma omp parallel private(i) { #pragma omp for HYPRE_SMP_SCHEDULE for (i = 0; i < RAP_ext_size; i++) { if (RAP_ext_j[i] < first_col_diag_RAP || RAP_ext_j[i] > last_col_diag_RAP) hypre_UnorderedBigIntSetPut(&found_set, RAP_ext_j[i]); } #pragma omp for HYPRE_SMP_SCHEDULE for (i = 0; i < num_cols_offd_Pext; i++) { hypre_UnorderedBigIntSetPut(&found_set, col_map_offd_Pext[i]); } } /* omp parallel */ temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_RAP); hypre_UnorderedBigIntSetDestroy(&found_set); hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_RAP, &col_map_offd_RAP, &col_map_offd_RAP_inverse); } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ if (RAP_ext_size || num_cols_offd_Pext) { temp = hypre_CTAlloc(HYPRE_BigInt, RAP_ext_size+num_cols_offd_Pext, HYPRE_MEMORY_HOST); cnt = 0; for (i=0; i < RAP_ext_size; i++) if (RAP_ext_j[i] < first_col_diag_RAP || RAP_ext_j[i] > last_col_diag_RAP) temp[cnt++] = RAP_ext_j[i]; for (i=0; i < num_cols_offd_Pext; i++) temp[cnt++] = col_map_offd_Pext[i]; if (cnt) { hypre_BigQsort0(temp,0,cnt-1); HYPRE_BigInt value = temp[0]; num_cols_offd_RAP = 1; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_RAP++] = value; } } } /* now evaluate col_map_offd_RAP */ if (num_cols_offd_RAP) col_map_offd_RAP = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_RAP, HYPRE_MEMORY_HOST); for (i=0 ; i < num_cols_offd_RAP; i++) col_map_offd_RAP[i] = temp[i]; hypre_TFree(temp, HYPRE_MEMORY_HOST); } #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ if (num_cols_offd_P) { map_P_to_RAP = hypre_TAlloc(HYPRE_Int, num_cols_offd_P, HYPRE_MEMORY_HOST); cnt = 0; for (i=0; i < 
num_cols_offd_RAP; i++) if (col_map_offd_RAP[i] == col_map_offd_P[cnt]) { map_P_to_RAP[cnt++] = i; if (cnt == num_cols_offd_P) break; } } if (num_cols_offd_Pext) { map_Pext_to_RAP = hypre_TAlloc(HYPRE_Int, num_cols_offd_Pext, HYPRE_MEMORY_HOST); cnt = 0; for (i=0; i < num_cols_offd_RAP; i++) if (col_map_offd_RAP[i] == col_map_offd_Pext[cnt]) { map_Pext_to_RAP[cnt++] = i; if (cnt == num_cols_offd_Pext) break; } } /*----------------------------------------------------------------------- * Convert RAP_ext column indices *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i=0; i < RAP_ext_size; i++) if (RAP_ext_j[i] < first_col_diag_RAP || RAP_ext_j[i] > last_col_diag_RAP) RAP_ext_j[i] = (HYPRE_BigInt)num_cols_diag_P #ifdef HYPRE_CONCURRENT_HOPSCOTCH +(HYPRE_BigInt)hypre_UnorderedBigIntMapGet(&col_map_offd_RAP_inverse, RAP_ext_j[i]); #else +(HYPRE_BigInt)hypre_BigBinarySearch(col_map_offd_RAP, RAP_ext_j[i],num_cols_offd_RAP); #endif else RAP_ext_j[i] -= first_col_diag_RAP; #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (num_cols_offd_RAP) hypre_UnorderedBigIntMapDestroy(&col_map_offd_RAP_inverse); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] += hypre_MPI_Wtime(); #endif /* need to allocate new P_marker etc. and make further changes */ /*----------------------------------------------------------------------- * Initialize some stuff. 
*-----------------------------------------------------------------------*/ jj_cnt_diag = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_cnt_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker) HYPRE_SMP_SCHEDULE #endif for (ii = 0; ii < num_threads; ii++) { size = num_cols_diag_RT/num_threads; rest = num_cols_diag_RT - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P+num_cols_offd_RAP, HYPRE_MEMORY_HOST); A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A, HYPRE_MEMORY_HOST); P_marker = P_mark_array[ii]; A_marker = A_mark_array[ii]; jj_count_diag = start_indexing; jj_count_offd = start_indexing; for (ic = 0; ic < num_cols_diag_P+num_cols_offd_RAP; ic++) { P_marker[ic] = -1; } for (i = 0; i < num_nz_cols_A; i++) { A_marker[i] = -1; } /*----------------------------------------------------------------------- * Loop over interior c-points. *-----------------------------------------------------------------------*/ for (ic = ns; ic < ne; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, RAP_{ic,ic}. 
and for all points * being added to row ic of RAP_diag and RAP_offd through RAP_ext *--------------------------------------------------------------------*/ jj_row_begin_diag = jj_count_diag; jj_row_begin_offd = jj_count_offd; if (square) P_marker[ic] = jj_count_diag++; #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (send_map_elmts_RT_inverse_map_initialized) { HYPRE_Int i = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic); if (i != -1) { for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1]; j++) { HYPRE_Int jj = send_map_elmts_RT_aggregated[j]; for (k=RAP_ext_i[jj]; k < RAP_ext_i[jj+1]; k++) { jcol = (HYPRE_Int)RAP_ext_j[k]; if (jcol < num_cols_diag_P) { if (P_marker[jcol] < jj_row_begin_diag) { P_marker[jcol] = jj_count_diag; jj_count_diag++; } } else { if (P_marker[jcol] < jj_row_begin_offd) { P_marker[jcol] = jj_count_offd; jj_count_offd++; } } } } } // if (set) } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ for (i=0; i < num_sends_RT; i++) for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i+1]; j++) if (send_map_elmts_RT[j] == ic) { for (k=RAP_ext_i[j]; k < RAP_ext_i[j+1]; k++) { jcol = (HYPRE_Int) RAP_ext_j[k]; if (jcol < num_cols_diag_P) { if (P_marker[jcol] < jj_row_begin_diag) { P_marker[jcol] = jj_count_diag; jj_count_diag++; } } else { if (P_marker[jcol] < jj_row_begin_offd) { P_marker[jcol] = jj_count_offd; jj_count_offd++; } } } break; } #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ /*-------------------------------------------------------------------- * Loop over entries in row ic of R_diag. *--------------------------------------------------------------------*/ for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic+1]; jj1++) { i1 = R_diag_j[jj1]; /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. 
*-----------------------------------------------------------------*/ if (num_cols_offd_A) { for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++) { i2 = A_offd_j[jj2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (A_marker[i2] != ic) { /*----------------------------------------------------------- * Mark i2 as visited. *-----------------------------------------------------------*/ A_marker[i2] = ic; /*----------------------------------------------------------- * Loop over entries in row i2 of P_ext. *-----------------------------------------------------------*/ for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++) { i3 = P_ext_diag_j[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begin_diag) { P_marker[i3] = jj_count_diag; jj_count_diag++; } } for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++) { i3 = map_Pext_to_RAP[P_ext_offd_j[jj3]]+num_cols_diag_P; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begin_offd) { P_marker[i3] = jj_count_offd; jj_count_offd++; } } } } } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_diag. 
*-----------------------------------------------------------------*/ for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++) { i2 = A_diag_j[jj2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (A_marker[i2+num_cols_offd_A] != ic) { /*----------------------------------------------------------- * Mark i2 as visited. *-----------------------------------------------------------*/ A_marker[i2+num_cols_offd_A] = ic; /*----------------------------------------------------------- * Loop over entries in row i2 of P_diag. *-----------------------------------------------------------*/ for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++) { i3 = P_diag_j[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begin_diag) { P_marker[i3] = jj_count_diag; jj_count_diag++; } } /*----------------------------------------------------------- * Loop over entries in row i2 of P_offd. *-----------------------------------------------------------*/ if (num_cols_offd_P) { for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++) { i3 = map_P_to_RAP[P_offd_j[jj3]] + num_cols_diag_P; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begin_offd) { P_marker[i3] = jj_count_offd; jj_count_offd++; } } } } } } /*-------------------------------------------------------------------- * Set RAP_diag_i and RAP_offd_i for this row. 
*--------------------------------------------------------------------*/ /* RAP_diag_i[ic] = jj_row_begin_diag; RAP_offd_i[ic] = jj_row_begin_offd; */ } jj_cnt_diag[ii] = jj_count_diag; jj_cnt_offd[ii] = jj_count_offd; } for (i=0; i < num_threads-1; i++) { jj_cnt_diag[i+1] += jj_cnt_diag[i]; jj_cnt_offd[i+1] += jj_cnt_offd[i]; } jj_count_diag = jj_cnt_diag[num_threads-1]; jj_count_offd = jj_cnt_offd[num_threads-1]; RAP_diag_i[num_cols_diag_RT] = jj_count_diag; RAP_offd_i[num_cols_diag_RT] = jj_count_offd; /*----------------------------------------------------------------------- * Allocate RAP_diag_data and RAP_diag_j arrays. * Allocate RAP_offd_data and RAP_offd_j arrays. *-----------------------------------------------------------------------*/ RAP_diag_size = jj_count_diag; if (RAP_diag_size) { RAP_diag_data = hypre_CTAlloc(HYPRE_Real, RAP_diag_size, HYPRE_MEMORY_SHARED); RAP_diag_j = hypre_CTAlloc(HYPRE_Int, RAP_diag_size, HYPRE_MEMORY_SHARED); } RAP_offd_size = jj_count_offd; if (RAP_offd_size) { RAP_offd_data = hypre_CTAlloc(HYPRE_Real, RAP_offd_size, HYPRE_MEMORY_SHARED); RAP_offd_j = hypre_CTAlloc(HYPRE_Int, RAP_offd_size, HYPRE_MEMORY_SHARED); } if (RAP_offd_size == 0 && num_cols_offd_RAP != 0) { num_cols_offd_RAP = 0; hypre_TFree(col_map_offd_RAP, HYPRE_MEMORY_HOST); } RA_diag_data_array = hypre_TAlloc(HYPRE_Real, num_cols_diag_A*num_threads, HYPRE_MEMORY_HOST); RA_diag_j_array = hypre_TAlloc(HYPRE_Int, num_cols_diag_A*num_threads, HYPRE_MEMORY_HOST); if (num_cols_offd_A) { RA_offd_data_array = hypre_TAlloc(HYPRE_Real, num_cols_offd_A*num_threads, HYPRE_MEMORY_HOST); RA_offd_j_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_A*num_threads, HYPRE_MEMORY_HOST); } /*----------------------------------------------------------------------- * Second Pass: Fill in RAP_diag_data and RAP_diag_j. * Second Pass: Fill in RAP_offd_data and RAP_offd_j. 
*-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE #endif for (ii = 0; ii < num_threads; ii++) { size = num_cols_diag_RT/num_threads; rest = num_cols_diag_RT - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } /*----------------------------------------------------------------------- * Initialize some stuff. *-----------------------------------------------------------------------*/ P_marker = P_mark_array[ii]; A_marker = A_mark_array[ii]; for (ic = 0; ic < num_cols_diag_P+num_cols_offd_RAP; ic++) { P_marker[ic] = -1; } for (i = 0; i < num_nz_cols_A ; i++) { A_marker[i] = -1; } jj_count_diag = start_indexing; jj_count_offd = start_indexing; if (ii > 0) { jj_count_diag = jj_cnt_diag[ii-1]; jj_count_offd = jj_cnt_offd[ii-1]; } // temporal matrix RA = R*A // only need to store one row per thread because R*A and (R*A)*P are fused // into one loop. hypre_CSRMatrix RA_diag, RA_offd; RA_diag.data = RA_diag_data_array + num_cols_diag_A*ii; RA_diag.j = RA_diag_j_array + num_cols_diag_A*ii; RA_diag.num_nonzeros = 0; RA_offd.num_nonzeros = 0; if (num_cols_offd_A) { RA_offd.data = RA_offd_data_array + num_cols_offd_A*ii; RA_offd.j = RA_offd_j_array + num_cols_offd_A*ii; } /*----------------------------------------------------------------------- * Loop over interior c-points. 
*-----------------------------------------------------------------------*/ for (ic = ns; ic < ne; ic++) { /*-------------------------------------------------------------------- * Create diagonal entry, RAP_{ic,ic} and add entries of RAP_ext *--------------------------------------------------------------------*/ jj_row_begin_diag = jj_count_diag; jj_row_begin_offd = jj_count_offd; RAP_diag_i[ic] = jj_row_begin_diag; RAP_offd_i[ic] = jj_row_begin_offd; HYPRE_Int ra_row_begin_diag = RA_diag.num_nonzeros; HYPRE_Int ra_row_begin_offd = RA_offd.num_nonzeros; if (square) { P_marker[ic] = jj_count_diag; RAP_diag_data[jj_count_diag] = zero; RAP_diag_j[jj_count_diag] = ic; jj_count_diag++; } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (send_map_elmts_RT_inverse_map_initialized) { HYPRE_Int i = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic); if (i != -1) { for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1]; j++) { HYPRE_Int jj = send_map_elmts_RT_aggregated[j]; for (k=RAP_ext_i[jj]; k < RAP_ext_i[jj+1]; k++) { jcol = (HYPRE_Int)RAP_ext_j[k]; if (jcol < num_cols_diag_P) { if (P_marker[jcol] < jj_row_begin_diag) { P_marker[jcol] = jj_count_diag; RAP_diag_data[jj_count_diag] = RAP_ext_data[k]; RAP_diag_j[jj_count_diag] = jcol; jj_count_diag++; } else RAP_diag_data[P_marker[jcol]] += RAP_ext_data[k]; } else { if (P_marker[jcol] < jj_row_begin_offd) { P_marker[jcol] = jj_count_offd; RAP_offd_data[jj_count_offd] = RAP_ext_data[k]; RAP_offd_j[jj_count_offd] = jcol-num_cols_diag_P; jj_count_offd++; } else RAP_offd_data[P_marker[jcol]] += RAP_ext_data[k]; } } } } // if (set) } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ for (i=0; i < num_sends_RT; i++) for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i+1]; j++) if (send_map_elmts_RT[j] == ic) { for (k=RAP_ext_i[j]; k < RAP_ext_i[j+1]; k++) { jcol = (HYPRE_Int)RAP_ext_j[k]; if (jcol < num_cols_diag_P) { if (P_marker[jcol] < jj_row_begin_diag) { P_marker[jcol] = jj_count_diag; 
RAP_diag_data[jj_count_diag] = RAP_ext_data[k]; RAP_diag_j[jj_count_diag] = jcol; jj_count_diag++; } else RAP_diag_data[P_marker[jcol]] += RAP_ext_data[k]; } else { if (P_marker[jcol] < jj_row_begin_offd) { P_marker[jcol] = jj_count_offd; RAP_offd_data[jj_count_offd] = RAP_ext_data[k]; RAP_offd_j[jj_count_offd] = jcol-num_cols_diag_P; jj_count_offd++; } else RAP_offd_data[P_marker[jcol]] += RAP_ext_data[k]; } } break; } #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ /*-------------------------------------------------------------------- * Loop over entries in row ic of R_diag and compute row ic of RA. *--------------------------------------------------------------------*/ for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic+1]; jj1++) { i1 = R_diag_j[jj1]; r_entry = R_diag_data[jj1]; /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ if (num_cols_offd_A) { for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++) { i2 = A_offd_j[jj2]; HYPRE_Real a_entry = A_offd_data[jj2]; HYPRE_Int marker = A_marker[i2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (marker < ra_row_begin_offd) { /*----------------------------------------------------------- * Mark i2 as visited. *-----------------------------------------------------------*/ A_marker[i2] = RA_offd.num_nonzeros; RA_offd.data[RA_offd.num_nonzeros - ra_row_begin_offd] = r_entry * a_entry; RA_offd.j[RA_offd.num_nonzeros - ra_row_begin_offd] = i2; RA_offd.num_nonzeros++; } /*-------------------------------------------------------------- * If i2 is previously visited ( A_marker[12]=ic ) it yields * no new entries in RA and can just add new contributions. 
*--------------------------------------------------------------*/ else { RA_offd.data[marker - ra_row_begin_offd] += r_entry * a_entry; // JSP: compiler will more likely to generate FMA instructions // when we don't eliminate common subexpressions of // r_entry * A_offd_data[jj2] manually. } } // loop over entries in row i1 of A_offd } // num_cols_offd_A /*----------------------------------------------------------------- * Loop over entries in row i1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++) { i2 = A_diag_j[jj2]; HYPRE_Real a_entry = A_diag_data[jj2]; HYPRE_Int marker = A_marker[i2+num_cols_offd_A]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (marker < ra_row_begin_diag) { /*----------------------------------------------------------- * Mark i2 as visited. *-----------------------------------------------------------*/ A_marker[i2+num_cols_offd_A] = RA_diag.num_nonzeros; RA_diag.data[RA_diag.num_nonzeros - ra_row_begin_diag] = r_entry * a_entry; RA_diag.j[RA_diag.num_nonzeros - ra_row_begin_diag] = i2; RA_diag.num_nonzeros++; } /*-------------------------------------------------------------- * If i2 is previously visited ( A_marker[12]=ic ) it yields * no new entries in RA and can just add new contributions. *--------------------------------------------------------------*/ else { RA_diag.data[marker - ra_row_begin_diag] += r_entry * a_entry; } } // loop over entries in row i1 of A_diag } // loop over entries in row ic of R_diag /*-------------------------------------------------------------------- * Loop over entries in row ic of RA_offd. 
*--------------------------------------------------------------------*/ for (jj1 = ra_row_begin_offd; jj1 < RA_offd.num_nonzeros; jj1++) { i1 = RA_offd.j[jj1 - ra_row_begin_offd]; r_a_product = RA_offd.data[jj1 - ra_row_begin_offd]; /*----------------------------------------------------------- * Loop over entries in row i1 of P_ext. *-----------------------------------------------------------*/ for (jj2 = P_ext_diag_i[i1]; jj2 < P_ext_diag_i[i1+1]; jj2++) { i2 = P_ext_diag_j[jj2]; HYPRE_Real p_entry = P_ext_diag_data[jj2]; HYPRE_Int marker = P_marker[i2]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i2} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (marker < jj_row_begin_diag) { P_marker[i2] = jj_count_diag; RAP_diag_data[jj_count_diag] = r_a_product * p_entry; RAP_diag_j[jj_count_diag] = i2; jj_count_diag++; } else RAP_diag_data[marker] += r_a_product * p_entry; } for (jj2 = P_ext_offd_i[i1]; jj2 < P_ext_offd_i[i1+1]; jj2++) { i2 = map_Pext_to_RAP[P_ext_offd_j[jj2]] + num_cols_diag_P; HYPRE_Real p_entry = P_ext_offd_data[jj2]; HYPRE_Int marker = P_marker[i2]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i2} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (marker < jj_row_begin_offd) { P_marker[i2] = jj_count_offd; RAP_offd_data[jj_count_offd] = r_a_product * p_entry; RAP_offd_j[jj_count_offd] = i2 - num_cols_diag_P; jj_count_offd++; } else RAP_offd_data[marker] += r_a_product * p_entry; } } // loop over entries in row ic of RA_offd /*-------------------------------------------------------------------- * Loop over entries in row ic of RA_diag. 
*--------------------------------------------------------------------*/ for (jj1 = ra_row_begin_diag; jj1 < RA_diag.num_nonzeros; jj1++) { HYPRE_Int i1 = RA_diag.j[jj1 - ra_row_begin_diag]; HYPRE_Real r_a_product = RA_diag.data[jj1 - ra_row_begin_diag]; /*----------------------------------------------------------------- * Loop over entries in row i1 of P_diag. *-----------------------------------------------------------------*/ for (jj2 = P_diag_i[i1]; jj2 < P_diag_i[i1+1]; jj2++) { i2 = P_diag_j[jj2]; HYPRE_Real p_entry = P_diag_data[jj2]; HYPRE_Int marker = P_marker[i2]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i2} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (marker < jj_row_begin_diag) { P_marker[i2] = jj_count_diag; RAP_diag_data[jj_count_diag] = r_a_product * p_entry; RAP_diag_j[jj_count_diag] = i2; jj_count_diag++; } else { RAP_diag_data[marker] += r_a_product * p_entry; } } if (num_cols_offd_P) { for (jj2 = P_offd_i[i1]; jj2 < P_offd_i[i1+1]; jj2++) { i2 = map_P_to_RAP[P_offd_j[jj2]] + num_cols_diag_P; HYPRE_Real p_entry = P_offd_data[jj2]; HYPRE_Int marker = P_marker[i2]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i2} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (marker < jj_row_begin_offd) { P_marker[i2] = jj_count_offd; RAP_offd_data[jj_count_offd] = r_a_product * p_entry; RAP_offd_j[jj_count_offd] = i2 - num_cols_diag_P; jj_count_offd++; } else { RAP_offd_data[marker] += r_a_product * p_entry; } } } // num_cols_offd_P } // loop over entries in row ic of RA_diag. } // Loop over interior c-points. 
hypre_TFree(P_mark_array[ii], HYPRE_MEMORY_HOST); hypre_TFree(A_mark_array[ii], HYPRE_MEMORY_HOST); } // omp parallel for /* check if really all off-diagonal entries occurring in col_map_offd_RAP are represented and eliminate if necessary */ P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_RAP, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_cols_offd_RAP; i++) P_marker[i] = -1; jj_count_offd = 0; #ifdef HYPRE_USING_ATOMIC #pragma omp parallel for private(i3) reduction(+:jj_count_offd) HYPRE_SMP_SCHEDULE #endif for (i=0; i < RAP_offd_size; i++) { i3 = RAP_offd_j[i]; #ifdef HYPRE_USING_ATOMIC if (hypre_compare_and_swap(P_marker + i3, -1, 0) == -1) { jj_count_offd++; } #else if (P_marker[i3]) { P_marker[i3] = 0; jj_count_offd++; } #endif } if (jj_count_offd < num_cols_offd_RAP) { new_col_map_offd_RAP = hypre_CTAlloc(HYPRE_BigInt, jj_count_offd, HYPRE_MEMORY_HOST); jj_counter = 0; for (i=0; i < num_cols_offd_RAP; i++) if (!P_marker[i]) { P_marker[i] = jj_counter; new_col_map_offd_RAP[jj_counter++] = col_map_offd_RAP[i]; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i3) HYPRE_SMP_SCHEDULE #endif for (i=0; i < RAP_offd_size; i++) { i3 = RAP_offd_j[i]; RAP_offd_j[i] = P_marker[i3]; } num_cols_offd_RAP = jj_count_offd; hypre_TFree(col_map_offd_RAP, HYPRE_MEMORY_HOST); col_map_offd_RAP = new_col_map_offd_RAP; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); RAP = hypre_ParCSRMatrixCreate(comm, n_coarse_RT, n_coarse, RT_partitioning, coarse_partitioning, num_cols_offd_RAP, RAP_diag_size, RAP_offd_size); /* Have RAP own coarse_partitioning instead of P */ hypre_ParCSRMatrixSetColStartsOwner(P,0); hypre_ParCSRMatrixSetColStartsOwner(RT,0); RAP_diag = hypre_ParCSRMatrixDiag(RAP); hypre_CSRMatrixI(RAP_diag) = RAP_diag_i; if (RAP_diag_size) { hypre_CSRMatrixData(RAP_diag) = RAP_diag_data; hypre_CSRMatrixJ(RAP_diag) = RAP_diag_j; } RAP_offd = hypre_ParCSRMatrixOffd(RAP); hypre_CSRMatrixI(RAP_offd) = 
RAP_offd_i; if (num_cols_offd_RAP) { hypre_CSRMatrixData(RAP_offd) = RAP_offd_data; hypre_CSRMatrixJ(RAP_offd) = RAP_offd_j; hypre_ParCSRMatrixColMapOffd(RAP) = col_map_offd_RAP; } if (num_procs > 1) { /* hypre_GenerateRAPCommPkg(RAP, A); */ hypre_MatvecCommPkgCreate(RAP); } *RAP_ptr = RAP; /*----------------------------------------------------------------------- * Free R, P_ext and marker arrays. *-----------------------------------------------------------------------*/ if (keepTranspose) { hypre_ParCSRMatrixDiagT(RT) = R_diag; } else { hypre_CSRMatrixDestroy(R_diag); } R_diag = NULL; if (num_cols_offd_RT) { if (keepTranspose) { hypre_ParCSRMatrixOffdT(RT) = R_offd; } else { hypre_CSRMatrixDestroy(R_offd); } R_offd = NULL; } if (num_sends_RT || num_recvs_RT) { hypre_CSRMatrixDestroy(RAP_ext); RAP_ext = NULL; } hypre_TFree(P_mark_array, HYPRE_MEMORY_HOST); hypre_TFree(A_mark_array, HYPRE_MEMORY_HOST); hypre_TFree(P_ext_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(P_ext_offd_i, HYPRE_MEMORY_HOST); hypre_TFree(jj_cnt_diag, HYPRE_MEMORY_HOST); hypre_TFree(jj_cnt_offd, HYPRE_MEMORY_HOST); if (num_cols_offd_P) { hypre_TFree(map_P_to_Pext, HYPRE_MEMORY_HOST); hypre_TFree(map_P_to_RAP, HYPRE_MEMORY_HOST); } if (num_cols_offd_Pext) { hypre_TFree(col_map_offd_Pext, HYPRE_MEMORY_HOST); hypre_TFree(map_Pext_to_RAP, HYPRE_MEMORY_HOST); } if (P_ext_diag_size) { hypre_TFree(P_ext_diag_data, HYPRE_MEMORY_HOST); hypre_TFree(P_ext_diag_j, HYPRE_MEMORY_HOST); } if (P_ext_offd_size) { hypre_TFree(P_ext_offd_data, HYPRE_MEMORY_HOST); hypre_TFree(P_ext_offd_j, HYPRE_MEMORY_HOST); } hypre_TFree(RA_diag_data_array, HYPRE_MEMORY_HOST); hypre_TFree(RA_diag_j_array, HYPRE_MEMORY_HOST); if (num_cols_offd_A) { hypre_TFree(RA_offd_data_array, HYPRE_MEMORY_HOST); hypre_TFree(RA_offd_j_array, HYPRE_MEMORY_HOST); } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (send_map_elmts_RT_inverse_map_initialized) { hypre_UnorderedIntMapDestroy(&send_map_elmts_RT_inverse_map); } 
hypre_TFree(send_map_elmts_starts_RT_aggregated, HYPRE_MEMORY_HOST); hypre_TFree(send_map_elmts_RT_aggregated, HYPRE_MEMORY_HOST); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RAP] += hypre_MPI_Wtime(); #endif return(0); }
/* c-decl.c */
/* Process declarations and variables for C compiler. Copyright (C) 1988-2015 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ /* Process declarations and symbol lookup for C front end. Also constructs types; the standard scalar types at initialization, and structure, union, array and enum types when they are declared. */ /* ??? not all decl nodes are given the most useful possible line numbers. For example, the CONST_DECLs for enum values. 
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "input.h"
#include "tm.h"
#include "intl.h"
#include "hash-set.h"
#include "vec.h"
#include "symtab.h"
/* NOTE(review): input.h, hash-set.h, vec.h and machmode.h each appear
   twice below; this looks like a leftover of the GCC 5 flattened-header
   transition -- confirm against the build before pruning.  */
#include "input.h"
#include "alias.h"
#include "double-int.h"
#include "machmode.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "print-tree.h"
#include "stor-layout.h"
#include "varasm.h"
#include "attribs.h"
#include "stringpool.h"
#include "tree-inline.h"
#include "flags.h"
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "function.h"
#include "c-tree.h"
#include "toplev.h"
#include "tm_p.h"
#include "cpplib.h"
#include "target.h"
#include "debug.h"
#include "opts.h"
#include "timevar.h"
#include "c-family/c-common.h"
#include "c-family/c-objc.h"
#include "c-family/c-pragma.h"
#include "c-family/c-ubsan.h"
#include "c-lang.h"
#include "langhooks.h"
#include "tree-iterator.h"
#include "diagnostic-core.h"
#include "dumpfile.h"
#include "hash-map.h"
#include "is-a.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
#include "hash-table.h"
#include "langhooks-def.h"
#include "plugin.h"
#include "c-family/c-ada-spec.h"
#include "cilk.h"
#include "builtins.h"

/* In grokdeclarator, distinguish syntactic contexts of declarators.
   The enumerator order is relied upon only as a set of distinct tags;
   each value names the grammatical position the declarator occupies.  */
enum decl_context
{ NORMAL,			/* Ordinary declaration */
  FUNCDEF,			/* Function definition */
  PARM,				/* Declaration of parm before function body */
  FIELD,			/* Declaration inside struct or union */
  TYPENAME};			/* Typename (inside cast or sizeof) */

/* States indicating how grokdeclarator() should handle declspecs marked
   with __attribute__((deprecated)).  An object declared as
   __attribute__((deprecated)) suppresses warnings of uses of other
   deprecated items.  */

enum deprecated_states {
  DEPRECATED_NORMAL,		/* Report uses of deprecated declspecs.  */
  DEPRECATED_SUPPRESS		/* Suppress such warnings (inside a
				   deprecated declaration).  */
};


/* Nonzero if we have seen an invalid cross reference
   to a struct, union, or enum, but not yet printed the message.
*/
tree pending_invalid_xref;

/* File and line to appear in the eventual error message.  */
location_t pending_invalid_xref_location;

/* The file and line that the prototype came from if this is an
   old-style definition; used for diagnostics in
   store_parm_decls_oldstyle.  */

static location_t current_function_prototype_locus;

/* Whether this prototype was built-in.  */

static bool current_function_prototype_built_in;

/* The argument type information of this prototype.  */

static tree current_function_prototype_arg_types;

/* The argument information structure for the function currently being
   defined.  */

static struct c_arg_info *current_function_arg_info;

/* The obstack on which parser and related data structures, which are
   not live beyond their top-level declaration or definition, are
   allocated.  */

struct obstack parser_obstack;

/* The current statement tree.  */

static GTY(()) struct stmt_tree_s c_stmt_tree;

/* State saving variables.  */
tree c_break_label;
tree c_cont_label;

/* A list of decls to be made automatically visible in each file scope.  */
static GTY(()) tree visible_builtins;

/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement that specifies a return value is seen.  */

int current_function_returns_value;

/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement with no argument is seen.  */

int current_function_returns_null;

/* Set to 0 at beginning of a function definition, set to 1 if
   a call to a noreturn function is seen.  */

int current_function_returns_abnormally;

/* Set to nonzero by `grokdeclarator' for a function
   whose return type is defaulted, if warnings for this are desired.  */

static int warn_about_return_type;

/* Nonzero when the current toplevel function contains a declaration
   of a nested function which is never defined.  */

static bool undef_nested_function;

/* If non-zero, implicit "omp declare target" attribute is added into the
   attribute lists.
*/
int current_omp_declare_target_attribute;

/* Each c_binding structure describes one binding of an identifier to
   a decl.  All the decls in a scope - irrespective of namespace - are
   chained together by the ->prev field, which (as the name implies)
   runs in reverse order.  All the decls in a given namespace bound to
   a given identifier are chained by the ->shadowed field, which runs
   from inner to outer scopes.

   The ->decl field usually points to a DECL node, but there are two
   exceptions.  In the namespace of type tags, the bound entity is a
   RECORD_TYPE, UNION_TYPE, or ENUMERAL_TYPE node.  If an undeclared
   identifier is encountered, it is bound to error_mark_node to
   suppress further errors about that identifier in the current
   function.

   The ->u.type field stores the type of the declaration in this scope;
   if NULL, the type is the type of the ->decl field.  This is only of
   relevance for objects with external or internal linkage which may
   be redeclared in inner scopes, forming composite types that only
   persist for the duration of those scopes.  In the external scope,
   this stores the composite of all the types declared for this
   object, visible or not.  The ->inner_comp field (used only at file
   scope) stores whether an incomplete array type at file scope was
   completed at an inner scope to an array size other than 1.

   The ->u.label field is used for labels.  It points to a structure
   which stores additional information used for warnings.

   The depth field is copied from the scope structure that holds this
   decl.  It is used to preserve the proper ordering of the ->shadowed
   field (see bind()) and also for a handful of special-case checks.
   Finally, the invisible bit is true for a decl which should be
   ignored for purposes of normal name lookup, and the nested bit is
   true for a decl that's been bound a second time in an inner scope;
   in all such cases, the binding in the outer scope will have its
   invisible bit true.
*/

struct GTY((chain_next ("%h.prev"))) c_binding {
  union GTY(()) {		/* first so GTY desc can use decl */
    tree GTY((tag ("0"))) type; /* the type in this scope */
    struct c_label_vars * GTY((tag ("1"))) label; /* for warnings */
  } GTY((desc ("TREE_CODE (%0.decl) == LABEL_DECL"))) u;
  tree decl;			/* the decl bound */
  tree id;			/* the identifier it's bound to */
  struct c_binding *prev;	/* the previous decl in this scope */
  struct c_binding *shadowed;	/* the innermost decl shadowed by this one */
  unsigned int depth : 28;	/* depth of this scope */
  BOOL_BITFIELD invisible : 1;	/* normal lookup should ignore this binding */
  BOOL_BITFIELD nested : 1;	/* do not set DECL_CONTEXT when popping */
  BOOL_BITFIELD inner_comp : 1;	/* incomplete array completed in inner scope */
  BOOL_BITFIELD in_struct : 1;	/* currently defined as struct field */
  location_t locus;		/* location for nested bindings */
};

/* Scope-relationship predicates on bindings; depth comparisons work
   because every binding copies its scope's depth (see bind()).  */
#define B_IN_SCOPE(b1, b2) ((b1)->depth == (b2)->depth)
#define B_IN_CURRENT_SCOPE(b) ((b)->depth == current_scope->depth)
#define B_IN_FILE_SCOPE(b) ((b)->depth == 1 /*file_scope->depth*/)
#define B_IN_EXTERNAL_SCOPE(b) ((b)->depth == 0 /*external_scope->depth*/)

/* Each C symbol points to three linked lists of c_binding structures.
   These describe the values of the identifier in the three different
   namespaces defined by the language.  */

struct GTY(()) lang_identifier {
  struct c_common_identifier common_id;
  struct c_binding *symbol_binding; /* vars, funcs, constants, typedefs */
  struct c_binding *tag_binding;    /* struct/union/enum tags */
  struct c_binding *label_binding;  /* labels */
};

/* Validate c-lang.c's assumptions.  This is a compile-time assertion:
   the array size is negative (an error) if the two sizes disagree.  */
extern char C_SIZEOF_STRUCT_LANG_IDENTIFIER_isnt_accurate
[(sizeof(struct lang_identifier) == C_SIZEOF_STRUCT_LANG_IDENTIFIER) ? 1 : -1];

/* The binding oracle; see c-tree.h.  */
void (*c_binding_oracle) (enum c_oracle_request, tree identifier);

/* This flag is set on an identifier if we have previously asked the
   binding oracle for this identifier's symbol binding.
*/
#define I_SYMBOL_CHECKED(node) \
  (TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (node)))

/* Return the address of NODE's symbol-binding chain, consulting the
   binding oracle (if one is installed) the first time NODE is looked
   up and no binding exists yet.  */

static inline struct c_binding* *
i_symbol_binding (tree node)
{
  struct lang_identifier *lid
    = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node);

  if (lid->symbol_binding == NULL
      && c_binding_oracle != NULL
      && !I_SYMBOL_CHECKED (node))
    {
      /* Set the "checked" flag first, to avoid infinite recursion
	 when the binding oracle calls back into gcc.  */
      I_SYMBOL_CHECKED (node) = 1;
      c_binding_oracle (C_ORACLE_SYMBOL, node);
    }

  return &lid->symbol_binding;
}

#define I_SYMBOL_BINDING(node) (*i_symbol_binding (node))

#define I_SYMBOL_DECL(node) \
 (I_SYMBOL_BINDING(node) ? I_SYMBOL_BINDING(node)->decl : 0)

/* This flag is set on an identifier if we have previously
   asked the binding oracle for this identifier's tag binding.  */
#define I_TAG_CHECKED(node) \
  (TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (node)))

/* Return the address of NODE's tag-binding chain (struct/union/enum
   namespace); same lazy-oracle protocol as i_symbol_binding.  */

static inline struct c_binding **
i_tag_binding (tree node)
{
  struct lang_identifier *lid
    = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node);

  if (lid->tag_binding == NULL
      && c_binding_oracle != NULL
      && !I_TAG_CHECKED (node))
    {
      /* Set the "checked" flag first, to avoid infinite recursion
	 when the binding oracle calls back into gcc.  */
      I_TAG_CHECKED (node) = 1;
      c_binding_oracle (C_ORACLE_TAG, node);
    }

  return &lid->tag_binding;
}

#define I_TAG_BINDING(node) (*i_tag_binding (node))

#define I_TAG_DECL(node) \
 (I_TAG_BINDING(node) ? I_TAG_BINDING(node)->decl : 0)

/* This flag is set on an identifier if we have previously
   asked the binding oracle for this identifier's label binding.
*/
#define I_LABEL_CHECKED(node) \
  (TREE_LANG_FLAG_6 (IDENTIFIER_NODE_CHECK (node)))

/* Return the address of NODE's label-binding chain; same lazy-oracle
   protocol as i_symbol_binding.  */

static inline struct c_binding **
i_label_binding (tree node)
{
  struct lang_identifier *lid
    = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node);

  if (lid->label_binding == NULL
      && c_binding_oracle != NULL
      && !I_LABEL_CHECKED (node))
    {
      /* Set the "checked" flag first, to avoid infinite recursion
	 when the binding oracle calls back into gcc.  */
      I_LABEL_CHECKED (node) = 1;
      c_binding_oracle (C_ORACLE_LABEL, node);
    }

  return &lid->label_binding;
}

#define I_LABEL_BINDING(node) (*i_label_binding (node))

#define I_LABEL_DECL(node) \
 (I_LABEL_BINDING(node) ? I_LABEL_BINDING(node)->decl : 0)

/* The resulting tree type.  */

union GTY((desc ("TREE_CODE (&%h.generic) == IDENTIFIER_NODE"),
       chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node
 {
  union tree_node GTY ((tag ("0"),
			desc ("tree_node_structure (&%h)")))
    generic;
  struct lang_identifier GTY ((tag ("1"))) identifier;
};

/* Track bindings and other things that matter for goto warnings.  For
   efficiency, we do not gather all the decls at the point of
   definition.  Instead, we point into the bindings structure.  As
   scopes are popped, we update these structures and gather the decls
   that matter at that time.  */

struct GTY(()) c_spot_bindings {
  /* The currently open scope which holds bindings defined when the
     label was defined or the goto statement was found.  */
  struct c_scope *scope;
  /* The bindings in the scope field which were defined at the point
     of the label or goto.  This lets us look at older or newer
     bindings in the scope, as appropriate.  */
  struct c_binding *bindings_in_scope;
  /* The number of statement expressions that have started since this
     label or goto statement was defined.  This is zero if we are at
     the same statement expression level.  It is positive if we are in
     a statement expression started since this spot.  It is negative
     if this spot was in a statement expression and we have left
     it.  */
  int stmt_exprs;
  /* Whether we started in a statement expression but are no longer in
     it.  This is set to true if stmt_exprs ever goes negative.  */
  bool left_stmt_expr;
};

/* This structure is used to keep track of bindings seen when a goto
   statement is defined.  This is only used if we see the goto
   statement before we see the label.  */

struct GTY(()) c_goto_bindings {
  /* The location of the goto statement.  */
  location_t loc;
  /* The bindings of the goto statement.  */
  struct c_spot_bindings goto_bindings;
};

typedef struct c_goto_bindings *c_goto_bindings_p;

/* The additional information we keep track of for a label binding.
   These fields are updated as scopes are popped.  */

struct GTY(()) c_label_vars {
  /* The shadowed c_label_vars, when one label shadows another (which
     can only happen using a __label__ declaration).  */
  struct c_label_vars *shadowed;
  /* The bindings when the label was defined.  */
  struct c_spot_bindings label_bindings;
  /* A list of decls that we care about: decls about which we should
     warn if a goto branches to this label from later in the function.
     Decls are added to this list as scopes are popped.  We only add
     the decls that matter.  */
  vec<tree, va_gc> *decls_in_scope;
  /* A list of goto statements to this label.  This is only used for
     goto statements seen before the label was defined, so that we can
     issue appropriate warnings for them.  */
  vec<c_goto_bindings_p, va_gc> *gotos;
};

/* Each c_scope structure describes the complete contents of one
   scope.  Four scopes are distinguished specially: the innermost or
   current scope, the innermost function scope, the file scope (always
   the second to outermost) and the outermost or external scope.

   Most declarations are recorded in the current scope.

   All normal label declarations are recorded in the innermost
   function scope, as are bindings of undeclared identifiers to
   error_mark_node.  (GCC permits nested functions as an extension,
   hence the 'innermost' qualifier.)
   Explicitly declared labels (using the __label__ extension) appear
   in the current scope.

   Being in the file scope (current_scope == file_scope) causes
   special behavior in several places below.  Also, under some
   conditions the Objective-C front end records declarations in the
   file scope even though that isn't the current scope.

   All declarations with external linkage are recorded in the external
   scope, even if they aren't visible there; this models the fact that
   such declarations are visible to the entire program, and (with a
   bit of cleverness, see pushdecl) allows diagnosis of some violations
   of C99 6.2.2p7 and 6.2.7p2:

     If, within the same translation unit, the same identifier appears
     with both internal and external linkage, the behavior is
     undefined.

     All declarations that refer to the same object or function shall
     have compatible type; otherwise, the behavior is undefined.

   Initially only the built-in declarations, which describe compiler
   intrinsic functions plus a subset of the standard library, are in
   this scope.

   The order of the blocks list matters, and it is frequently appended
   to.  To avoid having to walk all the way to the end of the list on
   each insertion, or reverse the list later, we maintain a pointer to
   the last list entry.  (FIXME: It should be feasible to use a reversed
   list here.)

   The bindings list is strictly in reverse order of declarations;
   pop_scope relies on this.  */


struct GTY((chain_next ("%h.outer"))) c_scope {
  /* The scope containing this one.  */
  struct c_scope *outer;

  /* The next outermost function scope.  */
  struct c_scope *outer_function;

  /* All bindings in this scope.  */
  struct c_binding *bindings;

  /* For each scope (except the global one), a chain of BLOCK nodes
     for all the scopes that were entered and exited one level down.  */
  tree blocks;
  tree blocks_last;

  /* The depth of this scope.  Used to keep the ->shadowed chain of
     bindings sorted innermost to outermost.
     Width matches c_binding::depth (28 bits).  */
  unsigned int depth : 28;

  /* True if we are currently filling this scope with parameter
     declarations.  */
  BOOL_BITFIELD parm_flag : 1;

  /* True if we saw [*] in this scope.  Used to give an error messages
     if these appears in a function definition.  */
  BOOL_BITFIELD had_vla_unspec : 1;

  /* True if we already complained about forward parameter decls
     in this scope.  This prevents double warnings on
     foo (int a; int b; ...)  */
  BOOL_BITFIELD warned_forward_parm_decls : 1;

  /* True if this is the outermost block scope of a function body.
     This scope contains the parameters, the local variables declared
     in the outermost block, and all the labels (except those in
     nested functions, or declared at block scope with __label__).  */
  BOOL_BITFIELD function_body : 1;

  /* True means make a BLOCK for this scope no matter what.  */
  BOOL_BITFIELD keep : 1;

  /* True means that an unsuffixed float constant is _Decimal64.  */
  BOOL_BITFIELD float_const_decimal64 : 1;

  /* True if this scope has any label bindings.  This is used to speed
     up searching for labels when popping scopes, particularly since
     labels are normally only found at function scope.  */
  BOOL_BITFIELD has_label_bindings : 1;

  /* True if we should issue a warning if a goto statement crosses any
     of the bindings.  We still need to check the list of bindings to
     find the specific ones we need to warn about.  This is true if
     decl_jump_unsafe would return true for any of the bindings.  This
     is used to avoid looping over all the bindings unnecessarily.  */
  BOOL_BITFIELD has_jump_unsafe_decl : 1;
};

/* The scope currently in effect.  */

static GTY(()) struct c_scope *current_scope;

/* The innermost function scope.  Ordinary (not explicitly declared)
   labels, bindings to error_mark_node, and the lazily-created
   bindings of __func__ and its friends get this scope.  */

static GTY(()) struct c_scope *current_function_scope;

/* The C file scope.  This is reset for each input translation unit.  */

static GTY(()) struct c_scope *file_scope;

/* The outermost scope.
   This is used for all declarations with external linkage, and only
   these, hence the name.  */

static GTY(()) struct c_scope *external_scope;

/* A chain of c_scope structures awaiting reuse.  */

static GTY((deletable)) struct c_scope *scope_freelist;

/* A chain of c_binding structures awaiting reuse.  */

static GTY((deletable)) struct c_binding *binding_freelist;

/* Append VAR to LIST in scope SCOPE.  Uses the scope's list##_last
   pointer so appends are O(1) rather than walking the chain.  */
#define SCOPE_LIST_APPEND(scope, list, decl) do {	\
  struct c_scope *s_ = (scope);				\
  tree d_ = (decl);					\
  if (s_->list##_last)					\
    BLOCK_CHAIN (s_->list##_last) = d_;			\
  else							\
    s_->list = d_;					\
  s_->list##_last = d_;					\
} while (0)

/* Concatenate FROM in scope FSCOPE onto TO in scope TSCOPE.  */
#define SCOPE_LIST_CONCAT(tscope, to, fscope, from) do {	\
  struct c_scope *t_ = (tscope);				\
  struct c_scope *f_ = (fscope);				\
  if (t_->to##_last)						\
    BLOCK_CHAIN (t_->to##_last) = f_->from;			\
  else								\
    t_->to = f_->from;						\
  t_->to##_last = f_->from##_last;				\
} while (0)

/* A c_inline_static structure stores details of a static identifier
   referenced in a definition of a function that may be an inline
   definition if no subsequent declaration of that function uses
   "extern" or does not use "inline".  */

struct GTY((chain_next ("%h.next"))) c_inline_static {
  /* The location for a diagnostic.  */
  location_t location;

  /* The function that may be an inline definition.  */
  tree function;

  /* The object or function referenced.  */
  tree static_decl;

  /* What sort of reference this is.  */
  enum c_inline_static_type type;

  /* The next such structure or NULL.  */
  struct c_inline_static *next;
};

/* List of static identifiers used or referenced in functions that may
   be inline definitions.  */
static GTY(()) struct c_inline_static *c_inline_statics;

/* True means unconditionally make a BLOCK for the next scope pushed.  */

static bool keep_next_level_flag;

/* True means the next call to push_scope will be the outermost scope
   of a function body, so do not push a new scope, merely cease
   expecting parameter decls.
*/

static bool next_is_function_body;

/* A vector of pointers to c_binding structures.  */

typedef struct c_binding *c_binding_ptr;

/* Information that we keep for a struct or union while it is being
   parsed.  */

struct c_struct_parse_info
{
  /* If warn_cxx_compat, a list of types defined within this
     struct.  */
  vec<tree> struct_types;
  /* If warn_cxx_compat, a list of field names which have bindings,
     and which are defined in this struct, but which are not defined
     in any enclosing struct.  This is used to clear the in_struct
     field of the c_bindings structure.  */
  vec<c_binding_ptr> fields;
  /* If warn_cxx_compat, a list of typedef names used when defining
     fields in this struct.  */
  vec<tree> typedefs_seen;
};

/* Information for the struct or union currently being parsed, or
   NULL if not parsing a struct or union.  */
static struct c_struct_parse_info *struct_parse_info;

/* Forward declarations.  */
static tree lookup_name_in_scope (tree, struct c_scope *);
static tree c_make_fname_decl (location_t, tree, int);
static tree grokdeclarator (const struct c_declarator *,
			    struct c_declspecs *,
			    enum decl_context, bool, tree *, tree *, tree *,
			    bool *, enum deprecated_states);
static tree grokparms (struct c_arg_info *, bool);
static void layout_array_type (tree);
static void warn_defaults_to (location_t, int, const char *, ...)
    ATTRIBUTE_GCC_DIAG(3,4);

/* T is a statement.  Add it to the statement-tree.  This is the
   C/ObjC version--C++ has a slightly different version of this
   function.  Returns T.  */

tree
add_stmt (tree t)
{
  enum tree_code code = TREE_CODE (t);

  if (CAN_HAVE_LOCATION_P (t) && code != LABEL_EXPR)
    {
      if (!EXPR_HAS_LOCATION (t))
	SET_EXPR_LOCATION (t, input_location);
    }

  if (code == LABEL_EXPR || code == CASE_LABEL_EXPR)
    STATEMENT_LIST_HAS_LABEL (cur_stmt_list) = 1;

  /* Add T to the statement-tree.  Non-side-effect statements need to be
     recorded during statement expressions.  */
  if (!building_stmt_list_p ())
    push_stmt_list ();
  append_to_statement_list_force (t, &cur_stmt_list);

  return t;
}

/* Build a pointer type using the default pointer mode.  */

static tree
c_build_pointer_type (tree to_type)
{
  addr_space_t as = to_type == error_mark_node? ADDR_SPACE_GENERIC
					      : TYPE_ADDR_SPACE (to_type);
  machine_mode pointer_mode;

  /* Non-generic address spaces always use the target's pointer mode;
     the generic space honors -mpointer-mode style overrides via
     c_default_pointer_mode when one is set.  */
  if (as != ADDR_SPACE_GENERIC || c_default_pointer_mode == VOIDmode)
    pointer_mode = targetm.addr_space.pointer_mode (as);
  else
    pointer_mode = c_default_pointer_mode;
  return build_pointer_type_for_mode (to_type, pointer_mode, false);
}

/* Return true if we will want to say something if a goto statement
   crosses DECL.  */

static bool
decl_jump_unsafe (tree decl)
{
  if (error_operand_p (decl))
    return false;

  /* Always warn about crossing variably modified types.  */
  if ((TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == TYPE_DECL)
      && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE))
    return true;

  /* Otherwise, only warn if -Wgoto-misses-init and this is an
     initialized automatic decl.  */
  if (warn_jump_misses_init
      && TREE_CODE (decl) == VAR_DECL
      && !TREE_STATIC (decl)
      && DECL_INITIAL (decl) != NULL_TREE)
    return true;

  return false;
}

/* Dump NODE's symbol, tag and label bindings (plus any reserved-word
   info) to FILE at the given INDENT, for debugging.  */

void
c_print_identifier (FILE *file, tree node, int indent)
{
  void (*save) (enum c_oracle_request, tree identifier);

  /* Temporarily hide any binding oracle.  Without this, calls to
     debug_tree from the debugger will end up calling into the oracle,
     making for a confusing debug session.  As the oracle isn't needed
     here for normal operation, it's simplest to suppress it.  */
  save = c_binding_oracle;
  c_binding_oracle = NULL;

  print_node (file, "symbol", I_SYMBOL_DECL (node), indent + 4);
  print_node (file, "tag", I_TAG_DECL (node), indent + 4);
  print_node (file, "label", I_LABEL_DECL (node), indent + 4);
  if (C_IS_RESERVED_WORD (node) && C_RID_CODE (node) != RID_CXX_COMPAT_WARN)
    {
      tree rid = ridpointers[C_RID_CODE (node)];
      indent_to (file, indent + 4);
      fprintf (file, "rid " HOST_PTR_PRINTF " \"%s\"",
	       (void *) rid, IDENTIFIER_POINTER (rid));
    }

  c_binding_oracle = save;
}

/* Establish a binding between NAME, an IDENTIFIER_NODE, and DECL,
   which may be any of several kinds of DECL or TYPE or error_mark_node,
   in the scope SCOPE.  INVISIBLE hides the binding from normal lookup;
   NESTED marks a second binding of a decl whose home is an outer scope
   (see the c_binding commentary above).  */

static void
bind (tree name, tree decl, struct c_scope *scope, bool invisible,
      bool nested, location_t locus)
{
  struct c_binding *b, **here;

  /* Recycle a freed c_binding when possible, else GC-allocate.  */
  if (binding_freelist)
    {
      b = binding_freelist;
      binding_freelist = b->prev;
    }
  else
    b = ggc_alloc<c_binding> ();

  b->shadowed = 0;
  b->decl = decl;
  b->id = name;
  b->depth = scope->depth;
  b->invisible = invisible;
  b->nested = nested;
  b->inner_comp = 0;
  b->in_struct = 0;
  b->locus = locus;

  b->u.type = NULL;

  b->prev = scope->bindings;
  scope->bindings = b;

  if (decl_jump_unsafe (decl))
    scope->has_jump_unsafe_decl = 1;

  if (!name)
    return;

  /* Select the namespace chain (symbol/tag/label) by decl kind.  */
  switch (TREE_CODE (decl))
    {
    case LABEL_DECL:
      here = &I_LABEL_BINDING (name);
      break;

    case ENUMERAL_TYPE:
    case UNION_TYPE:
    case RECORD_TYPE:
      here = &I_TAG_BINDING (name);
      break;

    case VAR_DECL:
    case FUNCTION_DECL:
    case TYPE_DECL:
    case CONST_DECL:
    case PARM_DECL:
    case ERROR_MARK:
      here = &I_SYMBOL_BINDING (name);
      break;

    default:
      gcc_unreachable ();
    }

  /* Locate the appropriate place in the chain of shadowed decls
     to insert this binding.  Normally, scope == current_scope and
     this does nothing.  */
  while (*here && (*here)->depth > scope->depth)
    here = &(*here)->shadowed;

  b->shadowed = *here;
  *here = b;
}

/* Clear the binding structure B, stick it on the binding_freelist,
   and return the former value of b->prev.
   This is used by pop_scope and get_parm_info to iterate
   destructively over all the bindings from a given scope.  */

static struct c_binding *
free_binding_and_advance (struct c_binding *b)
{
  struct c_binding *prev = b->prev;

  memset (b, 0, sizeof (struct c_binding));
  b->prev = binding_freelist;
  binding_freelist = b;

  return prev;
}

/* Bind a label.  Like bind, but skip fields which aren't used for
   labels, and add the LABEL_VARS value.  */

static void
bind_label (tree name, tree label, struct c_scope *scope,
	    struct c_label_vars *label_vars)
{
  struct c_binding *b;

  bind (name, label, scope, /*invisible=*/false, /*nested=*/false,
	UNKNOWN_LOCATION);

  scope->has_label_bindings = true;

  /* bind() pushed the new binding at the head of scope->bindings;
     attach the label_vars to it, chaining any shadowed value.  */
  b = scope->bindings;
  gcc_assert (b->decl == label);
  label_vars->shadowed = b->u.label;
  b->u.label = label_vars;
}

/* Hook called at end of compilation to assume 1 elt
   for a file-scope tentative array defn that wasn't complete before.  */

void
c_finish_incomplete_decl (tree decl)
{
  if (TREE_CODE (decl) == VAR_DECL)
    {
      tree type = TREE_TYPE (decl);
      if (type != error_mark_node
	  && TREE_CODE (type) == ARRAY_TYPE
	  && !DECL_EXTERNAL (decl)
	  && TYPE_DOMAIN (type) == 0)
	{
	  warning_at (DECL_SOURCE_LOCATION (decl),
		      0, "array %q+D assumed to have one element", decl);

	  complete_array_type (&TREE_TYPE (decl), NULL_TREE, true);

	  relayout_decl (decl);
	}
    }
}

/* Record that inline function FUNC contains a reference (location
   LOC) to static DECL (file-scope or function-local according to
   TYPE).  */

void
record_inline_static (location_t loc, tree func, tree decl,
		      enum c_inline_static_type type)
{
  c_inline_static *csi = ggc_alloc<c_inline_static> ();
  csi->location = loc;
  csi->function = func;
  csi->static_decl = decl;
  csi->type = type;
  /* Push onto the global list checked by check_inline_statics.  */
  csi->next = c_inline_statics;
  c_inline_statics = csi;
}

/* Check for references to static declarations in inline functions at
   the end of the translation unit and diagnose them if the functions
   are still inline definitions.
*/

static void
check_inline_statics (void)
{
  struct c_inline_static *csi;
  for (csi = c_inline_statics; csi; csi = csi->next)
    {
      /* DECL_EXTERNAL here means the function is still an inline
	 definition (no out-of-line definition was ever emitted).  */
      if (DECL_EXTERNAL (csi->function))
	switch (csi->type)
	  {
	  case csi_internal:
	    pedwarn (csi->location, 0,
		     "%qD is static but used in inline function %qD "
		     "which is not static", csi->static_decl, csi->function);
	    break;
	  case csi_modifiable:
	    pedwarn (csi->location, 0,
		     "%q+D is static but declared in inline function %qD "
		     "which is not static", csi->static_decl, csi->function);
	    break;
	  default:
	    gcc_unreachable ();
	  }
    }
  c_inline_statics = NULL;
}

/* Fill in a c_spot_bindings structure.  If DEFINING is true, set it
   for the current state, otherwise set it to uninitialized.  */

static void
set_spot_bindings (struct c_spot_bindings *p, bool defining)
{
  if (defining)
    {
      p->scope = current_scope;
      p->bindings_in_scope = current_scope->bindings;
    }
  else
    {
      p->scope = NULL;
      p->bindings_in_scope = NULL;
    }
  p->stmt_exprs = 0;
  p->left_stmt_expr = false;
}

/* Update spot bindings P as we pop out of SCOPE.  Return true if we
   should push decls for a label.  */

static bool
update_spot_bindings (struct c_scope *scope, struct c_spot_bindings *p)
{
  if (p->scope != scope)
    {
      /* This label or goto is defined in some other scope, or it is a
	 label which is not yet defined.  There is nothing to
	 update.  */
      return false;
    }

  /* Adjust the spot bindings to refer to the bindings already defined
     in the enclosing scope.  */
  p->scope = scope->outer;
  p->bindings_in_scope = p->scope->bindings;

  return true;
}

/* The Objective-C front-end often needs to determine the current scope.  */

void *
objc_get_current_scope (void)
{
  return current_scope;
}

/* The following function is used only by Objective-C.  It needs to live here
   because it accesses the innards of c_scope.
*/ void objc_mark_locals_volatile (void *enclosing_blk) { struct c_scope *scope; struct c_binding *b; for (scope = current_scope; scope && scope != enclosing_blk; scope = scope->outer) { for (b = scope->bindings; b; b = b->prev) objc_volatilize_decl (b->decl); /* Do not climb up past the current function. */ if (scope->function_body) break; } } /* Return true if we are in the global binding level. */ bool global_bindings_p (void) { return current_scope == file_scope; } void keep_next_level (void) { keep_next_level_flag = true; } /* Set the flag for the FLOAT_CONST_DECIMAL64 pragma being ON. */ void set_float_const_decimal64 (void) { current_scope->float_const_decimal64 = true; } /* Clear the flag for the FLOAT_CONST_DECIMAL64 pragma. */ void clear_float_const_decimal64 (void) { current_scope->float_const_decimal64 = false; } /* Return nonzero if an unsuffixed float constant is _Decimal64. */ bool float_const_decimal64_p (void) { return current_scope->float_const_decimal64; } /* Identify this scope as currently being filled with parameters. */ void declare_parm_level (void) { current_scope->parm_flag = true; } void push_scope (void) { if (next_is_function_body) { /* This is the transition from the parameters to the top level of the function body. These are the same scope (C99 6.2.1p4,6) so we do not push another scope structure. next_is_function_body is set only by store_parm_decls, which in turn is called when and only when we are about to encounter the opening curly brace for the function body. The outermost block of a function always gets a BLOCK node, because the debugging output routines expect that each function has at least one BLOCK. */ current_scope->parm_flag = false; current_scope->function_body = true; current_scope->keep = true; current_scope->outer_function = current_function_scope; current_function_scope = current_scope; keep_next_level_flag = false; next_is_function_body = false; /* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes. 
*/ if (current_scope->outer) current_scope->float_const_decimal64 = current_scope->outer->float_const_decimal64; else current_scope->float_const_decimal64 = false; } else { struct c_scope *scope; if (scope_freelist) { scope = scope_freelist; scope_freelist = scope->outer; } else scope = ggc_cleared_alloc<c_scope> (); /* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes. */ if (current_scope) scope->float_const_decimal64 = current_scope->float_const_decimal64; else scope->float_const_decimal64 = false; scope->keep = keep_next_level_flag; scope->outer = current_scope; scope->depth = current_scope ? (current_scope->depth + 1) : 0; /* Check for scope depth overflow. Unlikely (2^28 == 268,435,456) but possible. */ if (current_scope && scope->depth == 0) { scope->depth--; sorry ("GCC supports only %u nested scopes", scope->depth); } current_scope = scope; keep_next_level_flag = false; } } /* This is called when we are leaving SCOPE. For each label defined in SCOPE, add any appropriate decls to its decls_in_scope fields. These are the decls whose initialization will be skipped by a goto later in the function. */ static void update_label_decls (struct c_scope *scope) { struct c_scope *s; s = scope; while (s != NULL) { if (s->has_label_bindings) { struct c_binding *b; for (b = s->bindings; b != NULL; b = b->prev) { struct c_label_vars *label_vars; struct c_binding *b1; bool hjud; unsigned int ix; struct c_goto_bindings *g; if (TREE_CODE (b->decl) != LABEL_DECL) continue; label_vars = b->u.label; b1 = label_vars->label_bindings.bindings_in_scope; if (label_vars->label_bindings.scope == NULL) hjud = false; else hjud = label_vars->label_bindings.scope->has_jump_unsafe_decl; if (update_spot_bindings (scope, &label_vars->label_bindings)) { /* This label is defined in this scope. */ if (hjud) { for (; b1 != NULL; b1 = b1->prev) { /* A goto from later in the function to this label will never see the initialization of B1, if any. Save it to issue a warning if needed. 
*/ if (decl_jump_unsafe (b1->decl)) vec_safe_push(label_vars->decls_in_scope, b1->decl); } } } /* Update the bindings of any goto statements associated with this label. */ FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g) update_spot_bindings (scope, &g->goto_bindings); } } /* Don't search beyond the current function. */ if (s == current_function_scope) break; s = s->outer; } } /* Set the TYPE_CONTEXT of all of TYPE's variants to CONTEXT. */ static void set_type_context (tree type, tree context) { for (type = TYPE_MAIN_VARIANT (type); type; type = TYPE_NEXT_VARIANT (type)) TYPE_CONTEXT (type) = context; } /* Exit a scope. Restore the state of the identifier-decl mappings that were in effect when this scope was entered. Return a BLOCK node containing all the DECLs in this scope that are of interest to debug info generation. */ tree pop_scope (void) { struct c_scope *scope = current_scope; tree block, context, p; struct c_binding *b; bool functionbody = scope->function_body; bool keep = functionbody || scope->keep || scope->bindings; update_label_decls (scope); /* If appropriate, create a BLOCK to record the decls for the life of this function. */ block = 0; if (keep) { block = make_node (BLOCK); BLOCK_SUBBLOCKS (block) = scope->blocks; TREE_USED (block) = 1; /* In each subblock, record that this is its superior. */ for (p = scope->blocks; p; p = BLOCK_CHAIN (p)) BLOCK_SUPERCONTEXT (p) = block; BLOCK_VARS (block) = 0; } /* The TYPE_CONTEXTs for all of the tagged types belonging to this scope must be set so that they point to the appropriate construct, i.e. either to the current FUNCTION_DECL node, or else to the BLOCK node we just constructed. Note that for tagged types whose scope is just the formal parameter list for some function type specification, we can't properly set their TYPE_CONTEXTs here, because we don't have a pointer to the appropriate FUNCTION_TYPE node readily available to us. 
For those cases, the TYPE_CONTEXTs of the relevant tagged type nodes get set in `grokdeclarator' as soon as we have created the FUNCTION_TYPE node which will represent the "scope" for these "parameter list local" tagged types. */ if (scope->function_body) context = current_function_decl; else if (scope == file_scope) { tree file_decl = build_translation_unit_decl (NULL_TREE); context = file_decl; debug_hooks->register_main_translation_unit (file_decl); } else context = block; /* Clear all bindings in this scope. */ for (b = scope->bindings; b; b = free_binding_and_advance (b)) { p = b->decl; switch (TREE_CODE (p)) { case LABEL_DECL: /* Warnings for unused labels, errors for undefined labels. */ if (TREE_USED (p) && !DECL_INITIAL (p)) { error ("label %q+D used but not defined", p); DECL_INITIAL (p) = error_mark_node; } else warn_for_unused_label (p); /* Labels go in BLOCK_VARS. */ DECL_CHAIN (p) = BLOCK_VARS (block); BLOCK_VARS (block) = p; gcc_assert (I_LABEL_BINDING (b->id) == b); I_LABEL_BINDING (b->id) = b->shadowed; /* Also pop back to the shadowed label_vars. */ release_tree_vector (b->u.label->decls_in_scope); b->u.label = b->u.label->shadowed; break; case ENUMERAL_TYPE: case UNION_TYPE: case RECORD_TYPE: set_type_context (p, context); /* Types may not have tag-names, in which case the type appears in the bindings list with b->id NULL. */ if (b->id) { gcc_assert (I_TAG_BINDING (b->id) == b); I_TAG_BINDING (b->id) = b->shadowed; } break; case FUNCTION_DECL: /* Propagate TREE_ADDRESSABLE from nested functions to their containing functions. 
*/ if (!TREE_ASM_WRITTEN (p) && DECL_INITIAL (p) != 0 && TREE_ADDRESSABLE (p) && DECL_ABSTRACT_ORIGIN (p) != 0 && DECL_ABSTRACT_ORIGIN (p) != p) TREE_ADDRESSABLE (DECL_ABSTRACT_ORIGIN (p)) = 1; if (!DECL_EXTERNAL (p) && !DECL_INITIAL (p) && scope != file_scope && scope != external_scope) { error ("nested function %q+D declared but never defined", p); undef_nested_function = true; } else if (DECL_DECLARED_INLINE_P (p) && TREE_PUBLIC (p) && !DECL_INITIAL (p)) { /* C99 6.7.4p6: "a function with external linkage... declared with an inline function specifier ... shall also be defined in the same translation unit." */ if (!flag_gnu89_inline && !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (p)) && scope != external_scope) pedwarn (input_location, 0, "inline function %q+D declared but never defined", p); DECL_EXTERNAL (p) = 1; } goto common_symbol; case VAR_DECL: /* Warnings for unused variables. */ if ((!TREE_USED (p) || !DECL_READ_P (p)) && !TREE_NO_WARNING (p) && !DECL_IN_SYSTEM_HEADER (p) && DECL_NAME (p) && !DECL_ARTIFICIAL (p) && scope != file_scope && scope != external_scope) { if (!TREE_USED (p)) warning (OPT_Wunused_variable, "unused variable %q+D", p); else if (DECL_CONTEXT (p) == current_function_decl) warning_at (DECL_SOURCE_LOCATION (p), OPT_Wunused_but_set_variable, "variable %qD set but not used", p); } if (b->inner_comp) { error ("type of array %q+D completed incompatibly with" " implicit initialization", p); } /* Fall through. */ case TYPE_DECL: case CONST_DECL: common_symbol: /* All of these go in BLOCK_VARS, but only if this is the binding in the home scope. */ if (!b->nested) { DECL_CHAIN (p) = BLOCK_VARS (block); BLOCK_VARS (block) = p; } else if (VAR_OR_FUNCTION_DECL_P (p) && scope != file_scope) { /* For block local externs add a special DECL_EXTERNAL decl for debug info generation. 
*/ tree extp = copy_node (p); DECL_EXTERNAL (extp) = 1; TREE_STATIC (extp) = 0; TREE_PUBLIC (extp) = 1; DECL_INITIAL (extp) = NULL_TREE; DECL_LANG_SPECIFIC (extp) = NULL; DECL_CONTEXT (extp) = current_function_decl; if (TREE_CODE (p) == FUNCTION_DECL) { DECL_RESULT (extp) = NULL_TREE; DECL_SAVED_TREE (extp) = NULL_TREE; DECL_STRUCT_FUNCTION (extp) = NULL; } if (b->locus != UNKNOWN_LOCATION) DECL_SOURCE_LOCATION (extp) = b->locus; DECL_CHAIN (extp) = BLOCK_VARS (block); BLOCK_VARS (block) = extp; } /* If this is the file scope set DECL_CONTEXT of each decl to the TRANSLATION_UNIT_DECL. This makes same_translation_unit_p work. */ if (scope == file_scope) { DECL_CONTEXT (p) = context; if (TREE_CODE (p) == TYPE_DECL && TREE_TYPE (p) != error_mark_node) set_type_context (TREE_TYPE (p), context); } /* Fall through. */ /* Parameters go in DECL_ARGUMENTS, not BLOCK_VARS, and have already been put there by store_parm_decls. Unused- parameter warnings are handled by function.c. error_mark_node obviously does not go in BLOCK_VARS and does not get unused-variable warnings. */ case PARM_DECL: case ERROR_MARK: /* It is possible for a decl not to have a name. We get here with b->id NULL in this case. */ if (b->id) { gcc_assert (I_SYMBOL_BINDING (b->id) == b); I_SYMBOL_BINDING (b->id) = b->shadowed; if (b->shadowed && b->shadowed->u.type) TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type; } break; default: gcc_unreachable (); } } /* Dispose of the block that we just made inside some higher level. */ if ((scope->function_body || scope == file_scope) && context) { DECL_INITIAL (context) = block; BLOCK_SUPERCONTEXT (block) = context; } else if (scope->outer) { if (block) SCOPE_LIST_APPEND (scope->outer, blocks, block); /* If we did not make a block for the scope just exited, any blocks made for inner scopes must be carried forward so they will later become subblocks of something else. 
     */
      else if (scope->blocks)
	SCOPE_LIST_CONCAT (scope->outer, blocks, scope, blocks);
    }

  /* Pop the current scope, and free the structure for reuse.  */
  current_scope = scope->outer;
  if (scope->function_body)
    current_function_scope = scope->outer_function;

  memset (scope, 0, sizeof (struct c_scope));
  scope->outer = scope_freelist;
  scope_freelist = scope;

  return block;
}

/* Enter the file-level scope, creating it on first use, and make the
   pre-seeded builtin declarations visible in it.  A second call while
   FILE_SCOPE already exists is a no-op.  */

void
push_file_scope (void)
{
  tree decl;

  if (file_scope)
    return;

  push_scope ();
  file_scope = current_scope;

  start_fname_decls ();

  /* Builtins are bound as nested (their home is the external scope),
     but visibly, so ordinary lookup finds them at file scope.  */
  for (decl = visible_builtins; decl; decl = DECL_CHAIN (decl))
    bind (DECL_NAME (decl), decl, file_scope,
	  /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl));
}

/* Tear down the file scope at the end of the translation unit, after
   first unwinding any scopes left open by missing close braces.  */

void
pop_file_scope (void)
{
  /* In case there were missing closebraces, get us back to the global
     binding level.  */
  while (current_scope != file_scope)
    pop_scope ();

  /* __FUNCTION__ is defined at file scope ("").  This
     call may not be necessary as my tests indicate it
     still works without it.  */
  finish_fname_decls ();

  check_inline_statics ();

  /* This is the point to write out a PCH if we're doing that.
     In that case we do not want to do anything else.  */
  if (pch_file)
    {
      c_common_write_pch ();
      return;
    }

  /* Pop off the file scope and close this translation unit.  */
  pop_scope ();
  file_scope = 0;

  maybe_apply_pending_pragma_weaks ();
}

/* Adjust the bindings for the start of a statement expression.
   */

void
c_bindings_start_stmt_expr (struct c_spot_bindings* switch_bindings)
{
  struct c_scope *scope;

  /* Bump the statement-expression count on every label binding in
     every enclosing scope, and on every goto recorded against such a
     label, so that later binding checks know a statement expression
     has opened since those spots were recorded.  */
  for (scope = current_scope; scope != NULL; scope = scope->outer)
    {
      struct c_binding *b;

      /* Scopes without label bindings have nothing to adjust.  */
      if (!scope->has_label_bindings)
	continue;

      for (b = scope->bindings; b != NULL; b = b->prev)
	{
	  struct c_label_vars *label_vars;
	  unsigned int ix;
	  struct c_goto_bindings *g;

	  if (TREE_CODE (b->decl) != LABEL_DECL)
	    continue;
	  label_vars = b->u.label;
	  ++label_vars->label_bindings.stmt_exprs;
	  FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
	    ++g->goto_bindings.stmt_exprs;
	}
    }

  if (switch_bindings != NULL)
    ++switch_bindings->stmt_exprs;
}

/* Adjust the bindings for the end of a statement expression.  */

void
c_bindings_end_stmt_expr (struct c_spot_bindings *switch_bindings)
{
  struct c_scope *scope;

  /* Mirror image of c_bindings_start_stmt_expr: decrement the counts
     bumped there.  */
  for (scope = current_scope; scope != NULL; scope = scope->outer)
    {
      struct c_binding *b;

      if (!scope->has_label_bindings)
	continue;

      for (b = scope->bindings; b != NULL; b = b->prev)
	{
	  struct c_label_vars *label_vars;
	  unsigned int ix;
	  struct c_goto_bindings *g;

	  if (TREE_CODE (b->decl) != LABEL_DECL)
	    continue;
	  label_vars = b->u.label;
	  --label_vars->label_bindings.stmt_exprs;
	  if (label_vars->label_bindings.stmt_exprs < 0)
	    {
	      /* The count underflowing means this binding was made
		 inside the statement expression now ending; record
		 that we have left it and clamp the count.  */
	      label_vars->label_bindings.left_stmt_expr = true;
	      label_vars->label_bindings.stmt_exprs = 0;
	    }
	  FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
	    {
	      --g->goto_bindings.stmt_exprs;
	      if (g->goto_bindings.stmt_exprs < 0)
		{
		  g->goto_bindings.left_stmt_expr = true;
		  g->goto_bindings.stmt_exprs = 0;
		}
	    }
	}
    }

  if (switch_bindings != NULL)
    {
      --switch_bindings->stmt_exprs;
      gcc_assert (switch_bindings->stmt_exprs >= 0);
    }
}

/* Push a definition or a declaration of struct, union or enum tag "name".
   "type" should be the type node.
   We assume that the tag "name" is not already defined, and has a location
   of LOC.

   Note that the definition may really be just a forward reference.
   In that case, the TYPE_SIZE will be zero.
   */

static void
pushtag (location_t loc, tree name, tree type)
{
  /* Record the identifier as the type's name if it has none.
     NAME may be NULL_TREE for an unnamed tag.  */
  if (name && !TYPE_NAME (type))
    TYPE_NAME (type) = name;
  bind (name, type, current_scope, /*invisible=*/false, /*nested=*/false,
	loc);

  /* Create a fake NULL-named TYPE_DECL node whose TREE_TYPE will be the
     tagged type we just added to the current scope.  This fake
     NULL-named TYPE_DECL node helps dwarfout.c to know when it needs
     to output a representation of a tagged type, and it also gives
     us a convenient place to record the "scope start" address for the
     tagged type.  */

  TYPE_STUB_DECL (type) = pushdecl (build_decl (loc,
						TYPE_DECL, NULL_TREE, type));

  /* An approximation for now, so we can tell this is a function-scope tag.
     This will be updated in pop_scope.  */
  TYPE_CONTEXT (type) = DECL_CONTEXT (TYPE_STUB_DECL (type));

  /* Diagnose, under -Wc++-compat, a tag name that is already in use
     as a typedef naming a different type: valid C, invalid C++.  */
  if (warn_cxx_compat && name != NULL_TREE)
    {
      struct c_binding *b = I_SYMBOL_BINDING (name);

      if (b != NULL
	  && b->decl != NULL_TREE
	  && TREE_CODE (b->decl) == TYPE_DECL
	  && (B_IN_CURRENT_SCOPE (b)
	      || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b)))
	  && (TYPE_MAIN_VARIANT (TREE_TYPE (b->decl))
	      != TYPE_MAIN_VARIANT (type)))
	{
	  warning_at (loc, OPT_Wc___compat,
		      ("using %qD as both a typedef and a tag is "
		       "invalid in C++"),
		      b->decl);
	  if (b->locus != UNKNOWN_LOCATION)
	    inform (b->locus, "originally defined here");
	}
    }
}

/* An exported interface to pushtag.  This is used by the gdb plugin's
   binding oracle to introduce a new tag binding.  */

void
c_pushtag (location_t loc, tree name, tree type)
{
  pushtag (loc, name, type);
}

/* An exported interface to bind a declaration.  LOC is the location
   to use.  DECL is the declaration to bind.  The decl's name is used
   to determine how it is bound.  If DECL is a VAR_DECL, then
   IS_GLOBAL determines whether the decl is put into the global (file
   and external) scope or the current function's scope; if DECL is not
   a VAR_DECL then it is always put into the file scope.
   */

void
c_bind (location_t loc, tree decl, bool is_global)
{
  struct c_scope *scope;
  bool nested = false;

  if (TREE_CODE (decl) != VAR_DECL || current_function_scope == NULL)
    {
      /* Types and functions are always considered to be global.  */
      scope = file_scope;
      DECL_EXTERNAL (decl) = 1;
      TREE_PUBLIC (decl) = 1;
    }
  else if (is_global)
    {
      /* Also bind it into the external scope.  */
      bind (DECL_NAME (decl), decl, external_scope, true, false, loc);
      nested = true;
      scope = file_scope;
      DECL_EXTERNAL (decl) = 1;
      TREE_PUBLIC (decl) = 1;
    }
  else
    {
      /* A function-local variable: give it internal linkage and bind
	 it in the current function's scope.  */
      DECL_CONTEXT (decl) = current_function_decl;
      TREE_PUBLIC (decl) = 0;
      scope = current_function_scope;
    }

  bind (DECL_NAME (decl), decl, scope, false, nested, loc);
}

/* Subroutine of compare_decls.  Allow harmless mismatches in return
   and argument types provided that the type modes match.  This function
   returns a unified type given a suitable match, and 0 otherwise.  */

static tree
match_builtin_function_types (tree newtype, tree oldtype)
{
  tree newrettype, oldrettype;
  tree newargs, oldargs;
  tree trytype, tryargs;

  /* Accept the return type of the new declaration if same modes.  */
  oldrettype = TREE_TYPE (oldtype);
  newrettype = TREE_TYPE (newtype);

  if (TYPE_MODE (oldrettype) != TYPE_MODE (newrettype))
    return 0;

  oldargs = TYPE_ARG_TYPES (oldtype);
  newargs = TYPE_ARG_TYPES (newtype);
  tryargs = newargs;

  /* Walk both argument lists in lockstep; any length mismatch,
     missing type, or mode mismatch disqualifies the pair.  */
  while (oldargs || newargs)
    {
      if (!oldargs
	  || !newargs
	  || !TREE_VALUE (oldargs)
	  || !TREE_VALUE (newargs)
	  || TYPE_MODE (TREE_VALUE (oldargs))
	     != TYPE_MODE (TREE_VALUE (newargs)))
	return 0;

      oldargs = TREE_CHAIN (oldargs);
      newargs = TREE_CHAIN (newargs);
    }

  /* Build the unified type from the new declaration's types, keeping
     the old type's attributes.  */
  trytype = build_function_type (newrettype, tryargs);
  return build_type_attribute_variant (trytype, TYPE_ATTRIBUTES (oldtype));
}

/* Subroutine of diagnose_mismatched_decls.  Check for function type
   mismatch involving an empty arglist vs a nonempty one and give clearer
   diagnostics.
   */

static void
diagnose_arglist_conflict (tree newdecl, tree olddecl,
			   tree newtype, tree oldtype)
{
  tree t;

  /* Only relevant when one side is an unprototyped declaration (and
     not a definition) of a function whose return type matches.  */
  if (TREE_CODE (olddecl) != FUNCTION_DECL
      || !comptypes (TREE_TYPE (oldtype), TREE_TYPE (newtype))
      || !((!prototype_p (oldtype) && DECL_INITIAL (olddecl) == 0)
	   || (!prototype_p (newtype) && DECL_INITIAL (newdecl) == 0)))
    return;

  /* Inspect whichever side actually carries a parameter list.  */
  t = TYPE_ARG_TYPES (oldtype);
  if (t == 0)
    t = TYPE_ARG_TYPES (newtype);
  for (; t; t = TREE_CHAIN (t))
    {
      tree type = TREE_VALUE (t);

      /* A list not terminated by void_type_node is a varargs list.  */
      if (TREE_CHAIN (t) == 0
	  && TYPE_MAIN_VARIANT (type) != void_type_node)
	{
	  inform (input_location, "a parameter list with an ellipsis can%'t match "
		  "an empty parameter name list declaration");
	  break;
	}

      if (c_type_promotes_to (type) != type)
	{
	  inform (input_location, "an argument type that has a default promotion can%'t match "
		  "an empty parameter name list declaration");
	  break;
	}
    }
}

/* Another subroutine of diagnose_mismatched_decls.  OLDDECL is an
   old-style function definition, NEWDECL is a prototype declaration.
   Diagnose inconsistencies in the argument list.  Returns TRUE if
   the prototype is compatible, FALSE if not.  */

static bool
validate_proto_after_old_defn (tree newdecl, tree newtype, tree oldtype)
{
  tree newargs, oldargs;
  int i;

#define END_OF_ARGLIST(t) ((t) == void_type_node)

  oldargs = TYPE_ACTUAL_ARG_TYPES (oldtype);
  newargs = TYPE_ARG_TYPES (newtype);
  i = 1;

  for (;;)
    {
      tree oldargtype = TREE_VALUE (oldargs);
      tree newargtype = TREE_VALUE (newargs);

      if (oldargtype == error_mark_node || newargtype == error_mark_node)
	return false;

      /* Compare main variants, but keep the _Atomic qualifier, which
	 is significant for compatibility.  */
      oldargtype = (TYPE_ATOMIC (oldargtype)
		    ? c_build_qualified_type (TYPE_MAIN_VARIANT (oldargtype),
					      TYPE_QUAL_ATOMIC)
		    : TYPE_MAIN_VARIANT (oldargtype));
      newargtype = (TYPE_ATOMIC (newargtype)
		    ? c_build_qualified_type (TYPE_MAIN_VARIANT (newargtype),
					      TYPE_QUAL_ATOMIC)
		    : TYPE_MAIN_VARIANT (newargtype));

      if (END_OF_ARGLIST (oldargtype) && END_OF_ARGLIST (newargtype))
	break;

      /* Reaching the end of just one list means the two decls don't
	 agree on the number of arguments.  */
      if (END_OF_ARGLIST (oldargtype))
	{
	  error ("prototype for %q+D declares more arguments "
		 "than previous old-style definition", newdecl);
	  return false;
	}
      else if (END_OF_ARGLIST (newargtype))
	{
	  error ("prototype for %q+D declares fewer arguments "
		 "than previous old-style definition", newdecl);
	  return false;
	}

      /* Type for passing arg must be consistent with that declared
	 for the arg.  */
      else if (!comptypes (oldargtype, newargtype))
	{
	  error ("prototype for %q+D declares argument %d"
		 " with incompatible type",
		 newdecl, i);
	  return false;
	}

      oldargs = TREE_CHAIN (oldargs);
      newargs = TREE_CHAIN (newargs);
      i++;
    }

  /* If we get here, no errors were found, but do issue a warning
     for this poor-style construct.  */
  warning (0, "prototype for %q+D follows non-prototype definition",
	   newdecl);
  return true;
#undef END_OF_ARGLIST
}

/* Subroutine of diagnose_mismatched_decls.  Report the location of DECL,
   first in a pair of mismatched declarations.  */

static void
locate_old_decl (tree decl)
{
  /* A builtin that was never explicitly declared has no user-visible
     location worth reporting; stay silent.  */
  if (TREE_CODE (decl) == FUNCTION_DECL && DECL_BUILT_IN (decl)
      && !C_DECL_DECLARED_BUILTIN (decl))
    ;
  else if (DECL_INITIAL (decl))
    inform (input_location, "previous definition of %q+D was here", decl);
  else if (C_DECL_IMPLICIT (decl))
    inform (input_location, "previous implicit declaration of %q+D was here",
	    decl);
  else
    inform (input_location, "previous declaration of %q+D was here", decl);
}

/* Subroutine of duplicate_decls.  Compare NEWDECL to OLDDECL.
   Returns true if the caller should proceed to merge the two, false
   if OLDDECL should simply be discarded.  As a side effect, issues
   all necessary diagnostics for invalid or poor-style combinations.
   If it returns true, writes the types of NEWDECL and OLDDECL to
   *NEWTYPEP and *OLDTYPEP - these may have been adjusted from
   TREE_TYPE (NEWDECL, OLDDECL) respectively.
   */

static bool
diagnose_mismatched_decls (tree newdecl, tree olddecl,
			   tree *newtypep, tree *oldtypep)
{
  tree newtype, oldtype;
  bool pedwarned = false;
  bool warned = false;
  bool retval = true;

  /* True for a function declared both inline and extern.  */
#define DECL_EXTERN_INLINE(DECL) (DECL_DECLARED_INLINE_P (DECL)  \
				  && DECL_EXTERNAL (DECL))

  /* If we have error_mark_node for either decl or type, just discard
     the previous decl - we're in an error cascade already.  */
  if (olddecl == error_mark_node || newdecl == error_mark_node)
    return false;
  *oldtypep = oldtype = TREE_TYPE (olddecl);
  *newtypep = newtype = TREE_TYPE (newdecl);
  if (oldtype == error_mark_node || newtype == error_mark_node)
    return false;

  /* Two different categories of symbol altogether.  This is an error
     unless OLDDECL is a builtin.  OLDDECL will be discarded in any case.  */
  if (TREE_CODE (olddecl) != TREE_CODE (newdecl))
    {
      if (!(TREE_CODE (olddecl) == FUNCTION_DECL
	    && DECL_BUILT_IN (olddecl)
	    && !C_DECL_DECLARED_BUILTIN (olddecl)))
	{
	  error ("%q+D redeclared as different kind of symbol", newdecl);
	  locate_old_decl (olddecl);
	}
      else if (TREE_PUBLIC (newdecl))
	warning (0, "built-in function %q+D declared as non-function",
		 newdecl);
      else
	warning (OPT_Wshadow, "declaration of %q+D shadows "
		 "a built-in function", newdecl);
      return false;
    }

  /* Enumerators have no linkage, so may only be declared once in a
     given scope.  */
  if (TREE_CODE (olddecl) == CONST_DECL)
    {
      error ("redeclaration of enumerator %q+D", newdecl);
      locate_old_decl (olddecl);
      return false;
    }

  if (!comptypes (oldtype, newtype))
    {
      if (TREE_CODE (olddecl) == FUNCTION_DECL
	  && DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl))
	{
	  /* Accept harmless mismatch in function types.
	     This is for the ffs and fprintf builtins.  */
	  tree trytype = match_builtin_function_types (newtype, oldtype);

	  if (trytype && comptypes (newtype, trytype))
	    *oldtypep = oldtype = trytype;
	  else
	    {
	      /* If types don't match for a built-in, throw away the
		 built-in.  No point in calling locate_old_decl here, it
		 won't print anything.  */
	      warning (0, "conflicting types for built-in function %q+D",
		       newdecl);
	      return false;
	    }
	}
      else if (TREE_CODE (olddecl) == FUNCTION_DECL
	       && DECL_IS_BUILTIN (olddecl))
	{
	  /* A conflicting function declaration for a predeclared
	     function that isn't actually built in.  Objective C uses
	     these.  The new declaration silently overrides everything
	     but the volatility (i.e. noreturn) indication.  See also
	     below.  FIXME: Make Objective C use normal builtins.  */
	  TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl);
	  return false;
	}
      /* Permit void foo (...) to match int foo (...) if the latter is
	 the definition and implicit int was used.  See
	 c-torture/compile/920625-2.c.  */
      else if (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl)
	       && TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == void_type_node
	       && TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == integer_type_node
	       && C_FUNCTION_IMPLICIT_INT (newdecl) && !DECL_INITIAL (olddecl))
	{
	  pedwarned = pedwarn (input_location, 0,
			       "conflicting types for %q+D", newdecl);
	  /* Make sure we keep void as the return type.  */
	  TREE_TYPE (newdecl) = *newtypep = newtype = oldtype;
	  C_FUNCTION_IMPLICIT_INT (newdecl) = 0;
	}
      /* Permit void foo (...) to match an earlier call to foo (...) with
	 no declared type (thus, implicitly int).  */
      else if (TREE_CODE (newdecl) == FUNCTION_DECL
	       && TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == void_type_node
	       && TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == integer_type_node
	       && C_DECL_IMPLICIT (olddecl) && !DECL_INITIAL (olddecl))
	{
	  pedwarned = pedwarn (input_location, 0,
			       "conflicting types for %q+D", newdecl);
	  /* Make sure we keep void as the return type.  */
	  TREE_TYPE (olddecl) = *oldtypep = oldtype = newtype;
	}
      else
	{
	  int new_quals = TYPE_QUALS (newtype);
	  int old_quals = TYPE_QUALS (oldtype);

	  if (new_quals != old_quals)
	    {
	      /* Distinguish an address-space clash from an ordinary
		 qualifier clash for a clearer diagnostic.  */
	      addr_space_t new_addr = DECODE_QUAL_ADDR_SPACE (new_quals);
	      addr_space_t old_addr = DECODE_QUAL_ADDR_SPACE (old_quals);
	      if (new_addr != old_addr)
		{
		  if (ADDR_SPACE_GENERIC_P (new_addr))
		    error ("conflicting named address spaces (generic vs %s) "
			   "for %q+D",
			   c_addr_space_name (old_addr), newdecl);
		  else if (ADDR_SPACE_GENERIC_P (old_addr))
		    error ("conflicting named address spaces (%s vs generic) "
			   "for %q+D",
			   c_addr_space_name (new_addr), newdecl);
		  else
		    error ("conflicting named address spaces (%s vs %s) "
			   "for %q+D",
			   c_addr_space_name (new_addr),
			   c_addr_space_name (old_addr),
			   newdecl);
		}

	      if (CLEAR_QUAL_ADDR_SPACE (new_quals)
		  != CLEAR_QUAL_ADDR_SPACE (old_quals))
		error ("conflicting type qualifiers for %q+D", newdecl);
	    }
	  else
	    error ("conflicting types for %q+D", newdecl);
	  diagnose_arglist_conflict (newdecl, olddecl, newtype, oldtype);
	  locate_old_decl (olddecl);
	  return false;
	}
    }

  /* Redeclaration of a type is a constraint violation (6.7.2.3p1),
     but silently ignore the redeclaration if either is in a system
     header.  (Conflicting redeclarations were handled above.)  This
     is allowed for C11 if the types are the same, not just
     compatible.  */
  if (TREE_CODE (newdecl) == TYPE_DECL)
    {
      bool types_different = false;
      int comptypes_result;

      comptypes_result
	= comptypes_check_different_types (oldtype, newtype, &types_different);

      if (comptypes_result != 1 || types_different)
	{
	  error ("redefinition of typedef %q+D with different type", newdecl);
	  locate_old_decl (olddecl);
	  return false;
	}

      if (DECL_IN_SYSTEM_HEADER (newdecl)
	  || DECL_IN_SYSTEM_HEADER (olddecl)
	  || TREE_NO_WARNING (newdecl)
	  || TREE_NO_WARNING (olddecl))
	return true;  /* Allow OLDDECL to continue in use.  */

      if (variably_modified_type_p (newtype, NULL))
	{
	  error ("redefinition of typedef %q+D with variably modified type",
		 newdecl);
	  locate_old_decl (olddecl);
	}
      else if (pedwarn_c99 (input_location, OPT_Wpedantic,
			    "redefinition of typedef %q+D", newdecl))
	locate_old_decl (olddecl);

      return true;
    }

  /* Function declarations can either be 'static' or 'extern' (no
     qualifier is equivalent to 'extern' - C99 6.2.2p5) and therefore
     can never conflict with each other on account of linkage
     (6.2.2p4).  Multiple definitions are not allowed (6.9p3,5) but
     gnu89 mode permits two definitions if one is 'extern inline' and
     one is not.  The non-extern-inline definition supersedes the
     extern-inline definition.  */
  else if (TREE_CODE (newdecl) == FUNCTION_DECL)
    {
      /* If you declare a built-in function name as static, or define the
	 built-in with an old-style definition (so we can't validate
	 the argument list) the built-in definition is overridden,
	 but optionally warn this was a bad choice of name.  */
      if (DECL_BUILT_IN (olddecl)
	  && !C_DECL_DECLARED_BUILTIN (olddecl)
	  && (!TREE_PUBLIC (newdecl)
	      || (DECL_INITIAL (newdecl)
		  && !prototype_p (TREE_TYPE (newdecl)))))
	{
	  warning (OPT_Wshadow, "declaration of %q+D shadows "
		   "a built-in function", newdecl);
	  /* Discard the old built-in function.  */
	  return false;
	}

      if (DECL_INITIAL (newdecl))
	{
	  if (DECL_INITIAL (olddecl))
	    {
	      /* If both decls are in the same TU and the new declaration
		 isn't overriding an extern inline reject the new decl.
		 In c99, no overriding is allowed in the same translation
		 unit.  */
	      if ((!DECL_EXTERN_INLINE (olddecl)
		   || DECL_EXTERN_INLINE (newdecl)
		   || (!flag_gnu89_inline
		       && (!DECL_DECLARED_INLINE_P (olddecl)
			   || !lookup_attribute ("gnu_inline",
						 DECL_ATTRIBUTES (olddecl)))
		       && (!DECL_DECLARED_INLINE_P (newdecl)
			   || !lookup_attribute ("gnu_inline",
						 DECL_ATTRIBUTES (newdecl))))
		  )
		  && same_translation_unit_p (newdecl, olddecl))
		{
		  error ("redefinition of %q+D", newdecl);
		  locate_old_decl (olddecl);
		  return false;
		}
	    }
	}
      /* If we have a prototype after an old-style function definition,
	 the argument types must be checked specially.  */
      else if (DECL_INITIAL (olddecl)
	       && !prototype_p (oldtype) && prototype_p (newtype)
	       && TYPE_ACTUAL_ARG_TYPES (oldtype)
	       && !validate_proto_after_old_defn (newdecl, newtype, oldtype))
	{
	  locate_old_decl (olddecl);
	  return false;
	}
      /* A non-static declaration (even an "extern") followed by a
	 static declaration is undefined behavior per C99 6.2.2p3-5,7.
	 The same is true for a static forward declaration at block
	 scope followed by a non-static declaration/definition at file
	 scope.  Static followed by non-static at the same scope is
	 not undefined behavior, and is the most convenient way to get
	 some effects (see e.g.  what unwind-dw2-fde-glibc.c does to
	 the definition of _Unwind_Find_FDE in unwind-dw2-fde.c), but
	 we do diagnose it if -Wtraditional.  */
      if (TREE_PUBLIC (olddecl) && !TREE_PUBLIC (newdecl))
	{
	  /* Two exceptions to the rule.  If olddecl is an extern
	     inline, or a predeclared function that isn't actually
	     built in, newdecl silently overrides olddecl.  The latter
	     occur only in Objective C; see also above.  (FIXME: Make
	     Objective C use normal builtins.)  */
	  if (!DECL_IS_BUILTIN (olddecl)
	      && !DECL_EXTERN_INLINE (olddecl))
	    {
	      error ("static declaration of %q+D follows "
		     "non-static declaration", newdecl);
	      locate_old_decl (olddecl);
	    }
	  return false;
	}
      else if (TREE_PUBLIC (newdecl) && !TREE_PUBLIC (olddecl))
	{
	  if (DECL_CONTEXT (olddecl))
	    {
	      error ("non-static declaration of %q+D follows "
		     "static declaration", newdecl);
	      locate_old_decl (olddecl);
	      return false;
	    }
	  else if (warn_traditional)
	    {
	      warned |= warning (OPT_Wtraditional,
				 "non-static declaration of %q+D "
				 "follows static declaration", newdecl);
	    }
	}

      /* Make sure gnu_inline attribute is either not present, or
	 present on all inline decls.  */
      if (DECL_DECLARED_INLINE_P (olddecl)
	  && DECL_DECLARED_INLINE_P (newdecl))
	{
	  bool newa = lookup_attribute ("gnu_inline",
					DECL_ATTRIBUTES (newdecl)) != NULL;
	  bool olda = lookup_attribute ("gnu_inline",
					DECL_ATTRIBUTES (olddecl)) != NULL;
	  if (newa != olda)
	    {
	      error_at (input_location, "%<gnu_inline%> attribute present on %q+D",
			newa ? newdecl : olddecl);
	      error_at (DECL_SOURCE_LOCATION (newa ? olddecl : newdecl),
			"but not here");
	    }
	}
    }
  else if (TREE_CODE (newdecl) == VAR_DECL)
    {
      /* Only variables can be thread-local, and all declarations must
	 agree on this property.  */
      if (C_DECL_THREADPRIVATE_P (olddecl) && !DECL_THREAD_LOCAL_P (newdecl))
	{
	  /* Nothing to check.  Since OLDDECL is marked threadprivate
	     and NEWDECL does not have a thread-local attribute, we
	     will merge the threadprivate attribute into NEWDECL.  */
	  ;
	}
      else if (DECL_THREAD_LOCAL_P (newdecl) != DECL_THREAD_LOCAL_P (olddecl))
	{
	  if (DECL_THREAD_LOCAL_P (newdecl))
	    error ("thread-local declaration of %q+D follows "
		   "non-thread-local declaration", newdecl);
	  else
	    error ("non-thread-local declaration of %q+D follows "
		   "thread-local declaration", newdecl);

	  locate_old_decl (olddecl);
	  return false;
	}

      /* Multiple initialized definitions are not allowed (6.9p3,5).  */
      if (DECL_INITIAL (newdecl) && DECL_INITIAL (olddecl))
	{
	  error ("redefinition of %q+D", newdecl);
	  locate_old_decl (olddecl);
	  return false;
	}

      /* Objects declared at file scope: if the first declaration had
	 external linkage (even if it was an external reference) the
	 second must have external linkage as well, or the behavior is
	 undefined.  If the first declaration had internal linkage, then
	 the second must too, or else be an external reference (in which
	 case the composite declaration still has internal linkage).
	 As for function declarations, we warn about the static-then-
	 extern case only for -Wtraditional.  See generally 6.2.2p3-5,7.  */
      if (DECL_FILE_SCOPE_P (newdecl)
	  && TREE_PUBLIC (newdecl) != TREE_PUBLIC (olddecl))
	{
	  if (DECL_EXTERNAL (newdecl))
	    {
	      if (!DECL_FILE_SCOPE_P (olddecl))
		{
		  error ("extern declaration of %q+D follows "
			 "declaration with no linkage", newdecl);
		  locate_old_decl (olddecl);
		  return false;
		}
	      else if (warn_traditional)
		{
		  warned |= warning (OPT_Wtraditional,
				     "non-static declaration of %q+D "
				     "follows static declaration", newdecl);
		}
	    }
	  else
	    {
	      if (TREE_PUBLIC (newdecl))
		error ("non-static declaration of %q+D follows "
		       "static declaration", newdecl);
	      else
		error ("static declaration of %q+D follows "
		       "non-static declaration", newdecl);

	      locate_old_decl (olddecl);
	      return false;
	    }
	}
      /* Two objects with the same name declared at the same block
	 scope must both be external references (6.7p3).  */
      else if (!DECL_FILE_SCOPE_P (newdecl))
	{
	  if (DECL_EXTERNAL (newdecl))
	    {
	      /* Extern with initializer at block scope, which will
		 already have received an error.  */
	    }
	  else if (DECL_EXTERNAL (olddecl))
	    {
	      error ("declaration of %q+D with no linkage follows "
		     "extern declaration", newdecl);
	      locate_old_decl (olddecl);
	    }
	  else
	    {
	      error ("redeclaration of %q+D with no linkage", newdecl);
	      locate_old_decl (olddecl);
	    }

	  return false;
	}

      /* C++ does not permit a decl to appear multiple times at file
	 scope.  */
      if (warn_cxx_compat
	  && DECL_FILE_SCOPE_P (newdecl)
	  && !DECL_EXTERNAL (newdecl)
	  && !DECL_EXTERNAL (olddecl))
	warned |= warning_at (DECL_SOURCE_LOCATION (newdecl),
			      OPT_Wc___compat,
			      ("duplicate declaration of %qD is "
			       "invalid in C++"),
			      newdecl);
    }

  /* warnings */
  /* All decls must agree on a visibility.  */
  if (CODE_CONTAINS_STRUCT (TREE_CODE (newdecl), TS_DECL_WITH_VIS)
      && DECL_VISIBILITY_SPECIFIED (newdecl) && DECL_VISIBILITY_SPECIFIED (olddecl)
      && DECL_VISIBILITY (newdecl) != DECL_VISIBILITY (olddecl))
    {
      warned |= warning (0, "redeclaration of %q+D with different visibility "
			 "(old visibility preserved)", newdecl);
    }

  if (TREE_CODE (newdecl) == FUNCTION_DECL)
    {
      /* Diagnose inline __attribute__ ((noinline)) which is silly.  */
      if (DECL_DECLARED_INLINE_P (newdecl)
	  && lookup_attribute ("noinline", DECL_ATTRIBUTES (olddecl)))
	warned |= warning (OPT_Wattributes,
			   "inline declaration of %qD follows "
			   "declaration with attribute noinline", newdecl);
      else if (DECL_DECLARED_INLINE_P (olddecl)
	       && lookup_attribute ("noinline", DECL_ATTRIBUTES (newdecl)))
	warned |= warning (OPT_Wattributes,
			   "declaration of %q+D with attribute "
			   "noinline follows inline declaration ", newdecl);
      else if (lookup_attribute ("noinline", DECL_ATTRIBUTES (newdecl))
	       && lookup_attribute ("always_inline", DECL_ATTRIBUTES (olddecl)))
	warned |= warning (OPT_Wattributes,
			   "declaration of %q+D with attribute "
			   "%qs follows declaration with attribute %qs",
			   newdecl, "noinline", "always_inline");
      else if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (newdecl))
	       && lookup_attribute ("noinline", DECL_ATTRIBUTES (olddecl)))
	warned |= warning (OPT_Wattributes,
			   "declaration of %q+D with attribute "
			   "%qs follows declaration with attribute %qs",
			   newdecl, "always_inline", "noinline");
      else if (lookup_attribute ("cold", DECL_ATTRIBUTES (newdecl))
	       && lookup_attribute ("hot", DECL_ATTRIBUTES (olddecl)))
	warned |= warning (OPT_Wattributes,
			   "declaration of %q+D with attribute %qs follows "
			   "declaration with attribute %qs",
			   newdecl, "cold", "hot");
      else if (lookup_attribute ("hot", DECL_ATTRIBUTES (newdecl))
	       && lookup_attribute ("cold", DECL_ATTRIBUTES (olddecl)))
	warned |= warning (OPT_Wattributes,
			   "declaration of %q+D with attribute %qs follows "
			   "declaration with attribute %qs",
			   newdecl, "hot", "cold");
    }
  else /* PARM_DECL, VAR_DECL */
    {
      /* Redeclaration of a parameter is a constraint violation (this is
	 not explicitly stated, but follows from C99 6.7p3 [no more than
	 one declaration of the same identifier with no linkage in the
	 same scope, except type tags] and 6.2.2p6 [parameters have no
	 linkage]).  We must check for a forward parameter declaration,
	 indicated by TREE_ASM_WRITTEN on the old declaration - this is
	 an extension, the mandatory diagnostic for which is handled by
	 mark_forward_parm_decls.  */

      if (TREE_CODE (newdecl) == PARM_DECL
	  && (!TREE_ASM_WRITTEN (olddecl) || TREE_ASM_WRITTEN (newdecl)))
	{
	  error ("redefinition of parameter %q+D", newdecl);
	  locate_old_decl (olddecl);
	  return false;
	}
    }

  /* Optional warning for completely redundant decls.  */
  if (!warned && !pedwarned
      && warn_redundant_decls
      /* Don't warn about a function declaration followed by a
	 definition.  */
      && !(TREE_CODE (newdecl) == FUNCTION_DECL
	   && DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl))
      /* Don't warn about redundant redeclarations of builtins.  */
      && !(TREE_CODE (newdecl) == FUNCTION_DECL
	   && !DECL_BUILT_IN (newdecl)
	   && DECL_BUILT_IN (olddecl)
	   && !C_DECL_DECLARED_BUILTIN (olddecl))
      /* Don't warn about an extern followed by a definition.  */
      && !(DECL_EXTERNAL (olddecl) && !DECL_EXTERNAL (newdecl))
      /* Don't warn about forward parameter decls.  */
      && !(TREE_CODE (newdecl) == PARM_DECL
	   && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl))
      /* Don't warn about a variable definition following a declaration.  */
      && !(TREE_CODE (newdecl) == VAR_DECL
	   && DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl)))
    {
      warned = warning (OPT_Wredundant_decls, "redundant redeclaration of %q+D",
			newdecl);
    }

  /* Report location of previous decl/defn.  */
  if (warned || pedwarned)
    locate_old_decl (olddecl);

#undef DECL_EXTERN_INLINE

  return retval;
}

/* Subroutine of duplicate_decls.  NEWDECL has been found to be
   consistent with OLDDECL, but carries new information.  Merge the
   new information into OLDDECL.  This function issues no
   diagnostics.  */

static void
merge_decls (tree newdecl, tree olddecl, tree newtype, tree oldtype)
{
  bool new_is_definition = (TREE_CODE (newdecl) == FUNCTION_DECL
			    && DECL_INITIAL (newdecl) != 0);
  bool new_is_prototype = (TREE_CODE (newdecl) == FUNCTION_DECL
			   && prototype_p (TREE_TYPE (newdecl)));
  bool old_is_prototype = (TREE_CODE (olddecl) == FUNCTION_DECL
			   && prototype_p (TREE_TYPE (olddecl)));

  /* For real parm decl following a forward decl, rechain the old decl
     in its new location and clear TREE_ASM_WRITTEN (it's not a
     forward decl anymore).  */
  if (TREE_CODE (newdecl) == PARM_DECL
      && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl))
    {
      struct c_binding *b, **here;

      for (here = &current_scope->bindings; *here; here = &(*here)->prev)
	if ((*here)->decl == olddecl)
	  goto found;
      gcc_unreachable ();

    found:
      b = *here;
      *here = b->prev;
      b->prev = current_scope->bindings;
      current_scope->bindings = b;

      TREE_ASM_WRITTEN (olddecl) = 0;
    }

  DECL_ATTRIBUTES (newdecl)
    = targetm.merge_decl_attributes (olddecl, newdecl);

  /* For typedefs use the old type, as the new type's DECL_NAME points
     at newdecl, which will be ggc_freed.  */
  if (TREE_CODE (newdecl) == TYPE_DECL)
    {
      /* But NEWTYPE might have an attribute, honor that.  */
      tree tem = newtype;
      newtype = oldtype;

      if (TYPE_USER_ALIGN (tem))
	{
	  if (TYPE_ALIGN (tem) > TYPE_ALIGN (newtype))
	    TYPE_ALIGN (newtype) = TYPE_ALIGN (tem);
	  TYPE_USER_ALIGN (newtype) = true;
	}

      /* And remove the new type from the variants list.
*/ if (TYPE_NAME (TREE_TYPE (newdecl)) == newdecl) { tree remove = TREE_TYPE (newdecl); for (tree t = TYPE_MAIN_VARIANT (remove); ; t = TYPE_NEXT_VARIANT (t)) if (TYPE_NEXT_VARIANT (t) == remove) { TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (remove); break; } } } /* Merge the data types specified in the two decls. */ TREE_TYPE (newdecl) = TREE_TYPE (olddecl) = composite_type (newtype, oldtype); /* Lay the type out, unless already done. */ if (!comptypes (oldtype, TREE_TYPE (newdecl))) { if (TREE_TYPE (newdecl) != error_mark_node) layout_type (TREE_TYPE (newdecl)); if (TREE_CODE (newdecl) != FUNCTION_DECL && TREE_CODE (newdecl) != TYPE_DECL && TREE_CODE (newdecl) != CONST_DECL) layout_decl (newdecl, 0); } else { /* Since the type is OLDDECL's, make OLDDECL's size go with. */ DECL_SIZE (newdecl) = DECL_SIZE (olddecl); DECL_SIZE_UNIT (newdecl) = DECL_SIZE_UNIT (olddecl); DECL_MODE (newdecl) = DECL_MODE (olddecl); if (DECL_ALIGN (olddecl) > DECL_ALIGN (newdecl)) { DECL_ALIGN (newdecl) = DECL_ALIGN (olddecl); DECL_USER_ALIGN (newdecl) |= DECL_USER_ALIGN (olddecl); } } /* Keep the old rtl since we can safely use it. */ if (HAS_RTL_P (olddecl)) COPY_DECL_RTL (olddecl, newdecl); /* Merge the type qualifiers. */ if (TREE_READONLY (newdecl)) TREE_READONLY (olddecl) = 1; if (TREE_THIS_VOLATILE (newdecl)) TREE_THIS_VOLATILE (olddecl) = 1; /* Merge deprecatedness. */ if (TREE_DEPRECATED (newdecl)) TREE_DEPRECATED (olddecl) = 1; /* If a decl is in a system header and the other isn't, keep the one on the system header. Otherwise, keep source location of definition rather than declaration and of prototype rather than non-prototype unless that prototype is built-in. 
*/ if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS) && DECL_IN_SYSTEM_HEADER (olddecl) && !DECL_IN_SYSTEM_HEADER (newdecl) ) DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl); else if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS) && DECL_IN_SYSTEM_HEADER (newdecl) && !DECL_IN_SYSTEM_HEADER (olddecl)) DECL_SOURCE_LOCATION (olddecl) = DECL_SOURCE_LOCATION (newdecl); else if ((DECL_INITIAL (newdecl) == 0 && DECL_INITIAL (olddecl) != 0) || (old_is_prototype && !new_is_prototype && !C_DECL_BUILTIN_PROTOTYPE (olddecl))) DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl); /* Merge the initialization information. */ if (DECL_INITIAL (newdecl) == 0) DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl); /* Merge the threadprivate attribute. */ if (TREE_CODE (olddecl) == VAR_DECL && C_DECL_THREADPRIVATE_P (olddecl)) C_DECL_THREADPRIVATE_P (newdecl) = 1; if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS)) { /* Copy the assembler name. Currently, it can only be defined in the prototype. 
*/ COPY_DECL_ASSEMBLER_NAME (olddecl, newdecl); /* Use visibility of whichever declaration had it specified */ if (DECL_VISIBILITY_SPECIFIED (olddecl)) { DECL_VISIBILITY (newdecl) = DECL_VISIBILITY (olddecl); DECL_VISIBILITY_SPECIFIED (newdecl) = 1; } if (TREE_CODE (newdecl) == FUNCTION_DECL) { DECL_STATIC_CONSTRUCTOR(newdecl) |= DECL_STATIC_CONSTRUCTOR(olddecl); DECL_STATIC_DESTRUCTOR (newdecl) |= DECL_STATIC_DESTRUCTOR (olddecl); DECL_NO_LIMIT_STACK (newdecl) |= DECL_NO_LIMIT_STACK (olddecl); DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (newdecl) |= DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (olddecl); TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl); DECL_IS_MALLOC (newdecl) |= DECL_IS_MALLOC (olddecl); DECL_IS_OPERATOR_NEW (newdecl) |= DECL_IS_OPERATOR_NEW (olddecl); TREE_READONLY (newdecl) |= TREE_READONLY (olddecl); DECL_PURE_P (newdecl) |= DECL_PURE_P (olddecl); DECL_IS_NOVOPS (newdecl) |= DECL_IS_NOVOPS (olddecl); } /* Merge the storage class information. */ merge_weak (newdecl, olddecl); /* For functions, static overrides non-static. */ if (TREE_CODE (newdecl) == FUNCTION_DECL) { TREE_PUBLIC (newdecl) &= TREE_PUBLIC (olddecl); /* This is since we don't automatically copy the attributes of NEWDECL into OLDDECL. */ TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl); /* If this clears `static', clear it in the identifier too. */ if (!TREE_PUBLIC (olddecl)) TREE_PUBLIC (DECL_NAME (olddecl)) = 0; } } /* In c99, 'extern' declaration before (or after) 'inline' means this function is not DECL_EXTERNAL, unless 'gnu_inline' attribute is present. 
*/ if (TREE_CODE (newdecl) == FUNCTION_DECL && !flag_gnu89_inline && (DECL_DECLARED_INLINE_P (newdecl) || DECL_DECLARED_INLINE_P (olddecl)) && (!DECL_DECLARED_INLINE_P (newdecl) || !DECL_DECLARED_INLINE_P (olddecl) || !DECL_EXTERNAL (olddecl)) && DECL_EXTERNAL (newdecl) && !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl)) && !current_function_decl) DECL_EXTERNAL (newdecl) = 0; /* An inline definition following a static declaration is not DECL_EXTERNAL. */ if (new_is_definition && (DECL_DECLARED_INLINE_P (newdecl) || DECL_DECLARED_INLINE_P (olddecl)) && !TREE_PUBLIC (olddecl)) DECL_EXTERNAL (newdecl) = 0; if (DECL_EXTERNAL (newdecl)) { TREE_STATIC (newdecl) = TREE_STATIC (olddecl); DECL_EXTERNAL (newdecl) = DECL_EXTERNAL (olddecl); /* An extern decl does not override previous storage class. */ TREE_PUBLIC (newdecl) = TREE_PUBLIC (olddecl); if (!DECL_EXTERNAL (newdecl)) { DECL_CONTEXT (newdecl) = DECL_CONTEXT (olddecl); DECL_COMMON (newdecl) = DECL_COMMON (olddecl); } } else { TREE_STATIC (olddecl) = TREE_STATIC (newdecl); TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl); } if (TREE_CODE (newdecl) == FUNCTION_DECL) { /* If we're redefining a function previously defined as extern inline, make sure we emit debug info for the inline before we throw it away, in case it was inlined into a function that hasn't been written out yet. */ if (new_is_definition && DECL_INITIAL (olddecl)) /* The new defn must not be inline. */ DECL_UNINLINABLE (newdecl) = 1; else { /* If either decl says `inline', this fn is inline, unless its definition was passed already. 
*/ if (DECL_DECLARED_INLINE_P (newdecl) || DECL_DECLARED_INLINE_P (olddecl)) DECL_DECLARED_INLINE_P (newdecl) = 1; DECL_UNINLINABLE (newdecl) = DECL_UNINLINABLE (olddecl) = (DECL_UNINLINABLE (newdecl) || DECL_UNINLINABLE (olddecl)); DECL_DISREGARD_INLINE_LIMITS (newdecl) = DECL_DISREGARD_INLINE_LIMITS (olddecl) = (DECL_DISREGARD_INLINE_LIMITS (newdecl) || DECL_DISREGARD_INLINE_LIMITS (olddecl)); } if (DECL_BUILT_IN (olddecl)) { /* If redeclaring a builtin function, it stays built in. But it gets tagged as having been declared. */ DECL_BUILT_IN_CLASS (newdecl) = DECL_BUILT_IN_CLASS (olddecl); DECL_FUNCTION_CODE (newdecl) = DECL_FUNCTION_CODE (olddecl); C_DECL_DECLARED_BUILTIN (newdecl) = 1; if (new_is_prototype) { C_DECL_BUILTIN_PROTOTYPE (newdecl) = 0; if (DECL_BUILT_IN_CLASS (newdecl) == BUILT_IN_NORMAL) { enum built_in_function fncode = DECL_FUNCTION_CODE (newdecl); switch (fncode) { /* If a compatible prototype of these builtin functions is seen, assume the runtime implements it with the expected semantics. */ case BUILT_IN_STPCPY: if (builtin_decl_explicit_p (fncode)) set_builtin_decl_implicit_p (fncode, true); break; default: if (builtin_decl_explicit_p (fncode)) set_builtin_decl_declared_p (fncode, true); break; } } } else C_DECL_BUILTIN_PROTOTYPE (newdecl) = C_DECL_BUILTIN_PROTOTYPE (olddecl); } /* Preserve function specific target and optimization options */ if (DECL_FUNCTION_SPECIFIC_TARGET (olddecl) && !DECL_FUNCTION_SPECIFIC_TARGET (newdecl)) DECL_FUNCTION_SPECIFIC_TARGET (newdecl) = DECL_FUNCTION_SPECIFIC_TARGET (olddecl); if (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl) && !DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl)) DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl) = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl); /* Also preserve various other info from the definition. 
*/ if (!new_is_definition) { tree t; DECL_RESULT (newdecl) = DECL_RESULT (olddecl); DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl); DECL_STRUCT_FUNCTION (newdecl) = DECL_STRUCT_FUNCTION (olddecl); DECL_SAVED_TREE (newdecl) = DECL_SAVED_TREE (olddecl); DECL_ARGUMENTS (newdecl) = copy_list (DECL_ARGUMENTS (olddecl)); for (t = DECL_ARGUMENTS (newdecl); t ; t = DECL_CHAIN (t)) DECL_CONTEXT (t) = newdecl; /* See if we've got a function to instantiate from. */ if (DECL_SAVED_TREE (olddecl)) DECL_ABSTRACT_ORIGIN (newdecl) = DECL_ABSTRACT_ORIGIN (olddecl); } } /* Merge the USED information. */ if (TREE_USED (olddecl)) TREE_USED (newdecl) = 1; else if (TREE_USED (newdecl)) TREE_USED (olddecl) = 1; if (TREE_CODE (olddecl) == VAR_DECL || TREE_CODE (olddecl) == PARM_DECL) DECL_READ_P (newdecl) |= DECL_READ_P (olddecl); if (DECL_PRESERVE_P (olddecl)) DECL_PRESERVE_P (newdecl) = 1; else if (DECL_PRESERVE_P (newdecl)) DECL_PRESERVE_P (olddecl) = 1; /* Copy most of the decl-specific fields of NEWDECL into OLDDECL. But preserve OLDDECL's DECL_UID, DECL_CONTEXT and DECL_ARGUMENTS (if appropriate). 
*/ { unsigned olddecl_uid = DECL_UID (olddecl); tree olddecl_context = DECL_CONTEXT (olddecl); tree olddecl_arguments = NULL; if (TREE_CODE (olddecl) == FUNCTION_DECL) olddecl_arguments = DECL_ARGUMENTS (olddecl); memcpy ((char *) olddecl + sizeof (struct tree_common), (char *) newdecl + sizeof (struct tree_common), sizeof (struct tree_decl_common) - sizeof (struct tree_common)); DECL_USER_ALIGN (olddecl) = DECL_USER_ALIGN (newdecl); switch (TREE_CODE (olddecl)) { case FUNCTION_DECL: case VAR_DECL: { struct symtab_node *snode = olddecl->decl_with_vis.symtab_node; memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common)); olddecl->decl_with_vis.symtab_node = snode; if ((DECL_EXTERNAL (olddecl) || TREE_PUBLIC (olddecl) || TREE_STATIC (olddecl)) && DECL_SECTION_NAME (newdecl) != NULL) set_decl_section_name (olddecl, DECL_SECTION_NAME (newdecl)); /* This isn't quite correct for something like int __thread x attribute ((tls_model ("local-exec"))); extern int __thread x; as we'll lose the "local-exec" model. 
*/ if (TREE_CODE (olddecl) == VAR_DECL && DECL_THREAD_LOCAL_P (newdecl)) set_decl_tls_model (olddecl, DECL_TLS_MODEL (newdecl)); break; } case FIELD_DECL: case PARM_DECL: case LABEL_DECL: case RESULT_DECL: case CONST_DECL: case TYPE_DECL: memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common)); break; default: memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), sizeof (struct tree_decl_non_common) - sizeof (struct tree_decl_common)); } DECL_UID (olddecl) = olddecl_uid; DECL_CONTEXT (olddecl) = olddecl_context; if (TREE_CODE (olddecl) == FUNCTION_DECL) DECL_ARGUMENTS (olddecl) = olddecl_arguments; } /* If OLDDECL had its DECL_RTL instantiated, re-invoke make_decl_rtl so that encode_section_info has a chance to look at the new decl flags and attributes. */ if (DECL_RTL_SET_P (olddecl) && (TREE_CODE (olddecl) == FUNCTION_DECL || (TREE_CODE (olddecl) == VAR_DECL && TREE_STATIC (olddecl)))) make_decl_rtl (olddecl); } /* Handle when a new declaration NEWDECL has the same name as an old one OLDDECL in the same binding contour. Prints an error message if appropriate. If safely possible, alter OLDDECL to look like NEWDECL, and return true. Otherwise, return false. */ static bool duplicate_decls (tree newdecl, tree olddecl) { tree newtype = NULL, oldtype = NULL; if (!diagnose_mismatched_decls (newdecl, olddecl, &newtype, &oldtype)) { /* Avoid `unused variable' and other warnings for OLDDECL. */ TREE_NO_WARNING (olddecl) = 1; return false; } merge_decls (newdecl, olddecl, newtype, oldtype); /* The NEWDECL will no longer be needed. Before releasing the node, be sure to remove function from symbol table that might have been inserted there to record comdat group. Be sure to however do not free DECL_STRUCT_FUNCTION because this structure is shared in between NEWDECL and OLDECL. 
*/ if (TREE_CODE (newdecl) == FUNCTION_DECL) DECL_STRUCT_FUNCTION (newdecl) = NULL; if (TREE_CODE (newdecl) == FUNCTION_DECL || TREE_CODE (newdecl) == VAR_DECL) { struct symtab_node *snode = symtab_node::get (newdecl); if (snode) snode->remove (); } ggc_free (newdecl); return true; } /* Check whether decl-node NEW_DECL shadows an existing declaration. */ static void warn_if_shadowing (tree new_decl) { struct c_binding *b; /* Shadow warnings wanted? */ if (!warn_shadow /* No shadow warnings for internally generated vars. */ || DECL_IS_BUILTIN (new_decl) /* No shadow warnings for vars made for inlining. */ || DECL_FROM_INLINE (new_decl)) return; /* Is anything being shadowed? Invisible decls do not count. */ for (b = I_SYMBOL_BINDING (DECL_NAME (new_decl)); b; b = b->shadowed) if (b->decl && b->decl != new_decl && !b->invisible && (b->decl == error_mark_node || diagnostic_report_warnings_p (global_dc, DECL_SOURCE_LOCATION (b->decl)))) { tree old_decl = b->decl; bool warned = false; if (old_decl == error_mark_node) { warning (OPT_Wshadow, "declaration of %q+D shadows previous " "non-variable", new_decl); break; } else if (TREE_CODE (old_decl) == PARM_DECL) warned = warning (OPT_Wshadow, "declaration of %q+D shadows a parameter", new_decl); else if (DECL_FILE_SCOPE_P (old_decl)) { /* Do not warn if a variable shadows a function, unless the variable is a function or a pointer-to-function. 
*/
	    if (TREE_CODE (old_decl) == FUNCTION_DECL
		&& TREE_CODE (new_decl) != FUNCTION_DECL
		&& !FUNCTION_POINTER_TYPE_P (TREE_TYPE (new_decl)))
	      continue;

	    warned = warning_at (DECL_SOURCE_LOCATION (new_decl),
				 OPT_Wshadow,
				 "declaration of %qD shadows a global "
				 "declaration",
				 new_decl);
	  }
	else if (TREE_CODE (old_decl) == FUNCTION_DECL
		 && DECL_BUILT_IN (old_decl))
	  {
	    warning (OPT_Wshadow, "declaration of %q+D shadows "
		     "a built-in function", new_decl);
	    break;
	  }
	else
	  warned = warning (OPT_Wshadow, "declaration of %q+D shadows a "
			    "previous local", new_decl);

	if (warned)
	  inform (DECL_SOURCE_LOCATION (old_decl),
		  "shadowed declaration is here");

	break;
      }
}

/* Record a decl-node X as belonging to the current lexical scope.
   Check for errors (such as an incompatible declaration for the same
   name already seen in the same scope).

   Returns either X or an old decl for the same name.
   If an old decl is returned, it may have been smashed
   to agree with what X says.  */

tree
pushdecl (tree x)
{
  tree name = DECL_NAME (x);
  struct c_scope *scope = current_scope;
  struct c_binding *b;
  bool nested = false;
  location_t locus = DECL_SOURCE_LOCATION (x);

  /* Must set DECL_CONTEXT for everything not at file scope or
     DECL_FILE_SCOPE_P won't work.  Local externs don't count
     unless they have initializers (which generate code).  */
  if (current_function_decl
      && ((TREE_CODE (x) != FUNCTION_DECL && TREE_CODE (x) != VAR_DECL)
	  || DECL_INITIAL (x) || !DECL_EXTERNAL (x)))
    DECL_CONTEXT (x) = current_function_decl;

  /* Anonymous decls are just inserted in the scope.  */
  if (!name)
    {
      bind (name, x, scope, /*invisible=*/false, /*nested=*/false,
	    locus);
      return x;
    }

  /* First, see if there is another declaration with the same name in
     the current scope.  If there is, duplicate_decls may do all the
     work for us.  If duplicate_decls returns false, that indicates
     two incompatible decls in the same scope; we are to silently
     replace the old one (duplicate_decls has issued all appropriate
     diagnostics).  In particular, we should not consider possible
     duplicates in the external scope, or shadowing.  */
  b = I_SYMBOL_BINDING (name);
  if (b && B_IN_SCOPE (b, scope))
    {
      struct c_binding *b_ext, *b_use;
      tree type = TREE_TYPE (x);
      tree visdecl = b->decl;
      tree vistype = TREE_TYPE (visdecl);
      if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
	  && COMPLETE_TYPE_P (TREE_TYPE (x)))
	b->inner_comp = false;
      b_use = b;
      b_ext = b;
      /* If this is an external linkage declaration, we should check
	 for compatibility with the type in the external scope before
	 setting the type at this scope based on the visible
	 information only.  */
      if (TREE_PUBLIC (x) && TREE_PUBLIC (visdecl))
	{
	  /* Walk out to the binding in the external scope, if any.  */
	  while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext))
	    b_ext = b_ext->shadowed;
	  if (b_ext)
	    {
	      b_use = b_ext;
	      if (b_use->u.type)
		TREE_TYPE (b_use->decl) = b_use->u.type;
	    }
	}
      if (duplicate_decls (x, b_use->decl))
	{
	  if (b_use != b)
	    {
	      /* Save the updated type in the external scope and
		 restore the proper type for this scope.  */
	      tree thistype;
	      if (comptypes (vistype, type))
		thistype = composite_type (vistype, type);
	      else
		thistype = TREE_TYPE (b_use->decl);
	      b_use->u.type = TREE_TYPE (b_use->decl);
	      if (TREE_CODE (b_use->decl) == FUNCTION_DECL
		  && DECL_BUILT_IN (b_use->decl))
		thistype
		  = build_type_attribute_variant (thistype,
						  TYPE_ATTRIBUTES
						  (b_use->u.type));
	      TREE_TYPE (b_use->decl) = thistype;
	    }
	  return b_use->decl;
	}
      else
	goto skip_external_and_shadow_checks;
    }

  /* All declarations with external linkage, and all external
     references, go in the external scope, no matter what scope is
     current.  However, the binding in that scope is ignored for
     purposes of normal name lookup.  A separate binding structure is
     created in the requested scope; this governs the normal
     visibility of the symbol.

     The binding in the externals scope is used exclusively for
     detecting duplicate declarations of the same object, no matter
     what scope they are in; this is what we do here.  (C99 6.2.7p2:
     All declarations that refer to the same object or function shall
     have compatible type; otherwise, the behavior is undefined.)  */
  if (DECL_EXTERNAL (x) || scope == file_scope)
    {
      tree type = TREE_TYPE (x);
      tree vistype = 0;
      tree visdecl = 0;
      bool type_saved = false;
      if (b && !B_IN_EXTERNAL_SCOPE (b)
	  && (TREE_CODE (b->decl) == FUNCTION_DECL
	      || TREE_CODE (b->decl) == VAR_DECL)
	  && DECL_FILE_SCOPE_P (b->decl))
	{
	  visdecl = b->decl;
	  vistype = TREE_TYPE (visdecl);
	}
      if (scope != file_scope
	  && !DECL_IN_SYSTEM_HEADER (x))
	warning (OPT_Wnested_externs, "nested extern declaration of %qD", x);

      while (b && !B_IN_EXTERNAL_SCOPE (b))
	{
	  /* If this decl might be modified, save its type.  This is
	     done here rather than when the decl is first bound
	     because the type may change after first binding, through
	     being completed or through attributes being added.  If we
	     encounter multiple such decls, only the first should have
	     its type saved; the others will already have had their
	     proper types saved and the types will not have changed as
	     their scopes will not have been re-entered.  */
	  if (DECL_P (b->decl) && DECL_FILE_SCOPE_P (b->decl) && !type_saved)
	    {
	      b->u.type = TREE_TYPE (b->decl);
	      type_saved = true;
	    }
	  if (B_IN_FILE_SCOPE (b)
	      && TREE_CODE (b->decl) == VAR_DECL
	      && TREE_STATIC (b->decl)
	      && TREE_CODE (TREE_TYPE (b->decl)) == ARRAY_TYPE
	      && !TYPE_DOMAIN (TREE_TYPE (b->decl))
	      && TREE_CODE (type) == ARRAY_TYPE
	      && TYPE_DOMAIN (type)
	      && TYPE_MAX_VALUE (TYPE_DOMAIN (type))
	      && !integer_zerop (TYPE_MAX_VALUE (TYPE_DOMAIN (type))))
	    {
	      /* Array type completed in inner scope, which should be
		 diagnosed if the completion does not have size 1 and
		 it does not get completed in the file scope.  */
	      b->inner_comp = true;
	    }
	  b = b->shadowed;
	}

      /* If a matching external declaration has been found, set its
	 type to the composite of all the types of that declaration.
	 After the consistency checks, it will be reset to the
	 composite of the visible types only.  */
      if (b && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl))
	  && b->u.type)
	TREE_TYPE (b->decl) = b->u.type;

      /* The point of the same_translation_unit_p check here is,
	 we want to detect a duplicate decl for a construct like
	 foo() { extern bar(); } ... static bar();  but not if
	 they are in different translation units.  In any case,
	 the static does not go in the externals scope.  */
      if (b
	  && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl))
	  && duplicate_decls (x, b->decl))
	{
	  tree thistype;
	  if (vistype)
	    {
	      if (comptypes (vistype, type))
		thistype = composite_type (vistype, type);
	      else
		thistype = TREE_TYPE (b->decl);
	    }
	  else
	    thistype = type;
	  b->u.type = TREE_TYPE (b->decl);
	  if (TREE_CODE (b->decl) == FUNCTION_DECL && DECL_BUILT_IN (b->decl))
	    thistype
	      = build_type_attribute_variant (thistype,
					      TYPE_ATTRIBUTES (b->u.type));
	  TREE_TYPE (b->decl) = thistype;
	  bind (name, b->decl, scope, /*invisible=*/false, /*nested=*/true,
		locus);
	  return b->decl;
	}
      else if (TREE_PUBLIC (x))
	{
	  if (visdecl && !b && duplicate_decls (x, visdecl))
	    {
	      /* An external declaration at block scope referring to a
		 visible entity with internal linkage.  The composite
		 type will already be correct for this scope, so we
		 just need to fall through to make the declaration in
		 this scope.  */
	      nested = true;
	      x = visdecl;
	    }
	  else
	    {
	      bind (name, x, external_scope, /*invisible=*/true,
		    /*nested=*/false, locus);
	      nested = true;
	    }
	}
    }

  if (TREE_CODE (x) != PARM_DECL)
    warn_if_shadowing (x);

 skip_external_and_shadow_checks:
  if (TREE_CODE (x) == TYPE_DECL)
    {
      /* So this is a typedef, set its underlying type.  */
      set_underlying_type (x);

      /* If X is a typedef defined in the current function, record it
	 for the purpose of implementing the -Wunused-local-typedefs
	 warning.  */
      record_locally_defined_typedef (x);
    }

  bind (name, x, scope, /*invisible=*/false, nested, locus);

  /* If x's type is incomplete because it's based on a
     structure or union which has not yet been fully declared,
     attach it to that structure or union type, so we can go
     back and complete the variable declaration later, if the
     structure or union gets fully declared.

     If the input is erroneous, we can have error_mark in the type
     slot (e.g. "f(void a, ...)") - that doesn't count as an
     incomplete type.  */
  if (TREE_TYPE (x) != error_mark_node
      && !COMPLETE_TYPE_P (TREE_TYPE (x)))
    {
      tree element = TREE_TYPE (x);

      /* Strip array layers to find the underlying record/union.  */
      while (TREE_CODE (element) == ARRAY_TYPE)
	element = TREE_TYPE (element);
      element = TYPE_MAIN_VARIANT (element);

      if ((TREE_CODE (element) == RECORD_TYPE
	   || TREE_CODE (element) == UNION_TYPE)
	  && (TREE_CODE (x) != TYPE_DECL
	      || TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE)
	  && !COMPLETE_TYPE_P (element))
	C_TYPE_INCOMPLETE_VARS (element)
	  = tree_cons (NULL_TREE, x, C_TYPE_INCOMPLETE_VARS (element));
    }
  return x;
}

/* Record X as belonging to file scope.
   This is used only internally by the Objective-C front end,
   and is limited to its needs.  duplicate_decls is not called;
   if there is any preexisting decl for this identifier, it is an ICE.
*/

tree
pushdecl_top_level (tree x)
{
  tree name;
  bool nested = false;
  gcc_assert (TREE_CODE (x) == VAR_DECL || TREE_CODE (x) == CONST_DECL);

  name = DECL_NAME (x);

  gcc_assert (TREE_CODE (x) == CONST_DECL || !I_SYMBOL_BINDING (name));

  if (TREE_PUBLIC (x))
    {
      bind (name, x, external_scope, /*invisible=*/true, /*nested=*/false,
	    UNKNOWN_LOCATION);
      nested = true;
    }
  if (file_scope)
    bind (name, x, file_scope, /*invisible=*/false, nested, UNKNOWN_LOCATION);

  return x;
}

/* Diagnose an implicit declaration of function ID at LOC; if OLDDECL
   is non-null and a diagnostic was issued, also point at the previous
   declaration.  In C99 mode this is a pedwarn, otherwise a warning.  */

static void
implicit_decl_warning (location_t loc, tree id, tree olddecl)
{
  if (warn_implicit_function_declaration)
    {
      bool warned;

      if (flag_isoc99)
	warned = pedwarn (loc, OPT_Wimplicit_function_declaration,
			  "implicit declaration of function %qE", id);
      else
	warned = warning_at (loc, OPT_Wimplicit_function_declaration,
			     G_("implicit declaration of function %qE"), id);
      if (olddecl && warned)
	locate_old_decl (olddecl);
    }
}

/* This function represents mapping of a function code FCODE
   to its respective header.  */

static const char *
header_for_builtin_fn (enum built_in_function fcode)
{
  switch (fcode)
    {
    CASE_FLT_FN (BUILT_IN_ACOS):
    CASE_FLT_FN (BUILT_IN_ACOSH):
    CASE_FLT_FN (BUILT_IN_ASIN):
    CASE_FLT_FN (BUILT_IN_ASINH):
    CASE_FLT_FN (BUILT_IN_ATAN):
    CASE_FLT_FN (BUILT_IN_ATANH):
    CASE_FLT_FN (BUILT_IN_ATAN2):
    CASE_FLT_FN (BUILT_IN_CBRT):
    CASE_FLT_FN (BUILT_IN_CEIL):
    CASE_FLT_FN (BUILT_IN_COPYSIGN):
    CASE_FLT_FN (BUILT_IN_COS):
    CASE_FLT_FN (BUILT_IN_COSH):
    CASE_FLT_FN (BUILT_IN_ERF):
    CASE_FLT_FN (BUILT_IN_ERFC):
    CASE_FLT_FN (BUILT_IN_EXP):
    CASE_FLT_FN (BUILT_IN_EXP2):
    CASE_FLT_FN (BUILT_IN_EXPM1):
    CASE_FLT_FN (BUILT_IN_FABS):
    CASE_FLT_FN (BUILT_IN_FDIM):
    CASE_FLT_FN (BUILT_IN_FLOOR):
    CASE_FLT_FN (BUILT_IN_FMA):
    CASE_FLT_FN (BUILT_IN_FMAX):
    CASE_FLT_FN (BUILT_IN_FMIN):
    CASE_FLT_FN (BUILT_IN_FMOD):
    CASE_FLT_FN (BUILT_IN_FREXP):
    CASE_FLT_FN (BUILT_IN_HYPOT):
    CASE_FLT_FN (BUILT_IN_ILOGB):
    CASE_FLT_FN (BUILT_IN_LDEXP):
    CASE_FLT_FN (BUILT_IN_LGAMMA):
    CASE_FLT_FN (BUILT_IN_LLRINT):
    CASE_FLT_FN (BUILT_IN_LLROUND):
    CASE_FLT_FN (BUILT_IN_LOG):
    CASE_FLT_FN (BUILT_IN_LOG10):
    CASE_FLT_FN (BUILT_IN_LOG1P):
    CASE_FLT_FN (BUILT_IN_LOG2):
    CASE_FLT_FN (BUILT_IN_LOGB):
    CASE_FLT_FN (BUILT_IN_LRINT):
    CASE_FLT_FN (BUILT_IN_LROUND):
    CASE_FLT_FN (BUILT_IN_MODF):
    CASE_FLT_FN (BUILT_IN_NAN):
    CASE_FLT_FN (BUILT_IN_NEARBYINT):
    CASE_FLT_FN (BUILT_IN_NEXTAFTER):
    CASE_FLT_FN (BUILT_IN_NEXTTOWARD):
    CASE_FLT_FN (BUILT_IN_POW):
    CASE_FLT_FN (BUILT_IN_REMAINDER):
    CASE_FLT_FN (BUILT_IN_REMQUO):
    CASE_FLT_FN (BUILT_IN_RINT):
    CASE_FLT_FN (BUILT_IN_ROUND):
    CASE_FLT_FN (BUILT_IN_SCALBLN):
    CASE_FLT_FN (BUILT_IN_SCALBN):
    CASE_FLT_FN (BUILT_IN_SIN):
    CASE_FLT_FN (BUILT_IN_SINH):
    CASE_FLT_FN (BUILT_IN_SINCOS):
    CASE_FLT_FN (BUILT_IN_SQRT):
    CASE_FLT_FN (BUILT_IN_TAN):
    CASE_FLT_FN (BUILT_IN_TANH):
    CASE_FLT_FN (BUILT_IN_TGAMMA):
    CASE_FLT_FN (BUILT_IN_TRUNC):
    case BUILT_IN_ISINF:
    case BUILT_IN_ISNAN:
      return "<math.h>";
    CASE_FLT_FN (BUILT_IN_CABS):
    CASE_FLT_FN (BUILT_IN_CACOS):
    CASE_FLT_FN (BUILT_IN_CACOSH):
    CASE_FLT_FN (BUILT_IN_CARG):
    CASE_FLT_FN (BUILT_IN_CASIN):
    CASE_FLT_FN (BUILT_IN_CASINH):
    CASE_FLT_FN (BUILT_IN_CATAN):
    CASE_FLT_FN (BUILT_IN_CATANH):
    CASE_FLT_FN (BUILT_IN_CCOS):
    CASE_FLT_FN (BUILT_IN_CCOSH):
    CASE_FLT_FN (BUILT_IN_CEXP):
    CASE_FLT_FN (BUILT_IN_CIMAG):
    CASE_FLT_FN (BUILT_IN_CLOG):
    CASE_FLT_FN (BUILT_IN_CONJ):
    CASE_FLT_FN (BUILT_IN_CPOW):
    CASE_FLT_FN (BUILT_IN_CPROJ):
    CASE_FLT_FN (BUILT_IN_CREAL):
    CASE_FLT_FN (BUILT_IN_CSIN):
    CASE_FLT_FN (BUILT_IN_CSINH):
    CASE_FLT_FN (BUILT_IN_CSQRT):
    CASE_FLT_FN (BUILT_IN_CTAN):
    CASE_FLT_FN (BUILT_IN_CTANH):
      return "<complex.h>";
    case BUILT_IN_MEMCHR:
    case BUILT_IN_MEMCMP:
    case BUILT_IN_MEMCPY:
    case BUILT_IN_MEMMOVE:
    case BUILT_IN_MEMSET:
    case BUILT_IN_STRCAT:
    case BUILT_IN_STRCHR:
    case BUILT_IN_STRCMP:
    case BUILT_IN_STRCPY:
    case BUILT_IN_STRCSPN:
    case BUILT_IN_STRLEN:
    case BUILT_IN_STRNCAT:
    case BUILT_IN_STRNCMP:
    case BUILT_IN_STRNCPY:
    case BUILT_IN_STRPBRK:
    case BUILT_IN_STRRCHR:
    case BUILT_IN_STRSPN:
    case BUILT_IN_STRSTR:
      return "<string.h>";
    case BUILT_IN_FPRINTF:
    case BUILT_IN_PUTC:
    case BUILT_IN_FPUTC:
    case BUILT_IN_FPUTS:
    case BUILT_IN_FSCANF:
    case BUILT_IN_FWRITE:
    case BUILT_IN_PRINTF:
    case BUILT_IN_PUTCHAR:
    case BUILT_IN_PUTS:
    case BUILT_IN_SCANF:
    case BUILT_IN_SNPRINTF:
    case BUILT_IN_SPRINTF:
    case BUILT_IN_SSCANF:
    case BUILT_IN_VFPRINTF:
    case BUILT_IN_VFSCANF:
    case BUILT_IN_VPRINTF:
    case BUILT_IN_VSCANF:
    case BUILT_IN_VSNPRINTF:
    case BUILT_IN_VSPRINTF:
    case BUILT_IN_VSSCANF:
      return "<stdio.h>";
    case BUILT_IN_ISALNUM:
    case BUILT_IN_ISALPHA:
    case BUILT_IN_ISBLANK:
    case BUILT_IN_ISCNTRL:
    case BUILT_IN_ISDIGIT:
    case BUILT_IN_ISGRAPH:
    case BUILT_IN_ISLOWER:
    case BUILT_IN_ISPRINT:
    case BUILT_IN_ISPUNCT:
    case BUILT_IN_ISSPACE:
    case BUILT_IN_ISUPPER:
    case BUILT_IN_ISXDIGIT:
    case BUILT_IN_TOLOWER:
    case BUILT_IN_TOUPPER:
      return "<ctype.h>";
    case BUILT_IN_ISWALNUM:
    case BUILT_IN_ISWALPHA:
    case BUILT_IN_ISWBLANK:
    case BUILT_IN_ISWCNTRL:
    case BUILT_IN_ISWDIGIT:
    case BUILT_IN_ISWGRAPH:
    case BUILT_IN_ISWLOWER:
    case BUILT_IN_ISWPRINT:
    case BUILT_IN_ISWPUNCT:
    case BUILT_IN_ISWSPACE:
    case BUILT_IN_ISWUPPER:
    case BUILT_IN_ISWXDIGIT:
    case BUILT_IN_TOWLOWER:
    case BUILT_IN_TOWUPPER:
      return "<wctype.h>";
    case BUILT_IN_ABORT:
    case BUILT_IN_ABS:
    case BUILT_IN_CALLOC:
    case BUILT_IN_EXIT:
    case BUILT_IN_FREE:
    case BUILT_IN_LABS:
    case BUILT_IN_LLABS:
    case BUILT_IN_MALLOC:
    case BUILT_IN_REALLOC:
    case BUILT_IN__EXIT2:
    case BUILT_IN_ALIGNED_ALLOC:
      return "<stdlib.h>";
    case BUILT_IN_IMAXABS:
      return "<inttypes.h>";
    case BUILT_IN_STRFTIME:
      return "<time.h>";
    default:
      /* No suggested header for this builtin.  */
      return NULL;
    }
}

/* Generate an implicit declaration for identifier FUNCTIONID at LOC as a
   function of type int ().
*/

tree
implicitly_declare (location_t loc, tree functionid)
{
  struct c_binding *b;
  tree decl = 0;
  tree asmspec_tree;

  /* Look for an existing binding for FUNCTIONID in the external
     scope.  */
  for (b = I_SYMBOL_BINDING (functionid); b; b = b->shadowed)
    {
      if (B_IN_SCOPE (b, external_scope))
	{
	  decl = b->decl;
	  break;
	}
    }

  if (decl)
    {
      if (decl == error_mark_node)
	return decl;

      /* FIXME: Objective-C has weird not-really-builtin functions
	 which are supposed to be visible automatically.  They wind up
	 in the external scope because they're pushed before the file
	 scope gets created.  Catch this here and rebind them into the
	 file scope.  */
      if (!DECL_BUILT_IN (decl) && DECL_IS_BUILTIN (decl))
	{
	  bind (functionid, decl, file_scope,
		/*invisible=*/false, /*nested=*/true,
		DECL_SOURCE_LOCATION (decl));
	  return decl;
	}
      else
	{
	  tree newtype = default_function_type;
	  if (b->u.type)
	    TREE_TYPE (decl) = b->u.type;
	  /* Implicit declaration of a function already declared
	     (somehow) in a different scope, or as a built-in.
	     If this is the first time this has happened, warn;
	     then recycle the old declaration but with the new type.  */
	  if (!C_DECL_IMPLICIT (decl))
	    {
	      implicit_decl_warning (loc, functionid, decl);
	      C_DECL_IMPLICIT (decl) = 1;
	    }
	  if (DECL_BUILT_IN (decl))
	    {
	      newtype = build_type_attribute_variant (newtype,
						      TYPE_ATTRIBUTES
						      (TREE_TYPE (decl)));
	      if (!comptypes (newtype, TREE_TYPE (decl)))
		{
		  bool warned = warning_at (loc, 0, "incompatible implicit "
					    "declaration of built-in "
					    "function %qD", decl);
		  /* See if we can hint which header to include.  */
		  const char *header
		    = header_for_builtin_fn (DECL_FUNCTION_CODE (decl));
		  if (header != NULL && warned)
		    inform (loc,
			    "include %qs or provide a declaration of %qD",
			    header, decl);
		  newtype = TREE_TYPE (decl);
		}
	    }
	  else
	    {
	      if (!comptypes (newtype, TREE_TYPE (decl)))
		{
		  error_at (loc, "incompatible implicit declaration of "
			    "function %qD", decl);
		  locate_old_decl (decl);
		}
	    }
	  /* Remember the full type on the binding, then give the decl
	     the implicit int () type for this use.  */
	  b->u.type = TREE_TYPE (decl);
	  TREE_TYPE (decl) = newtype;
	  bind (functionid, decl, current_scope,
		/*invisible=*/false, /*nested=*/true,
		DECL_SOURCE_LOCATION (decl));
	  return decl;
	}
    }

  /* Not seen before.  */
  decl = build_decl (loc, FUNCTION_DECL, functionid, default_function_type);
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;
  C_DECL_IMPLICIT (decl) = 1;
  implicit_decl_warning (loc, functionid, 0);
  asmspec_tree = maybe_apply_renaming_pragma (decl, /*asmname=*/NULL);
  if (asmspec_tree)
    set_user_assembler_name (decl, TREE_STRING_POINTER (asmspec_tree));

  /* C89 says implicit declarations are in the innermost block.
     So we record the decl in the standard fashion.  */
  decl = pushdecl (decl);

  /* No need to call objc_check_decl here - it's a function type.  */
  rest_of_decl_compilation (decl, 0, 0);

  /* Write a record describing this implicit function declaration
     to the prototypes file (if requested).  */
  gen_aux_info_record (decl, 0, 1, 0);

  /* Possibly apply some default attributes to this implicit declaration.  */
  decl_attributes (&decl, NULL_TREE, 0);

  return decl;
}

/* Issue an error message for a reference to an undeclared variable
   ID, including a reference to a builtin outside of function-call
   context.  Establish a binding of the identifier to error_mark_node
   in an appropriate scope, which will suppress further errors for the
   same identifier.  The error message should be given location LOC.
*/
void
undeclared_variable (location_t loc, tree id)
{
  /* The "reported only once" note is emitted at most once per
     translation unit.  */
  static bool already = false;
  struct c_scope *scope;

  if (current_function_decl == 0)
    {
      error_at (loc, "%qE undeclared here (not in a function)", id);
      scope = current_scope;
    }
  else
    {
      if (!objc_diagnose_private_ivar (id))
	error_at (loc, "%qE undeclared (first use in this function)", id);
      if (!already)
	{
	  inform (loc, "each undeclared identifier is reported only"
		  " once for each function it appears in");
	  already = true;
	}

      /* If we are parsing old-style parameter decls, current_function_decl
	 will be nonnull but current_function_scope will be null.  */
      scope = current_function_scope ? current_function_scope : current_scope;
    }

  /* Binding ID to error_mark_node suppresses further errors for it.  */
  bind (id, error_mark_node, scope, /*invisible=*/false, /*nested=*/false,
	UNKNOWN_LOCATION);
}

/* Subroutine of lookup_label, declare_label, define_label: construct a
   LABEL_DECL with all the proper frills.  Also create a struct
   c_label_vars initialized for the current scope.  */

static tree
make_label (location_t location, tree name, bool defining,
	    struct c_label_vars **p_label_vars)
{
  tree label = build_decl (location, LABEL_DECL, name, void_type_node);
  DECL_CONTEXT (label) = current_function_decl;
  DECL_MODE (label) = VOIDmode;

  c_label_vars *label_vars = ggc_alloc<c_label_vars> ();
  label_vars->shadowed = NULL;
  set_spot_bindings (&label_vars->label_bindings, defining);
  label_vars->decls_in_scope = make_tree_vector ();
  label_vars->gotos = NULL;
  *p_label_vars = label_vars;

  return label;
}

/* Get the LABEL_DECL corresponding to identifier NAME as a label.
   Create one if none exists so far for the current function.
   This is called when a label is used in a goto expression or
   has its address taken.  */

tree
lookup_label (tree name)
{
  tree label;
  struct c_label_vars *label_vars;

  if (current_function_scope == 0)
    {
      error ("label %qE referenced outside of any function", name);
      return 0;
    }

  /* Use a label already defined or ref'd with this name, but not if
     it is inherited from a containing function and wasn't declared
     using __label__.  */
  label = I_LABEL_DECL (name);
  if (label && (DECL_CONTEXT (label) == current_function_decl
		|| C_DECLARED_LABEL_FLAG (label)))
    {
      /* If the label has only been declared, update its apparent
	 location to point here, for better diagnostics if it
	 turns out not to have been defined.  */
      if (DECL_INITIAL (label) == NULL_TREE)
	DECL_SOURCE_LOCATION (label) = input_location;
      return label;
    }

  /* No label binding for that identifier; make one.  */
  label = make_label (input_location, name, false, &label_vars);

  /* Ordinary labels go in the current function scope.  */
  bind_label (name, label, current_function_scope, label_vars);

  return label;
}

/* Issue a warning about DECL for a goto statement at GOTO_LOC going
   to LABEL.  */

static void
warn_about_goto (location_t goto_loc, tree label, tree decl)
{
  /* Jumping past a variably modified declaration is a hard error;
     skipping an ordinary initialization is only a warning.  */
  if (variably_modified_type_p (TREE_TYPE (decl), NULL_TREE))
    error_at (goto_loc,
	      "jump into scope of identifier with variably modified type");
  else
    warning_at (goto_loc, OPT_Wjump_misses_init,
		"jump skips variable initialization");
  inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label);
  inform (DECL_SOURCE_LOCATION (decl), "%qD declared here", decl);
}

/* Look up a label because of a goto statement.  This is like
   lookup_label, but also issues any appropriate warnings.  */

tree
lookup_label_for_goto (location_t loc, tree name)
{
  tree label;
  struct c_label_vars *label_vars;
  unsigned int ix;
  tree decl;

  label = lookup_label (name);
  if (label == NULL_TREE)
    return NULL_TREE;

  /* If we are jumping to a different function, we can't issue any
     useful warnings.
*/ if (DECL_CONTEXT (label) != current_function_decl) { gcc_assert (C_DECLARED_LABEL_FLAG (label)); return label; } label_vars = I_LABEL_BINDING (name)->u.label; /* If the label has not yet been defined, then push this goto on a list for possible later warnings. */ if (label_vars->label_bindings.scope == NULL) { c_goto_bindings *g = ggc_alloc<c_goto_bindings> (); g->loc = loc; set_spot_bindings (&g->goto_bindings, true); vec_safe_push (label_vars->gotos, g); return label; } /* If there are any decls in label_vars->decls_in_scope, then this goto has missed the declaration of the decl. This happens for a case like int i = 1; lab: ... goto lab; Issue a warning or error. */ FOR_EACH_VEC_SAFE_ELT (label_vars->decls_in_scope, ix, decl) warn_about_goto (loc, label, decl); if (label_vars->label_bindings.left_stmt_expr) { error_at (loc, "jump into statement expression"); inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label); } return label; } /* Make a label named NAME in the current function, shadowing silently any that may be inherited from containing functions or containing scopes. This is called for __label__ declarations. */ tree declare_label (tree name) { struct c_binding *b = I_LABEL_BINDING (name); tree label; struct c_label_vars *label_vars; /* Check to make sure that the label hasn't already been declared at this scope */ if (b && B_IN_CURRENT_SCOPE (b)) { error ("duplicate label declaration %qE", name); locate_old_decl (b->decl); /* Just use the previous declaration. */ return b->decl; } label = make_label (input_location, name, false, &label_vars); C_DECLARED_LABEL_FLAG (label) = 1; /* Declared labels go in the current scope. */ bind_label (name, label, current_scope, label_vars); return label; } /* When we define a label, issue any appropriate warnings if there are any gotos earlier in the function which jump to this label. 
*/

static void
check_earlier_gotos (tree label, struct c_label_vars* label_vars)
{
  unsigned int ix;
  struct c_goto_bindings *g;

  FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
    {
      struct c_binding *b;
      struct c_scope *scope;

      /* We have a goto to this label.  The goto is going forward.  In
	 g->scope, the goto is going to skip any binding which was
	 defined after g->bindings_in_scope.  */
      if (g->goto_bindings.scope->has_jump_unsafe_decl)
	{
	  for (b = g->goto_bindings.scope->bindings;
	       b != g->goto_bindings.bindings_in_scope;
	       b = b->prev)
	    {
	      if (decl_jump_unsafe (b->decl))
		warn_about_goto (g->loc, label, b->decl);
	    }
	}

      /* We also need to warn about decls defined in any scopes
	 between the scope of the label and the scope of the goto.  */
      for (scope = label_vars->label_bindings.scope;
	   scope != g->goto_bindings.scope;
	   scope = scope->outer)
	{
	  gcc_assert (scope != NULL);
	  if (scope->has_jump_unsafe_decl)
	    {
	      /* In the label's own scope only the bindings that were
		 live at the label matter; in intermediate scopes every
		 binding is skipped by the goto.  */
	      if (scope == label_vars->label_bindings.scope)
		b = label_vars->label_bindings.bindings_in_scope;
	      else
		b = scope->bindings;
	      for (; b != NULL; b = b->prev)
		{
		  if (decl_jump_unsafe (b->decl))
		    warn_about_goto (g->loc, label, b->decl);
		}
	    }
	}

      if (g->goto_bindings.stmt_exprs > 0)
	{
	  error_at (g->loc, "jump into statement expression");
	  inform (DECL_SOURCE_LOCATION (label), "label %qD defined here",
		  label);
	}
    }

  /* Now that the label is defined, we will issue warnings about
     subsequent gotos to this label when we see them.  */
  vec_safe_truncate (label_vars->gotos, 0);
  label_vars->gotos = NULL;
}

/* Define a label, specifying the location in the source file.
   Return the LABEL_DECL node for the label, if the definition
   is valid.  Otherwise return 0.  */

tree
define_label (location_t location, tree name)
{
  /* Find any preexisting label with this name.  It is an error
     if that label has already been defined in this function, or
     if there is a containing function with a declared label with
     the same name.  */
  tree label = I_LABEL_DECL (name);

  if (label
      && ((DECL_CONTEXT (label) == current_function_decl
	   && DECL_INITIAL (label) != 0)
	  || (DECL_CONTEXT (label) != current_function_decl
	      && C_DECLARED_LABEL_FLAG (label))))
    {
      error_at (location, "duplicate label %qD", label);
      locate_old_decl (label);
      return 0;
    }
  else if (label && DECL_CONTEXT (label) == current_function_decl)
    {
      struct c_label_vars *label_vars = I_LABEL_BINDING (name)->u.label;

      /* The label has been used or declared already in this function,
	 but not defined.  Update its location to point to this
	 definition.  */
      DECL_SOURCE_LOCATION (label) = location;
      set_spot_bindings (&label_vars->label_bindings, true);

      /* Issue warnings as required about any goto statements from
	 earlier in the function.  */
      check_earlier_gotos (label, label_vars);
    }
  else
    {
      struct c_label_vars *label_vars;

      /* No label binding for that identifier; make one.  */
      label = make_label (location, name, true, &label_vars);

      /* Ordinary labels go in the current function scope.  */
      bind_label (name, label, current_function_scope, label_vars);
    }

  if (!in_system_header_at (input_location) && lookup_name (name))
    warning_at (location, OPT_Wtraditional,
		"traditional C lacks a separate namespace "
		"for labels, identifier %qE conflicts", name);

  /* Mark label as having been defined.  */
  DECL_INITIAL (label) = error_mark_node;
  return label;
}

/* Get the bindings for a new switch statement.  This is used to issue
   warnings as appropriate for jumps from the switch to case or
   default labels.  */

struct c_spot_bindings *
c_get_switch_bindings (void)
{
  struct c_spot_bindings *switch_bindings;

  /* Heap-allocated (XNEW, not GC) — released by
     c_release_switch_bindings below.  */
  switch_bindings = XNEW (struct c_spot_bindings);
  set_spot_bindings (switch_bindings, true);
  return switch_bindings;
}

/* Release the bindings obtained from c_get_switch_bindings once the
   switch statement is complete.  */

void
c_release_switch_bindings (struct c_spot_bindings *bindings)
{
  gcc_assert (bindings->stmt_exprs == 0 && !bindings->left_stmt_expr);
  XDELETE (bindings);
}

/* This is called at the point of a case or default label to issue
   warnings about decls as needed.
   It returns true if it found an error, not just a warning.  */

bool
c_check_switch_jump_warnings (struct c_spot_bindings *switch_bindings,
			      location_t switch_loc, location_t case_loc)
{
  bool saw_error;
  struct c_scope *scope;

  saw_error = false;
  /* Walk outward from the current scope to the scope the switch was
     seen in; any jump-unsafe decl in between is skipped by the jump
     from the switch to this case label.  */
  for (scope = current_scope;
       scope != switch_bindings->scope;
       scope = scope->outer)
    {
      struct c_binding *b;

      gcc_assert (scope != NULL);

      if (!scope->has_jump_unsafe_decl)
	continue;

      for (b = scope->bindings; b != NULL; b = b->prev)
	{
	  if (decl_jump_unsafe (b->decl))
	    {
	      if (variably_modified_type_p (TREE_TYPE (b->decl), NULL_TREE))
		{
		  saw_error = true;
		  error_at (case_loc,
			    ("switch jumps into scope of identifier with "
			     "variably modified type"));
		}
	      else
		warning_at (case_loc, OPT_Wjump_misses_init,
			    "switch jumps over variable initialization");
	      inform (switch_loc, "switch starts here");
	      inform (DECL_SOURCE_LOCATION (b->decl), "%qD declared here",
		      b->decl);
	    }
	}
    }

  if (switch_bindings->stmt_exprs > 0)
    {
      saw_error = true;
      error_at (case_loc, "switch jumps into statement expression");
      inform (switch_loc, "switch starts here");
    }

  return saw_error;
}

/* Given NAME, an IDENTIFIER_NODE,
   return the structure (or union or enum) definition for that name.
   If THISLEVEL_ONLY is nonzero, searches only the current_scope.
   CODE says which kind of type the caller wants;
   it is RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE.
   If PLOC is not NULL and this returns non-null, it sets *PLOC to the
   location where the tag was defined.
   If the wrong kind of type is found, an error is reported.  */

static tree
lookup_tag (enum tree_code code, tree name, int thislevel_only,
	    location_t *ploc)
{
  struct c_binding *b = I_TAG_BINDING (name);
  int thislevel = 0;

  if (!b || !b->decl)
    return 0;

  /* We only care about whether it's in this level if
     thislevel_only was set or it might be a type clash.  */
  if (thislevel_only || TREE_CODE (b->decl) != code)
    {
      /* For our purposes, a tag in the external scope is the same as
	 a tag in the file scope.  (Primarily relevant to Objective-C
	 and its builtin structure tags, which get pushed before the
	 file scope is created.)  */
      if (B_IN_CURRENT_SCOPE (b)
	  || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b)))
	thislevel = 1;
    }

  if (thislevel_only && !thislevel)
    return 0;

  if (TREE_CODE (b->decl) != code)
    {
      /* Definition isn't the kind we were looking for.  Record it as
	 a pending cross-reference error; it is only reported later
	 (see pending_xref_error) because `struct foo;' may legally
	 shadow a tag of another kind.  */
      pending_invalid_xref = name;
      pending_invalid_xref_location = input_location;

      /* If in the same binding level as a declaration as a tag
	 of a different type, this must not be allowed to
	 shadow that tag, so give the error immediately.
	 (For example, "struct foo; union foo;" is invalid.)  */
      if (thislevel)
	pending_xref_error ();
    }

  if (ploc != NULL)
    *ploc = b->locus;

  return b->decl;
}

/* Print an error message now
   for a recent invalid struct, union or enum cross reference.
   We don't print them immediately because they are not invalid
   when used in the `struct foo;' construct for shadowing.  */

void
pending_xref_error (void)
{
  if (pending_invalid_xref != 0)
    error_at (pending_invalid_xref_location, "%qE defined as wrong kind of tag",
	      pending_invalid_xref);
  pending_invalid_xref = 0;
}

/* Look up NAME in the current scope and its superiors
   in the namespace of variables, functions and typedefs.
   Return a ..._DECL node of some kind representing its definition,
   or return 0 if it is undefined.  */

tree
lookup_name (tree name)
{
  struct c_binding *b = I_SYMBOL_BINDING (name);
  if (b && !b->invisible)
    {
      maybe_record_typedef_use (b->decl);
      return b->decl;
    }
  return 0;
}

/* Similar to `lookup_name' but look only at the indicated scope.
   Walks the chain of shadowed bindings to find one belonging to
   SCOPE; invisible bindings are not skipped here.  */

static tree
lookup_name_in_scope (tree name, struct c_scope *scope)
{
  struct c_binding *b;

  for (b = I_SYMBOL_BINDING (name); b; b = b->shadowed)
    if (B_IN_SCOPE (b, scope))
      return b->decl;
  return 0;
}

/* Create the predefined scalar types of C,
   and some nodes representing standard constants (0, 1, (void *) 0).
   Initialize the global scope.
   Make definitions for built-in primitive functions.
*/

void
c_init_decl_processing (void)
{
  location_t save_loc = input_location;

  /* Initialize reserved words for parser.  */
  c_parse_init ();

  current_function_decl = 0;

  gcc_obstack_init (&parser_obstack);

  /* Make the externals scope.  */
  push_scope ();
  external_scope = current_scope;

  /* Declarations from c_common_nodes_and_builtins must not be associated
     with this input file, lest we get differences between using and not
     using preprocessed headers.  */
  input_location = BUILTINS_LOCATION;

  c_common_nodes_and_builtins ();

  /* In C, comparisons and TRUTH_* expressions have type int.  */
  truthvalue_type_node = integer_type_node;
  truthvalue_true_node = integer_one_node;
  truthvalue_false_node = integer_zero_node;

  /* Even in C99, which has a real boolean type.  */
  pushdecl (build_decl (UNKNOWN_LOCATION, TYPE_DECL, get_identifier ("_Bool"),
			boolean_type_node));

  /* Restore the location saved on entry, now that builtin setup is done.  */
  input_location = save_loc;

  make_fname_decl = c_make_fname_decl;
  start_fname_decls ();
}

/* Create the VAR_DECL at LOC for __FUNCTION__ etc. ID is the name to
   give the decl, NAME is the initialization string and TYPE_DEP
   indicates whether NAME depended on the type of the function.  As we
   don't yet implement delayed emission of static data, we mark the
   decl as emitted so it is not placed in the output.  Anything using
   it must therefore pull out the STRING_CST initializer directly.
   FIXME.
*/

static tree
c_make_fname_decl (location_t loc, tree id, int type_dep)
{
  const char *name = fname_as_string (type_dep);
  tree decl, type, init;
  size_t length = strlen (name);

  /* Type is `const char[length+1]' (index type covers the NUL).  */
  type = build_array_type (char_type_node,
			   build_index_type (size_int (length)));
  type = c_build_qualified_type (type, TYPE_QUAL_CONST);

  decl = build_decl (loc, VAR_DECL, id, type);

  TREE_STATIC (decl) = 1;
  TREE_READONLY (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;

  init = build_string (length + 1, name);
  /* build_string copied the bytes; release the malloc'd name.  */
  free (CONST_CAST (char *, name));
  TREE_TYPE (init) = type;
  DECL_INITIAL (decl) = init;

  TREE_USED (decl) = 1;

  if (current_function_decl
      /* For invalid programs like this:

         void foo()
         const char* p = __FUNCTION__;

	 the __FUNCTION__ is believed to appear in K&R style function
	 parameter declarator.  In that case we still don't have
	 function_scope.  */
      && (!seen_error () || current_function_scope))
    {
      DECL_CONTEXT (decl) = current_function_decl;
      bind (id, decl, current_function_scope,
	    /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION);
    }

  finish_decl (decl, loc, init, NULL_TREE, NULL_TREE);

  return decl;
}

/* Install DECL, a builtin function, in the external scope as an
   invisible binding (it becomes visible only when declared or via the
   implementation-namespace rule below).  Returns DECL.  */

tree
c_builtin_function (tree decl)
{
  tree type = TREE_TYPE (decl);
  tree   id = DECL_NAME (decl);

  const char *name = IDENTIFIER_POINTER (id);
  C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type);

  /* Should never be called on a symbol with a preexisting meaning.  */
  gcc_assert (!I_SYMBOL_BINDING (id));

  bind (id, decl, external_scope, /*invisible=*/true, /*nested=*/false,
	UNKNOWN_LOCATION);

  /* Builtins in the implementation namespace are made visible without
     needing to be explicitly declared.  See push_file_scope.  */
  if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1])))
    {
      DECL_CHAIN (decl) = visible_builtins;
      visible_builtins = decl;
    }

  return decl;
}

/* Like c_builtin_function, but the binding is visible rather than
   invisible, and installing is skipped when no external scope exists
   yet.  Returns DECL.  */

tree
c_builtin_function_ext_scope (tree decl)
{
  tree type = TREE_TYPE (decl);
  tree   id = DECL_NAME (decl);

  const char *name = IDENTIFIER_POINTER (id);
  C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type);

  if (external_scope)
    bind (id, decl, external_scope, /*invisible=*/false, /*nested=*/false,
	  UNKNOWN_LOCATION);

  /* Builtins in the implementation namespace are made visible without
     needing to be explicitly declared.  See push_file_scope.  */
  if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1])))
    {
      DECL_CHAIN (decl) = visible_builtins;
      visible_builtins = decl;
    }

  return decl;
}

/* Called when a declaration is seen that contains no names to declare.
   If its type is a reference to a structure, union or enum inherited
   from a containing scope, shadow that tag name for the current scope
   with a forward reference.
   If its type defines a new named structure or union
   or defines an enum, it is valid but we need not do anything here.
   Otherwise, it is an error.  */

void
shadow_tag (const struct c_declspecs *declspecs)
{
  shadow_tag_warned (declspecs, 0);
}

/* WARNED is 1 if we have done a pedwarn, 2 if we have done a warning,
   but no pedwarn.  */
void
shadow_tag_warned (const struct c_declspecs *declspecs, int warned)
{
  bool found_tag = false;

  if (declspecs->type && !declspecs->default_int_p && !declspecs->typedef_p)
    {
      tree value = declspecs->type;
      enum tree_code code = TREE_CODE (value);

      if (code == RECORD_TYPE || code == UNION_TYPE || code == ENUMERAL_TYPE)
	/* Used to test also that TYPE_SIZE (value) != 0.
	   That caused warning for `struct foo;' at top level in the file.  */
	{
	  tree name = TYPE_NAME (value);
	  tree t;

	  found_tag = true;

	  if (declspecs->restrict_p)
	    {
	      error ("invalid use of %<restrict%>");
	      warned = 1;
	    }

	  if (name == 0)
	    {
	      if (warned != 1 && code != ENUMERAL_TYPE)
		/* Empty unnamed enum OK */
		{
		  pedwarn (input_location, 0,
			   "unnamed struct/union that defines no instances");
		  warned = 1;
		}
	    }
	  else if (declspecs->typespec_kind != ctsk_tagdef
		   && declspecs->typespec_kind != ctsk_tagfirstref
		   && declspecs->storage_class != csc_none)
	    {
	      if (warned != 1)
		pedwarn (input_location, 0,
			 "empty declaration with storage class specifier "
			 "does not redeclare tag");
	      warned = 1;
	      pending_xref_error ();
	    }
	  else if (declspecs->typespec_kind != ctsk_tagdef
		   && declspecs->typespec_kind != ctsk_tagfirstref
		   && (declspecs->const_p
		       || declspecs->volatile_p
		       || declspecs->atomic_p
		       || declspecs->restrict_p
		       || declspecs->address_space))
	    {
	      if (warned != 1)
		pedwarn (input_location, 0,
			 "empty declaration with type qualifier "
			 "does not redeclare tag");
	      warned = 1;
	      pending_xref_error ();
	    }
	  else if (declspecs->typespec_kind != ctsk_tagdef
		   && declspecs->typespec_kind != ctsk_tagfirstref
		   && declspecs->alignas_p)
	    {
	      if (warned != 1)
		pedwarn (input_location, 0,
			 "empty declaration with %<_Alignas%> "
			 "does not redeclare tag");
	      warned = 1;
	      pending_xref_error ();
	    }
	  else
	    {
	      /* A plain tag reference: shadow it in this scope with a
		 forward reference if not already declared here.  */
	      pending_invalid_xref = 0;
	      t = lookup_tag (code, name, 1, NULL);

	      if (t == 0)
		{
		  t = make_node (code);
		  pushtag (input_location, name, t);
		}
	    }
	}
      else
	{
	  if (warned != 1 && !in_system_header_at (input_location))
	    {
	      pedwarn (input_location, 0,
		       "useless type name in empty declaration");
	      warned = 1;
	    }
	}
    }
  else if (warned != 1 && !in_system_header_at (input_location)
	   && declspecs->typedef_p)
    {
      pedwarn (input_location, 0, "useless type name in empty declaration");
      warned = 1;
    }

  pending_invalid_xref = 0;

  if (declspecs->inline_p)
    {
      error ("%<inline%> in empty declaration");
      warned = 1;
    }

  if (declspecs->noreturn_p)
    {
      error ("%<_Noreturn%> in empty declaration");
      warned = 1;
    }

  if (current_scope == file_scope && declspecs->storage_class == csc_auto)
    {
      error ("%<auto%> in file-scope empty declaration");
      warned = 1;
    }

  if (current_scope == file_scope && declspecs->storage_class == csc_register)
    {
      error ("%<register%> in file-scope empty declaration");
      warned = 1;
    }

  if (!warned && !in_system_header_at (input_location)
      && declspecs->storage_class != csc_none)
    {
      warning (0, "useless storage class specifier in empty declaration");
      warned = 2;
    }

  if (!warned && !in_system_header_at (input_location) && declspecs->thread_p)
    {
      warning (0, "useless %qs in empty declaration",
	       declspecs->thread_gnu_p ? "__thread" : "_Thread_local");
      warned = 2;
    }

  if (!warned
      && !in_system_header_at (input_location)
      && (declspecs->const_p
	  || declspecs->volatile_p
	  || declspecs->atomic_p
	  || declspecs->restrict_p
	  || declspecs->address_space))
    {
      warning (0, "useless type qualifier in empty declaration");
      warned = 2;
    }

  if (!warned && !in_system_header_at (input_location)
      && declspecs->alignas_p)
    {
      warning (0, "useless %<_Alignas%> in empty declaration");
      warned = 2;
    }

  if (warned != 1)
    {
      if (!found_tag)
	pedwarn (input_location, 0, "empty declaration");
    }
}

/* Return the qualifiers from SPECS as a bitwise OR of TYPE_QUAL_*
   bits.  SPECS represents declaration specifiers that the grammar
   only permits to contain type qualifiers and attributes.  */

int
quals_from_declspecs (const struct c_declspecs *specs)
{
  int quals = ((specs->const_p ? TYPE_QUAL_CONST : 0)
	       | (specs->volatile_p ? TYPE_QUAL_VOLATILE : 0)
	       | (specs->restrict_p ? TYPE_QUAL_RESTRICT : 0)
	       | (specs->atomic_p ? TYPE_QUAL_ATOMIC : 0)
	       | (ENCODE_QUAL_ADDR_SPACE (specs->address_space)));
  /* The grammar guarantees nothing but qualifiers/attributes got in;
     verify that here.  */
  gcc_assert (!specs->type
	      && !specs->decl_attr
	      && specs->typespec_word == cts_none
	      && specs->storage_class == csc_none
	      && !specs->typedef_p
	      && !specs->explicit_signed_p
	      && !specs->deprecated_p
	      && !specs->long_p
	      && !specs->long_long_p
	      && !specs->short_p
	      && !specs->signed_p
	      && !specs->unsigned_p
	      && !specs->complex_p
	      && !specs->inline_p
	      && !specs->noreturn_p
	      && !specs->thread_p);
  return quals;
}

/* Construct an array declarator.  LOC is the location of the
   beginning of the array (usually the opening brace).  EXPR is the
   expression inside [], or NULL_TREE.  QUALS are the type qualifiers
   inside the [] (to be applied to the pointer to which a parameter
   array is converted).  STATIC_P is true if "static" is inside the
   [], false otherwise.  VLA_UNSPEC_P is true if the array is [*], a
   VLA of unspecified length which is nevertheless a complete type,
   false otherwise.  The field for the contained declarator is left to
   be filled in by set_array_declarator_inner.
*/

struct c_declarator *
build_array_declarator (location_t loc,
			tree expr, struct c_declspecs *quals, bool static_p,
			bool vla_unspec_p)
{
  /* Allocated on the parser obstack, like all c_declarator nodes.  */
  struct c_declarator *declarator = XOBNEW (&parser_obstack,
					    struct c_declarator);
  declarator->id_loc = loc;
  declarator->kind = cdk_array;
  declarator->declarator = 0;
  declarator->u.array.dimen = expr;
  if (quals)
    {
      declarator->u.array.attrs = quals->attrs;
      declarator->u.array.quals = quals_from_declspecs (quals);
    }
  else
    {
      declarator->u.array.attrs = NULL_TREE;
      declarator->u.array.quals = 0;
    }
  declarator->u.array.static_p = static_p;
  declarator->u.array.vla_unspec_p = vla_unspec_p;
  /* Both `static' and qualifiers inside [] are C99 features.  */
  if (static_p || quals != NULL)
    pedwarn_c90 (loc, OPT_Wpedantic,
		 "ISO C90 does not support %<static%> or type "
		 "qualifiers in parameter array declarators");
  if (vla_unspec_p)
    pedwarn_c90 (loc, OPT_Wpedantic,
		 "ISO C90 does not support %<[*]%> array declarators");
  if (vla_unspec_p)
    {
      if (!current_scope->parm_flag)
	{
	  /* C99 6.7.5.2p4 */
	  error_at (loc, "%<[*]%> not allowed in other than "
		    "function prototype scope");
	  declarator->u.array.vla_unspec_p = false;
	  return NULL;
	}
      current_scope->had_vla_unspec = true;
    }
  return declarator;
}

/* Set the contained declarator of an array declarator.
   DECL is the declarator, as constructed by build_array_declarator;
   INNER is what appears on the left of the [].  */

struct c_declarator *
set_array_declarator_inner (struct c_declarator *decl,
			    struct c_declarator *inner)
{
  decl->declarator = inner;
  return decl;
}

/* INIT is a constructor that forms DECL's initializer.  If the final
   element initializes a flexible array field, add the size of that
   initializer to DECL's size.
*/

static void
add_flexible_array_elts_to_size (tree decl, tree init)
{
  tree elt, type;

  if (vec_safe_is_empty (CONSTRUCTOR_ELTS (init)))
    return;

  /* Only the last constructor element can initialize a flexible array
     member (an incomplete array type with a domain but no max value).  */
  elt = CONSTRUCTOR_ELTS (init)->last ().value;
  type = TREE_TYPE (elt);
  if (TREE_CODE (type) == ARRAY_TYPE
      && TYPE_SIZE (type) == NULL_TREE
      && TYPE_DOMAIN (type) != NULL_TREE
      && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE)
    {
      complete_array_type (&type, elt, false);
      DECL_SIZE (decl)
	= size_binop (PLUS_EXPR, DECL_SIZE (decl), TYPE_SIZE (type));
      DECL_SIZE_UNIT (decl)
	= size_binop (PLUS_EXPR, DECL_SIZE_UNIT (decl), TYPE_SIZE_UNIT (type));
    }
}

/* Decode a "typename", such as "int **", returning a ..._TYPE node.
   Set *EXPR, if EXPR not NULL, to any expression to be evaluated
   before the type name, and set *EXPR_CONST_OPERANDS, if
   EXPR_CONST_OPERANDS not NULL, to indicate whether the type name may
   appear in a constant expression.  */

tree
groktypename (struct c_type_name *type_name, tree *expr,
	      bool *expr_const_operands)
{
  tree type;
  tree attrs = type_name->specs->attrs;

  /* Detach the attributes so grokdeclarator doesn't see them; they
     are applied to the resulting type below.  */
  type_name->specs->attrs = NULL_TREE;

  type = grokdeclarator (type_name->declarator, type_name->specs, TYPENAME,
			 false, NULL, &attrs, expr, expr_const_operands,
			 DEPRECATED_NORMAL);

  /* Apply attributes.  */
  decl_attributes (&type, attrs, 0);

  return type;
}

/* Wrapper for decl_attributes that adds some implicit attributes
   to VAR_DECLs or FUNCTION_DECLs.  */

static tree
c_decl_attributes (tree *node, tree attributes, int flags)
{
  /* Add implicit "omp declare target" attribute if requested.  */
  if (current_omp_declare_target_attribute
      && ((TREE_CODE (*node) == VAR_DECL
	   && (TREE_STATIC (*node) || DECL_EXTERNAL (*node)))
	  || TREE_CODE (*node) == FUNCTION_DECL))
    {
      if (TREE_CODE (*node) == VAR_DECL
	  && ((DECL_CONTEXT (*node)
	       && TREE_CODE (DECL_CONTEXT (*node)) == FUNCTION_DECL)
	      || (current_function_decl && !DECL_EXTERNAL (*node))))
	error ("%q+D in block scope inside of declare target directive",
	       *node);
      else if (TREE_CODE (*node) == VAR_DECL
	       && !lang_hooks.types.omp_mappable_type (TREE_TYPE (*node)))
	error ("%q+D in declare target directive does not have mappable type",
	       *node);
      else
	attributes = tree_cons (get_identifier ("omp declare target"),
				NULL_TREE, attributes);
    }
  return decl_attributes (node, attributes, flags);
}

/* Decode a declarator in an ordinary declaration or data definition.
   This is called as soon as the type information and variable name
   have been parsed, before parsing the initializer if any.
   Here we create the ..._DECL node, fill in its type,
   and put it on the list of decls for the current context.
   The ..._DECL node is returned as the value.

   Exception: for arrays where the length is not specified,
   the type is left null, to be filled in by `finish_decl'.

   Function definitions do not come here; they go to start_function
   instead.  However, external and forward declarations of functions
   do go through here.  Structure field declarations are done by
   grokfield and not through here.  */

tree
start_decl (struct c_declarator *declarator, struct c_declspecs *declspecs,
	    bool initialized, tree attributes)
{
  tree decl;
  tree tem;
  tree expr = NULL_TREE;
  enum deprecated_states deprecated_state = DEPRECATED_NORMAL;

  /* An object declared as __attribute__((deprecated)) suppresses
     warnings of uses of other deprecated items.  */
  if (lookup_attribute ("deprecated", attributes))
    deprecated_state = DEPRECATED_SUPPRESS;

  decl = grokdeclarator (declarator, declspecs,
			 NORMAL, initialized, NULL, &attributes, &expr, NULL,
			 deprecated_state);
  if (!decl || decl == error_mark_node)
    return NULL_TREE;

  if (expr)
    add_stmt (fold_convert (void_type_node, expr));

  if (TREE_CODE (decl) != FUNCTION_DECL && MAIN_NAME_P (DECL_NAME (decl)))
    warning (OPT_Wmain, "%q+D is usually a function", decl);

  if (initialized)
    /* Is it valid for this decl to have an initializer at all?
       If not, set INITIALIZED to zero, which will indirectly
       tell 'finish_decl' to ignore the initializer once it is parsed.  */
    switch (TREE_CODE (decl))
      {
      case TYPE_DECL:
	error ("typedef %qD is initialized (use __typeof__ instead)", decl);
	initialized = 0;
	break;

      case FUNCTION_DECL:
	error ("function %qD is initialized like a variable", decl);
	initialized = 0;
	break;

      case PARM_DECL:
	/* DECL_INITIAL in a PARM_DECL is really DECL_ARG_TYPE.  */
	error ("parameter %qD is initialized", decl);
	initialized = 0;
	break;

      default:
	/* Don't allow initializations for incomplete types except for
	   arrays which might be completed by the initialization.  */

	/* This can happen if the array size is an undefined macro.
	   We already gave a warning, so we don't need another one.  */
	if (TREE_TYPE (decl) == error_mark_node)
	  initialized = 0;
	else if (COMPLETE_TYPE_P (TREE_TYPE (decl)))
	  {
	    /* A complete type is ok if size is fixed.  */

	    if (TREE_CODE (TYPE_SIZE (TREE_TYPE (decl))) != INTEGER_CST
		|| C_DECL_VARIABLE_SIZE (decl))
	      {
		error ("variable-sized object may not be initialized");
		initialized = 0;
	      }
	  }
	else if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE)
	  {
	    error ("variable %qD has initializer but incomplete type", decl);
	    initialized = 0;
	  }
	else if (C_DECL_VARIABLE_SIZE (decl))
	  {
	    /* Although C99 is unclear about whether incomplete arrays
	       of VLAs themselves count as VLAs, it does not make
	       sense to permit them to be initialized given that
	       ordinary VLAs may not be initialized.  */
	    error ("variable-sized object may not be initialized");
	    initialized = 0;
	  }
      }

  if (initialized)
    {
      if (current_scope == file_scope)
	TREE_STATIC (decl) = 1;

      /* Tell 'pushdecl' this is an initialized decl
	 even though we don't yet have the initializer expression.
	 Also tell 'finish_decl' it may store the real initializer.  */
      DECL_INITIAL (decl) = error_mark_node;
    }

  /* If this is a function declaration, write a record describing it to the
     prototypes file (if requested).  */

  if (TREE_CODE (decl) == FUNCTION_DECL)
    gen_aux_info_record (decl, 0, 0, prototype_p (TREE_TYPE (decl)));

  /* ANSI specifies that a tentative definition which is not merged with
     a non-tentative definition behaves exactly like a definition with an
     initializer equal to zero.  (Section 3.7.2)

     -fno-common gives strict ANSI behavior, though this tends to break
     a large body of code that grew up without this rule.

     Thread-local variables are never common, since there's no entrenched
     body of code to break, and it allows more efficient variable references
     in the presence of dynamic linking.  */

  if (TREE_CODE (decl) == VAR_DECL
      && !initialized
      && TREE_PUBLIC (decl)
      && !DECL_THREAD_LOCAL_P (decl)
      && !flag_no_common)
    DECL_COMMON (decl) = 1;

  /* Set attributes here so if duplicate decl, will have proper attributes.  */
  c_decl_attributes (&decl, attributes, 0);

  /* Handle gnu_inline attribute.  */
  if (declspecs->inline_p
      && !flag_gnu89_inline
      && TREE_CODE (decl) == FUNCTION_DECL
      && (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl))
	  || current_function_decl))
    {
      if (declspecs->storage_class == csc_auto && current_scope != file_scope)
	;
      else if (declspecs->storage_class != csc_static)
	DECL_EXTERNAL (decl) = !DECL_EXTERNAL (decl);
    }

  /* Promote narrow integer parameter types in prototypes when the
     target says so.  */
  if (TREE_CODE (decl) == FUNCTION_DECL
      && targetm.calls.promote_prototypes (TREE_TYPE (decl)))
    {
      struct c_declarator *ce = declarator;

      if (ce->kind == cdk_pointer)
	ce = declarator->declarator;
      if (ce->kind == cdk_function)
	{
	  tree args = ce->u.arg_info->parms;
	  for (; args; args = DECL_CHAIN (args))
	    {
	      tree type = TREE_TYPE (args);
	      if (type && INTEGRAL_TYPE_P (type)
		  && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
		DECL_ARG_TYPE (args) = c_type_promotes_to (type);
	    }
	}
    }

  if (TREE_CODE (decl) == FUNCTION_DECL
      && DECL_DECLARED_INLINE_P (decl)
      && DECL_UNINLINABLE (decl)
      && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl)))
    warning (OPT_Wattributes, "inline function %q+D given attribute noinline",
	     decl);

  /* C99 6.7.4p3: An inline definition of a function with external
     linkage shall not contain a definition of a modifiable object
     with static storage duration...  */
  if (TREE_CODE (decl) == VAR_DECL
      && current_scope != file_scope
      && TREE_STATIC (decl)
      && !TREE_READONLY (decl)
      && DECL_DECLARED_INLINE_P (current_function_decl)
      && DECL_EXTERNAL (current_function_decl))
    record_inline_static (input_location, current_function_decl,
			  decl, csi_modifiable);

  if (c_dialect_objc ()
      && (TREE_CODE (decl) == VAR_DECL
	  || TREE_CODE (decl) == FUNCTION_DECL))
    objc_check_global_decl (decl);

  /* Add this decl to the current scope.
     TEM may equal DECL or it may be a previous decl of the same name.  */
  tem = pushdecl (decl);

  if (initialized && DECL_EXTERNAL (tem))
    {
      DECL_EXTERNAL (tem) = 0;
      TREE_STATIC (tem) = 1;
    }

  return tem;
}

/* Subroutine of finish_decl.
TYPE is the type of an uninitialized object DECL or the non-array
   element type if DECL is an uninitialized array.  If that type has a
   const member, diagnose this. */

static void
diagnose_uninitialized_cst_member (tree decl, tree type)
{
  tree field;
  /* Walk every member of TYPE; recurse into nested struct/union
     members so a const member at any depth is diagnosed.  */
  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
    {
      tree field_type;
      if (TREE_CODE (field) != FIELD_DECL)
	continue;
      field_type = strip_array_types (TREE_TYPE (field));
      if (TYPE_QUALS (field_type) & TYPE_QUAL_CONST)
  	{
	  /* C++ requires const members to be initialized; warn under
	     -Wc++-compat and point at the offending field.  */
	  warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
	  	      "uninitialized const member in %qT is invalid in C++",
		      strip_array_types (TREE_TYPE (decl)));
	  inform (DECL_SOURCE_LOCATION (field), "%qD should be initialized", field);
	}
      if (TREE_CODE (field_type) == RECORD_TYPE
	  || TREE_CODE (field_type) == UNION_TYPE)
	diagnose_uninitialized_cst_member (decl, field_type);
    }
}

/* Finish processing of a declaration;
   install its initial value.
   If ORIGTYPE is not NULL_TREE, it is the original type of INIT.
   If the length of an array type is not known before,
   it must be determined now, from the initial value, or it is an error.

   INIT_LOC is the location of the initial value.  */

void
finish_decl (tree decl, location_t init_loc, tree init,
	     tree origtype, tree asmspec_tree)
{
  tree type;
  /* Remember whether DECL's size was unknown on entry; used at the end
     to recompute addressability of a local array completed here.  */
  bool was_incomplete = (DECL_SIZE (decl) == 0);
  const char *asmspec = 0;

  /* If a name was specified, get the string.  */
  if ((TREE_CODE (decl) == FUNCTION_DECL || TREE_CODE (decl) == VAR_DECL)
      && DECL_FILE_SCOPE_P (decl))
    asmspec_tree = maybe_apply_renaming_pragma (decl, asmspec_tree);
  if (asmspec_tree)
    asmspec = TREE_STRING_POINTER (asmspec_tree);

  if (TREE_CODE (decl) == VAR_DECL
      && TREE_STATIC (decl)
      && global_bindings_p ())
    /* So decl is a global variable.  Record the types it uses
       so that we can decide later to emit debug info for them.  */
    record_types_used_by_current_var_decl (decl);

  /* If `start_decl' didn't like having an initialization, ignore it now.  */
  if (init != 0 && DECL_INITIAL (decl) == 0)
    init = 0;

  /* Don't crash if parm is initialized.  */
  if (TREE_CODE (decl) == PARM_DECL)
    init = 0;

  if (init)
    store_init_value (init_loc, decl, init, origtype);

  if (c_dialect_objc () && (TREE_CODE (decl) == VAR_DECL
			    || TREE_CODE (decl) == FUNCTION_DECL
			    || TREE_CODE (decl) == FIELD_DECL))
    objc_check_decl (decl);

  type = TREE_TYPE (decl);

  /* Deduce size of array from initialization, if not already known.  */
  if (TREE_CODE (type) == ARRAY_TYPE
      && TYPE_DOMAIN (type) == 0
      && TREE_CODE (decl) != TYPE_DECL)
    {
      bool do_default
	= (TREE_STATIC (decl)
	   /* Even if pedantic, an external linkage array
	      may have incomplete type at first.  */
	   ? pedantic && !TREE_PUBLIC (decl)
	   : !DECL_EXTERNAL (decl));
      int failure
	= complete_array_type (&TREE_TYPE (decl), DECL_INITIAL (decl),
			       do_default);

      /* Get the completed type made by complete_array_type.  */
      type = TREE_TYPE (decl);

      /* FAILURE encodes complete_array_type's outcome; nonzero values
	 are distinct error conditions diagnosed below.  */
      switch (failure)
	{
	case 1:
	  error ("initializer fails to determine size of %q+D", decl);
	  break;

	case 2:
	  if (do_default)
	    error ("array size missing in %q+D", decl);
	  /* If a `static' var's size isn't known,
	     make it extern as well as static, so it does not get
	     allocated.
	     If it is not `static', then do not mark extern;
	     finish_incomplete_decl will give it a default size
	     and it will get allocated.  */
	  else if (!pedantic && TREE_STATIC (decl) && !TREE_PUBLIC (decl))
	    DECL_EXTERNAL (decl) = 1;
	  break;

	case 3:
	  error ("zero or negative size array %q+D", decl);
	  break;

	case 0:
	  /* For global variables, update the copy of the type that
	     exists in the binding.  */
	  if (TREE_PUBLIC (decl))
	    {
	      struct c_binding *b_ext = I_SYMBOL_BINDING (DECL_NAME (decl));
	      /* Walk out to the external-scope binding, if any.  */
	      while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext))
		b_ext = b_ext->shadowed;
	      if (b_ext)
		{
		  if (b_ext->u.type && comptypes (b_ext->u.type, type))
		    b_ext->u.type = composite_type (b_ext->u.type, type);
		  else
		    b_ext->u.type = type;
		}
	    }
	  break;

	default:
	  gcc_unreachable ();
	}

      if (DECL_INITIAL (decl))
	TREE_TYPE (DECL_INITIAL (decl)) = type;

      relayout_decl (decl);
    }

  if (TREE_CODE (decl) == VAR_DECL)
    {
      if (init && TREE_CODE (init) == CONSTRUCTOR)
	add_flexible_array_elts_to_size (decl, init);

      if (DECL_SIZE (decl) == 0 && TREE_TYPE (decl) != error_mark_node
	  && COMPLETE_TYPE_P (TREE_TYPE (decl)))
	layout_decl (decl, 0);

      if (DECL_SIZE (decl) == 0
	  /* Don't give an error if we already gave one earlier.  */
	  && TREE_TYPE (decl) != error_mark_node
	  && (TREE_STATIC (decl)
	      /* A static variable with an incomplete type
		 is an error if it is initialized.
		 Also if it is not file scope.
		 Otherwise, let it through, but if it is not `extern'
		 then it may cause an error message later.  */
	      ? (DECL_INITIAL (decl) != 0
		 || !DECL_FILE_SCOPE_P (decl))
	      /* An automatic variable with an incomplete type
		 is an error.  */
	      : !DECL_EXTERNAL (decl)))
	 {
	   error ("storage size of %q+D isn%'t known", decl);
	   TREE_TYPE (decl) = error_mark_node;
	 }

      if ((DECL_EXTERNAL (decl) || TREE_STATIC (decl))
	  && DECL_SIZE (decl) != 0)
	{
	  if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST)
	    constant_expression_warning (DECL_SIZE (decl));
	  else
	    {
	      error ("storage size of %q+D isn%'t constant", decl);
	      TREE_TYPE (decl) = error_mark_node;
	    }
	}

      if (TREE_USED (type))
	{
	  TREE_USED (decl) = 1;
	  DECL_READ_P (decl) = 1;
	}
    }

  /* If this is a function and an assembler name is specified, reset DECL_RTL
     so we can give it its new name.  Also, update builtin_decl if it
     was a normal built-in.  */
  if (TREE_CODE (decl) == FUNCTION_DECL && asmspec)
    {
      if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
	set_builtin_user_assembler_name (decl, asmspec);
      set_user_assembler_name (decl, asmspec);
    }

  /* If #pragma weak was used, mark the decl weak now.  */
  maybe_apply_pragma_weak (decl);

  /* Output the assembler code and/or RTL code for variables and functions,
     unless the type is an undefined structure or union.
     If not, it will get done when the type is completed.  */

  if (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == FUNCTION_DECL)
    {
      /* Determine the ELF visibility.  */
      if (TREE_PUBLIC (decl))
	c_determine_visibility (decl);

      /* This is a no-op in c-lang.c or something real in objc-act.c.  */
      if (c_dialect_objc ())
	objc_check_decl (decl);

      if (asmspec)
	{
	  /* If this is not a static variable, issue a warning.
	     It doesn't make any sense to give an ASMSPEC for an
	     ordinary, non-register local variable.  Historically,
	     GCC has accepted -- but ignored -- the ASMSPEC in
	     this case.  */
	  if (!DECL_FILE_SCOPE_P (decl)
	      && TREE_CODE (decl) == VAR_DECL
	      && !C_DECL_REGISTER (decl)
	      && !TREE_STATIC (decl))
	    warning (0, "ignoring asm-specifier for non-static local "
		     "variable %q+D", decl);
	  else
	    set_user_assembler_name (decl, asmspec);
	}

      if (DECL_FILE_SCOPE_P (decl))
	{
	  if (DECL_INITIAL (decl) == NULL_TREE
	      || DECL_INITIAL (decl) == error_mark_node)
	    /* Don't output anything
	       when a tentative file-scope definition is seen.
	       But at end of compilation, do output code for them.  */
	    DECL_DEFER_OUTPUT (decl) = 1;
	  if (asmspec && C_DECL_REGISTER (decl))
	    DECL_HARD_REGISTER (decl) = 1;
	  rest_of_decl_compilation (decl, true, 0);
	}
      else
	{
	  /* In conjunction with an ASMSPEC, the `register'
	     keyword indicates that we should place the variable
	     in a particular register.  */
	  if (asmspec && C_DECL_REGISTER (decl))
	    {
	      DECL_HARD_REGISTER (decl) = 1;
	      /* This cannot be done for a structure with volatile
		 fields, on which DECL_REGISTER will have been
		 reset.  */
	      if (!DECL_REGISTER (decl))
		error ("cannot put object with volatile field into register");
	    }

	  if (TREE_CODE (decl) != FUNCTION_DECL)
	    {
	      /* If we're building a variable sized type, and we might be
		 reachable other than via the top of the current binding
		 level, then create a new BIND_EXPR so that we deallocate
		 the object at the right time.  */
	      /* Note that DECL_SIZE can be null due to errors.  */
	      if (DECL_SIZE (decl)
		  && !TREE_CONSTANT (DECL_SIZE (decl))
		  && STATEMENT_LIST_HAS_LABEL (cur_stmt_list))
		{
		  tree bind;
		  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
		  TREE_SIDE_EFFECTS (bind) = 1;
		  add_stmt (bind);
		  BIND_EXPR_BODY (bind) = push_stmt_list ();
		}
	      add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl),
				    DECL_EXPR, decl));
	    }
	}

      if (!DECL_FILE_SCOPE_P (decl))
	{
	  /* Recompute the RTL of a local array now
	     if it used to be an incomplete type.  */
	  if (was_incomplete
	      && !TREE_STATIC (decl) && !DECL_EXTERNAL (decl))
	    {
	      /* If we used it already as memory, it must stay in memory.  */
	      TREE_ADDRESSABLE (decl) = TREE_USED (decl);
	      /* If it's still incomplete now, no init will save it.  */
	      if (DECL_SIZE (decl) == 0)
		DECL_INITIAL (decl) = 0;
	    }
	}
    }

  if (TREE_CODE (decl) == TYPE_DECL)
    {
      /* A local typedef of a variably modified type needs a DECL_EXPR
	 so the size expressions are evaluated at the right point.  */
      if (!DECL_FILE_SCOPE_P (decl)
	  && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE))
	add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl));

      rest_of_decl_compilation (decl, DECL_FILE_SCOPE_P (decl), 0);
    }

  /* Install a cleanup (aka destructor) if one was given.  */
  if (TREE_CODE (decl) == VAR_DECL && !TREE_STATIC (decl))
    {
      tree attr = lookup_attribute ("cleanup", DECL_ATTRIBUTES (decl));
      if (attr)
	{
	  tree cleanup_id = TREE_VALUE (TREE_VALUE (attr));
	  tree cleanup_decl = lookup_name (cleanup_id);
	  tree cleanup;
	  vec<tree, va_gc> *v;

	  /* Build "cleanup(&decl)" for the destructor.  */
	  cleanup = build_unary_op (input_location, ADDR_EXPR, decl, 0);
	  vec_alloc (v, 1);
	  v->quick_push (cleanup);
	  cleanup = c_build_function_call_vec (DECL_SOURCE_LOCATION (decl),
					       vNULL, cleanup_decl, v, NULL);
	  vec_free (v);

	  /* Don't warn about decl unused; the cleanup uses it.  */
	  TREE_USED (decl) = 1;
	  TREE_USED (cleanup_decl) = 1;
	  DECL_READ_P (decl) = 1;

	  push_cleanup (decl, cleanup, false);
	}
    }

  /* -Wc++-compat: an uninitialized const object (or aggregate with a
     const member) would be ill-formed in C++.  */
  if (warn_cxx_compat
      && TREE_CODE (decl) == VAR_DECL
      && !DECL_EXTERNAL (decl)
      && DECL_INITIAL (decl) == NULL_TREE)
    {
      type = strip_array_types (type);
      if (TREE_READONLY (decl))
	warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
		    "uninitialized const %qD is invalid in C++", decl);
      else if ((TREE_CODE (type) == RECORD_TYPE
	      	|| TREE_CODE (type) == UNION_TYPE)
	       && C_TYPE_FIELDS_READONLY (type))
	diagnose_uninitialized_cst_member (decl, type);
    }

  invoke_plugin_callbacks (PLUGIN_FINISH_DECL, decl);
}

/* Given a parsed parameter declaration, decode it into a PARM_DECL.
   EXPR is NULL or a pointer to an expression that needs to be
   evaluated for the side effects of array size expressions in the
   parameters.  */

tree
grokparm (const struct c_parm *parm, tree *expr)
{
  tree attrs = parm->attrs;
  tree decl = grokdeclarator (parm->declarator, parm->specs, PARM, false,
			      NULL, &attrs, expr, NULL, DEPRECATED_NORMAL);

  decl_attributes (&decl, attrs, 0);

  return decl;
}

/* Given a parsed parameter declaration, decode it into a PARM_DECL
   and push that on the current scope.  EXPR is a pointer to an
   expression that needs to be evaluated for the side effects of array
   size expressions in the parameters.

   Unlike grokparm, this also enters the decl in the current scope and
   runs finish_decl on it.  */

void
push_parm_decl (const struct c_parm *parm, tree *expr)
{
  tree attrs = parm->attrs;
  tree decl;

  decl = grokdeclarator (parm->declarator, parm->specs, PARM, false, NULL,
			 &attrs, expr, NULL, DEPRECATED_NORMAL);
  decl_attributes (&decl, attrs, 0);

  decl = pushdecl (decl);

  finish_decl (decl, input_location, NULL_TREE, NULL_TREE, NULL_TREE);
}

/* Mark all the parameter declarations to date as forward decls.
Also diagnose use of this extension.  */

void
mark_forward_parm_decls (void)
{
  struct c_binding *b;

  /* Forward parameter declarations are a GNU extension; pedwarn once
     per scope.  */
  if (pedantic && !current_scope->warned_forward_parm_decls)
    {
      pedwarn (input_location, OPT_Wpedantic,
	       "ISO C forbids forward parameter declarations");
      current_scope->warned_forward_parm_decls = true;
    }

  for (b = current_scope->bindings; b; b = b->prev)
    if (TREE_CODE (b->decl) == PARM_DECL)
      TREE_ASM_WRITTEN (b->decl) = 1;
}

/* Build a COMPOUND_LITERAL_EXPR.  TYPE is the type given in the compound
   literal, which may be an incomplete array type completed by the
   initializer; INIT is a CONSTRUCTOR at LOC that initializes the compound
   literal.  NON_CONST is true if the initializers contain something
   that cannot occur in a constant expression.  */

tree
build_compound_literal (location_t loc, tree type, tree init, bool non_const)
{
  /* We do not use start_decl here because we have a type, not a declarator;
     and do not use finish_decl because the decl should be stored inside
     the COMPOUND_LITERAL_EXPR rather than added elsewhere as a DECL_EXPR.  */
  tree decl;
  tree complit;
  tree stmt;

  if (type == error_mark_node
      || init == error_mark_node)
    return error_mark_node;

  /* Build the anonymous object holding the literal's value; it has
     static storage duration exactly when it appears at file scope.  */
  decl = build_decl (loc, VAR_DECL, NULL_TREE, type);
  DECL_EXTERNAL (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  TREE_STATIC (decl) = (current_scope == file_scope);
  DECL_CONTEXT (decl) = current_function_decl;
  TREE_USED (decl) = 1;
  DECL_READ_P (decl) = 1;
  TREE_TYPE (decl) = type;
  TREE_READONLY (decl) = (TYPE_READONLY (type)
			  || (TREE_CODE (type) == ARRAY_TYPE
			      && TYPE_READONLY (TREE_TYPE (type))));
  store_init_value (loc, decl, init, NULL_TREE);

  /* An incomplete array type is completed from the initializer.  */
  if (TREE_CODE (type) == ARRAY_TYPE && !COMPLETE_TYPE_P (type))
    {
      int failure = complete_array_type (&TREE_TYPE (decl),
					 DECL_INITIAL (decl), true);
      /* If complete_array_type returns 3, it means that the
	 initial value of the compound literal is empty.  Allow it.  */
      gcc_assert (failure == 0 || failure == 3);

      type = TREE_TYPE (decl);
      TREE_TYPE (DECL_INITIAL (decl)) = type;
    }

  if (type == error_mark_node || !COMPLETE_TYPE_P (type))
    {
      c_incomplete_type_error (NULL_TREE, type);
      return error_mark_node;
    }

  /* Wrap the decl in a DECL_EXPR inside the COMPOUND_LITERAL_EXPR.  */
  stmt = build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl);
  complit = build1 (COMPOUND_LITERAL_EXPR, type, stmt);
  TREE_SIDE_EFFECTS (complit) = 1;

  layout_decl (decl, 0);

  if (TREE_STATIC (decl))
    {
      /* This decl needs a name for the assembler output.  */
      set_compound_literal_name (decl);
      DECL_DEFER_OUTPUT (decl) = 1;
      DECL_COMDAT (decl) = 1;
      DECL_ARTIFICIAL (decl) = 1;
      DECL_IGNORED_P (decl) = 1;
      pushdecl (decl);
      rest_of_decl_compilation (decl, 1, 0);
    }

  /* Record non-constancy so the literal is rejected where a constant
     expression is required.  */
  if (non_const)
    {
      complit = build2 (C_MAYBE_CONST_EXPR, type, NULL, complit);
      C_MAYBE_CONST_EXPR_NON_CONST (complit) = 1;
    }

  return complit;
}

/* Check the type of a compound literal.  Here we just check that it
   is valid for C++.  */

void
check_compound_literal_type (location_t loc, struct c_type_name *type_name)
{
  /* C++ does not allow a new type to be defined inside a compound
     literal's type name; warn under -Wc++-compat.  */
  if (warn_cxx_compat
      && (type_name->specs->typespec_kind == ctsk_tagdef
          || type_name->specs->typespec_kind == ctsk_tagfirstref))
    warning_at (loc, OPT_Wc___compat,
		"defining a type in a compound literal is invalid in C++");
}

/* Determine whether TYPE is a structure with a flexible array member,
   or a union containing such a structure (possibly recursively).
*/

static bool
flexible_array_type_p (tree type)
{
  tree x;
  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
      /* Only the last member of a struct may be a flexible array:
	 an array type with no size and an open-ended domain.  */
      x = TYPE_FIELDS (type);
      if (x == NULL_TREE)
	return false;
      while (DECL_CHAIN (x) != NULL_TREE)
	x = DECL_CHAIN (x);
      if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
	  && TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE
	  && TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE
	  && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE)
	return true;
      return false;
    case UNION_TYPE:
      /* A union qualifies if any of its members does (recursively).  */
      for (x = TYPE_FIELDS (type); x != NULL_TREE; x = DECL_CHAIN (x))
	{
	  if (flexible_array_type_p (TREE_TYPE (x)))
	    return true;
	}
      return false;
    default:
      return false;
  }
}

/* Performs sanity checks on the TYPE and WIDTH of the bit-field NAME,
   replacing with appropriate values if they are invalid.  On error,
   *WIDTH is replaced with integer_one_node (or clamped to the type's
   precision) and *TYPE with unsigned_type_node so that parsing can
   continue.  */

static void
check_bitfield_type_and_width (tree *type, tree *width, tree orig_name)
{
  tree type_mv;
  unsigned int max_width;
  unsigned HOST_WIDE_INT w;
  const char *name
    = (orig_name
       ? identifier_to_locale (IDENTIFIER_POINTER (orig_name))
       : _("<anonymous>"));

  /* Detect and ignore out of range field width and process valid
     field widths.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (*width)))
    {
      error ("bit-field %qs width not an integer constant", name);
      *width = integer_one_node;
    }
  else
    {
      /* Fold first: a width that folds to a constant but is not an
	 integer constant expression gets a pedwarn, not an error.  */
      if (TREE_CODE (*width) != INTEGER_CST)
	{
	  *width = c_fully_fold (*width, false, NULL);
	  if (TREE_CODE (*width) == INTEGER_CST)
	    pedwarn (input_location, OPT_Wpedantic,
		     "bit-field %qs width not an integer constant expression",
		     name);
	}
      if (TREE_CODE (*width) != INTEGER_CST)
	{
	  error ("bit-field %qs width not an integer constant", name);
	  *width = integer_one_node;
	}
      constant_expression_warning (*width);
      if (tree_int_cst_sgn (*width) < 0)
	{
	  error ("negative width in bit-field %qs", name);
	  *width = integer_one_node;
	}
      /* A zero width is an error only for a named bit-field.  */
      else if (integer_zerop (*width) && orig_name)
	{
	  error ("zero width for bit-field %qs", name);
	  *width = integer_one_node;
	}
    }

  /* Detect invalid bit-field type.  */
  if (TREE_CODE (*type) != INTEGER_TYPE
      && TREE_CODE (*type) != BOOLEAN_TYPE
      && TREE_CODE (*type) != ENUMERAL_TYPE)
    {
      error ("bit-field %qs has invalid type", name);
      *type = unsigned_type_node;
    }

  type_mv = TYPE_MAIN_VARIANT (*type);
  if (!in_system_header_at (input_location)
      && type_mv != integer_type_node
      && type_mv != unsigned_type_node
      && type_mv != boolean_type_node)
    pedwarn_c90 (input_location, OPT_Wpedantic,
		 "type of bit-field %qs is a GCC extension", name);

  max_width = TYPE_PRECISION (*type);

  /* Clamp an over-wide bit-field to the precision of its type.  */
  if (0 < compare_tree_int (*width, max_width))
    {
      error ("width of %qs exceeds its type", name);
      w = max_width;
      *width = build_int_cst (integer_type_node, w);
    }
  else
    w = tree_to_uhwi (*width);

  if (TREE_CODE (*type) == ENUMERAL_TYPE)
    {
      /* Warn if the field is too narrow to represent every enumerator.  */
      struct lang_type *lt = TYPE_LANG_SPECIFIC (*type);
      if (!lt
	  || w < tree_int_cst_min_precision (lt->enum_min, TYPE_SIGN (*type))
	  || w < tree_int_cst_min_precision (lt->enum_max, TYPE_SIGN (*type)))
	warning (0, "%qs is narrower than values of its type", name);
    }
}

/* Print warning about variable length array if necessary.  NAME is the
   array's name or NULL for an unnamed array; SIZE is its size
   expression.  Diagnosed under -Wvla only for pre-C99 dialects.  */

static void
warn_variable_length_array (tree name, tree size)
{
  /* A folded-constant (but not constant-expression) size gets a
     different message from a genuinely variable size.  */
  if (TREE_CONSTANT (size))
    {
      if (name)
	pedwarn_c90 (input_location, OPT_Wvla,
		     "ISO C90 forbids array %qE whose size "
		     "can%'t be evaluated", name);
      else
	pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids array "
		     "whose size can%'t be evaluated");
    }
  else
    {
      if (name)
	pedwarn_c90 (input_location, OPT_Wvla,
		     "ISO C90 forbids variable length array %qE", name);
      else
	pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids variable "
		     "length array");
    }
}

/* Print warning about defaulting to int if necessary.  The diagnostic
   is a pedwarn for C99 and later, a plain warning otherwise.  */

static void
warn_defaults_to (location_t location, int opt, const char *gmsgid, ...)
{
  diagnostic_info diagnostic;
  va_list ap;

  va_start (ap, gmsgid);
  diagnostic_set_info (&diagnostic, gmsgid, &ap, location,
		       flag_isoc99 ? DK_PEDWARN : DK_WARNING);
  diagnostic.option_index = opt;
  report_diagnostic (&diagnostic);
  va_end (ap);
}

/* Given declspecs and a declarator,
   determine the name and type of the object declared
   and construct a ..._DECL node for it.
   (In one case we can return a ..._TYPE node instead.
    For invalid input we sometimes return 0.)

   DECLSPECS is a c_declspecs structure for the declaration specifiers.

   DECL_CONTEXT says which syntactic context this declaration is in:
     NORMAL for most contexts.  Make a VAR_DECL or FUNCTION_DECL or TYPE_DECL.
     FUNCDEF for a function definition.  Like NORMAL but a few different
      error messages in each case.  Return value may be zero meaning
      this definition is too screwy to try to parse.
     PARM for a parameter declaration (either within a function prototype
      or before a function body).  Make a PARM_DECL, or return void_type_node.
     TYPENAME if for a typename (in a cast or sizeof).
      Don't make a DECL node; just return the ..._TYPE node.
     FIELD for a struct or union field; make a FIELD_DECL.
   INITIALIZED is true if the decl has an initializer.
   WIDTH is non-NULL for bit-fields, and is a pointer to an INTEGER_CST node
   representing the width of the bit-field.
   DECL_ATTRS points to the list of attributes that should be added to this
     decl.  Any nested attributes that belong on the decl itself will be
     added to this list.
   If EXPR is not NULL, any expressions that need to be evaluated as
     part of evaluating variably modified types will be stored in *EXPR.
   If EXPR_CONST_OPERANDS is not NULL, *EXPR_CONST_OPERANDS will be set to
     indicate whether operands in *EXPR can be used in constant expressions.
   DEPRECATED_STATE is a deprecated_states value indicating whether
   deprecation warnings should be suppressed.

   In the TYPENAME case, DECLARATOR is really an absolute declarator.
   It may also be so in the PARM case, for a prototype where the
   argument type is specified but not the name.

   This function is where the complicated C meanings of `static'
   and `extern' are interpreted.
*/ static tree grokdeclarator (const struct c_declarator *declarator, struct c_declspecs *declspecs, enum decl_context decl_context, bool initialized, tree *width, tree *decl_attrs, tree *expr, bool *expr_const_operands, enum deprecated_states deprecated_state) { tree type = declspecs->type; bool threadp = declspecs->thread_p; enum c_storage_class storage_class = declspecs->storage_class; int constp; int restrictp; int volatilep; int atomicp; int type_quals = TYPE_UNQUALIFIED; tree name = NULL_TREE; bool funcdef_flag = false; bool funcdef_syntax = false; bool size_varies = false; tree decl_attr = declspecs->decl_attr; int array_ptr_quals = TYPE_UNQUALIFIED; tree array_ptr_attrs = NULL_TREE; int array_parm_static = 0; bool array_parm_vla_unspec_p = false; tree returned_attrs = NULL_TREE; bool bitfield = width != NULL; tree element_type; tree orig_qual_type = NULL; size_t orig_qual_indirect = 0; struct c_arg_info *arg_info = 0; addr_space_t as1, as2, address_space; location_t loc = UNKNOWN_LOCATION; const char *errmsg; tree expr_dummy; bool expr_const_operands_dummy; enum c_declarator_kind first_non_attr_kind; unsigned int alignas_align = 0; if (TREE_CODE (type) == ERROR_MARK) return error_mark_node; if (expr == NULL) expr = &expr_dummy; if (expr_const_operands == NULL) expr_const_operands = &expr_const_operands_dummy; *expr = declspecs->expr; *expr_const_operands = declspecs->expr_const_operands; if (decl_context == FUNCDEF) funcdef_flag = true, decl_context = NORMAL; /* Look inside a declarator for the name being declared and get it as an IDENTIFIER_NODE, for an error message. */ { const struct c_declarator *decl = declarator; first_non_attr_kind = cdk_attrs; while (decl) switch (decl->kind) { case cdk_array: loc = decl->id_loc; /* FALL THRU. 
*/ case cdk_function: case cdk_pointer: funcdef_syntax = (decl->kind == cdk_function); if (first_non_attr_kind == cdk_attrs) first_non_attr_kind = decl->kind; decl = decl->declarator; break; case cdk_attrs: decl = decl->declarator; break; case cdk_id: loc = decl->id_loc; if (decl->u.id) name = decl->u.id; if (first_non_attr_kind == cdk_attrs) first_non_attr_kind = decl->kind; decl = 0; break; default: gcc_unreachable (); } if (name == 0) { gcc_assert (decl_context == PARM || decl_context == TYPENAME || (decl_context == FIELD && declarator->kind == cdk_id)); gcc_assert (!initialized); } } /* A function definition's declarator must have the form of a function declarator. */ if (funcdef_flag && !funcdef_syntax) return 0; /* If this looks like a function definition, make it one, even if it occurs where parms are expected. Then store_parm_decls will reject it and not use it as a parm. */ if (decl_context == NORMAL && !funcdef_flag && current_scope->parm_flag) decl_context = PARM; if (declspecs->deprecated_p && deprecated_state != DEPRECATED_SUPPRESS) warn_deprecated_use (declspecs->type, declspecs->decl_attr); if ((decl_context == NORMAL || decl_context == FIELD) && current_scope == file_scope && variably_modified_type_p (type, NULL_TREE)) { if (name) error_at (loc, "variably modified %qE at file scope", name); else error_at (loc, "variably modified field at file scope"); type = integer_type_node; } size_varies = C_TYPE_VARIABLE_SIZE (type) != 0; /* Diagnose defaulting to "int". */ if (declspecs->default_int_p && !in_system_header_at (input_location)) { /* Issue a warning if this is an ISO C 99 program or if -Wreturn-type and this is a function, or if -Wimplicit; prefer the former warning since it is more explicit. 
*/ if ((warn_implicit_int || warn_return_type || flag_isoc99) && funcdef_flag) warn_about_return_type = 1; else { if (name) warn_defaults_to (loc, OPT_Wimplicit_int, "type defaults to %<int%> in declaration " "of %qE", name); else warn_defaults_to (loc, OPT_Wimplicit_int, "type defaults to %<int%> in type name"); } } /* Adjust the type if a bit-field is being declared, -funsigned-bitfields applied and the type is not explicitly "signed". */ if (bitfield && !flag_signed_bitfields && !declspecs->explicit_signed_p && TREE_CODE (type) == INTEGER_TYPE) type = unsigned_type_for (type); /* Figure out the type qualifiers for the declaration. There are two ways a declaration can become qualified. One is something like `const int i' where the `const' is explicit. Another is something like `typedef const int CI; CI i' where the type of the declaration contains the `const'. A third possibility is that there is a type qualifier on the element type of a typedefed array type, in which case we should extract that qualifier so that c_apply_type_quals_to_decl receives the full list of qualifiers to work with (C90 is not entirely clear about whether duplicate qualifiers should be diagnosed in this case, but it seems most appropriate to do so). */ element_type = strip_array_types (type); constp = declspecs->const_p + TYPE_READONLY (element_type); restrictp = declspecs->restrict_p + TYPE_RESTRICT (element_type); volatilep = declspecs->volatile_p + TYPE_VOLATILE (element_type); atomicp = declspecs->atomic_p + TYPE_ATOMIC (element_type); as1 = declspecs->address_space; as2 = TYPE_ADDR_SPACE (element_type); address_space = ADDR_SPACE_GENERIC_P (as1)? 
as2 : as1; if (constp > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<const%>"); if (restrictp > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<restrict%>"); if (volatilep > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<volatile%>"); if (atomicp > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<_Atomic%>"); if (!ADDR_SPACE_GENERIC_P (as1) && !ADDR_SPACE_GENERIC_P (as2) && as1 != as2) error_at (loc, "conflicting named address spaces (%s vs %s)", c_addr_space_name (as1), c_addr_space_name (as2)); if ((TREE_CODE (type) == ARRAY_TYPE || first_non_attr_kind == cdk_array) && TYPE_QUALS (element_type)) { orig_qual_type = type; type = TYPE_MAIN_VARIANT (type); } type_quals = ((constp ? TYPE_QUAL_CONST : 0) | (restrictp ? TYPE_QUAL_RESTRICT : 0) | (volatilep ? TYPE_QUAL_VOLATILE : 0) | (atomicp ? TYPE_QUAL_ATOMIC : 0) | ENCODE_QUAL_ADDR_SPACE (address_space)); if (type_quals != TYPE_QUALS (element_type)) orig_qual_type = NULL_TREE; /* Applying the _Atomic qualifier to an array type (through the use of typedefs or typeof) must be detected here. If the qualifier is introduced later, any appearance of applying it to an array is actually applying it to an element of that array. */ if (atomicp && TREE_CODE (type) == ARRAY_TYPE) error_at (loc, "%<_Atomic%>-qualified array type"); /* Warn about storage classes that are invalid for certain kinds of declarations (parameters, typenames, etc.). */ if (funcdef_flag && (threadp || storage_class == csc_auto || storage_class == csc_register || storage_class == csc_typedef)) { if (storage_class == csc_auto) pedwarn (loc, (current_scope == file_scope) ? 0 : OPT_Wpedantic, "function definition declared %<auto%>"); if (storage_class == csc_register) error_at (loc, "function definition declared %<register%>"); if (storage_class == csc_typedef) error_at (loc, "function definition declared %<typedef%>"); if (threadp) error_at (loc, "function definition declared %qs", declspecs->thread_gnu_p ? 
"__thread" : "_Thread_local"); threadp = false; if (storage_class == csc_auto || storage_class == csc_register || storage_class == csc_typedef) storage_class = csc_none; } else if (decl_context != NORMAL && (storage_class != csc_none || threadp)) { if (decl_context == PARM && storage_class == csc_register) ; else { switch (decl_context) { case FIELD: if (name) error_at (loc, "storage class specified for structure " "field %qE", name); else error_at (loc, "storage class specified for structure field"); break; case PARM: if (name) error_at (loc, "storage class specified for parameter %qE", name); else error_at (loc, "storage class specified for unnamed parameter"); break; default: error_at (loc, "storage class specified for typename"); break; } storage_class = csc_none; threadp = false; } } else if (storage_class == csc_extern && initialized && !funcdef_flag) { /* 'extern' with initialization is invalid if not at file scope. */ if (current_scope == file_scope) { /* It is fine to have 'extern const' when compiling at C and C++ intersection. */ if (!(warn_cxx_compat && constp)) warning_at (loc, 0, "%qE initialized and declared %<extern%>", name); } else error_at (loc, "%qE has both %<extern%> and initializer", name); } else if (current_scope == file_scope) { if (storage_class == csc_auto) error_at (loc, "file-scope declaration of %qE specifies %<auto%>", name); if (pedantic && storage_class == csc_register) pedwarn (input_location, OPT_Wpedantic, "file-scope declaration of %qE specifies %<register%>", name); } else { if (storage_class == csc_extern && funcdef_flag) error_at (loc, "nested function %qE declared %<extern%>", name); else if (threadp && storage_class == csc_none) { error_at (loc, "function-scope %qE implicitly auto and declared " "%qs", name, declspecs->thread_gnu_p ? "__thread" : "_Thread_local"); threadp = false; } } /* Now figure out the structure of the declarator proper. 
Descend through it, creating more complex types, until we reach the declared identifier (or NULL_TREE, in an absolute declarator). At each stage we maintain an unqualified version of the type together with any qualifiers that should be applied to it with c_build_qualified_type; this way, array types including multidimensional array types are first built up in unqualified form and then the qualified form is created with TYPE_MAIN_VARIANT pointing to the unqualified form. */ while (declarator && declarator->kind != cdk_id) { if (type == error_mark_node) { declarator = declarator->declarator; continue; } /* Each level of DECLARATOR is either a cdk_array (for ...[..]), a cdk_pointer (for *...), a cdk_function (for ...(...)), a cdk_attrs (for nested attributes), or a cdk_id (for the name being declared or the place in an absolute declarator where the name was omitted). For the last case, we have just exited the loop. At this point, TYPE is the type of elements of an array, or for a function to return, or for a pointer to point to. After this sequence of ifs, TYPE is the type of the array or function or pointer, and DECLARATOR has had its outermost layer removed. */ if (array_ptr_quals != TYPE_UNQUALIFIED || array_ptr_attrs != NULL_TREE || array_parm_static) { /* Only the innermost declarator (making a parameter be of array type which is converted to pointer type) may have static or type qualifiers. */ error_at (loc, "static or type qualifiers in non-parameter array declarator"); array_ptr_quals = TYPE_UNQUALIFIED; array_ptr_attrs = NULL_TREE; array_parm_static = 0; } switch (declarator->kind) { case cdk_attrs: { /* A declarator with embedded attributes. 
*/ tree attrs = declarator->u.attrs; const struct c_declarator *inner_decl; int attr_flags = 0; declarator = declarator->declarator; inner_decl = declarator; while (inner_decl->kind == cdk_attrs) inner_decl = inner_decl->declarator; if (inner_decl->kind == cdk_id) attr_flags |= (int) ATTR_FLAG_DECL_NEXT; else if (inner_decl->kind == cdk_function) attr_flags |= (int) ATTR_FLAG_FUNCTION_NEXT; else if (inner_decl->kind == cdk_array) attr_flags |= (int) ATTR_FLAG_ARRAY_NEXT; returned_attrs = decl_attributes (&type, chainon (returned_attrs, attrs), attr_flags); break; } case cdk_array: { tree itype = NULL_TREE; tree size = declarator->u.array.dimen; /* The index is a signed object `sizetype' bits wide. */ tree index_type = c_common_signed_type (sizetype); array_ptr_quals = declarator->u.array.quals; array_ptr_attrs = declarator->u.array.attrs; array_parm_static = declarator->u.array.static_p; array_parm_vla_unspec_p = declarator->u.array.vla_unspec_p; declarator = declarator->declarator; /* Check for some types that there cannot be arrays of. */ if (VOID_TYPE_P (type)) { if (name) error_at (loc, "declaration of %qE as array of voids", name); else error_at (loc, "declaration of type name as array of voids"); type = error_mark_node; } if (TREE_CODE (type) == FUNCTION_TYPE) { if (name) error_at (loc, "declaration of %qE as array of functions", name); else error_at (loc, "declaration of type name as array of " "functions"); type = error_mark_node; } if (pedantic && !in_system_header_at (input_location) && flexible_array_type_p (type)) pedwarn (loc, OPT_Wpedantic, "invalid use of structure with flexible array member"); if (size == error_mark_node) type = error_mark_node; if (type == error_mark_node) continue; /* If size was specified, set ITYPE to a range-type for that size. Otherwise, ITYPE remains null. finish_decl may figure it out from an initial value. 
*/ if (size) { bool size_maybe_const = true; bool size_int_const = (TREE_CODE (size) == INTEGER_CST && !TREE_OVERFLOW (size)); bool this_size_varies = false; /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */ STRIP_TYPE_NOPS (size); if (!INTEGRAL_TYPE_P (TREE_TYPE (size))) { if (name) error_at (loc, "size of array %qE has non-integer type", name); else error_at (loc, "size of unnamed array has non-integer type"); size = integer_one_node; } size = c_fully_fold (size, false, &size_maybe_const); if (pedantic && size_maybe_const && integer_zerop (size)) { if (name) pedwarn (loc, OPT_Wpedantic, "ISO C forbids zero-size array %qE", name); else pedwarn (loc, OPT_Wpedantic, "ISO C forbids zero-size array"); } if (TREE_CODE (size) == INTEGER_CST && size_maybe_const) { constant_expression_warning (size); if (tree_int_cst_sgn (size) < 0) { if (name) error_at (loc, "size of array %qE is negative", name); else error_at (loc, "size of unnamed array is negative"); size = integer_one_node; } /* Handle a size folded to an integer constant but not an integer constant expression. */ if (!size_int_const) { /* If this is a file scope declaration of an ordinary identifier, this is invalid code; diagnosing it here and not subsequently treating the type as variable-length avoids more confusing diagnostics later. */ if ((decl_context == NORMAL || decl_context == FIELD) && current_scope == file_scope) pedwarn (input_location, 0, "variably modified %qE at file scope", name); else this_size_varies = size_varies = true; warn_variable_length_array (name, size); } } else if ((decl_context == NORMAL || decl_context == FIELD) && current_scope == file_scope) { error_at (loc, "variably modified %qE at file scope", name); size = integer_one_node; } else { /* Make sure the array size remains visibly nonconstant even if it is (eg) a const variable with known value. 
*/ this_size_varies = size_varies = true; warn_variable_length_array (name, size); if (flag_sanitize & SANITIZE_VLA && decl_context == NORMAL && do_ubsan_in_current_function ()) { /* Evaluate the array size only once. */ size = c_save_expr (size); size = c_fully_fold (size, false, NULL); size = fold_build2 (COMPOUND_EXPR, TREE_TYPE (size), ubsan_instrument_vla (loc, size), size); } } if (integer_zerop (size) && !this_size_varies) { /* A zero-length array cannot be represented with an unsigned index type, which is what we'll get with build_index_type. Create an open-ended range instead. */ itype = build_range_type (sizetype, size, NULL_TREE); } else { /* Arrange for the SAVE_EXPR on the inside of the MINUS_EXPR, which allows the -1 to get folded with the +1 that happens when building TYPE_SIZE. */ if (size_varies) size = save_expr (size); if (this_size_varies && TREE_CODE (size) == INTEGER_CST) size = build2 (COMPOUND_EXPR, TREE_TYPE (size), integer_zero_node, size); /* Compute the maximum valid index, that is, size - 1. Do the calculation in index_type, so that if it is a variable the computations will be done in the proper mode. */ itype = fold_build2_loc (loc, MINUS_EXPR, index_type, convert (index_type, size), convert (index_type, size_one_node)); /* The above overflows when size does not fit in index_type. ??? While a size of INT_MAX+1 technically shouldn't cause an overflow (because we subtract 1), handling this case seems like an unnecessary complication. 
*/ if (TREE_CODE (size) == INTEGER_CST && !int_fits_type_p (size, index_type)) { if (name) error_at (loc, "size of array %qE is too large", name); else error_at (loc, "size of unnamed array is too large"); type = error_mark_node; continue; } itype = build_index_type (itype); } if (this_size_varies) { if (*expr) *expr = build2 (COMPOUND_EXPR, TREE_TYPE (size), *expr, size); else *expr = size; *expr_const_operands &= size_maybe_const; } } else if (decl_context == FIELD) { bool flexible_array_member = false; if (array_parm_vla_unspec_p) /* Field names can in fact have function prototype scope so [*] is disallowed here through making the field variably modified, not through being something other than a declaration with function prototype scope. */ size_varies = true; else { const struct c_declarator *t = declarator; while (t->kind == cdk_attrs) t = t->declarator; flexible_array_member = (t->kind == cdk_id); } if (flexible_array_member && !in_system_header_at (input_location)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not " "support flexible array members"); /* ISO C99 Flexible array members are effectively identical to GCC's zero-length array extension. */ if (flexible_array_member || array_parm_vla_unspec_p) itype = build_range_type (sizetype, size_zero_node, NULL_TREE); } else if (decl_context == PARM) { if (array_parm_vla_unspec_p) { itype = build_range_type (sizetype, size_zero_node, NULL_TREE); size_varies = true; } } else if (decl_context == TYPENAME) { if (array_parm_vla_unspec_p) { /* C99 6.7.5.2p4 */ warning (0, "%<[*]%> not in a declaration"); /* We use this to avoid messing up with incomplete array types of the same type, that would otherwise be modified below. */ itype = build_range_type (sizetype, size_zero_node, NULL_TREE); size_varies = true; } } /* Complain about arrays of incomplete types. 
*/ if (!COMPLETE_TYPE_P (type)) { error_at (loc, "array type has incomplete element type %qT", type); type = error_mark_node; } else /* When itype is NULL, a shared incomplete array type is returned for all array of a given type. Elsewhere we make sure we don't complete that type before copying it, but here we want to make sure we don't ever modify the shared type, so we gcc_assert (itype) below. */ { addr_space_t as = DECODE_QUAL_ADDR_SPACE (type_quals); if (!ADDR_SPACE_GENERIC_P (as) && as != TYPE_ADDR_SPACE (type)) type = build_qualified_type (type, ENCODE_QUAL_ADDR_SPACE (as)); type = build_array_type (type, itype); } if (type != error_mark_node) { if (size_varies) { /* It is ok to modify type here even if itype is NULL: if size_varies, we're in a multi-dimensional array and the inner type has variable size, so the enclosing shared array type must too. */ if (size && TREE_CODE (size) == INTEGER_CST) type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); C_TYPE_VARIABLE_SIZE (type) = 1; } /* The GCC extension for zero-length arrays differs from ISO flexible array members in that sizeof yields zero. */ if (size && integer_zerop (size)) { gcc_assert (itype); type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); TYPE_SIZE (type) = bitsize_zero_node; TYPE_SIZE_UNIT (type) = size_zero_node; SET_TYPE_STRUCTURAL_EQUALITY (type); } if (array_parm_vla_unspec_p) { gcc_assert (itype); /* The type is complete. 
C99 6.7.5.2p4 */ type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); TYPE_SIZE (type) = bitsize_zero_node; TYPE_SIZE_UNIT (type) = size_zero_node; SET_TYPE_STRUCTURAL_EQUALITY (type); } } if (decl_context != PARM && (array_ptr_quals != TYPE_UNQUALIFIED || array_ptr_attrs != NULL_TREE || array_parm_static)) { error_at (loc, "static or type qualifiers in non-parameter array declarator"); array_ptr_quals = TYPE_UNQUALIFIED; array_ptr_attrs = NULL_TREE; array_parm_static = 0; } orig_qual_indirect++; break; } case cdk_function: { /* Say it's a definition only for the declarator closest to the identifier, apart possibly from some attributes. */ bool really_funcdef = false; tree arg_types; orig_qual_type = NULL_TREE; if (funcdef_flag) { const struct c_declarator *t = declarator->declarator; while (t->kind == cdk_attrs) t = t->declarator; really_funcdef = (t->kind == cdk_id); } /* Declaring a function type. Make sure we have a valid type for the function to return. */ if (type == error_mark_node) continue; size_varies = false; /* Warn about some types functions can't return. */ if (TREE_CODE (type) == FUNCTION_TYPE) { if (name) error_at (loc, "%qE declared as function returning a " "function", name); else error_at (loc, "type name declared as function " "returning a function"); type = integer_type_node; } if (TREE_CODE (type) == ARRAY_TYPE) { if (name) error_at (loc, "%qE declared as function returning an array", name); else error_at (loc, "type name declared as function returning " "an array"); type = integer_type_node; } errmsg = targetm.invalid_return_type (type); if (errmsg) { error (errmsg); type = integer_type_node; } /* Construct the function type and go to the next inner layer of declarator. */ arg_info = declarator->u.arg_info; arg_types = grokparms (arg_info, really_funcdef); /* Type qualifiers before the return type of the function qualify the return type, not the function type. 
*/ if (type_quals) { /* Type qualifiers on a function return type are normally permitted by the standard but have no effect, so give a warning at -Wreturn-type. Qualifiers on a void return type are banned on function definitions in ISO C; GCC used to used them for noreturn functions. */ if (VOID_TYPE_P (type) && really_funcdef) pedwarn (loc, 0, "function definition has qualified void return type"); else warning_at (loc, OPT_Wignored_qualifiers, "type qualifiers ignored on function return type"); type = c_build_qualified_type (type, type_quals); } type_quals = TYPE_UNQUALIFIED; type = build_function_type (type, arg_types); declarator = declarator->declarator; /* Set the TYPE_CONTEXTs for each tagged type which is local to the formal parameter list of this FUNCTION_TYPE to point to the FUNCTION_TYPE node itself. */ { c_arg_tag *tag; unsigned ix; FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag) TYPE_CONTEXT (tag->type) = type; } break; } case cdk_pointer: { /* Merge any constancy or volatility into the target type for the pointer. */ if ((type_quals & TYPE_QUAL_ATOMIC) && TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); orig_qual_type = NULL_TREE; size_varies = false; /* When the pointed-to type involves components of variable size, care must be taken to ensure that the size evaluation code is emitted early enough to dominate all the possible later uses and late enough for the variables on which it depends to have been assigned. This is expected to happen automatically when the pointed-to type has a name/declaration of it's own, but special attention is required if the type is anonymous. 
We handle the NORMAL and FIELD contexts here by attaching an artificial TYPE_DECL to such pointed-to type. This forces the sizes evaluation at a safe point and ensures it is not deferred until e.g. within a deeper conditional context. We expect nothing to be needed here for PARM or TYPENAME. Pushing a TYPE_DECL at this point for TYPENAME would actually be incorrect, as we might be in the middle of an expression with side effects on the pointed-to type size "arguments" prior to the pointer declaration point and the fake TYPE_DECL in the enclosing context would force the size evaluation prior to the side effects. */ if (!TYPE_NAME (type) && (decl_context == NORMAL || decl_context == FIELD) && variably_modified_type_p (type, NULL_TREE)) { tree decl = build_decl (loc, TYPE_DECL, NULL_TREE, type); DECL_ARTIFICIAL (decl) = 1; pushdecl (decl); finish_decl (decl, loc, NULL_TREE, NULL_TREE, NULL_TREE); TYPE_NAME (type) = decl; } type = c_build_pointer_type (type); /* Process type qualifiers (such as const or volatile) that were given inside the `*'. */ type_quals = declarator->u.pointer_quals; declarator = declarator->declarator; break; } default: gcc_unreachable (); } } *decl_attrs = chainon (returned_attrs, *decl_attrs); /* Now TYPE has the actual type, apart from any qualifiers in TYPE_QUALS. */ /* Warn about address space used for things other than static memory or pointers. 
*/ address_space = DECODE_QUAL_ADDR_SPACE (type_quals); if (!ADDR_SPACE_GENERIC_P (address_space)) { if (decl_context == NORMAL) { switch (storage_class) { case csc_auto: error ("%qs combined with %<auto%> qualifier for %qE", c_addr_space_name (address_space), name); break; case csc_register: error ("%qs combined with %<register%> qualifier for %qE", c_addr_space_name (address_space), name); break; case csc_none: if (current_function_scope) { error ("%qs specified for auto variable %qE", c_addr_space_name (address_space), name); break; } break; case csc_static: case csc_extern: case csc_typedef: break; default: gcc_unreachable (); } } else if (decl_context == PARM && TREE_CODE (type) != ARRAY_TYPE) { if (name) error ("%qs specified for parameter %qE", c_addr_space_name (address_space), name); else error ("%qs specified for unnamed parameter", c_addr_space_name (address_space)); } else if (decl_context == FIELD) { if (name) error ("%qs specified for structure field %qE", c_addr_space_name (address_space), name); else error ("%qs specified for structure field", c_addr_space_name (address_space)); } } /* Check the type and width of a bit-field. */ if (bitfield) { check_bitfield_type_and_width (&type, width, name); /* C11 makes it implementation-defined (6.7.2.1#5) whether atomic types are permitted for bit-fields; we have no code to make bit-field accesses atomic, so disallow them. */ if (type_quals & TYPE_QUAL_ATOMIC) { if (name) error ("bit-field %qE has atomic type", name); else error ("bit-field has atomic type"); type_quals &= ~TYPE_QUAL_ATOMIC; } } /* Reject invalid uses of _Alignas. 
*/ if (declspecs->alignas_p) { if (storage_class == csc_typedef) error_at (loc, "alignment specified for typedef %qE", name); else if (storage_class == csc_register) error_at (loc, "alignment specified for %<register%> object %qE", name); else if (decl_context == PARM) { if (name) error_at (loc, "alignment specified for parameter %qE", name); else error_at (loc, "alignment specified for unnamed parameter"); } else if (bitfield) { if (name) error_at (loc, "alignment specified for bit-field %qE", name); else error_at (loc, "alignment specified for unnamed bit-field"); } else if (TREE_CODE (type) == FUNCTION_TYPE) error_at (loc, "alignment specified for function %qE", name); else if (declspecs->align_log != -1) { alignas_align = 1U << declspecs->align_log; if (alignas_align < min_align_of_type (type)) { if (name) error_at (loc, "%<_Alignas%> specifiers cannot reduce " "alignment of %qE", name); else error_at (loc, "%<_Alignas%> specifiers cannot reduce " "alignment of unnamed field"); alignas_align = 0; } } } /* Did array size calculations overflow or does the array cover more than half of the address-space? */ if (TREE_CODE (type) == ARRAY_TYPE && COMPLETE_TYPE_P (type) && TREE_CODE (TYPE_SIZE_UNIT (type)) == INTEGER_CST && ! valid_constant_size_p (TYPE_SIZE_UNIT (type))) { if (name) error_at (loc, "size of array %qE is too large", name); else error_at (loc, "size of unnamed array is too large"); /* If we proceed with the array type as it is, we'll eventually crash in tree_to_[su]hwi(). */ type = error_mark_node; } /* If this is declaring a typedef name, return a TYPE_DECL. 
*/ if (storage_class == csc_typedef) { tree decl; if ((type_quals & TYPE_QUAL_ATOMIC) && TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); decl = build_decl (declarator->id_loc, TYPE_DECL, declarator->u.id, type); if (declspecs->explicit_signed_p) C_TYPEDEF_EXPLICITLY_SIGNED (decl) = 1; if (declspecs->inline_p) pedwarn (loc, 0,"typedef %q+D declared %<inline%>", decl); if (declspecs->noreturn_p) pedwarn (loc, 0,"typedef %q+D declared %<_Noreturn%>", decl); if (warn_cxx_compat && declarator->u.id != NULL_TREE) { struct c_binding *b = I_TAG_BINDING (declarator->u.id); if (b != NULL && b->decl != NULL_TREE && (B_IN_CURRENT_SCOPE (b) || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b))) && TYPE_MAIN_VARIANT (b->decl) != TYPE_MAIN_VARIANT (type)) { warning_at (declarator->id_loc, OPT_Wc___compat, ("using %qD as both a typedef and a tag is " "invalid in C++"), decl); if (b->locus != UNKNOWN_LOCATION) inform (b->locus, "originally defined here"); } } return decl; } /* If this is a type name (such as, in a cast or sizeof), compute the type and return it now. */ if (decl_context == TYPENAME) { /* Note that the grammar rejects storage classes in typenames and fields. 
*/ gcc_assert (storage_class == csc_none && !threadp && !declspecs->inline_p && !declspecs->noreturn_p); if ((type_quals & TYPE_QUAL_ATOMIC) && TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids const or volatile function types"); if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); return type; } if (pedantic && decl_context == FIELD && variably_modified_type_p (type, NULL_TREE)) { /* C99 6.7.2.1p8 */ pedwarn (loc, OPT_Wpedantic, "a member of a structure or union cannot " "have a variably modified type"); } /* Aside from typedefs and type names (handle above), `void' at top level (not within pointer) is allowed only in public variables. We don't complain about parms either, but that is because a better error message can be made later. */ if (VOID_TYPE_P (type) && decl_context != PARM && !((decl_context != FIELD && TREE_CODE (type) != FUNCTION_TYPE) && (storage_class == csc_extern || (current_scope == file_scope && !(storage_class == csc_static || storage_class == csc_register))))) { error_at (loc, "variable or field %qE declared void", name); type = integer_type_node; } /* Now create the decl, which may be a VAR_DECL, a PARM_DECL or a FUNCTION_DECL, depending on DECL_CONTEXT and TYPE. */ { tree decl; if (decl_context == PARM) { tree promoted_type; bool array_parameter_p = false; /* A parameter declared as an array of T is really a pointer to T. One declared as a function is really a pointer to a function. */ if (TREE_CODE (type) == ARRAY_TYPE) { /* Transfer const-ness of array into that of type pointed to. 
*/ type = TREE_TYPE (type); if (orig_qual_type != NULL_TREE) { if (orig_qual_indirect == 0) orig_qual_type = TREE_TYPE (orig_qual_type); else orig_qual_indirect--; } if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); type = c_build_pointer_type (type); type_quals = array_ptr_quals; if (type_quals) type = c_build_qualified_type (type, type_quals); /* We don't yet implement attributes in this context. */ if (array_ptr_attrs != NULL_TREE) warning_at (loc, OPT_Wattributes, "attributes in parameter array declarator ignored"); size_varies = false; array_parameter_p = true; } else if (TREE_CODE (type) == FUNCTION_TYPE) { if (type_quals & TYPE_QUAL_ATOMIC) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals); type = c_build_pointer_type (type); type_quals = TYPE_UNQUALIFIED; } else if (type_quals) type = c_build_qualified_type (type, type_quals); decl = build_decl (declarator->id_loc, PARM_DECL, declarator->u.id, type); if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; C_ARRAY_PARAMETER (decl) = array_parameter_p; /* Compute the type actually passed in the parmlist, for the case where there is no prototype. (For example, shorts and chars are passed as ints.) When there is a prototype, this is overridden later. */ if (type == error_mark_node) promoted_type = type; else promoted_type = c_type_promotes_to (type); DECL_ARG_TYPE (decl) = promoted_type; if (declspecs->inline_p) pedwarn (loc, 0, "parameter %q+D declared %<inline%>", decl); if (declspecs->noreturn_p) pedwarn (loc, 0, "parameter %q+D declared %<_Noreturn%>", decl); } else if (decl_context == FIELD) { /* Note that the grammar rejects storage classes in typenames and fields. 
*/ gcc_assert (storage_class == csc_none && !threadp && !declspecs->inline_p && !declspecs->noreturn_p); /* Structure field. It may not be a function. */ if (TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "field %qE declared as a function", name); type = build_pointer_type (type); } else if (TREE_CODE (type) != ERROR_MARK && !COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (type)) { if (name) error_at (loc, "field %qE has incomplete type", name); else error_at (loc, "unnamed field has incomplete type"); type = error_mark_node; } else if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == NULL_TREE) { /* We have a flexible array member through a typedef. Set suitable range. Whether this is a correct position for a flexible array member will be determined elsewhere. */ if (!in_system_header_at (input_location)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not " "support flexible array members"); type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); TYPE_DOMAIN (type) = build_range_type (sizetype, size_zero_node, NULL_TREE); } type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); decl = build_decl (declarator->id_loc, FIELD_DECL, declarator->u.id, type); DECL_NONADDRESSABLE_P (decl) = bitfield; if (bitfield && !declarator->u.id) TREE_NO_WARNING (decl) = 1; if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; } else if (TREE_CODE (type) == FUNCTION_TYPE) { if (storage_class == csc_register || threadp) { error_at (loc, "invalid storage class for function %qE", name); } else if (current_scope != file_scope) { /* Function declaration not at file scope. Storage classes other than `extern' are not allowed, C99 6.7.1p5, and `extern' makes no difference. However, GCC allows 'auto', perhaps with 'inline', to support nested functions. 
*/ if (storage_class == csc_auto) pedwarn (loc, OPT_Wpedantic, "invalid storage class for function %qE", name); else if (storage_class == csc_static) { error_at (loc, "invalid storage class for function %qE", name); if (funcdef_flag) storage_class = declspecs->storage_class = csc_none; else return 0; } } decl = build_decl (declarator->id_loc, FUNCTION_DECL, declarator->u.id, type); decl = build_decl_attribute_variant (decl, decl_attr); if (type_quals & TYPE_QUAL_ATOMIC) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && type_quals && !DECL_IN_SYSTEM_HEADER (decl)) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); /* Every function declaration is an external reference (DECL_EXTERNAL) except for those which are not at file scope and are explicitly declared "auto". This is forbidden by standard C (C99 6.7.1p5) and is interpreted by GCC to signify a forward declaration of a nested function. */ if (storage_class == csc_auto && current_scope != file_scope) DECL_EXTERNAL (decl) = 0; /* In C99, a function which is declared 'inline' with 'extern' is not an external reference (which is confusing). It means that the later definition of the function must be output in this file, C99 6.7.4p6. In GNU C89, a function declared 'extern inline' is an external reference. */ else if (declspecs->inline_p && storage_class != csc_static) DECL_EXTERNAL (decl) = ((storage_class == csc_extern) == flag_gnu89_inline); else DECL_EXTERNAL (decl) = !initialized; /* Record absence of global scope for `static' or `auto'. */ TREE_PUBLIC (decl) = !(storage_class == csc_static || storage_class == csc_auto); /* For a function definition, record the argument information block where store_parm_decls will look for it. */ if (funcdef_flag) current_function_arg_info = arg_info; if (declspecs->default_int_p) C_FUNCTION_IMPLICIT_INT (decl) = 1; /* Record presence of `inline' and `_Noreturn', if it is reasonable. 
*/ if (flag_hosted && MAIN_NAME_P (declarator->u.id)) { if (declspecs->inline_p) pedwarn (loc, 0, "cannot inline function %<main%>"); if (declspecs->noreturn_p) pedwarn (loc, 0, "%<main%> declared %<_Noreturn%>"); } else { if (declspecs->inline_p) /* Record that the function is declared `inline'. */ DECL_DECLARED_INLINE_P (decl) = 1; if (declspecs->noreturn_p) { if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 does not support %<_Noreturn%>"); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 does not support %<_Noreturn%>"); TREE_THIS_VOLATILE (decl) = 1; } } } else { /* It's a variable. */ /* An uninitialized decl with `extern' is a reference. */ int extern_ref = !initialized && storage_class == csc_extern; type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); /* C99 6.2.2p7: It is invalid (compile-time undefined behavior) to create an 'extern' declaration for a variable if there is a global declaration that is 'static' and the global declaration is not visible. (If the static declaration _is_ currently visible, the 'extern' declaration is taken to refer to that decl.) */ if (extern_ref && current_scope != file_scope) { tree global_decl = identifier_global_value (declarator->u.id); tree visible_decl = lookup_name (declarator->u.id); if (global_decl && global_decl != visible_decl && TREE_CODE (global_decl) == VAR_DECL && !TREE_PUBLIC (global_decl)) error_at (loc, "variable previously declared %<static%> " "redeclared %<extern%>"); } decl = build_decl (declarator->id_loc, VAR_DECL, declarator->u.id, type); if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; if (declspecs->inline_p) pedwarn (loc, 0, "variable %q+D declared %<inline%>", decl); if (declspecs->noreturn_p) pedwarn (loc, 0, "variable %q+D declared %<_Noreturn%>", decl); /* At file scope, an initialized extern declaration may follow a static declaration. In that case, DECL_EXTERNAL will be reset later in start_decl. 
*/ DECL_EXTERNAL (decl) = (storage_class == csc_extern); /* At file scope, the presence of a `static' or `register' storage class specifier, or the absence of all storage class specifiers makes this declaration a definition (perhaps tentative). Also, the absence of `static' makes it public. */ if (current_scope == file_scope) { TREE_PUBLIC (decl) = storage_class != csc_static; TREE_STATIC (decl) = !extern_ref; } /* Not at file scope, only `static' makes a static definition. */ else { TREE_STATIC (decl) = (storage_class == csc_static); TREE_PUBLIC (decl) = extern_ref; } if (threadp) set_decl_tls_model (decl, decl_default_tls_model (decl)); } if ((storage_class == csc_extern || (storage_class == csc_none && TREE_CODE (type) == FUNCTION_TYPE && !funcdef_flag)) && variably_modified_type_p (type, NULL_TREE)) { /* C99 6.7.5.2p2 */ if (TREE_CODE (type) == FUNCTION_TYPE) error_at (loc, "non-nested function with variably modified type"); else error_at (loc, "object with variably modified type must have " "no linkage"); } /* Record `register' declaration for warnings on & and in case doing stupid register allocation. */ if (storage_class == csc_register) { C_DECL_REGISTER (decl) = 1; DECL_REGISTER (decl) = 1; } /* Record constancy and volatility. */ c_apply_type_quals_to_decl (type_quals, decl); /* Apply _Alignas specifiers. */ if (alignas_align) { DECL_ALIGN (decl) = alignas_align * BITS_PER_UNIT; DECL_USER_ALIGN (decl) = 1; } /* If a type has volatile components, it should be stored in memory. Otherwise, the fact that those components are volatile will be ignored, and would even crash the compiler. Of course, this only makes sense on VAR,PARM, and RESULT decl's. */ if (C_TYPE_FIELDS_VOLATILE (TREE_TYPE (decl)) && (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == RESULT_DECL)) { /* It is not an error for a structure with volatile fields to be declared register, but reset DECL_REGISTER since it cannot actually go in a register. 
*/ int was_reg = C_DECL_REGISTER (decl); C_DECL_REGISTER (decl) = 0; DECL_REGISTER (decl) = 0; c_mark_addressable (decl); C_DECL_REGISTER (decl) = was_reg; } /* This is the earliest point at which we might know the assembler name of a variable. Thus, if it's known before this, die horribly. */ gcc_assert (!DECL_ASSEMBLER_NAME_SET_P (decl)); if (warn_cxx_compat && TREE_CODE (decl) == VAR_DECL && TREE_PUBLIC (decl) && TREE_STATIC (decl) && (TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (decl)) == UNION_TYPE || TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE) && TYPE_NAME (TREE_TYPE (decl)) == NULL_TREE) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat, ("non-local variable %qD with anonymous type is " "questionable in C++"), decl); return decl; } } /* Decode the parameter-list info for a function type or function definition. The argument is the value returned by `get_parm_info' (or made in c-parse.c if there is an identifier list instead of a parameter decl list). These two functions are separate because when a function returns or receives functions then each is called multiple times but the order of calls is different. The last call to `grokparms' is always the one that contains the formal parameter names of a function definition. Return a list of arg types to use in the FUNCTION_TYPE for this function. FUNCDEF_FLAG is true for a function definition, false for a mere declaration. A nonempty identifier-list gets an error message when FUNCDEF_FLAG is false. */ static tree grokparms (struct c_arg_info *arg_info, bool funcdef_flag) { tree arg_types = arg_info->types; if (funcdef_flag && arg_info->had_vla_unspec) { /* A function definition isn't function prototype scope C99 6.2.1p4. 
 */
      /* C99 6.7.5.2p4 */
      error ("%<[*]%> not allowed in other than function prototype scope");
    }

  /* An empty parameter-type list in a declaration (not a definition)
     is an unprototyped function; warn under -Wstrict-prototypes
     unless the declaration came from a system header.  */
  if (arg_types == 0 && !funcdef_flag
      && !in_system_header_at (input_location))
    warning (OPT_Wstrict_prototypes,
	     "function declaration isn%'t a prototype");

  if (arg_types == error_mark_node)
    return 0;  /* don't set TYPE_ARG_TYPES in this case */
  else if (arg_types && TREE_CODE (TREE_VALUE (arg_types)) == IDENTIFIER_NODE)
    {
      /* Old-style identifier list: parameter names without types.
	 Only meaningful on a function definition; for a mere
	 declaration, pedwarn and discard the names.  */
      if (!funcdef_flag)
	{
	  pedwarn (input_location, 0,
		   "parameter names (without types) in function declaration");
	  arg_info->parms = NULL_TREE;
	}
      else
	arg_info->parms = arg_info->types;
      arg_info->types = 0;
      return 0;
    }
  else
    {
      tree parm, type, typelt;
      unsigned int parmno;
      const char *errmsg;

      /* If there is a parameter of incomplete type in a definition,
	 this is an error.  In a declaration this is valid, and a
	 struct or union type may be completed later, before any calls
	 or definition of the function.  In the case where the tag was
	 first declared within the parameter list, a warning has
	 already been given.  If a parameter has void type, then
	 however the function cannot be defined or called, so
	 warn.  */
      for (parm = arg_info->parms, typelt = arg_types, parmno = 1;
	   parm;
	   parm = DECL_CHAIN (parm), typelt = TREE_CHAIN (typelt), parmno++)
	{
	  type = TREE_VALUE (typelt);
	  if (type == error_mark_node)
	    continue;

	  if (!COMPLETE_TYPE_P (type))
	    {
	      if (funcdef_flag)
		{
		  /* Incomplete parameter type in a definition: report
		     it and poison both the type-list entry and the
		     PARM_DECL so later passes see error_mark_node.  */
		  if (DECL_NAME (parm))
		    error_at (input_location,
			      "parameter %u (%q+D) has incomplete type",
			      parmno, parm);
		  else
		    error_at (DECL_SOURCE_LOCATION (parm),
			      "parameter %u has incomplete type",
			      parmno);
		  TREE_VALUE (typelt) = error_mark_node;
		  TREE_TYPE (parm) = error_mark_node;
		  arg_types = NULL_TREE;
		}
	      else if (VOID_TYPE_P (type))
		{
		  /* void-typed parameter in a declaration: such a
		     function can never be defined or called, so only
		     warn, don't error.  */
		  if (DECL_NAME (parm))
		    warning_at (input_location, 0,
				"parameter %u (%q+D) has void type",
				parmno, parm);
		  else
		    warning_at (DECL_SOURCE_LOCATION (parm), 0,
				"parameter %u has void type", parmno);
		}
	    }

	  /* Let the target reject parameter types it cannot handle.  */
	  errmsg = targetm.invalid_parameter_type (type);
	  if (errmsg)
	    {
	      error (errmsg);
	      TREE_VALUE (typelt) = error_mark_node;
	      TREE_TYPE (parm) = error_mark_node;
	      arg_types = NULL_TREE;
	    }

	  if (DECL_NAME (parm) && TREE_USED (parm))
	    warn_if_shadowing (parm);
	}
      return arg_types;
    }
}

/* Allocate and initialize a c_arg_info structure from the parser's
   obstack.  All fields start out empty/false.  */

struct c_arg_info *
build_arg_info (void)
{
  struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info);
  ret->parms = NULL_TREE;
  ret->tags = NULL;
  ret->types = NULL_TREE;
  ret->others = NULL_TREE;
  ret->pending_sizes = NULL;
  ret->had_vla_unspec = 0;
  return ret;
}

/* Take apart the current scope and return a c_arg_info structure with
   info on a parameter list just parsed.

   This structure is later fed to 'grokparms' and 'store_parm_decls'.

   ELLIPSIS being true means the argument list ended in '...' so don't
   append a sentinel (void_list_node) to the end of the type-list.

   EXPR is NULL or an expression that needs to be evaluated for the
   side effects of array size expressions in the parameters.
 */

struct c_arg_info *
get_parm_info (bool ellipsis, tree expr)
{
  struct c_binding *b = current_scope->bindings;
  struct c_arg_info *arg_info = build_arg_info ();

  tree parms = 0;
  vec<c_arg_tag, va_gc> *tags = NULL;
  tree types = 0;
  tree others = 0;

  /* Only explain the parameter-list-scope pitfall once per run.  */
  static bool explained_incomplete_types = false;
  bool gave_void_only_once_err = false;

  arg_info->had_vla_unspec = current_scope->had_vla_unspec;

  /* The bindings in this scope must not get put into a block.
     We will take care of deleting the binding nodes.  */
  current_scope->bindings = 0;

  /* This function is only called if there was *something* on the
     parameter list.  */
  gcc_assert (b);

  /* A parameter list consisting solely of 'void' indicates that the
     function takes no arguments.  But if the 'void' is qualified (by
     'const' or 'volatile'), or has a storage class specifier
     ('register'), then the behavior is undefined; issue an error.
     Typedefs for 'void' are OK (see DR#157).  */
  if (b->prev == 0			      /* one binding */
      && TREE_CODE (b->decl) == PARM_DECL    /* which is a parameter */
      && !DECL_NAME (b->decl)		      /* anonymous */
      && VOID_TYPE_P (TREE_TYPE (b->decl)))  /* of void type */
    {
      if (TYPE_QUALS (TREE_TYPE (b->decl)) != TYPE_UNQUALIFIED
	  || C_DECL_REGISTER (b->decl))
	error ("%<void%> as only parameter may not be qualified");

      /* There cannot be an ellipsis.  */
      if (ellipsis)
	error ("%<void%> must be the only parameter");

      arg_info->types = void_list_node;
      return arg_info;
    }

  if (!ellipsis)
    types = void_list_node;

  /* Break up the bindings list into parms, tags, types, and others;
     apply sanity checks; purge the name-to-decl bindings.  */
  while (b)
    {
      tree decl = b->decl;
      tree type = TREE_TYPE (decl);
      c_arg_tag tag;
      const char *keyword;

      switch (TREE_CODE (decl))
	{
	case PARM_DECL:
	  /* Pop this parameter's name binding off the symbol stack.  */
	  if (b->id)
	    {
	      gcc_assert (I_SYMBOL_BINDING (b->id) == b);
	      I_SYMBOL_BINDING (b->id) = b->shadowed;
	    }

	  /* Check for forward decls that never got their actual
	     decl.  */
	  if (TREE_ASM_WRITTEN (decl))
	    error ("parameter %q+D has just a forward declaration", decl);
	  /* Check for (..., void, ...) and issue an error.  */
	  else if (VOID_TYPE_P (type) && !DECL_NAME (decl))
	    {
	      if (!gave_void_only_once_err)
		{
		  error ("%<void%> must be the only parameter");
		  gave_void_only_once_err = true;
		}
	    }
	  else
	    {
	      /* Valid parameter, add it to the list.  */
	      DECL_CHAIN (decl) = parms;
	      parms = decl;

	      /* Since there is a prototype, args are passed in their
		 declared types.  The back end may override this
		 later.  */
	      DECL_ARG_TYPE (decl) = type;
	      types = tree_cons (0, type, types);
	    }
	  break;

	case ENUMERAL_TYPE: keyword = "enum"; goto tag;
	case UNION_TYPE:    keyword = "union"; goto tag;
	case RECORD_TYPE:   keyword = "struct"; goto tag;
	tag:
	  /* Types may not have tag-names, in which case the type
	     appears in the bindings list with b->id NULL.  */
	  if (b->id)
	    {
	      gcc_assert (I_TAG_BINDING (b->id) == b);
	      I_TAG_BINDING (b->id) = b->shadowed;
	    }

	  /* Warn about any struct, union or enum tags defined in a
	     parameter list.  The scope of such types is limited to
	     the parameter list, which is rarely if ever desirable
	     (it's impossible to call such a function with type-
	     correct arguments).  An anonymous union parm type is
	     meaningful as a GNU extension, so don't warn for that.  */
	  if (TREE_CODE (decl) != UNION_TYPE || b->id != 0)
	    {
	      if (b->id)
		/* The %s will be one of 'struct', 'union', or
		   'enum'.  */
		warning (0, "%<%s %E%> declared inside parameter list",
			 keyword, b->id);
	      else
		/* The %s will be one of 'struct', 'union', or
		   'enum'.  */
		warning (0, "anonymous %s declared inside parameter list",
			 keyword);

	      if (!explained_incomplete_types)
		{
		  warning (0, "its scope is only this definition or declaration,"
			   " which is probably not what you want");
		  explained_incomplete_types = true;
		}
	    }

	  /* Record the tag so callers can re-declare it in the
	     function body's scope.  */
	  tag.id = b->id;
	  tag.type = decl;
	  vec_safe_push (tags, tag);
	  break;

	case CONST_DECL:
	case TYPE_DECL:
	case FUNCTION_DECL:
	  /* CONST_DECLs appear here when we have an embedded enum,
	     and TYPE_DECLs appear here when we have an embedded struct
	     or union.  No warnings for this - we already warned about
	     the type itself.  FUNCTION_DECLs appear when there is an
	     implicit function declaration in the parameter list.  */

	  /* When we reinsert this decl in the function body, we need
	     to reconstruct whether it was marked as nested.  */
	  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL
		      ? b->nested
		      : !b->nested);
	  DECL_CHAIN (decl) = others;
	  others = decl;
	  /* fall through */

	case ERROR_MARK:
	  /* error_mark_node appears here when we have an undeclared
	     variable.  Just throw it away.  */
	  if (b->id)
	    {
	      gcc_assert (I_SYMBOL_BINDING (b->id) == b);
	      I_SYMBOL_BINDING (b->id) = b->shadowed;
	    }
	  break;

	  /* Other things that might be encountered.  */
	case LABEL_DECL:
	case VAR_DECL:
	default:
	  gcc_unreachable ();
	}

      b = free_binding_and_advance (b);
    }

  arg_info->parms = parms;
  arg_info->tags = tags;
  arg_info->types = types;
  arg_info->others = others;
  arg_info->pending_sizes = expr;
  return arg_info;
}

/* Get the struct, enum or union (CODE says which) with tag NAME.
   Define the tag as a forward-reference with location LOC if it is
   not defined.  Return a c_typespec structure for the type
   specifier.  */

struct c_typespec
parser_xref_tag (location_t loc, enum tree_code code, tree name)
{
  struct c_typespec ret;
  tree ref;
  location_t refloc;

  ret.expr = NULL_TREE;
  ret.expr_const_operands = true;

  /* If a cross reference is requested, look up the type already
     defined for this tag and return it.
*/ ref = lookup_tag (code, name, 0, &refloc); /* If this is the right type of tag, return what we found. (This reference will be shadowed by shadow_tag later if appropriate.) If this is the wrong type of tag, do not return it. If it was the wrong type in the same scope, we will have had an error message already; if in a different scope and declaring a name, pending_xref_error will give an error message; but if in a different scope and not declaring a name, this tag should shadow the previous declaration of a different type of tag, and this would not work properly if we return the reference found. (For example, with "struct foo" in an outer scope, "union foo;" must shadow that tag with a new one of union type.) */ ret.kind = (ref ? ctsk_tagref : ctsk_tagfirstref); if (ref && TREE_CODE (ref) == code) { if (C_TYPE_DEFINED_IN_STRUCT (ref) && loc != UNKNOWN_LOCATION && warn_cxx_compat) { switch (code) { case ENUMERAL_TYPE: warning_at (loc, OPT_Wc___compat, ("enum type defined in struct or union " "is not visible in C++")); inform (refloc, "enum type defined here"); break; case RECORD_TYPE: warning_at (loc, OPT_Wc___compat, ("struct defined in struct or union " "is not visible in C++")); inform (refloc, "struct defined here"); break; case UNION_TYPE: warning_at (loc, OPT_Wc___compat, ("union defined in struct or union " "is not visible in C++")); inform (refloc, "union defined here"); break; default: gcc_unreachable(); } } ret.spec = ref; return ret; } /* If no such tag is yet defined, create a forward-reference node and record it as the "definition". When a real declaration of this type is found, the forward-reference will be altered into a real type. */ ref = make_node (code); if (code == ENUMERAL_TYPE) { /* Give the type a default layout like unsigned int to avoid crashing if it does not get defined. 
*/ SET_TYPE_MODE (ref, TYPE_MODE (unsigned_type_node)); TYPE_ALIGN (ref) = TYPE_ALIGN (unsigned_type_node); TYPE_USER_ALIGN (ref) = 0; TYPE_UNSIGNED (ref) = 1; TYPE_PRECISION (ref) = TYPE_PRECISION (unsigned_type_node); TYPE_MIN_VALUE (ref) = TYPE_MIN_VALUE (unsigned_type_node); TYPE_MAX_VALUE (ref) = TYPE_MAX_VALUE (unsigned_type_node); } pushtag (loc, name, ref); ret.spec = ref; return ret; } /* Get the struct, enum or union (CODE says which) with tag NAME. Define the tag as a forward-reference if it is not defined. Return a tree for the type. */ tree xref_tag (enum tree_code code, tree name) { return parser_xref_tag (input_location, code, name).spec; } /* Make sure that the tag NAME is defined *in the current scope* at least as a forward reference. LOC is the location of the struct's definition. CODE says which kind of tag NAME ought to be. This stores the current value of the file static STRUCT_PARSE_INFO in *ENCLOSING_STRUCT_PARSE_INFO, and points STRUCT_PARSE_INFO at a new c_struct_parse_info structure. The old value of STRUCT_PARSE_INFO is restored in finish_struct. */ tree start_struct (location_t loc, enum tree_code code, tree name, struct c_struct_parse_info **enclosing_struct_parse_info) { /* If there is already a tag defined at this scope (as a forward reference), just return it. */ tree ref = NULL_TREE; location_t refloc = UNKNOWN_LOCATION; if (name != NULL_TREE) ref = lookup_tag (code, name, 1, &refloc); if (ref && TREE_CODE (ref) == code) { if (TYPE_SIZE (ref)) { if (code == UNION_TYPE) error_at (loc, "redefinition of %<union %E%>", name); else error_at (loc, "redefinition of %<struct %E%>", name); if (refloc != UNKNOWN_LOCATION) inform (refloc, "originally defined here"); /* Don't create structures using a name already in use. 
*/ ref = NULL_TREE; } else if (C_TYPE_BEING_DEFINED (ref)) { if (code == UNION_TYPE) error_at (loc, "nested redefinition of %<union %E%>", name); else error_at (loc, "nested redefinition of %<struct %E%>", name); /* Don't bother to report "originally defined here" for a nested redefinition; the original definition should be obvious. */ /* Don't create structures that contain themselves. */ ref = NULL_TREE; } } /* Otherwise create a forward-reference just so the tag is in scope. */ if (ref == NULL_TREE || TREE_CODE (ref) != code) { ref = make_node (code); pushtag (loc, name, ref); } C_TYPE_BEING_DEFINED (ref) = 1; TYPE_PACKED (ref) = flag_pack_struct; *enclosing_struct_parse_info = struct_parse_info; struct_parse_info = XNEW (struct c_struct_parse_info); struct_parse_info->struct_types.create (0); struct_parse_info->fields.create (0); struct_parse_info->typedefs_seen.create (0); /* FIXME: This will issue a warning for a use of a type defined within a statement expr used within sizeof, et. al. This is not terribly serious as C++ doesn't permit statement exprs within sizeof anyhow. */ if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof)) warning_at (loc, OPT_Wc___compat, "defining type in %qs expression is invalid in C++", (in_sizeof ? "sizeof" : (in_typeof ? "typeof" : "alignof"))); return ref; } /* Process the specs, declarator and width (NULL if omitted) of a structure component, returning a FIELD_DECL node. WIDTH is non-NULL for bit-fields only, and is an INTEGER_CST node. DECL_ATTRS is as for grokdeclarator. LOC is the location of the structure component. This is done during the parsing of the struct declaration. The FIELD_DECL nodes are chained together and the lot of them are ultimately passed to `build_struct' to make the RECORD_TYPE node. 
*/

tree
grokfield (location_t loc, struct c_declarator *declarator,
	   struct c_declspecs *declspecs, tree width, tree *decl_attrs)
{
  tree value;

  if (declarator->kind == cdk_id && declarator->u.id == NULL_TREE
      && width == NULL_TREE)
    {
      /* This is an unnamed decl.

	 If we have something of the form "union { list } ;" then this
	 is the anonymous union extension.  Similarly for struct.

	 If this is something of the form "struct foo;", then
	   If MS or Plan 9 extensions are enabled, this is handled as
	     an anonymous struct.
	   Otherwise this is a forward declaration of a structure tag.

	 If this is something of the form "foo;" and foo is a TYPE_DECL, then
	   If foo names a structure or union without a tag, then this
	     is an anonymous struct (this is permitted by C11).
	   If MS or Plan 9 extensions are enabled and foo names a
	     structure, then again this is an anonymous struct.
	   Otherwise this is an error.

	 Oh what a horrid tangled web we weave.  I wonder if MS consciously
	 took this from Plan 9 or if it was an accident of implementation
	 that took root before someone noticed the bug...  */

      tree type = declspecs->type;
      bool type_ok = (TREE_CODE (type) == RECORD_TYPE
		      || TREE_CODE (type) == UNION_TYPE);
      bool ok = false;

      if (type_ok
	  && (flag_ms_extensions
	      || flag_plan9_extensions
	      || !declspecs->typedef_p))
	{
	  if (flag_ms_extensions || flag_plan9_extensions)
	    ok = true;
	  else if (TYPE_NAME (type) == NULL)
	    ok = true;
	  else
	    ok = false;
	}
      if (!ok)
	{
	  pedwarn (loc, 0, "declaration does not declare anything");
	  return NULL_TREE;
	}
      /* Anonymous members are a C11 feature; pedwarn under the
	 dialect actually in effect.  */
      if (flag_isoc99)
	pedwarn_c99 (loc, OPT_Wpedantic,
		     "ISO C99 doesn%'t support unnamed structs/unions");
      else
	pedwarn_c99 (loc, OPT_Wpedantic,
		     "ISO C90 doesn%'t support unnamed structs/unions");
    }

  value = grokdeclarator (declarator, declspecs, FIELD, false,
			  width ? &width : NULL, decl_attrs, NULL, NULL,
			  DEPRECATED_NORMAL);

  finish_decl (value, loc, NULL_TREE, NULL_TREE, NULL_TREE);
  /* Stash the bit-field width in DECL_INITIAL; finish_struct reads it
     back to size the field and then clears it.  */
  DECL_INITIAL (value) = width;

  if (warn_cxx_compat && DECL_NAME (value) != NULL_TREE)
    {
      /* If we currently have a binding for this field, set the
	 in_struct field in the binding, so that we warn about lookups
	 which find it.  */
      struct c_binding *b = I_SYMBOL_BINDING (DECL_NAME (value));
      if (b != NULL)
	{
	  /* If the in_struct field is not yet set, push it on a list
	     to be cleared when this struct is finished.  */
	  if (!b->in_struct)
	    {
	      struct_parse_info->fields.safe_push (b);
	      b->in_struct = 1;
	    }
	}
    }

  return value;
}

/* Subroutine of detect_field_duplicates: return whether X and Y,
   which are both fields in the same struct, have duplicate field
   names.  */

static bool
is_duplicate_field (tree x, tree y)
{
  if (DECL_NAME (x) != NULL_TREE && DECL_NAME (x) == DECL_NAME (y))
    return true;

  /* When using -fplan9-extensions, an anonymous field whose name is a
     typedef can duplicate a field name.  */
  if (flag_plan9_extensions
      && (DECL_NAME (x) == NULL_TREE || DECL_NAME (y) == NULL_TREE))
    {
      tree xt, xn, yt, yn;

      /* For an anonymous field, use the typedef name of its
	 struct/union type as the effective field name.  */
      xt = TREE_TYPE (x);
      if (DECL_NAME (x) != NULL_TREE)
	xn = DECL_NAME (x);
      else if ((TREE_CODE (xt) == RECORD_TYPE || TREE_CODE (xt) == UNION_TYPE)
	       && TYPE_NAME (xt) != NULL_TREE
	       && TREE_CODE (TYPE_NAME (xt)) == TYPE_DECL)
	xn = DECL_NAME (TYPE_NAME (xt));
      else
	xn = NULL_TREE;

      yt = TREE_TYPE (y);
      if (DECL_NAME (y) != NULL_TREE)
	yn = DECL_NAME (y);
      else if ((TREE_CODE (yt) == RECORD_TYPE || TREE_CODE (yt) == UNION_TYPE)
	       && TYPE_NAME (yt) != NULL_TREE
	       && TREE_CODE (TYPE_NAME (yt)) == TYPE_DECL)
	yn = DECL_NAME (TYPE_NAME (yt));
      else
	yn = NULL_TREE;

      if (xn != NULL_TREE && xn == yn)
	return true;
    }

  return false;
}

/* Subroutine of detect_field_duplicates: add the fields of FIELDLIST
   to HTAB, giving errors for any duplicates.
*/ static void detect_field_duplicates_hash (tree fieldlist, hash_table<pointer_hash <tree_node> > *htab) { tree x, y; tree_node **slot; for (x = fieldlist; x ; x = DECL_CHAIN (x)) if ((y = DECL_NAME (x)) != 0) { slot = htab->find_slot (y, INSERT); if (*slot) { error ("duplicate member %q+D", x); DECL_NAME (x) = NULL_TREE; } *slot = y; } else if (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE) { detect_field_duplicates_hash (TYPE_FIELDS (TREE_TYPE (x)), htab); /* When using -fplan9-extensions, an anonymous field whose name is a typedef can duplicate a field name. */ if (flag_plan9_extensions && TYPE_NAME (TREE_TYPE (x)) != NULL_TREE && TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL) { tree xn = DECL_NAME (TYPE_NAME (TREE_TYPE (x))); slot = htab->find_slot (xn, INSERT); if (*slot) error ("duplicate member %q+D", TYPE_NAME (TREE_TYPE (x))); *slot = xn; } } } /* Generate an error for any duplicate field names in FIELDLIST. Munge the list such that this does not present a problem later. */ static void detect_field_duplicates (tree fieldlist) { tree x, y; int timeout = 10; /* If the struct is the list of instance variables of an Objective-C class, then we need to check all the instance variables of superclasses when checking for duplicates (since you can't have an instance variable in a subclass with the same name as an instance variable in a superclass). We pass on this job to the Objective-C compiler. objc_detect_field_duplicates() will return false if we are not checking the list of instance variables and the C frontend should proceed with the standard field duplicate checks. If we are checking the list of instance variables, the ObjC frontend will do the check, emit the errors if needed, and then return true. */ if (c_dialect_objc ()) if (objc_detect_field_duplicates (false)) return; /* First, see if there are more than "a few" fields. This is trivially true if there are zero or one fields. 
*/ if (!fieldlist || !DECL_CHAIN (fieldlist)) return; x = fieldlist; do { timeout--; if (DECL_NAME (x) == NULL_TREE && (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE)) timeout = 0; x = DECL_CHAIN (x); } while (timeout > 0 && x); /* If there were "few" fields and no anonymous structures or unions, avoid the overhead of allocating a hash table. Instead just do the nested traversal thing. */ if (timeout > 0) { for (x = DECL_CHAIN (fieldlist); x; x = DECL_CHAIN (x)) /* When using -fplan9-extensions, we can have duplicates between typedef names and fields. */ if (DECL_NAME (x) || (flag_plan9_extensions && DECL_NAME (x) == NULL_TREE && (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE) && TYPE_NAME (TREE_TYPE (x)) != NULL_TREE && TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL)) { for (y = fieldlist; y != x; y = TREE_CHAIN (y)) if (is_duplicate_field (y, x)) { error ("duplicate member %q+D", x); DECL_NAME (x) = NULL_TREE; } } } else { hash_table<pointer_hash <tree_node> > htab (37); detect_field_duplicates_hash (fieldlist, &htab); } } /* Finish up struct info used by -Wc++-compat. */ static void warn_cxx_compat_finish_struct (tree fieldlist) { unsigned int ix; tree x; struct c_binding *b; /* Set the C_TYPE_DEFINED_IN_STRUCT flag for each type defined in the current struct. We do this now at the end of the struct because the flag is used to issue visibility warnings, and we only want to issue those warnings if the type is referenced outside of the struct declaration. */ FOR_EACH_VEC_ELT (struct_parse_info->struct_types, ix, x) C_TYPE_DEFINED_IN_STRUCT (x) = 1; /* The TYPEDEFS_SEEN field of STRUCT_PARSE_INFO is a list of typedefs used when declaring fields in this struct. 
If the name of any of the fields is also a typedef name then the struct would not parse in C++, because the C++ lookup rules say that the typedef name would be looked up in the context of the struct, and would thus be the field rather than the typedef. */ if (!struct_parse_info->typedefs_seen.is_empty () && fieldlist != NULL_TREE) { /* Use a hash_set<tree> using the name of the typedef. We can use a hash_set<tree> because identifiers are interned. */ hash_set<tree> tset; FOR_EACH_VEC_ELT (struct_parse_info->typedefs_seen, ix, x) tset.add (DECL_NAME (x)); for (x = fieldlist; x != NULL_TREE; x = DECL_CHAIN (x)) { if (DECL_NAME (x) != NULL_TREE && tset.contains (DECL_NAME (x))) { warning_at (DECL_SOURCE_LOCATION (x), OPT_Wc___compat, ("using %qD as both field and typedef name is " "invalid in C++"), x); /* FIXME: It would be nice to report the location where the typedef name is used. */ } } } /* For each field which has a binding and which was not defined in an enclosing struct, clear the in_struct field. */ FOR_EACH_VEC_ELT (struct_parse_info->fields, ix, b) b->in_struct = 0; } /* Fill in the fields of a RECORD_TYPE or UNION_TYPE node, T. LOC is the location of the RECORD_TYPE or UNION_TYPE's definition. FIELDLIST is a chain of FIELD_DECL nodes for the fields. ATTRIBUTES are attributes to be applied to the structure. ENCLOSING_STRUCT_PARSE_INFO is the value of STRUCT_PARSE_INFO when the struct was started. */ tree finish_struct (location_t loc, tree t, tree fieldlist, tree attributes, struct c_struct_parse_info *enclosing_struct_parse_info) { tree x; bool toplevel = file_scope == current_scope; int saw_named_field; /* If this type was previously laid out as a forward reference, make sure we lay it out again. 
*/ TYPE_SIZE (t) = 0; decl_attributes (&t, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE); if (pedantic) { for (x = fieldlist; x; x = DECL_CHAIN (x)) { if (DECL_NAME (x) != 0) break; if (flag_isoc11 && (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE)) break; } if (x == 0) { if (TREE_CODE (t) == UNION_TYPE) { if (fieldlist) pedwarn (loc, OPT_Wpedantic, "union has no named members"); else pedwarn (loc, OPT_Wpedantic, "union has no members"); } else { if (fieldlist) pedwarn (loc, OPT_Wpedantic, "struct has no named members"); else pedwarn (loc, OPT_Wpedantic, "struct has no members"); } } } /* Install struct as DECL_CONTEXT of each field decl. Also process specified field sizes, found in the DECL_INITIAL, storing 0 there after the type has been changed to precision equal to its width, rather than the precision of the specified standard type. (Correct layout requires the original type to have been preserved until now.) */ saw_named_field = 0; for (x = fieldlist; x; x = DECL_CHAIN (x)) { if (TREE_TYPE (x) == error_mark_node) continue; DECL_CONTEXT (x) = t; /* If any field is const, the structure type is pseudo-const. */ if (TREE_READONLY (x)) C_TYPE_FIELDS_READONLY (t) = 1; else { /* A field that is pseudo-const makes the structure likewise. */ tree t1 = strip_array_types (TREE_TYPE (x)); if ((TREE_CODE (t1) == RECORD_TYPE || TREE_CODE (t1) == UNION_TYPE) && C_TYPE_FIELDS_READONLY (t1)) C_TYPE_FIELDS_READONLY (t) = 1; } /* Any field that is volatile means variables of this type must be treated in some ways as volatile. */ if (TREE_THIS_VOLATILE (x)) C_TYPE_FIELDS_VOLATILE (t) = 1; /* Any field of nominal variable size implies structure is too. 
*/ if (C_DECL_VARIABLE_SIZE (x)) C_TYPE_VARIABLE_SIZE (t) = 1; if (DECL_INITIAL (x)) { unsigned HOST_WIDE_INT width = tree_to_uhwi (DECL_INITIAL (x)); DECL_SIZE (x) = bitsize_int (width); DECL_BIT_FIELD (x) = 1; SET_DECL_C_BIT_FIELD (x); } if (TYPE_PACKED (t) && (DECL_BIT_FIELD (x) || TYPE_ALIGN (TREE_TYPE (x)) > BITS_PER_UNIT)) DECL_PACKED (x) = 1; /* Detect flexible array member in an invalid context. */ if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE && TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE && TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE) { if (TREE_CODE (t) == UNION_TYPE) { error_at (DECL_SOURCE_LOCATION (x), "flexible array member in union"); TREE_TYPE (x) = error_mark_node; } else if (DECL_CHAIN (x) != NULL_TREE) { error_at (DECL_SOURCE_LOCATION (x), "flexible array member not at end of struct"); TREE_TYPE (x) = error_mark_node; } else if (!saw_named_field) { error_at (DECL_SOURCE_LOCATION (x), "flexible array member in otherwise empty struct"); TREE_TYPE (x) = error_mark_node; } } if (pedantic && TREE_CODE (t) == RECORD_TYPE && flexible_array_type_p (TREE_TYPE (x))) pedwarn (DECL_SOURCE_LOCATION (x), OPT_Wpedantic, "invalid use of structure with flexible array member"); if (DECL_NAME (x) || TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE) saw_named_field = 1; } detect_field_duplicates (fieldlist); /* Now we have the nearly final fieldlist. Record it, then lay out the structure or union (including the fields). */ TYPE_FIELDS (t) = fieldlist; layout_type (t); if (TYPE_SIZE_UNIT (t) && TREE_CODE (TYPE_SIZE_UNIT (t)) == INTEGER_CST && !TREE_OVERFLOW (TYPE_SIZE_UNIT (t)) && !valid_constant_size_p (TYPE_SIZE_UNIT (t))) error ("type %qT is too large", t); /* Give bit-fields their proper types. 
*/ { tree *fieldlistp = &fieldlist; while (*fieldlistp) if (TREE_CODE (*fieldlistp) == FIELD_DECL && DECL_INITIAL (*fieldlistp) && TREE_TYPE (*fieldlistp) != error_mark_node) { unsigned HOST_WIDE_INT width = tree_to_uhwi (DECL_INITIAL (*fieldlistp)); tree type = TREE_TYPE (*fieldlistp); if (width != TYPE_PRECISION (type)) { TREE_TYPE (*fieldlistp) = c_build_bitfield_integer_type (width, TYPE_UNSIGNED (type)); DECL_MODE (*fieldlistp) = TYPE_MODE (TREE_TYPE (*fieldlistp)); } DECL_INITIAL (*fieldlistp) = 0; } else fieldlistp = &DECL_CHAIN (*fieldlistp); } /* Now we have the truly final field list. Store it in this type and in the variants. */ TYPE_FIELDS (t) = fieldlist; /* If there are lots of fields, sort so we can look through them fast. We arbitrarily consider 16 or more elts to be "a lot". */ { int len = 0; for (x = fieldlist; x; x = DECL_CHAIN (x)) { if (len > 15 || DECL_NAME (x) == NULL) break; len += 1; } if (len > 15) { tree *field_array; struct lang_type *space; struct sorted_fields_type *space2; len += list_length (x); /* Use the same allocation policy here that make_node uses, to ensure that this lives as long as the rest of the struct decl. All decls in an inline function need to be saved. */ space = ggc_cleared_alloc<struct lang_type> (); space2 = (sorted_fields_type *) ggc_internal_alloc (sizeof (struct sorted_fields_type) + len * sizeof (tree)); len = 0; space->s = space2; field_array = &space2->elts[0]; for (x = fieldlist; x; x = DECL_CHAIN (x)) { field_array[len++] = x; /* If there is anonymous struct or union, break out of the loop. */ if (DECL_NAME (x) == NULL) break; } /* Found no anonymous struct/union. Add the TYPE_LANG_SPECIFIC. 
*/ if (x == NULL) { TYPE_LANG_SPECIFIC (t) = space; TYPE_LANG_SPECIFIC (t)->s->len = len; field_array = TYPE_LANG_SPECIFIC (t)->s->elts; qsort (field_array, len, sizeof (tree), field_decl_cmp); } } } for (x = TYPE_MAIN_VARIANT (t); x; x = TYPE_NEXT_VARIANT (x)) { TYPE_FIELDS (x) = TYPE_FIELDS (t); TYPE_LANG_SPECIFIC (x) = TYPE_LANG_SPECIFIC (t); C_TYPE_FIELDS_READONLY (x) = C_TYPE_FIELDS_READONLY (t); C_TYPE_FIELDS_VOLATILE (x) = C_TYPE_FIELDS_VOLATILE (t); C_TYPE_VARIABLE_SIZE (x) = C_TYPE_VARIABLE_SIZE (t); } /* If this was supposed to be a transparent union, but we can't make it one, warn and turn off the flag. */ if (TREE_CODE (t) == UNION_TYPE && TYPE_TRANSPARENT_AGGR (t) && (!TYPE_FIELDS (t) || TYPE_MODE (t) != DECL_MODE (TYPE_FIELDS (t)))) { TYPE_TRANSPARENT_AGGR (t) = 0; warning_at (loc, 0, "union cannot be made transparent"); } /* If this structure or union completes the type of any previous variable declaration, lay it out and output its rtl. */ for (x = C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (t)); x; x = TREE_CHAIN (x)) { tree decl = TREE_VALUE (x); if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE) layout_array_type (TREE_TYPE (decl)); if (TREE_CODE (decl) != TYPE_DECL) { layout_decl (decl, 0); if (c_dialect_objc ()) objc_check_decl (decl); rest_of_decl_compilation (decl, toplevel, 0); } } C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (t)) = 0; /* Update type location to the one of the definition, instead of e.g. a forward declaration. */ if (TYPE_STUB_DECL (t)) DECL_SOURCE_LOCATION (TYPE_STUB_DECL (t)) = loc; /* Finish debugging output for this type. */ rest_of_type_compilation (t, toplevel); /* If we're inside a function proper, i.e. not file-scope and not still parsing parameters, then arrange for the size of a variable sized type to be bound now. 
*/ if (building_stmt_list_p () && variably_modified_type_p (t, NULL_TREE)) add_stmt (build_stmt (loc, DECL_EXPR, build_decl (loc, TYPE_DECL, NULL, t))); if (warn_cxx_compat) warn_cxx_compat_finish_struct (fieldlist); struct_parse_info->struct_types.release (); struct_parse_info->fields.release (); struct_parse_info->typedefs_seen.release (); XDELETE (struct_parse_info); struct_parse_info = enclosing_struct_parse_info; /* If this struct is defined inside a struct, add it to struct_types. */ if (warn_cxx_compat && struct_parse_info != NULL && !in_sizeof && !in_typeof && !in_alignof) struct_parse_info->struct_types.safe_push (t); return t; } /* Lay out the type T, and its element type, and so on. */ static void layout_array_type (tree t) { if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) layout_array_type (TREE_TYPE (t)); layout_type (t); } /* Begin compiling the definition of an enumeration type. NAME is its name (or null if anonymous). LOC is the enum's location. Returns the type object, as yet incomplete. Also records info about it so that build_enumerator may be used to declare the individual values as they are read. */ tree start_enum (location_t loc, struct c_enum_contents *the_enum, tree name) { tree enumtype = NULL_TREE; location_t enumloc = UNKNOWN_LOCATION; /* If this is the real definition for a previous forward reference, fill in the contents in the same object that used to be the forward reference. */ if (name != NULL_TREE) enumtype = lookup_tag (ENUMERAL_TYPE, name, 1, &enumloc); if (enumtype == 0 || TREE_CODE (enumtype) != ENUMERAL_TYPE) { enumtype = make_node (ENUMERAL_TYPE); pushtag (loc, name, enumtype); } if (C_TYPE_BEING_DEFINED (enumtype)) error_at (loc, "nested redefinition of %<enum %E%>", name); C_TYPE_BEING_DEFINED (enumtype) = 1; if (TYPE_VALUES (enumtype) != 0) { /* This enum is a named one that has been declared already. 
*/ error_at (loc, "redeclaration of %<enum %E%>", name); if (enumloc != UNKNOWN_LOCATION) inform (enumloc, "originally defined here"); /* Completely replace its old definition. The old enumerators remain defined, however. */ TYPE_VALUES (enumtype) = 0; } the_enum->enum_next_value = integer_zero_node; the_enum->enum_overflow = 0; if (flag_short_enums) TYPE_PACKED (enumtype) = 1; /* FIXME: This will issue a warning for a use of a type defined within sizeof in a statement expr. This is not terribly serious as C++ doesn't permit statement exprs within sizeof anyhow. */ if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof)) warning_at (loc, OPT_Wc___compat, "defining type in %qs expression is invalid in C++", (in_sizeof ? "sizeof" : (in_typeof ? "typeof" : "alignof"))); return enumtype; } /* After processing and defining all the values of an enumeration type, install their decls in the enumeration type and finish it off. ENUMTYPE is the type object, VALUES a list of decl-value pairs, and ATTRIBUTES are the specified attributes. Returns ENUMTYPE. */ tree finish_enum (tree enumtype, tree values, tree attributes) { tree pair, tem; tree minnode = 0, maxnode = 0; int precision; signop sign; bool toplevel = (file_scope == current_scope); struct lang_type *lt; decl_attributes (&enumtype, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE); /* Calculate the maximum value of any enumerator in this type. */ if (values == error_mark_node) minnode = maxnode = integer_zero_node; else { minnode = maxnode = TREE_VALUE (values); for (pair = TREE_CHAIN (values); pair; pair = TREE_CHAIN (pair)) { tree value = TREE_VALUE (pair); if (tree_int_cst_lt (maxnode, value)) maxnode = value; if (tree_int_cst_lt (value, minnode)) minnode = value; } } /* Construct the final type of this enumeration. It is the same as one of the integral types - the narrowest one that fits, except that normally we only go as narrow as int - and signed iff any of the values are negative. 
*/ sign = (tree_int_cst_sgn (minnode) >= 0) ? UNSIGNED : SIGNED; precision = MAX (tree_int_cst_min_precision (minnode, sign), tree_int_cst_min_precision (maxnode, sign)); /* If the precision of the type was specified with an attribute and it was too small, give an error. Otherwise, use it. */ if (TYPE_PRECISION (enumtype)) { if (precision > TYPE_PRECISION (enumtype)) { TYPE_PRECISION (enumtype) = 0; error ("specified mode too small for enumeral values"); } else precision = TYPE_PRECISION (enumtype); } if (TYPE_PACKED (enumtype) || precision > TYPE_PRECISION (integer_type_node) || TYPE_PRECISION (enumtype)) { tem = c_common_type_for_size (precision, sign == UNSIGNED ? 1 : 0); if (tem == NULL) { warning (0, "enumeration values exceed range of largest integer"); tem = long_long_integer_type_node; } } else tem = sign == UNSIGNED ? unsigned_type_node : integer_type_node; TYPE_MIN_VALUE (enumtype) = TYPE_MIN_VALUE (tem); TYPE_MAX_VALUE (enumtype) = TYPE_MAX_VALUE (tem); TYPE_UNSIGNED (enumtype) = TYPE_UNSIGNED (tem); TYPE_SIZE (enumtype) = 0; TYPE_PRECISION (enumtype) = TYPE_PRECISION (tem); layout_type (enumtype); if (values != error_mark_node) { /* Change the type of the enumerators to be the enum type. We need to do this irrespective of the size of the enum, for proper type checking. Replace the DECL_INITIALs of the enumerators, and the value slots of the list, with copies that have the enum type; they cannot be modified in place because they may be shared (e.g. integer_zero_node) Finally, change the purpose slots to point to the names of the decls. */ for (pair = values; pair; pair = TREE_CHAIN (pair)) { tree enu = TREE_PURPOSE (pair); tree ini = DECL_INITIAL (enu); TREE_TYPE (enu) = enumtype; /* The ISO C Standard mandates enumerators to have type int, even though the underlying type of an enum type is unspecified. However, GCC allows enumerators of any integer type as an extensions. 
build_enumerator() converts any enumerators that fit in an int to type int, to avoid promotions to unsigned types when comparing integers with enumerators that fit in the int range. When -pedantic is given, build_enumerator() would have already warned about those that don't fit. Here we convert the rest to the enumerator type. */ if (TREE_TYPE (ini) != integer_type_node) ini = convert (enumtype, ini); DECL_INITIAL (enu) = ini; TREE_PURPOSE (pair) = DECL_NAME (enu); TREE_VALUE (pair) = ini; } TYPE_VALUES (enumtype) = values; } /* Record the min/max values so that we can warn about bit-field enumerations that are too small for the values. */ lt = ggc_cleared_alloc<struct lang_type> (); lt->enum_min = minnode; lt->enum_max = maxnode; TYPE_LANG_SPECIFIC (enumtype) = lt; /* Fix up all variant types of this enum type. */ for (tem = TYPE_MAIN_VARIANT (enumtype); tem; tem = TYPE_NEXT_VARIANT (tem)) { if (tem == enumtype) continue; TYPE_VALUES (tem) = TYPE_VALUES (enumtype); TYPE_MIN_VALUE (tem) = TYPE_MIN_VALUE (enumtype); TYPE_MAX_VALUE (tem) = TYPE_MAX_VALUE (enumtype); TYPE_SIZE (tem) = TYPE_SIZE (enumtype); TYPE_SIZE_UNIT (tem) = TYPE_SIZE_UNIT (enumtype); SET_TYPE_MODE (tem, TYPE_MODE (enumtype)); TYPE_PRECISION (tem) = TYPE_PRECISION (enumtype); TYPE_ALIGN (tem) = TYPE_ALIGN (enumtype); TYPE_USER_ALIGN (tem) = TYPE_USER_ALIGN (enumtype); TYPE_UNSIGNED (tem) = TYPE_UNSIGNED (enumtype); TYPE_LANG_SPECIFIC (tem) = TYPE_LANG_SPECIFIC (enumtype); } /* Finish debugging output for this type. */ rest_of_type_compilation (enumtype, toplevel); /* If this enum is defined inside a struct, add it to struct_types. */ if (warn_cxx_compat && struct_parse_info != NULL && !in_sizeof && !in_typeof && !in_alignof) struct_parse_info->struct_types.safe_push (enumtype); return enumtype; } /* Build and install a CONST_DECL for one value of the current enumeration type (one that was begun with start_enum). DECL_LOC is the location of the enumerator. 
   LOC is the location of the '=' operator if any, DECL_LOC otherwise.
   Return a tree-list containing the CONST_DECL and its value.
   Assignment of sequential values by default is handled here.  */

tree
build_enumerator (location_t decl_loc, location_t loc,
                  struct c_enum_contents *the_enum, tree name, tree value)
{
  tree decl, type;

  /* Validate and default VALUE.  */

  if (value != 0)
    {
      /* Don't issue more errors for error_mark_node (i.e. an
         undeclared identifier) - just ignore the value expression.  */
      if (value == error_mark_node)
        value = 0;
      else if (!INTEGRAL_TYPE_P (TREE_TYPE (value)))
        {
          error_at (loc, "enumerator value for %qE is not an integer constant",
                    name);
          value = 0;
        }
      else
        {
          if (TREE_CODE (value) != INTEGER_CST)
            {
              /* Fold first: a constant expression that is not already an
                 INTEGER_CST (e.g. arithmetic on enumerators) may fold to
                 one; that is accepted with only a pedwarn.  */
              value = c_fully_fold (value, false, NULL);
              if (TREE_CODE (value) == INTEGER_CST)
                pedwarn (loc, OPT_Wpedantic,
                         "enumerator value for %qE is not an integer "
                         "constant expression", name);
            }
          if (TREE_CODE (value) != INTEGER_CST)
            {
              error ("enumerator value for %qE is not an integer constant",
                     name);
              value = 0;
            }
          else
            {
              value = default_conversion (value);
              constant_expression_warning (value);
            }
        }
    }

  /* Default based on previous value.  */
  /* It should no longer be possible to have NON_LVALUE_EXPR
     in the default.  */
  if (value == 0)
    {
      value = the_enum->enum_next_value;
      if (the_enum->enum_overflow)
        error_at (loc, "overflow in enumeration values");
    }
  /* Even though the underlying type of an enum is unspecified, the
     type of enumeration constants is explicitly defined as int
     (6.4.4.3/2 in the C99 Standard).  GCC allows any integer type as
     an extension.  */
  else if (!int_fits_type_p (value, integer_type_node))
    pedwarn (loc, OPT_Wpedantic,
             "ISO C restricts enumerator values to range of %<int%>");

  /* The ISO C Standard mandates enumerators to have type int, even
     though the underlying type of an enum type is unspecified.
     However, GCC allows enumerators of any integer type as an
     extensions.  Here we convert any enumerators that fit in an int
     to type int, to avoid promotions to unsigned types when comparing
     integers with enumerators that fit in the int range.  When
     -pedantic is given, we would have already warned about those that
     don't fit.  We have to do this here rather than in finish_enum
     because this value may be used to define more enumerators.  */
  if (int_fits_type_p (value, integer_type_node))
    value = convert (integer_type_node, value);

  /* Set basis for default for next value.  */
  /* The overflow flag is detected by a wrap-around: VALUE + 1
     comparing less than VALUE.  */
  the_enum->enum_next_value
    = build_binary_op (EXPR_LOC_OR_LOC (value, input_location),
                       PLUS_EXPR, value, integer_one_node, 0);
  the_enum->enum_overflow = tree_int_cst_lt (the_enum->enum_next_value, value);

  /* Now create a declaration for the enum value name.  */

  /* The CONST_DECL's type is at least as wide as int, and is unsigned
     only if VALUE's type is unsigned and at least int-wide.  */
  type = TREE_TYPE (value);
  type = c_common_type_for_size (MAX (TYPE_PRECISION (type),
                                      TYPE_PRECISION (integer_type_node)),
                                 (TYPE_PRECISION (type)
                                  >= TYPE_PRECISION (integer_type_node)
                                  && TYPE_UNSIGNED (type)));

  decl = build_decl (decl_loc, CONST_DECL, name, type);
  DECL_INITIAL (decl) = convert (type, value);
  pushdecl (decl);

  return tree_cons (decl, value, NULL_TREE);
}

/* Create the FUNCTION_DECL for a function definition.
   DECLSPECS, DECLARATOR and ATTRIBUTES are the parts of
   the declaration; they describe the function's name and the type it
   returns, but twisted together in a fashion that parallels the syntax of C.

   This function creates a binding context for the function body
   as well as setting up the FUNCTION_DECL in current_function_decl.

   Returns 1 on success.  If the DECLARATOR is not suitable for a function
   (it defines a datum instead), we return 0, which tells
   yyparse to report a parse error.  */

int
start_function (struct c_declspecs *declspecs, struct c_declarator *declarator,
                tree attributes)
{
  tree decl1, old_decl;
  tree restype, resdecl;
  location_t loc;

  current_function_returns_value = 0;  /* Assume, until we see it does.
 */
  current_function_returns_null = 0;
  current_function_returns_abnormally = 0;
  warn_about_return_type = 0;
  c_switch_stack = NULL;

  /* Indicate no valid break/continue context by setting these variables
     to some non-null, non-label value.  We'll notice and emit the proper
     error message in c_finish_bc_stmt.  */
  c_break_label = c_cont_label = size_zero_node;

  decl1 = grokdeclarator (declarator, declspecs, FUNCDEF, true, NULL,
                          &attributes, NULL, NULL, DEPRECATED_NORMAL);

  /* If the declarator is not suitable for a function definition,
     cause a syntax error.  */
  if (decl1 == 0
      || TREE_CODE (decl1) != FUNCTION_DECL)
    return 0;

  loc = DECL_SOURCE_LOCATION (decl1);

  c_decl_attributes (&decl1, attributes, 0);

  if (DECL_DECLARED_INLINE_P (decl1)
      && DECL_UNINLINABLE (decl1)
      && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl1)))
    warning_at (loc, OPT_Wattributes,
                "inline function %qD given attribute noinline",
                decl1);

  /* Handle gnu_inline attribute.  */
  /* In C99 mode a "gnu_inline" definition toggles DECL_EXTERNAL so the
     decl gets GNU89 extern-inline semantics (no out-of-line copy is
     emitted unless declared extern).  */
  if (declspecs->inline_p
      && !flag_gnu89_inline
      && TREE_CODE (decl1) == FUNCTION_DECL
      && (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl1))
          || current_function_decl))
    {
      if (declspecs->storage_class != csc_static)
        DECL_EXTERNAL (decl1) = !DECL_EXTERNAL (decl1);
    }

  announce_function (decl1);

  if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (TREE_TYPE (decl1))))
    {
      error_at (loc, "return type is an incomplete type");
      /* Make it return void instead.  */
      TREE_TYPE (decl1)
        = build_function_type (void_type_node,
                               TYPE_ARG_TYPES (TREE_TYPE (decl1)));
    }

  if (warn_about_return_type)
    warn_defaults_to (loc, flag_isoc99 ? OPT_Wimplicit_int
                           : (warn_return_type ? OPT_Wreturn_type
                              : OPT_Wimplicit_int),
                      "return type defaults to %<int%>");

  /* Make the init_value nonzero so pushdecl knows this is not tentative.
     error_mark_node is replaced below (in pop_scope) with the BLOCK.  */
  DECL_INITIAL (decl1) = error_mark_node;

  /* A nested function is not global.
 */
  if (current_function_decl != 0)
    TREE_PUBLIC (decl1) = 0;

  /* If this definition isn't a prototype and we had a prototype declaration
     before, copy the arg type info from that prototype.  */
  old_decl = lookup_name_in_scope (DECL_NAME (decl1), current_scope);
  if (old_decl && TREE_CODE (old_decl) != FUNCTION_DECL)
    old_decl = 0;
  current_function_prototype_locus = UNKNOWN_LOCATION;
  current_function_prototype_built_in = false;
  current_function_prototype_arg_types = NULL_TREE;
  if (!prototype_p (TREE_TYPE (decl1)))
    {
      if (old_decl != 0 && TREE_CODE (TREE_TYPE (old_decl)) == FUNCTION_TYPE
          && comptypes (TREE_TYPE (TREE_TYPE (decl1)),
                        TREE_TYPE (TREE_TYPE (old_decl))))
        {
          TREE_TYPE (decl1) = composite_type (TREE_TYPE (old_decl),
                                              TREE_TYPE (decl1));
          current_function_prototype_locus = DECL_SOURCE_LOCATION (old_decl);
          current_function_prototype_built_in
            = C_DECL_BUILTIN_PROTOTYPE (old_decl);
          current_function_prototype_arg_types
            = TYPE_ARG_TYPES (TREE_TYPE (decl1));
        }
      if (TREE_PUBLIC (decl1))
        {
          /* If there is an external prototype declaration of this
             function, record its location but do not copy information
             to this decl.  This may be an invisible declaration
             (built-in or in a scope which has finished) or simply have
             more refined argument types than any declaration found
             above.  */
          struct c_binding *b;
          for (b = I_SYMBOL_BINDING (DECL_NAME (decl1)); b; b = b->shadowed)
            if (B_IN_SCOPE (b, external_scope))
              break;
          if (b)
            {
              tree ext_decl, ext_type;
              ext_decl = b->decl;
              ext_type = b->u.type ? b->u.type : TREE_TYPE (ext_decl);
              if (TREE_CODE (ext_type) == FUNCTION_TYPE
                  && comptypes (TREE_TYPE (TREE_TYPE (decl1)),
                                TREE_TYPE (ext_type)))
                {
                  current_function_prototype_locus
                    = DECL_SOURCE_LOCATION (ext_decl);
                  current_function_prototype_built_in
                    = C_DECL_BUILTIN_PROTOTYPE (ext_decl);
                  current_function_prototype_arg_types
                    = TYPE_ARG_TYPES (ext_type);
                }
            }
        }
    }

  /* Optionally warn of old-fashioned def with no previous prototype.
 */
  if (warn_strict_prototypes
      && old_decl != error_mark_node
      && !prototype_p (TREE_TYPE (decl1))
      && C_DECL_ISNT_PROTOTYPE (old_decl))
    warning_at (loc, OPT_Wstrict_prototypes,
                "function declaration isn%'t a prototype");
  /* Optionally warn of any global def with no previous prototype.  */
  else if (warn_missing_prototypes
           && old_decl != error_mark_node
           && TREE_PUBLIC (decl1)
           && !MAIN_NAME_P (DECL_NAME (decl1))
           && C_DECL_ISNT_PROTOTYPE (old_decl)
           && !DECL_DECLARED_INLINE_P (decl1))
    warning_at (loc, OPT_Wmissing_prototypes,
                "no previous prototype for %qD", decl1);
  /* Optionally warn of any def with no previous prototype
     if the function has already been used.  */
  else if (warn_missing_prototypes
           && old_decl != 0
           && old_decl != error_mark_node
           && TREE_USED (old_decl)
           && !prototype_p (TREE_TYPE (old_decl)))
    warning_at (loc, OPT_Wmissing_prototypes,
                "%qD was used with no prototype before its definition", decl1);
  /* Optionally warn of any global def with no previous declaration.  */
  else if (warn_missing_declarations
           && TREE_PUBLIC (decl1)
           && old_decl == 0
           && !MAIN_NAME_P (DECL_NAME (decl1))
           && !DECL_DECLARED_INLINE_P (decl1))
    warning_at (loc, OPT_Wmissing_declarations,
                "no previous declaration for %qD", decl1);
  /* Optionally warn of any def with no previous declaration
     if the function has already been used.  */
  else if (warn_missing_declarations
           && old_decl != 0
           && old_decl != error_mark_node
           && TREE_USED (old_decl)
           && C_DECL_IMPLICIT (old_decl))
    warning_at (loc, OPT_Wmissing_declarations,
                "%qD was used with no declaration before its definition",
                decl1);

  /* This function exists in static storage.
     (This does not mean `static' in the C sense!)  */
  TREE_STATIC (decl1) = 1;

  /* This is the earliest point at which we might know the assembler
     name of the function.  Thus, if it's set before this, die horribly.  */
  gcc_assert (!DECL_ASSEMBLER_NAME_SET_P (decl1));

  /* If #pragma weak was used, mark the decl weak now.
 */
  if (current_scope == file_scope)
    maybe_apply_pragma_weak (decl1);

  /* Warn for unlikely, improbable, or stupid declarations of `main'.  */
  if (warn_main && MAIN_NAME_P (DECL_NAME (decl1)))
    {
      if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl1)))
          != integer_type_node)
        pedwarn (loc, OPT_Wmain, "return type of %qD is not %<int%>", decl1);
      else if (TYPE_ATOMIC (TREE_TYPE (TREE_TYPE (decl1))))
        pedwarn (loc, OPT_Wmain, "%<_Atomic%>-qualified return type of %qD",
                 decl1);

      check_main_parameter_types (decl1);

      if (!TREE_PUBLIC (decl1))
        pedwarn (loc, OPT_Wmain,
                 "%qD is normally a non-static function", decl1);
    }

  /* Record the decl so that the function name is defined.
     If we already have a decl for this name, and it is a FUNCTION_DECL,
     use the old decl.  */

  current_function_decl = pushdecl (decl1);

  push_scope ();
  declare_parm_level ();

  /* Build the RESULT_DECL carrying the function's return value; it is
     artificial and ignored for debug purposes.  */
  restype = TREE_TYPE (TREE_TYPE (current_function_decl));
  resdecl = build_decl (loc, RESULT_DECL, NULL_TREE, restype);
  DECL_ARTIFICIAL (resdecl) = 1;
  DECL_IGNORED_P (resdecl) = 1;
  DECL_RESULT (current_function_decl) = resdecl;

  start_fname_decls ();

  return 1;
}

/* Subroutine of store_parm_decls which handles new-style function
   definitions (prototype format).  The parms already have decls, so we
   need only record them as in effect and complain if any redundant
   old-style parm decls were written.  */
static void
store_parm_decls_newstyle (tree fndecl, const struct c_arg_info *arg_info)
{
  tree decl;
  c_arg_tag *tag;
  unsigned ix;

  if (current_scope->bindings)
    {
      error_at (DECL_SOURCE_LOCATION (fndecl),
                "old-style parameter declarations in prototyped "
                "function definition");

      /* Get rid of the old-style declarations.  */
      pop_scope ();
      push_scope ();
    }
  /* Don't issue this warning for nested functions, and don't issue this
     warning if we got here because ARG_INFO_TYPES was error_mark_node
     (this happens when a function definition has just an ellipsis in
     its parameter list.
 */
  else if (!in_system_header_at (input_location)
           && !current_function_scope
           && arg_info->types != error_mark_node)
    warning_at (DECL_SOURCE_LOCATION (fndecl), OPT_Wtraditional,
                "traditional C rejects ISO C style function definitions");

  /* Now make all the parameter declarations visible in the function body.
     We can bypass most of the grunt work of pushdecl.  */
  for (decl = arg_info->parms; decl; decl = DECL_CHAIN (decl))
    {
      DECL_CONTEXT (decl) = current_function_decl;
      if (DECL_NAME (decl))
        {
          bind (DECL_NAME (decl), decl, current_scope,
                /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION);
          if (!TREE_USED (decl))
            warn_if_shadowing (decl);
        }
      else
        error_at (DECL_SOURCE_LOCATION (decl), "parameter name omitted");
    }

  /* Record the parameter list in the function declaration.  */
  DECL_ARGUMENTS (fndecl) = arg_info->parms;

  /* Now make all the ancillary declarations visible, likewise.  */
  for (decl = arg_info->others; decl; decl = DECL_CHAIN (decl))
    {
      DECL_CONTEXT (decl) = current_function_decl;
      if (DECL_NAME (decl))
        bind (DECL_NAME (decl), decl, current_scope,
              /*invisible=*/false,
              /*nested=*/(TREE_CODE (decl) == FUNCTION_DECL),
              UNKNOWN_LOCATION);
    }

  /* And all the tag declarations.  */
  FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag)
    if (tag->id)
      bind (tag->id, tag->type, current_scope,
            /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION);
}

/* Subroutine of store_parm_decls which handles old-style function
   definitions (separate parameter list and declarations).  */

static void
store_parm_decls_oldstyle (tree fndecl, const struct c_arg_info *arg_info)
{
  struct c_binding *b;
  tree parm, decl, last;
  tree parmids = arg_info->parms;
  hash_set<tree> seen_args;

  if (!in_system_header_at (input_location))
    warning_at (DECL_SOURCE_LOCATION (fndecl),
                OPT_Wold_style_definition, "old-style function definition");

  /* Match each formal parameter name with its declaration.  Save each
     decl in the appropriate TREE_PURPOSE slot of the parmids chain.
 */
  for (parm = parmids; parm; parm = TREE_CHAIN (parm))
    {
      if (TREE_VALUE (parm) == 0)
        {
          error_at (DECL_SOURCE_LOCATION (fndecl),
                    "parameter name missing from parameter list");
          TREE_PURPOSE (parm) = 0;
          continue;
        }

      b = I_SYMBOL_BINDING (TREE_VALUE (parm));
      if (b && B_IN_CURRENT_SCOPE (b))
        {
          decl = b->decl;
          /* Skip erroneous parameters.  */
          if (decl == error_mark_node)
            continue;
          /* If we got something other than a PARM_DECL it is an error.  */
          if (TREE_CODE (decl) != PARM_DECL)
            error_at (DECL_SOURCE_LOCATION (decl),
                      "%qD declared as a non-parameter", decl);
          /* If the declaration is already marked, we have a duplicate
             name.  Complain and ignore the duplicate.  */
          else if (seen_args.contains (decl))
            {
              error_at (DECL_SOURCE_LOCATION (decl),
                        "multiple parameters named %qD", decl);
              TREE_PURPOSE (parm) = 0;
              continue;
            }
          /* If the declaration says "void", complain and turn it into
             an int.  */
          else if (VOID_TYPE_P (TREE_TYPE (decl)))
            {
              error_at (DECL_SOURCE_LOCATION (decl),
                        "parameter %qD declared with void type", decl);
              TREE_TYPE (decl) = integer_type_node;
              DECL_ARG_TYPE (decl) = integer_type_node;
              layout_decl (decl, 0);
            }
          warn_if_shadowing (decl);
        }
      /* If no declaration found, default to int.  */
      else
        {
          /* FIXME diagnostics: This should be the location of the argument,
             not the FNDECL.  E.g., for an old-style declaration

                int f10(v) { blah; }

             We should use the location of the V, not the F10.
             Unfortunately, the V is an IDENTIFIER_NODE which has no
             location.  In the future we need locations for c_arg_info
             entries.

             See gcc.dg/Wshadow-3.c for an example of this problem.
 */
          decl = build_decl (DECL_SOURCE_LOCATION (fndecl),
                             PARM_DECL, TREE_VALUE (parm), integer_type_node);
          DECL_ARG_TYPE (decl) = TREE_TYPE (decl);
          pushdecl (decl);
          warn_if_shadowing (decl);

          if (flag_isoc99)
            pedwarn (DECL_SOURCE_LOCATION (decl),
                     OPT_Wimplicit_int, "type of %qD defaults to %<int%>",
                     decl);
          else
            warning_at (DECL_SOURCE_LOCATION (decl),
                        OPT_Wmissing_parameter_type,
                        "type of %qD defaults to %<int%>", decl);
        }

      TREE_PURPOSE (parm) = decl;
      seen_args.add (decl);
    }

  /* Now examine the parms chain for incomplete declarations
     and declarations with no corresponding names.  */

  for (b = current_scope->bindings; b; b = b->prev)
    {
      parm = b->decl;
      if (TREE_CODE (parm) != PARM_DECL)
        continue;

      if (TREE_TYPE (parm) != error_mark_node
          && !COMPLETE_TYPE_P (TREE_TYPE (parm)))
        {
          error_at (DECL_SOURCE_LOCATION (parm),
                    "parameter %qD has incomplete type", parm);
          TREE_TYPE (parm) = error_mark_node;
        }

      if (!seen_args.contains (parm))
        {
          error_at (DECL_SOURCE_LOCATION (parm),
                    "declaration for parameter %qD but no such parameter",
                    parm);

          /* Pretend the parameter was not missing.
             This gets us to a standard state and minimizes
             further error messages.  */
          parmids = chainon (parmids, tree_cons (parm, 0, 0));
        }
    }

  /* Chain the declarations together in the order of the list of
     names.  Store that chain in the function decl, replacing the
     list of names.  Update the current scope to match.  */
  DECL_ARGUMENTS (fndecl) = 0;

  for (parm = parmids; parm; parm = TREE_CHAIN (parm))
    if (TREE_PURPOSE (parm))
      break;
  if (parm && TREE_PURPOSE (parm))
    {
      last = TREE_PURPOSE (parm);
      DECL_ARGUMENTS (fndecl) = last;

      for (parm = TREE_CHAIN (parm); parm; parm = TREE_CHAIN (parm))
        if (TREE_PURPOSE (parm))
          {
            DECL_CHAIN (last) = TREE_PURPOSE (parm);
            last = TREE_PURPOSE (parm);
          }
      DECL_CHAIN (last) = 0;
    }

  /* If there was a previous prototype,
     set the DECL_ARG_TYPE of each argument according to
     the type previously specified, and report any mismatches.
 */

  if (current_function_prototype_arg_types)
    {
      tree type;
      /* Walk PARM and TYPE lists in parallel; either running out before
         the other (ignoring a trailing void) is an arity mismatch.  */
      for (parm = DECL_ARGUMENTS (fndecl),
             type = current_function_prototype_arg_types;
           parm || (type && TREE_VALUE (type) != error_mark_node
                    && (TYPE_MAIN_VARIANT (TREE_VALUE (type))
                        != void_type_node));
           parm = DECL_CHAIN (parm), type = TREE_CHAIN (type))
        {
          if (parm == 0 || type == 0
              || TYPE_MAIN_VARIANT (TREE_VALUE (type)) == void_type_node)
            {
              if (current_function_prototype_built_in)
                warning_at (DECL_SOURCE_LOCATION (fndecl),
                            0, "number of arguments doesn%'t match "
                            "built-in prototype");
              else
                {
                  /* FIXME diagnostics: This should be the location of
                     FNDECL, but there is bug when a prototype is
                     declared inside function context, but defined
                     outside of it (e.g., gcc.dg/pr15698-2.c).  In
                     which case FNDECL gets the location of the
                     prototype, not the definition.  */
                  error_at (input_location,
                            "number of arguments doesn%'t match prototype");

                  error_at (current_function_prototype_locus,
                            "prototype declaration");
                }
              break;
            }
          /* Type for passing arg must be consistent with that
             declared for the arg.  ISO C says we take the unqualified
             type for parameters declared with qualified type.  */
          if (TREE_TYPE (parm) != error_mark_node
              && TREE_TYPE (type) != error_mark_node
              && ((TYPE_ATOMIC (DECL_ARG_TYPE (parm))
                   != TYPE_ATOMIC (TREE_VALUE (type)))
                  || !comptypes (TYPE_MAIN_VARIANT (DECL_ARG_TYPE (parm)),
                                 TYPE_MAIN_VARIANT (TREE_VALUE (type)))))
            {
              if ((TYPE_ATOMIC (DECL_ARG_TYPE (parm))
                   == TYPE_ATOMIC (TREE_VALUE (type)))
                  && (TYPE_MAIN_VARIANT (TREE_TYPE (parm))
                      == TYPE_MAIN_VARIANT (TREE_VALUE (type))))
                {
                  /* Adjust argument to match prototype.  E.g. a previous
                     `int foo(float);' prototype causes
                     `int foo(x) float x; {...}' to be treated like
                     `int foo(float x) {...}'.  This is particularly
                     useful for argument types like uid_t.
 */
                  DECL_ARG_TYPE (parm) = TREE_TYPE (parm);

                  if (targetm.calls.promote_prototypes
                        (TREE_TYPE (current_function_decl))
                      && INTEGRAL_TYPE_P (TREE_TYPE (parm))
                      && TYPE_PRECISION (TREE_TYPE (parm))
                         < TYPE_PRECISION (integer_type_node))
                    DECL_ARG_TYPE (parm)
                      = c_type_promotes_to (TREE_TYPE (parm));

                  /* ??? Is it possible to get here with a
                     built-in prototype or will it always have
                     been diagnosed as conflicting with an
                     old-style definition and discarded?  */
                  if (current_function_prototype_built_in)
                    warning_at (DECL_SOURCE_LOCATION (parm),
                                OPT_Wpedantic, "promoted argument %qD "
                                "doesn%'t match built-in prototype", parm);
                  else
                    {
                      pedwarn (DECL_SOURCE_LOCATION (parm),
                               OPT_Wpedantic, "promoted argument %qD "
                               "doesn%'t match prototype", parm);
                      pedwarn (current_function_prototype_locus, OPT_Wpedantic,
                               "prototype declaration");
                    }
                }
              else
                {
                  if (current_function_prototype_built_in)
                    warning_at (DECL_SOURCE_LOCATION (parm),
                                0, "argument %qD doesn%'t match "
                                "built-in prototype", parm);
                  else
                    {
                      error_at (DECL_SOURCE_LOCATION (parm),
                                "argument %qD doesn%'t match prototype", parm);
                      error_at (current_function_prototype_locus,
                                "prototype declaration");
                    }
                }
            }
        }
      TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = 0;
    }

  /* Otherwise, create a prototype that would match.  */

  else
    {
      tree actual = 0, last = 0, type;

      for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm))
        {
          type = tree_cons (NULL_TREE, DECL_ARG_TYPE (parm), NULL_TREE);
          if (last)
            TREE_CHAIN (last) = type;
          else
            actual = type;
          last = type;
        }
      type = tree_cons (NULL_TREE, void_type_node, NULL_TREE);
      if (last)
        TREE_CHAIN (last) = type;
      else
        actual = type;

      /* We are going to assign a new value for the TYPE_ACTUAL_ARG_TYPES
         of the type of this function, but we need to avoid having this
         affect the types of other similarly-typed functions, so we must
         first force the generation of an identical (but separate) type
         node for the relevant function type.  The new node we create
         will be a variant of the main variant of the original function
         type.
 */

      TREE_TYPE (fndecl) = build_variant_type_copy (TREE_TYPE (fndecl));

      TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = actual;
    }
}

/* Store parameter declarations passed in ARG_INFO into the current
   function declaration.  */

void
store_parm_decls_from (struct c_arg_info *arg_info)
{
  current_function_arg_info = arg_info;
  store_parm_decls ();
}

/* Store the parameter declarations into the current function declaration.
   This is called after parsing the parameter declarations, before
   digesting the body of the function.

   For an old-style definition, construct a prototype out of the old-style
   parameter declarations and inject it into the function's type.  */

void
store_parm_decls (void)
{
  tree fndecl = current_function_decl;
  bool proto;

  /* The argument information block for FNDECL.  */
  struct c_arg_info *arg_info = current_function_arg_info;
  current_function_arg_info = 0;

  /* True if this definition is written with a prototype.  Note:
     despite C99 6.7.5.3p14, we can *not* treat an empty argument
     list in a function definition as equivalent to (void) -- an
     empty argument list specifies the function has no parameters,
     but only (void) sets up a prototype for future calls.  */
  proto = arg_info->types != 0;

  if (proto)
    store_parm_decls_newstyle (fndecl, arg_info);
  else
    store_parm_decls_oldstyle (fndecl, arg_info);

  /* The next call to push_scope will be a function body.  */

  next_is_function_body = true;

  /* Write a record describing this function definition to the prototypes
     file (if requested).  */

  gen_aux_info_record (fndecl, 1, 0, proto);

  /* Initialize the RTL code for the function.  */
  allocate_struct_function (fndecl, false);

  if (warn_unused_local_typedefs)
    cfun->language = ggc_cleared_alloc<language_function> ();

  /* Begin the statement tree for this function.  */
  DECL_SAVED_TREE (fndecl) = push_stmt_list ();

  /* ??? Insert the contents of the pending sizes list into the function
     to be evaluated.
     The only reason left to have this is
        void foo(int n, int array[n++])
     because we throw away the array type in favor of a pointer type, and
     thus won't naturally see the SAVE_EXPR containing the increment.  All
     other pending sizes would be handled by gimplify_parameters.  */
  if (arg_info->pending_sizes)
    add_stmt (arg_info->pending_sizes);
}

/* Store PARM_DECLs in PARMS into scope temporarily.  Used for
   c_finish_omp_declare_simd for function prototypes.  No diagnostics
   should be done.  */

void
temp_store_parm_decls (tree fndecl, tree parms)
{
  push_scope ();
  for (tree p = parms; p; p = DECL_CHAIN (p))
    {
      DECL_CONTEXT (p) = fndecl;
      if (DECL_NAME (p))
        bind (DECL_NAME (p), p, current_scope,
              /*invisible=*/false, /*nested=*/false,
              UNKNOWN_LOCATION);
    }
}

/* Undo what temp_store_parm_decls did.  */

void
temp_pop_parm_decls (void)
{
  /* Clear all bindings in this temporary scope, so that
     pop_scope doesn't create a BLOCK.  */
  struct c_binding *b = current_scope->bindings;
  current_scope->bindings = NULL;
  for (; b; b = free_binding_and_advance (b))
    {
      gcc_assert (TREE_CODE (b->decl) == PARM_DECL
                  || b->decl == error_mark_node);
      gcc_assert (I_SYMBOL_BINDING (b->id) == b);
      /* Pop each binding by hand, restoring any shadowed binding (and
         its recorded type, if one was stashed).  */
      I_SYMBOL_BINDING (b->id) = b->shadowed;
      if (b->shadowed && b->shadowed->u.type)
        TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type;
    }
  pop_scope ();
}

/* Finish up a function declaration and compile that function
   all the way to assembler language output.  Then free the storage
   for the function definition.

   This is called after parsing the body of the function definition.
 */

void
finish_function (void)
{
  tree fndecl = current_function_decl;

  if (c_dialect_objc ())
    objc_finish_function ();

  if (TREE_CODE (fndecl) == FUNCTION_DECL
      && targetm.calls.promote_prototypes (TREE_TYPE (fndecl)))
    {
      tree args = DECL_ARGUMENTS (fndecl);
      for (; args; args = DECL_CHAIN (args))
        {
          tree type = TREE_TYPE (args);
          if (INTEGRAL_TYPE_P (type)
              && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
            DECL_ARG_TYPE (args) = c_type_promotes_to (type);
        }
    }

  if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node)
    BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;

  /* Must mark the RESULT_DECL as being in this function.  */

  if (DECL_RESULT (fndecl) && DECL_RESULT (fndecl) != error_mark_node)
    DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* C99 5.1.2.2.3: reaching the } of main returns 0, so synthesize an
     implicit `return 0;` for a hosted int-returning main.  */
  if (MAIN_NAME_P (DECL_NAME (fndecl)) && flag_hosted
      && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (fndecl)))
      == integer_type_node && flag_isoc99)
    {
      /* Hack.  We don't want the middle-end to warn that this return
         is unreachable, so we mark its location as special.  Using
         UNKNOWN_LOCATION has the problem that it gets clobbered in
         annotate_one_with_locus.  A cleaner solution might be to
         ensure ! should_carry_locus_p (stmt), but that needs a flag.  */
      c_finish_return (BUILTINS_LOCATION, integer_zero_node, NULL_TREE);
    }

  /* Tie off the statement tree for this function.  */
  DECL_SAVED_TREE (fndecl) = pop_stmt_list (DECL_SAVED_TREE (fndecl));

  /* If the function has _Cilk_spawn in front of a function call inside
     it i.e. it is a spawning function, then add the appropriate Cilk plus
     functions inside.  */
  if (fn_contains_cilk_spawn_p (cfun))
    cfun->cilk_frame_decl = insert_cilk_frame (fndecl);

  finish_fname_decls ();

  /* Complain if there's just no return statement.  */
  if (warn_return_type
      && TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE
      && !current_function_returns_value
      && !current_function_returns_null
      /* Don't complain if we are no-return.
 */
      && !current_function_returns_abnormally
      /* Don't complain if we are declared noreturn.  */
      && !TREE_THIS_VOLATILE (fndecl)
      /* Don't warn for main().  */
      && !MAIN_NAME_P (DECL_NAME (fndecl))
      /* Or if they didn't actually specify a return type.  */
      && !C_FUNCTION_IMPLICIT_INT (fndecl)
      /* Normally, with -Wreturn-type, flow will complain, but we might
         optimize out static functions.  */
      && !TREE_PUBLIC (fndecl))
    {
      warning (OPT_Wreturn_type,
               "no return statement in function returning non-void");
      TREE_NO_WARNING (fndecl) = 1;
    }

  /* Complain about parameters that are only set, but never otherwise used.  */
  if (warn_unused_but_set_parameter)
    {
      tree decl;

      for (decl = DECL_ARGUMENTS (fndecl);
           decl;
           decl = DECL_CHAIN (decl))
        if (TREE_USED (decl)
            && TREE_CODE (decl) == PARM_DECL
            && !DECL_READ_P (decl)
            && DECL_NAME (decl)
            && !DECL_ARTIFICIAL (decl)
            && !TREE_NO_WARNING (decl))
          warning_at (DECL_SOURCE_LOCATION (decl),
                      OPT_Wunused_but_set_parameter,
                      "parameter %qD set but not used", decl);
    }

  /* Complain about locally defined typedefs that are not used in this
     function.  */
  maybe_warn_unused_local_typedefs ();

  /* Store the end of the function, so that we get good line number
     info for the epilogue.  */
  cfun->function_end_locus = input_location;

  /* Finalize the ELF visibility for the function.  */
  c_determine_visibility (fndecl);

  /* For GNU C extern inline functions disregard inline limits.  */
  if (DECL_EXTERNAL (fndecl) && DECL_DECLARED_INLINE_P (fndecl))
    DECL_DISREGARD_INLINE_LIMITS (fndecl) = 1;

  /* Genericize before inlining.  Delay genericizing nested functions
     until their parent function is genericized.  Since finalizing
     requires GENERIC, delay that as well.  */

  if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node
      && !undef_nested_function)
    {
      if (!decl_function_context (fndecl))
        {
          invoke_plugin_callbacks (PLUGIN_PRE_GENERICIZE, fndecl);
          c_genericize (fndecl);

          /* ??? Objc emits functions after finalizing the compilation unit.
             This should be cleaned up later and this conditional removed.  */
          if (symtab->global_info_ready)
            {
              cgraph_node::add_new_function (fndecl, false);
              return;
            }
          cgraph_node::finalize_function (fndecl, false);
        }
      else
        {
          /* Register this function with cgraph just far enough to get it
             added to our parent's nested function list.  Handy, since the
             C front end doesn't have such a list.  */
          (void) cgraph_node::get_create (fndecl);
        }
    }

  if (!decl_function_context (fndecl))
    undef_nested_function = false;

  if (cfun->language != NULL)
    {
      ggc_free (cfun->language);
      cfun->language = NULL;
    }

  /* We're leaving the context of this function, so zap cfun.
     It's still in DECL_STRUCT_FUNCTION, and we'll restore it in
     tree_rest_of_compilation.  */
  set_cfun (NULL);
  current_function_decl = NULL;
}

/* Check the declarations given in a for-loop for satisfying the C99
   constraints.  If exactly one such decl is found, return it.  LOC is
   the location of the opening parenthesis of the for loop.  The last
   parameter allows you to control the "for loop initial declarations
   are only allowed in C99 mode".  Normally, you should pass
   flag_isoc99 as that parameter.  But in some cases (Objective-C
   foreach loop, for example) we want to run the checks in this
   function even if not in C99 mode, so we allow the caller to turn
   off the error about not being in C99 mode.
 */

tree
check_for_loop_decls (location_t loc, bool turn_off_iso_c99_error)
{
  struct c_binding *b;
  tree one_decl = NULL_TREE;
  int n_decls = 0;

  if (!turn_off_iso_c99_error)
    {
      /* HINT is function-static so the -std suggestion is printed at
         most once per compilation.  */
      static bool hint = true;
      /* If we get here, declarations have been used in a for loop without
         the C99 for loop scope.  This doesn't make much sense, so don't
         allow it.
 */
      error_at (loc, "%<for%> loop initial declarations "
                "are only allowed in C99 or C11 mode");
      if (hint)
        {
          inform (loc,
                  "use option -std=c99, -std=gnu99, -std=c11 or -std=gnu11 "
                  "to compile your code");
          hint = false;
        }
      return NULL_TREE;
    }
  /* C99 subclause 6.8.5 paragraph 3:

       [#3]  The  declaration  part  of  a for statement shall only
       declare identifiers for objects having storage class auto or
       register.

     It isn't clear whether, in this sentence, "identifiers" binds to
     "shall only declare" or to "objects" - that is, whether all identifiers
     declared must be identifiers for objects, or whether the restriction
     only applies to those that are.  (A question on this in comp.std.c
     in November 2000 received no answer.)  We implement the strictest
     interpretation, to avoid creating an extension which later causes
     problems.  */

  for (b = current_scope->bindings; b; b = b->prev)
    {
      tree id = b->id;
      tree decl = b->decl;

      if (!id)
        continue;

      switch (TREE_CODE (decl))
        {
        case VAR_DECL:
          {
            location_t decl_loc = DECL_SOURCE_LOCATION (decl);
            if (TREE_STATIC (decl))
              error_at (decl_loc,
                        "declaration of static variable %qD in %<for%> loop "
                        "initial declaration", decl);
            else if (DECL_EXTERNAL (decl))
              error_at (decl_loc,
                        "declaration of %<extern%> variable %qD in %<for%> loop "
                        "initial declaration", decl);
          }
          break;

        case RECORD_TYPE:
          error_at (loc,
                    "%<struct %E%> declared in %<for%> loop initial "
                    "declaration", id);
          break;
        case UNION_TYPE:
          error_at (loc,
                    "%<union %E%> declared in %<for%> loop initial declaration",
                    id);
          break;
        case ENUMERAL_TYPE:
          error_at (loc, "%<enum %E%> declared in %<for%> loop "
                    "initial declaration", id);
          break;
        default:
          error_at (loc, "declaration of non-variable "
                    "%qD in %<for%> loop initial declaration", decl);
        }

      n_decls++;
      one_decl = decl;
    }

  return n_decls == 1 ? one_decl : NULL_TREE;
}

/* Save and reinitialize the variables
   used during compilation of a C function.
 */

void
c_push_function_context (void)
{
  struct language_function *p = cfun->language;
  /* cfun->language might have been already allocated by the use of
     -Wunused-local-typedefs.  In that case, just re-use it.  */
  if (p == NULL)
    cfun->language = p = ggc_cleared_alloc<language_function> ();

  p->base.x_stmt_tree = c_stmt_tree;
  c_stmt_tree.x_cur_stmt_list = vec_safe_copy (c_stmt_tree.x_cur_stmt_list);
  p->x_break_label = c_break_label;
  p->x_cont_label = c_cont_label;
  p->x_switch_stack = c_switch_stack;
  p->arg_info = current_function_arg_info;
  p->returns_value = current_function_returns_value;
  p->returns_null = current_function_returns_null;
  p->returns_abnormally = current_function_returns_abnormally;
  p->warn_about_return_type = warn_about_return_type;

  push_function_context ();
}

/* Restore the variables used during compilation of a C function.  */

void
c_pop_function_context (void)
{
  struct language_function *p;

  pop_function_context ();
  p = cfun->language;

  /* When -Wunused-local-typedefs is in effect, cfun->languages is used
     to store data throughout the life time of the current cfun,
     So don't deallocate it.  */
  if (!warn_unused_local_typedefs)
    cfun->language = NULL;

  if (DECL_STRUCT_FUNCTION (current_function_decl) == 0
      && DECL_SAVED_TREE (current_function_decl) == NULL_TREE)
    {
      /* Stop pointing to the local nodes about to be freed.  */
      /* But DECL_INITIAL must remain nonzero so we know this
         was an actual function definition.
 */
      DECL_INITIAL (current_function_decl) = error_mark_node;
      DECL_ARGUMENTS (current_function_decl) = 0;
    }

  c_stmt_tree = p->base.x_stmt_tree;
  p->base.x_stmt_tree.x_cur_stmt_list = NULL;
  c_break_label = p->x_break_label;
  c_cont_label = p->x_cont_label;
  c_switch_stack = p->x_switch_stack;
  current_function_arg_info = p->arg_info;
  current_function_returns_value = p->returns_value;
  current_function_returns_null = p->returns_null;
  current_function_returns_abnormally = p->returns_abnormally;
  warn_about_return_type = p->warn_about_return_type;
}

/* The functions below are required for functionality of doing
   function at once processing in the C front end.  Currently these
   functions are not called from anywhere in the C front end, but as
   these changes continue, that will change.  */

/* Returns the stmt_tree (if any) to which statements are currently
   being added.  If there is no active statement-tree, NULL is
   returned.  */

stmt_tree
current_stmt_tree (void)
{
  return &c_stmt_tree;
}

/* Return the global value of T as a symbol.  */

tree
identifier_global_value (tree t)
{
  struct c_binding *b;

  /* Walk the shadow chain outward until a file- or external-scope
     binding is found.  */
  for (b = I_SYMBOL_BINDING (t); b; b = b->shadowed)
    if (B_IN_FILE_SCOPE (b) || B_IN_EXTERNAL_SCOPE (b))
      return b->decl;

  return 0;
}

/* In C, the only C-linkage public declaration is at file scope.  */

tree
c_linkage_bindings (tree name)
{
  return identifier_global_value (name);
}

/* Record a builtin type for C.  If NAME is non-NULL, it is the name used;
   otherwise the name is found in ridpointers from RID_INDEX.  */

void
record_builtin_type (enum rid rid_index, const char *name, tree type)
{
  tree id, decl;
  if (name == 0)
    id = ridpointers[(int) rid_index];
  else
    id = get_identifier (name);
  decl = build_decl (UNKNOWN_LOCATION, TYPE_DECL, id, type);
  pushdecl (decl);
  if (debug_hooks->type_decl)
    debug_hooks->type_decl (decl, false);
}

/* Build the void_list_node (void_type_node having been created).
*/ tree build_void_list_node (void) { tree t = build_tree_list (NULL_TREE, void_type_node); return t; } /* Return a c_parm structure with the given SPECS, ATTRS and DECLARATOR. */ struct c_parm * build_c_parm (struct c_declspecs *specs, tree attrs, struct c_declarator *declarator) { struct c_parm *ret = XOBNEW (&parser_obstack, struct c_parm); ret->specs = specs; ret->attrs = attrs; ret->declarator = declarator; return ret; } /* Return a declarator with nested attributes. TARGET is the inner declarator to which these attributes apply. ATTRS are the attributes. */ struct c_declarator * build_attrs_declarator (tree attrs, struct c_declarator *target) { struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); ret->kind = cdk_attrs; ret->declarator = target; ret->u.attrs = attrs; return ret; } /* Return a declarator for a function with arguments specified by ARGS and return type specified by TARGET. */ struct c_declarator * build_function_declarator (struct c_arg_info *args, struct c_declarator *target) { struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); ret->kind = cdk_function; ret->declarator = target; ret->u.arg_info = args; return ret; } /* Return a declarator for the identifier IDENT (which may be NULL_TREE for an abstract declarator). */ struct c_declarator * build_id_declarator (tree ident) { struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); ret->kind = cdk_id; ret->declarator = 0; ret->u.id = ident; /* Default value - may get reset to a more precise location. */ ret->id_loc = input_location; return ret; } /* Return something to represent absolute declarators containing a *. TARGET is the absolute declarator that the * contains. TYPE_QUALS_ATTRS is a structure for type qualifiers and attributes to apply to the pointer type. 
*/ struct c_declarator * make_pointer_declarator (struct c_declspecs *type_quals_attrs, struct c_declarator *target) { tree attrs; int quals = 0; struct c_declarator *itarget = target; struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); if (type_quals_attrs) { attrs = type_quals_attrs->attrs; quals = quals_from_declspecs (type_quals_attrs); if (attrs != NULL_TREE) itarget = build_attrs_declarator (attrs, target); } ret->kind = cdk_pointer; ret->declarator = itarget; ret->u.pointer_quals = quals; return ret; } /* Return a pointer to a structure for an empty list of declaration specifiers. */ struct c_declspecs * build_null_declspecs (void) { struct c_declspecs *ret = XOBNEW (&parser_obstack, struct c_declspecs); memset (&ret->locations, 0, cdw_number_of_elements); ret->type = 0; ret->expr = 0; ret->decl_attr = 0; ret->attrs = 0; ret->align_log = -1; ret->typespec_word = cts_none; ret->storage_class = csc_none; ret->expr_const_operands = true; ret->declspecs_seen_p = false; ret->typespec_kind = ctsk_none; ret->non_sc_seen_p = false; ret->typedef_p = false; ret->explicit_signed_p = false; ret->deprecated_p = false; ret->default_int_p = false; ret->long_p = false; ret->long_long_p = false; ret->short_p = false; ret->signed_p = false; ret->unsigned_p = false; ret->complex_p = false; ret->inline_p = false; ret->noreturn_p = false; ret->thread_p = false; ret->thread_gnu_p = false; ret->const_p = false; ret->volatile_p = false; ret->atomic_p = false; ret->restrict_p = false; ret->saturating_p = false; ret->alignas_p = false; ret->address_space = ADDR_SPACE_GENERIC; return ret; } /* Add the address space ADDRSPACE to the declaration specifiers SPECS, returning SPECS. 
*/

struct c_declspecs *
declspecs_add_addrspace (source_location location,
			 struct c_declspecs *specs, addr_space_t as)
{
  specs->non_sc_seen_p = true;
  specs->declspecs_seen_p = true;
  /* Two different non-generic address spaces conflict.  */
  if (!ADDR_SPACE_GENERIC_P (specs->address_space)
      && specs->address_space != as)
    error ("incompatible address space qualifiers %qs and %qs",
	   c_addr_space_name (as),
	   c_addr_space_name (specs->address_space));
  else
    {
      specs->address_space = as;
      specs->locations[cdw_address_space] = location;
    }
  return specs;
}

/* Add the type qualifier QUAL to the declaration specifiers SPECS,
   returning SPECS.  */

struct c_declspecs *
declspecs_add_qual (source_location loc,
		    struct c_declspecs *specs, tree qual)
{
  enum rid i;
  bool dupe = false;
  specs->non_sc_seen_p = true;
  specs->declspecs_seen_p = true;
  gcc_assert (TREE_CODE (qual) == IDENTIFIER_NODE
	      && C_IS_RESERVED_WORD (qual));
  i = C_RID_CODE (qual);
  switch (i)
    {
    case RID_CONST:
      dupe = specs->const_p;
      specs->const_p = true;
      specs->locations[cdw_const] = loc;
      break;
    case RID_VOLATILE:
      dupe = specs->volatile_p;
      specs->volatile_p = true;
      specs->locations[cdw_volatile] = loc;
      break;
    case RID_RESTRICT:
      dupe = specs->restrict_p;
      specs->restrict_p = true;
      specs->locations[cdw_restrict] = loc;
      break;
    case RID_ATOMIC:
      dupe = specs->atomic_p;
      specs->atomic_p = true;
      break;
    default:
      gcc_unreachable ();
    }
  /* A duplicate qualifier is only a C90 pedwarn, not an error.  */
  if (dupe)
    pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %qE", qual);
  return specs;
}

/* Add the type specifier TYPE to the declaration specifiers SPECS,
   returning SPECS.  */

struct c_declspecs *
declspecs_add_type (location_t loc, struct c_declspecs *specs,
		    struct c_typespec spec)
{
  tree type = spec.spec;
  specs->non_sc_seen_p = true;
  specs->declspecs_seen_p = true;
  specs->typespec_kind = spec.kind;
  if (TREE_DEPRECATED (type))
    specs->deprecated_p = true;

  /* Handle type specifier keywords.  */
  if (TREE_CODE (type) == IDENTIFIER_NODE
      && C_IS_RESERVED_WORD (type)
      && C_RID_CODE (type) != RID_CXX_COMPAT_WARN)
    {
      enum rid i = C_RID_CODE (type);
      if (specs->type)
	{
	  error_at (loc, "two or more data types in declaration specifiers");
	  return specs;
	}
      if ((int) i <= (int) RID_LAST_MODIFIER)
	{
	  /* "long", "short", "signed", "unsigned", "_Complex" or "_Sat".  */
	  bool dupe = false;
	  switch (i)
	    {
	    case RID_LONG:
	      if (specs->long_long_p)
		{
		  error_at (loc, "%<long long long%> is too long for GCC");
		  break;
		}
	      if (specs->long_p)
		{
		  /* Second "long": upgrade to "long long" unless
		     "double" was already seen.  */
		  if (specs->typespec_word == cts_double)
		    {
		      error_at (loc,
				("both %<long long%> and %<double%> in "
				 "declaration specifiers"));
		      break;
		    }
		  pedwarn_c90 (loc, OPT_Wlong_long,
			       "ISO C90 does not support %<long long%>");
		  specs->long_long_p = 1;
		  specs->locations[cdw_long_long] = loc;
		  break;
		}
	      if (specs->short_p)
		error_at (loc,
			  ("both %<long%> and %<short%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_auto_type)
		error_at (loc,
			  ("both %<long%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_void)
		error_at (loc,
			  ("both %<long%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_int_n)
		error_at (loc,
			  ("both %<long%> and %<__int%d%> in "
			   "declaration specifiers"),
			  int_n_data[specs->int_n_idx].bitsize);
	      else if (specs->typespec_word == cts_bool)
		error_at (loc,
			  ("both %<long%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_char)
		error_at (loc,
			  ("both %<long%> and %<char%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_float)
		error_at (loc,
			  ("both %<long%> and %<float%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat32)
		error_at (loc,
			  ("both %<long%> and %<_Decimal32%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat64)
		error_at (loc,
			  ("both %<long%> and %<_Decimal64%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat128)
		error_at (loc,
			  ("both %<long%> and %<_Decimal128%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->long_p = true;
		  specs->locations[cdw_long] = loc;
		}
	      break;
	    case RID_SHORT:
	      dupe = specs->short_p;
	      if (specs->long_p)
		error_at (loc,
			  ("both %<long%> and %<short%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_auto_type)
		error_at (loc,
			  ("both %<short%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_void)
		error_at (loc,
			  ("both %<short%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_int_n)
		error_at (loc,
			  ("both %<short%> and %<__int%d%> in "
			   "declaration specifiers"),
			  int_n_data[specs->int_n_idx].bitsize);
	      else if (specs->typespec_word == cts_bool)
		error_at (loc,
			  ("both %<short%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_char)
		error_at (loc,
			  ("both %<short%> and %<char%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_float)
		error_at (loc,
			  ("both %<short%> and %<float%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_double)
		error_at (loc,
			  ("both %<short%> and %<double%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat32)
		error_at (loc,
			  ("both %<short%> and %<_Decimal32%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat64)
		error_at (loc,
			  ("both %<short%> and %<_Decimal64%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat128)
		error_at (loc,
			  ("both %<short%> and %<_Decimal128%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->short_p = true;
		  specs->locations[cdw_short] = loc;
		}
	      break;
	    case RID_SIGNED:
	      dupe = specs->signed_p;
	      if (specs->unsigned_p)
		error_at (loc,
			  ("both %<signed%> and %<unsigned%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_auto_type)
		error_at (loc,
			  ("both %<signed%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_void)
		error_at (loc,
			  ("both %<signed%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_bool)
		error_at (loc,
			  ("both %<signed%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_float)
		error_at (loc,
			  ("both %<signed%> and %<float%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_double)
		error_at (loc,
			  ("both %<signed%> and %<double%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat32)
		error_at (loc,
			  ("both %<signed%> and %<_Decimal32%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat64)
		error_at (loc,
			  ("both %<signed%> and %<_Decimal64%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat128)
		error_at (loc,
			  ("both %<signed%> and %<_Decimal128%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->signed_p = true;
		  specs->locations[cdw_signed] = loc;
		}
	      break;
	    case RID_UNSIGNED:
	      dupe = specs->unsigned_p;
	      if (specs->signed_p)
		error_at (loc,
			  ("both %<signed%> and %<unsigned%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_auto_type)
		error_at (loc,
			  ("both %<unsigned%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_void)
		error_at (loc,
			  ("both %<unsigned%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_bool)
		error_at (loc,
			  ("both %<unsigned%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_float)
		error_at (loc,
			  ("both %<unsigned%> and %<float%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_double)
		error_at (loc,
			  ("both %<unsigned%> and %<double%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat32)
		error_at (loc,
			  ("both %<unsigned%> and %<_Decimal32%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat64)
		error_at (loc,
			  ("both %<unsigned%> and %<_Decimal64%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat128)
		error_at (loc,
			  ("both %<unsigned%> and %<_Decimal128%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->unsigned_p = true;
		  specs->locations[cdw_unsigned] = loc;
		}
	      break;
	    case RID_COMPLEX:
	      dupe = specs->complex_p;
	      if (!in_system_header_at (loc))
		pedwarn_c90 (loc, OPT_Wpedantic,
			     "ISO C90 does not support complex types");
	      if (specs->typespec_word == cts_auto_type)
		error_at (loc,
			  ("both %<complex%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_void)
		error_at (loc,
			  ("both %<complex%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_bool)
		error_at (loc,
			  ("both %<complex%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat32)
		error_at (loc,
			  ("both %<complex%> and %<_Decimal32%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat64)
		error_at (loc,
			  ("both %<complex%> and %<_Decimal64%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat128)
		error_at (loc,
			  ("both %<complex%> and %<_Decimal128%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_fract)
		error_at (loc,
			  ("both %<complex%> and %<_Fract%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_accum)
		error_at (loc,
			  ("both %<complex%> and %<_Accum%> in "
			   "declaration specifiers"));
	      else if (specs->saturating_p)
		error_at (loc,
			  ("both %<complex%> and %<_Sat%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->complex_p = true;
		  specs->locations[cdw_complex] = loc;
		}
	      break;
	    case RID_SAT:
	      dupe = specs->saturating_p;
	      pedwarn (loc, OPT_Wpedantic,
		       "ISO C does not support saturating types");
	      if (specs->typespec_word == cts_int_n)
		{
		  error_at (loc,
			    ("both %<_Sat%> and %<__int%d%> in "
			     "declaration specifiers"),
			    int_n_data[specs->int_n_idx].bitsize);
		}
	      else if (specs->typespec_word == cts_auto_type)
		error_at (loc,
			  ("both %<_Sat%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_void)
		error_at (loc,
			  ("both %<_Sat%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_bool)
		error_at (loc,
			  ("both %<_Sat%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_char)
		error_at (loc,
			  ("both %<_Sat%> and %<char%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_int)
		error_at (loc,
			  ("both %<_Sat%> and %<int%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_float)
		error_at (loc,
			  ("both %<_Sat%> and %<float%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_double)
		error_at (loc,
			  ("both %<_Sat%> and %<double%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat32)
		error_at (loc,
			  ("both %<_Sat%> and %<_Decimal32%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat64)
		error_at (loc,
			  ("both %<_Sat%> and %<_Decimal64%> in "
			   "declaration specifiers"));
	      else if (specs->typespec_word == cts_dfloat128)
		error_at (loc,
			  ("both %<_Sat%> and %<_Decimal128%> in "
			   "declaration specifiers"));
	      else if (specs->complex_p)
		error_at (loc,
			  ("both %<_Sat%> and %<complex%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->saturating_p = true;
		  specs->locations[cdw_saturating] = loc;
		}
	      break;
	    default:
	      gcc_unreachable ();
	    }

	  if (dupe)
	    error_at (loc, "duplicate %qE", type);

	  return specs;
	}
      else
	{
	  /* "void", "_Bool", "char", "int", "float", "double",
	     "_Decimal32", "__intN", "_Decimal64", "_Decimal128",
	     "_Fract", "_Accum" or "__auto_type".  */
	  if (specs->typespec_word != cts_none)
	    {
	      error_at (loc,
			"two or more data types in declaration specifiers");
	      return specs;
	    }
	  switch (i)
	    {
	    case RID_AUTO_TYPE:
	      if (specs->long_p)
		error_at (loc,
			  ("both %<long%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->short_p)
		error_at (loc,
			  ("both %<short%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->signed_p)
		error_at (loc,
			  ("both %<signed%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->unsigned_p)
		error_at (loc,
			  ("both %<unsigned%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->complex_p)
		error_at (loc,
			  ("both %<complex%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else if (specs->saturating_p)
		error_at (loc,
			  ("both %<_Sat%> and %<__auto_type%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->typespec_word = cts_auto_type;
		  specs->locations[cdw_typespec] = loc;
		}
	      return specs;
	    case RID_INT_N_0:
	    case RID_INT_N_1:
	    case RID_INT_N_2:
	    case RID_INT_N_3:
	      specs->int_n_idx = i - RID_INT_N_0;
	      if (!in_system_header_at (input_location))
		pedwarn (loc, OPT_Wpedantic,
			 "ISO C does not support %<__int%d%> types",
			 int_n_data[specs->int_n_idx].bitsize);

	      if (specs->long_p)
		error_at (loc,
			  ("both %<__int%d%> and %<long%> in "
			   "declaration specifiers"),
			  int_n_data[specs->int_n_idx].bitsize);
	      else if (specs->saturating_p)
		error_at (loc,
			  ("both %<_Sat%> and %<__int%d%> in "
			   "declaration specifiers"),
			  int_n_data[specs->int_n_idx].bitsize);
	      else if (specs->short_p)
		error_at (loc,
			  ("both %<__int%d%> and %<short%> in "
			   "declaration specifiers"),
			  int_n_data[specs->int_n_idx].bitsize);
	      else if (! int_n_enabled_p [specs->int_n_idx])
		error_at (loc,
			  "%<__int%d%> is not supported on this target",
			  int_n_data[specs->int_n_idx].bitsize);
	      else
		{
		  specs->typespec_word = cts_int_n;
		  specs->locations[cdw_typespec] = loc;
		}
	      return specs;
	    case RID_VOID:
	      if (specs->long_p)
		error_at (loc,
			  ("both %<long%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->short_p)
		error_at (loc,
			  ("both %<short%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->signed_p)
		error_at (loc,
			  ("both %<signed%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->unsigned_p)
		error_at (loc,
			  ("both %<unsigned%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->complex_p)
		error_at (loc,
			  ("both %<complex%> and %<void%> in "
			   "declaration specifiers"));
	      else if (specs->saturating_p)
		error_at (loc,
			  ("both %<_Sat%> and %<void%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->typespec_word = cts_void;
		  specs->locations[cdw_typespec] = loc;
		}
	      return specs;
	    case RID_BOOL:
	      if (!in_system_header_at (loc))
		pedwarn_c90 (loc, OPT_Wpedantic,
			     "ISO C90 does not support boolean types");
	      if (specs->long_p)
		error_at (loc,
			  ("both %<long%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->short_p)
		error_at (loc,
			  ("both %<short%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->signed_p)
		error_at (loc,
			  ("both %<signed%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->unsigned_p)
		error_at (loc,
			  ("both %<unsigned%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->complex_p)
		error_at (loc,
			  ("both %<complex%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else if (specs->saturating_p)
		error_at (loc,
			  ("both %<_Sat%> and %<_Bool%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->typespec_word = cts_bool;
		  specs->locations[cdw_typespec] = loc;
		}
	      return specs;
	    case RID_CHAR:
	      if (specs->long_p)
		error_at (loc,
			  ("both %<long%> and %<char%> in "
			   "declaration specifiers"));
	      else if (specs->short_p)
		error_at (loc,
			  ("both %<short%> and %<char%> in "
			   "declaration specifiers"));
	      else if (specs->saturating_p)
		error_at (loc,
			  ("both %<_Sat%> and %<char%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->typespec_word = cts_char;
		  specs->locations[cdw_typespec] = loc;
		}
	      return specs;
	    case RID_INT:
	      if (specs->saturating_p)
		error_at (loc,
			  ("both %<_Sat%> and %<int%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->typespec_word = cts_int;
		  specs->locations[cdw_typespec] = loc;
		}
	      return specs;
	    case RID_FLOAT:
	      if (specs->long_p)
		error_at (loc,
			  ("both %<long%> and %<float%> in "
			   "declaration specifiers"));
	      else if (specs->short_p)
		error_at (loc,
			  ("both %<short%> and %<float%> in "
			   "declaration specifiers"));
	      else if (specs->signed_p)
		error_at (loc,
			  ("both %<signed%> and %<float%> in "
			   "declaration specifiers"));
	      else if (specs->unsigned_p)
		error_at (loc,
			  ("both %<unsigned%> and %<float%> in "
			   "declaration specifiers"));
	      else if (specs->saturating_p)
		error_at (loc,
			  ("both %<_Sat%> and %<float%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->typespec_word = cts_float;
		  specs->locations[cdw_typespec] = loc;
		}
	      return specs;
	    case RID_DOUBLE:
	      if (specs->long_long_p)
		error_at (loc,
			  ("both %<long long%> and %<double%> in "
			   "declaration specifiers"));
	      else if (specs->short_p)
		error_at (loc,
			  ("both %<short%> and %<double%> in "
			   "declaration specifiers"));
	      else if (specs->signed_p)
		error_at (loc,
			  ("both %<signed%> and %<double%> in "
			   "declaration specifiers"));
	      else if (specs->unsigned_p)
		error_at (loc,
			  ("both %<unsigned%> and %<double%> in "
			   "declaration specifiers"));
	      else if (specs->saturating_p)
		error_at (loc,
			  ("both %<_Sat%> and %<double%> in "
			   "declaration specifiers"));
	      else
		{
		  specs->typespec_word = cts_double;
		  specs->locations[cdw_typespec] = loc;
		}
	      return specs;
	    case RID_DFLOAT32:
	    case RID_DFLOAT64:
	    case RID_DFLOAT128:
	      {
		const char *str;
		if (i == RID_DFLOAT32)
		  str = "_Decimal32";
		else if (i == RID_DFLOAT64)
		  str = "_Decimal64";
		else
		  str = "_Decimal128";
		if (specs->long_long_p)
		  error_at (loc,
			    ("both %<long long%> and %<%s%> in "
			     "declaration specifiers"),
			    str);
		if (specs->long_p)
		  error_at (loc,
			    ("both %<long%> and %<%s%> in "
			     "declaration specifiers"),
			    str);
		else if (specs->short_p)
		  error_at (loc,
			    ("both %<short%> and %<%s%> in "
			     "declaration specifiers"),
			    str);
		else if (specs->signed_p)
		  error_at (loc,
			    ("both %<signed%> and %<%s%> in "
			     "declaration specifiers"),
			    str);
		else if (specs->unsigned_p)
		  error_at (loc,
			    ("both %<unsigned%> and %<%s%> in "
			     "declaration specifiers"),
			    str);
		else if (specs->complex_p)
		  error_at (loc,
			    ("both %<complex%> and %<%s%> in "
			     "declaration specifiers"),
			    str);
		else if (specs->saturating_p)
		  error_at (loc,
			    ("both %<_Sat%> and %<%s%> in "
			     "declaration specifiers"),
			    str);
		else if (i == RID_DFLOAT32)
		  specs->typespec_word = cts_dfloat32;
		else if (i == RID_DFLOAT64)
		  specs->typespec_word = cts_dfloat64;
		else
		  specs->typespec_word = cts_dfloat128;
		specs->locations[cdw_typespec] = loc;
	      }
	      if (!targetm.decimal_float_supported_p ())
		error_at (loc,
			  ("decimal floating point not supported "
			   "for this target"));
	      pedwarn (loc, OPT_Wpedantic,
		       "ISO C does not support decimal floating point");
	      return specs;
	    case RID_FRACT:
	    case RID_ACCUM:
	      {
		const char *str;
		if (i == RID_FRACT)
		  str = "_Fract";
		else
		  str = "_Accum";
		if (specs->complex_p)
		  error_at (loc,
			    ("both %<complex%> and %<%s%> in "
			     "declaration specifiers"),
			    str);
		else if (i == RID_FRACT)
		  specs->typespec_word = cts_fract;
		else
		  specs->typespec_word = cts_accum;
		specs->locations[cdw_typespec] = loc;
	      }
	      if (!targetm.fixed_point_supported_p ())
		error_at (loc,
			  "fixed-point types not supported for this target");
	      pedwarn (loc, OPT_Wpedantic,
		       "ISO C does not support fixed-point types");
	      return specs;
	    default:
	      /* ObjC reserved word "id", handled below.  */
	      break;
	    }
	}
    }

  /* Now we have a typedef (a TYPE_DECL node), an identifier (some
     form of ObjC type, cases such as "int" and "long" being handled
     above), a TYPE (struct, union, enum and typeof specifiers) or an
     ERROR_MARK.  In none of these cases may there have previously
     been any type specifiers.  */
  if (specs->type || specs->typespec_word != cts_none
      || specs->long_p || specs->short_p || specs->signed_p
      || specs->unsigned_p || specs->complex_p)
    error_at (loc, "two or more data types in declaration specifiers");
  else if (TREE_CODE (type) == TYPE_DECL)
    {
      if (TREE_TYPE (type) == error_mark_node)
	; /* Allow the type to default to int to avoid cascading errors.  */
      else
	{
	  specs->type = TREE_TYPE (type);
	  specs->decl_attr = DECL_ATTRIBUTES (type);
	  specs->typedef_p = true;
	  specs->explicit_signed_p = C_TYPEDEF_EXPLICITLY_SIGNED (type);
	  specs->locations[cdw_typedef] = loc;

	  /* If this typedef name is defined in a struct, then a C++
	     lookup would return a different value.  */
	  if (warn_cxx_compat
	      && I_SYMBOL_BINDING (DECL_NAME (type))->in_struct)
	    warning_at (loc, OPT_Wc___compat,
			"C++ lookup of %qD would return a field, not a type",
			type);

	  /* If we are parsing a struct, record that a struct field
	     used a typedef.  */
	  if (warn_cxx_compat && struct_parse_info != NULL)
	    struct_parse_info->typedefs_seen.safe_push (type);
	}
    }
  else if (TREE_CODE (type) == IDENTIFIER_NODE)
    {
      tree t = lookup_name (type);
      if (!t || TREE_CODE (t) != TYPE_DECL)
	error_at (loc, "%qE fails to be a typedef or built in type", type);
      else if (TREE_TYPE (t) == error_mark_node)
	;
      else
	{
	  specs->type = TREE_TYPE (t);
	  specs->locations[cdw_typespec] = loc;
	}
    }
  else
    {
      /* A typeof specifier: record its expression (for side effects
	 in e.g. variably modified types) and chain it onto any
	 previously recorded expression.  */
      if (TREE_CODE (type) != ERROR_MARK && spec.kind == ctsk_typeof)
	{
	  specs->typedef_p = true;
	  specs->locations[cdw_typedef] = loc;
	  if (spec.expr)
	    {
	      if (specs->expr)
		specs->expr = build2 (COMPOUND_EXPR, TREE_TYPE (spec.expr),
				      specs->expr, spec.expr);
	      else
		specs->expr = spec.expr;
	      specs->expr_const_operands &= spec.expr_const_operands;
	    }
	}
      specs->type = type;
    }

  return specs;
}

/* Add the storage class specifier or function specifier SCSPEC to the
   declaration specifiers SPECS, returning SPECS.
*/

struct c_declspecs *
declspecs_add_scspec (source_location loc,
		      struct c_declspecs *specs,
		      tree scspec)
{
  enum rid i;
  enum c_storage_class n = csc_none;
  bool dupe = false;
  specs->declspecs_seen_p = true;
  gcc_assert (TREE_CODE (scspec) == IDENTIFIER_NODE
	      && C_IS_RESERVED_WORD (scspec));
  i = C_RID_CODE (scspec);
  /* A storage class after other declaration specifiers is old-style.  */
  if (specs->non_sc_seen_p)
    warning (OPT_Wold_style_declaration,
	     "%qE is not at beginning of declaration", scspec);
  switch (i)
    {
    case RID_INLINE:
      /* C99 permits duplicate inline.  Although of doubtful utility,
	 it seems simplest to permit it in gnu89 mode as well, as
	 there is also little utility in maintaining this as a
	 difference between gnu89 and C99 inline.  */
      dupe = false;
      specs->inline_p = true;
      specs->locations[cdw_inline] = loc;
      break;
    case RID_NORETURN:
      /* Duplicate _Noreturn is permitted.  */
      dupe = false;
      specs->noreturn_p = true;
      specs->locations[cdw_noreturn] = loc;
      break;
    case RID_THREAD:
      dupe = specs->thread_p;
      if (specs->storage_class == csc_auto)
	error ("%qE used with %<auto%>", scspec);
      else if (specs->storage_class == csc_register)
	error ("%qE used with %<register%>", scspec);
      else if (specs->storage_class == csc_typedef)
	error ("%qE used with %<typedef%>", scspec);
      else
	{
	  specs->thread_p = true;
	  specs->thread_gnu_p = (strcmp (IDENTIFIER_POINTER (scspec),
					 "__thread") == 0);
	  /* A diagnostic is not required for the use of this
	     identifier in the implementation namespace; only diagnose
	     it for the C11 spelling because of existing code using
	     the other spelling.  */
	  if (!specs->thread_gnu_p)
	    {
	      if (flag_isoc99)
		pedwarn_c99 (loc, OPT_Wpedantic,
			     "ISO C99 does not support %qE", scspec);
	      else
		pedwarn_c99 (loc, OPT_Wpedantic,
			     "ISO C90 does not support %qE", scspec);
	    }
	  specs->locations[cdw_thread] = loc;
	}
      break;
    case RID_AUTO:
      n = csc_auto;
      break;
    case RID_EXTERN:
      n = csc_extern;
      /* Diagnose "__thread extern".  */
      if (specs->thread_p && specs->thread_gnu_p)
	error ("%<__thread%> before %<extern%>");
      break;
    case RID_REGISTER:
      n = csc_register;
      break;
    case RID_STATIC:
      n = csc_static;
      /* Diagnose "__thread static".  */
      if (specs->thread_p && specs->thread_gnu_p)
	error ("%<__thread%> before %<static%>");
      break;
    case RID_TYPEDEF:
      n = csc_typedef;
      break;
    default:
      gcc_unreachable ();
    }
  if (n != csc_none && n == specs->storage_class)
    dupe = true;
  if (dupe)
    {
      if (i == RID_THREAD)
	error ("duplicate %<_Thread_local%> or %<__thread%>");
      else
	error ("duplicate %qE", scspec);
    }
  if (n != csc_none)
    {
      if (specs->storage_class != csc_none && n != specs->storage_class)
	{
	  error ("multiple storage classes in declaration specifiers");
	}
      else
	{
	  specs->storage_class = n;
	  specs->locations[cdw_storage_class] = loc;
	  /* Thread-local storage only combines with extern/static.  */
	  if (n != csc_extern && n != csc_static && specs->thread_p)
	    {
	      error ("%qs used with %qE",
		     specs->thread_gnu_p ? "__thread" : "_Thread_local",
		     scspec);
	      specs->thread_p = false;
	    }
	}
    }
  return specs;
}

/* Add the attributes ATTRS to the declaration specifiers SPECS,
   returning SPECS.  */

struct c_declspecs *
declspecs_add_attrs (source_location loc, struct c_declspecs *specs,
		     tree attrs)
{
  specs->attrs = chainon (attrs, specs->attrs);
  specs->locations[cdw_attributes] = loc;
  specs->declspecs_seen_p = true;
  return specs;
}

/* Add an _Alignas specifier (expression ALIGN, or type whose
   alignment is ALIGN) to the declaration specifiers SPECS, returning
   SPECS.  */

struct c_declspecs *
declspecs_add_alignas (source_location loc,
		       struct c_declspecs *specs, tree align)
{
  int align_log;
  specs->alignas_p = true;
  specs->locations[cdw_alignas] = loc;
  if (align == error_mark_node)
    return specs;
  /* Keep only the strictest alignment seen so far.  */
  align_log = check_user_alignment (align, true);
  if (align_log > specs->align_log)
    specs->align_log = align_log;
  return specs;
}

/* Combine "long", "short", "signed", "unsigned" and "_Complex" type
   specifiers with any other type specifier to determine the resulting
   type.
   This is where ISO C checks on complex types are made, since
   "_Complex long" is a prefix of the valid ISO C type "_Complex long
   double".  */

struct c_declspecs *
finish_declspecs (struct c_declspecs *specs)
{
  /* If a type was specified as a whole, we have no modifiers and are
     done.  */
  if (specs->type != NULL_TREE)
    {
      gcc_assert (!specs->long_p && !specs->long_long_p
		  && !specs->short_p && !specs->signed_p
		  && !specs->unsigned_p && !specs->complex_p);

      /* Set a dummy type.  */
      if (TREE_CODE (specs->type) == ERROR_MARK)
	specs->type = integer_type_node;
      return specs;
    }

  /* If none of "void", "_Bool", "char", "int", "float" or "double"
     has been specified, treat it as "int" unless "_Complex" is
     present and there are no other specifiers.  If we just have
     "_Complex", it is equivalent to "_Complex double", but e.g.
     "_Complex short" is equivalent to "_Complex short int".  */
  if (specs->typespec_word == cts_none)
    {
      if (specs->saturating_p)
	{
	  error_at (specs->locations[cdw_saturating],
		    "%<_Sat%> is used without %<_Fract%> or %<_Accum%>");
	  if (!targetm.fixed_point_supported_p ())
	    error_at (specs->locations[cdw_saturating],
		      "fixed-point types not supported for this target");
	  specs->typespec_word = cts_fract;
	}
      else if (specs->long_p || specs->short_p
	       || specs->signed_p || specs->unsigned_p)
	{
	  specs->typespec_word = cts_int;
	}
      else if (specs->complex_p)
	{
	  specs->typespec_word = cts_double;
	  pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
		   "ISO C does not support plain %<complex%> meaning "
		   "%<double complex%>");
	}
      else
	{
	  specs->typespec_word = cts_int;
	  specs->default_int_p = true;
	  /* We don't diagnose this here because grokdeclarator will
	     give more specific diagnostics according to whether it is
	     a function definition.  */
	}
    }

  /* If "signed" was specified, record this to distinguish "int" and
     "signed int" in the case of a bit-field with
     -funsigned-bitfields.  */
  specs->explicit_signed_p = specs->signed_p;

  /* Now compute the actual type.  */
  switch (specs->typespec_word)
    {
    case cts_auto_type:
      gcc_assert (!specs->long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p
		  && !specs->complex_p);
      /* Type to be filled in later.  */
      break;
    case cts_void:
      gcc_assert (!specs->long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p
		  && !specs->complex_p);
      specs->type = void_type_node;
      break;
    case cts_bool:
      gcc_assert (!specs->long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p
		  && !specs->complex_p);
      specs->type = boolean_type_node;
      break;
    case cts_char:
      gcc_assert (!specs->long_p && !specs->short_p);
      gcc_assert (!(specs->signed_p && specs->unsigned_p));
      if (specs->signed_p)
	specs->type = signed_char_type_node;
      else if (specs->unsigned_p)
	specs->type = unsigned_char_type_node;
      else
	specs->type = char_type_node;
      if (specs->complex_p)
	{
	  pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
		   "ISO C does not support complex integer types");
	  specs->type = build_complex_type (specs->type);
	}
      break;
    case cts_int_n:
      gcc_assert (!specs->long_p && !specs->short_p && !specs->long_long_p);
      gcc_assert (!(specs->signed_p && specs->unsigned_p));
      specs->type = (specs->unsigned_p
		     ? int_n_trees[specs->int_n_idx].unsigned_type
		     : int_n_trees[specs->int_n_idx].signed_type);
      if (specs->complex_p)
	{
	  pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
		   "ISO C does not support complex integer types");
	  specs->type = build_complex_type (specs->type);
	}
      break;
    case cts_int:
      gcc_assert (!(specs->long_p && specs->short_p));
      gcc_assert (!(specs->signed_p && specs->unsigned_p));
      if (specs->long_long_p)
	specs->type = (specs->unsigned_p
		       ? long_long_unsigned_type_node
		       : long_long_integer_type_node);
      else if (specs->long_p)
	specs->type = (specs->unsigned_p
		       ? long_unsigned_type_node
		       : long_integer_type_node);
      else if (specs->short_p)
	specs->type = (specs->unsigned_p
		       ? short_unsigned_type_node
		       : short_integer_type_node);
      else
	specs->type = (specs->unsigned_p
		       ? unsigned_type_node
		       : integer_type_node);
      if (specs->complex_p)
	{
	  pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
		   "ISO C does not support complex integer types");
	  specs->type = build_complex_type (specs->type);
	}
      break;
    case cts_float:
      gcc_assert (!specs->long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p);
      specs->type = (specs->complex_p
		     ? complex_float_type_node
		     : float_type_node);
      break;
    case cts_double:
      gcc_assert (!specs->long_long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p);
      if (specs->long_p)
	{
	  specs->type = (specs->complex_p
			 ? complex_long_double_type_node
			 : long_double_type_node);
	}
      else
	{
	  specs->type = (specs->complex_p
			 ? complex_double_type_node
			 : double_type_node);
	}
      break;
    case cts_dfloat32:
    case cts_dfloat64:
    case cts_dfloat128:
      gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p
		  && !specs->signed_p && !specs->unsigned_p
		  && !specs->complex_p);
      if (specs->typespec_word == cts_dfloat32)
	specs->type = dfloat32_type_node;
      else if (specs->typespec_word == cts_dfloat64)
	specs->type = dfloat64_type_node;
      else
	specs->type = dfloat128_type_node;
      break;
    case cts_fract:
      gcc_assert (!specs->complex_p);
      if (!targetm.fixed_point_supported_p ())
	specs->type = integer_type_node;
      else if (specs->saturating_p)
	{
	  if (specs->long_long_p)
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_long_long_fract_type_node
			  : sat_long_long_fract_type_node;
	  else if (specs->long_p)
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_long_fract_type_node
			  : sat_long_fract_type_node;
	  else if (specs->short_p)
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_short_fract_type_node
			  : sat_short_fract_type_node;
	  else
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_fract_type_node
			  : sat_fract_type_node;
	}
      else
	{
	  if (specs->long_long_p)
	    specs->type = specs->unsigned_p
			  ? unsigned_long_long_fract_type_node
			  : long_long_fract_type_node;
	  else if (specs->long_p)
	    specs->type = specs->unsigned_p
			  ? unsigned_long_fract_type_node
			  : long_fract_type_node;
	  else if (specs->short_p)
	    specs->type = specs->unsigned_p
			  ? unsigned_short_fract_type_node
			  : short_fract_type_node;
	  else
	    specs->type = specs->unsigned_p
			  ? unsigned_fract_type_node
			  : fract_type_node;
	}
      break;
    case cts_accum:
      gcc_assert (!specs->complex_p);
      if (!targetm.fixed_point_supported_p ())
	specs->type = integer_type_node;
      else if (specs->saturating_p)
	{
	  if (specs->long_long_p)
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_long_long_accum_type_node
			  : sat_long_long_accum_type_node;
	  else if (specs->long_p)
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_long_accum_type_node
			  : sat_long_accum_type_node;
	  else if (specs->short_p)
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_short_accum_type_node
			  : sat_short_accum_type_node;
	  else
	    specs->type = specs->unsigned_p
			  ? sat_unsigned_accum_type_node
			  : sat_accum_type_node;
	}
      else
	{
	  if (specs->long_long_p)
	    specs->type = specs->unsigned_p
			  ? unsigned_long_long_accum_type_node
			  : long_long_accum_type_node;
	  else if (specs->long_p)
	    specs->type = specs->unsigned_p
			  ? unsigned_long_accum_type_node
			  : long_accum_type_node;
	  else if (specs->short_p)
	    specs->type = specs->unsigned_p
			  ? unsigned_short_accum_type_node
			  : short_accum_type_node;
	  else
	    specs->type = specs->unsigned_p
			  ? unsigned_accum_type_node
			  : accum_type_node;
	}
      break;
    default:
      gcc_unreachable ();
    }

  return specs;
}

/* A subroutine of c_write_global_declarations.  Perform final processing
   on one file scope's declarations (or the external scope's
   declarations), GLOBALS.  */

static void
c_write_global_declarations_1 (tree globals)
{
  tree decl;
  bool reconsider;

  /* Process the decls in the order they were written.  */
  for (decl = globals; decl; decl = DECL_CHAIN (decl))
    {
      /* Check for used but undefined static functions using the C
	 standard's definition of "used", and set TREE_NO_WARNING so
	 that check_global_declarations doesn't repeat the check.
*/ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_INITIAL (decl) == 0 && DECL_EXTERNAL (decl) && !TREE_PUBLIC (decl) && C_DECL_USED (decl)) { pedwarn (input_location, 0, "%q+F used but never defined", decl); TREE_NO_WARNING (decl) = 1; } wrapup_global_declaration_1 (decl); } do { reconsider = false; for (decl = globals; decl; decl = DECL_CHAIN (decl)) reconsider |= wrapup_global_declaration_2 (decl); } while (reconsider); for (decl = globals; decl; decl = DECL_CHAIN (decl)) check_global_declaration_1 (decl); } /* A subroutine of c_write_global_declarations Emit debug information for each of the declarations in GLOBALS. */ static void c_write_global_declarations_2 (tree globals) { tree decl; for (decl = globals; decl ; decl = DECL_CHAIN (decl)) debug_hooks->global_decl (decl); } /* Callback to collect a source_ref from a DECL. */ static void collect_source_ref_cb (tree decl) { if (!DECL_IS_BUILTIN (decl)) collect_source_ref (LOCATION_FILE (decl_sloc (decl, false))); } /* Preserve the external declarations scope across a garbage collect. */ static GTY(()) tree ext_block; /* Collect all references relevant to SOURCE_FILE. */ static void collect_all_refs (const char *source_file) { tree t; unsigned i; FOR_EACH_VEC_ELT (*all_translation_units, i, t) collect_ada_nodes (BLOCK_VARS (DECL_INITIAL (t)), source_file); collect_ada_nodes (BLOCK_VARS (ext_block), source_file); } /* Iterate over all global declarations and call CALLBACK. */ static void for_each_global_decl (void (*callback) (tree decl)) { tree t; tree decls; tree decl; unsigned i; FOR_EACH_VEC_ELT (*all_translation_units, i, t) { decls = DECL_INITIAL (t); for (decl = BLOCK_VARS (decls); decl; decl = TREE_CHAIN (decl)) callback (decl); } for (decl = BLOCK_VARS (ext_block); decl; decl = TREE_CHAIN (decl)) callback (decl); } void c_write_global_declarations (void) { tree t; unsigned i; /* We don't want to do this if generating a PCH. 
*/ if (pch_file) return; timevar_start (TV_PHASE_DEFERRED); /* Do the Objective-C stuff. This is where all the Objective-C module stuff gets generated (symtab, class/protocol/selector lists etc). */ if (c_dialect_objc ()) objc_write_global_declarations (); /* Close the external scope. */ ext_block = pop_scope (); external_scope = 0; gcc_assert (!current_scope); /* Handle -fdump-ada-spec[-slim]. */ if (flag_dump_ada_spec || flag_dump_ada_spec_slim) { /* Build a table of files to generate specs for */ if (flag_dump_ada_spec_slim) collect_source_ref (main_input_filename); else for_each_global_decl (collect_source_ref_cb); dump_ada_specs (collect_all_refs, NULL); } if (ext_block) { tree tmp = BLOCK_VARS (ext_block); int flags; FILE * stream = dump_begin (TDI_tu, &flags); if (stream && tmp) { dump_node (tmp, flags & ~TDF_SLIM, stream); dump_end (TDI_tu, stream); } } /* Process all file scopes in this compilation, and the external_scope, through wrapup_global_declarations and check_global_declarations. */ FOR_EACH_VEC_ELT (*all_translation_units, i, t) c_write_global_declarations_1 (BLOCK_VARS (DECL_INITIAL (t))); c_write_global_declarations_1 (BLOCK_VARS (ext_block)); timevar_stop (TV_PHASE_DEFERRED); timevar_start (TV_PHASE_OPT_GEN); /* We're done parsing; proceed to optimize and emit assembly. FIXME: shouldn't be the front end's responsibility to call this. */ symtab->finalize_compilation_unit (); timevar_stop (TV_PHASE_OPT_GEN); timevar_start (TV_PHASE_DBGINFO); /* After cgraph has had a chance to emit everything that's going to be emitted, output debug information for globals. */ if (!seen_error ()) { timevar_push (TV_SYMOUT); FOR_EACH_VEC_ELT (*all_translation_units, i, t) c_write_global_declarations_2 (BLOCK_VARS (DECL_INITIAL (t))); c_write_global_declarations_2 (BLOCK_VARS (ext_block)); timevar_pop (TV_SYMOUT); } ext_block = NULL; timevar_stop (TV_PHASE_DBGINFO); } /* Register reserved keyword WORD as qualifier for address space AS. 
*/ void c_register_addr_space (const char *word, addr_space_t as) { int rid = RID_FIRST_ADDR_SPACE + as; tree id; /* Address space qualifiers are only supported in C with GNU extensions enabled. */ if (c_dialect_objc () || flag_no_asm) return; id = get_identifier (word); C_SET_RID_CODE (id, rid); C_IS_RESERVED_WORD (id) = 1; ridpointers [rid] = id; } /* Return identifier to look up for omp declare reduction. */ tree c_omp_reduction_id (enum tree_code reduction_code, tree reduction_id) { const char *p = NULL; switch (reduction_code) { case PLUS_EXPR: p = "+"; break; case MULT_EXPR: p = "*"; break; case MINUS_EXPR: p = "-"; break; case BIT_AND_EXPR: p = "&"; break; case BIT_XOR_EXPR: p = "^"; break; case BIT_IOR_EXPR: p = "|"; break; case TRUTH_ANDIF_EXPR: p = "&&"; break; case TRUTH_ORIF_EXPR: p = "||"; break; case MIN_EXPR: p = "min"; break; case MAX_EXPR: p = "max"; break; default: break; } if (p == NULL) { if (TREE_CODE (reduction_id) != IDENTIFIER_NODE) return error_mark_node; p = IDENTIFIER_POINTER (reduction_id); } const char prefix[] = "omp declare reduction "; size_t lenp = sizeof (prefix); size_t len = strlen (p); char *name = XALLOCAVEC (char, lenp + len); memcpy (name, prefix, lenp - 1); memcpy (name + lenp - 1, p, len + 1); return get_identifier (name); } /* Lookup REDUCTION_ID in the current scope, or create an artificial VAR_DECL, bind it into the current scope and return it. */ tree c_omp_reduction_decl (tree reduction_id) { struct c_binding *b = I_SYMBOL_BINDING (reduction_id); if (b != NULL && B_IN_CURRENT_SCOPE (b)) return b->decl; tree decl = build_decl (BUILTINS_LOCATION, VAR_DECL, reduction_id, integer_type_node); DECL_ARTIFICIAL (decl) = 1; DECL_EXTERNAL (decl) = 1; TREE_STATIC (decl) = 1; TREE_PUBLIC (decl) = 0; bind (reduction_id, decl, current_scope, true, false, BUILTINS_LOCATION); return decl; } /* Lookup REDUCTION_ID in the first scope where it has entry for TYPE. 
*/ tree c_omp_reduction_lookup (tree reduction_id, tree type) { struct c_binding *b = I_SYMBOL_BINDING (reduction_id); while (b) { tree t; for (t = DECL_INITIAL (b->decl); t; t = TREE_CHAIN (t)) if (comptypes (TREE_PURPOSE (t), type)) return TREE_VALUE (t); b = b->shadowed; } return error_mark_node; } /* Helper function called via walk_tree, to diagnose invalid #pragma omp declare reduction combiners or initializers. */ tree c_check_omp_declare_reduction_r (tree *tp, int *, void *data) { tree *vars = (tree *) data; if (SSA_VAR_P (*tp) && !DECL_ARTIFICIAL (*tp) && *tp != vars[0] && *tp != vars[1]) { location_t loc = DECL_SOURCE_LOCATION (vars[0]); if (strcmp (IDENTIFIER_POINTER (DECL_NAME (vars[0])), "omp_out") == 0) error_at (loc, "%<#pragma omp declare reduction%> combiner refers to " "variable %qD which is not %<omp_out%> nor %<omp_in%>", *tp); else error_at (loc, "%<#pragma omp declare reduction%> initializer refers " "to variable %qD which is not %<omp_priv%> nor " "%<omp_orig%>", *tp); return *tp; } return NULL_TREE; } #include "gt-c-c-decl.h"
pp_collision.c
/* Copyright (C) 2017 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/

#include <stdio.h>
#include <stdlib.h>
#include <phonoc_array.h>
#include <phonoc_const.h>
#include <phonoc_utils.h>
#include <phonon3_h/imag_self_energy_with_g.h>
#include <phonon3_h/pp_collision.h>
#include <phonon3_h/interaction.h>
#include <triplet_h/triplet.h>
#include <triplet_h/triplet_iw.h>
#include <lapack_wrapper.h>

/* Compute the imaginary self-energy contribution of one q-point triplet:
   ph-ph interaction strengths followed by the energy-conserving sum over
   bands, for all temperatures.  Results go into ise (num_temps x num_band0).  */
static void get_collision(double *ise,
                          const size_t num_band0,
                          const size_t num_band,
                          const size_t num_temps,
                          const double *temperatures,
                          const double *g,
                          const char *g_zero,
                          const double *frequencies,
                          const lapack_complex_double *eigenvectors,
                          const size_t triplet[3],
                          const int weight,
                          const int *grid_address,
                          const int *mesh,
                          const double *fc3,
                          const int is_compact_fc3,
                          const double *shortest_vectors,
                          const int svecs_dims[3],
                          const int *multiplicity,
                          const double *masses,
                          const int *p2s_map,
                          const int *s2p_map,
                          const int *band_indices,
                          const int symmetrize_fc3_q,
                          const double cutoff_frequency,
                          const int openmp_per_triplets);
/* Accumulate per-triplet results into the output array, optionally split
   into Normal and Umklapp channels.  */
static void finalize_ise(double *imag_self_energy,
                         const double *ise,
                         const int *grid_address,
                         const size_t (*triplets)[3],
                         const size_t num_triplets,
                         const size_t num_temps,
                         const size_t num_band0,
                         const int is_NU);

/* Ph-ph collision (imaginary self-energy) summed over all triplets, with
   integration weights from the tetrahedron method.  imag_self_energy has
   num_temps * num_band0 elements (twice that when is_NU is non-zero, to
   hold the Normal and Umklapp parts separately).  */
void ppc_get_pp_collision(double *imag_self_energy,
                          PHPYCONST int relative_grid_address[24][4][3], /* thm */
                          const double *frequencies,
                          const lapack_complex_double *eigenvectors,
                          const size_t (*triplets)[3],
                          const size_t num_triplets,
                          const int *weights,
                          const int *grid_address, /* thm */
                          const size_t *bz_map, /* thm */
                          const int *mesh, /* thm */
                          const double *fc3,
                          const int is_compact_fc3,
                          const double *shortest_vectors,
                          const int svecs_dims[3],
                          const int *multiplicity,
                          const double *masses,
                          const int *p2s_map,
                          const int *s2p_map,
                          const Iarray *band_indices,
                          const Darray *temperatures,
                          const int is_NU,
                          const int symmetrize_fc3_q,
                          const double cutoff_frequency)
{
  size_t i;
  size_t num_band, num_band0, num_band_prod, num_temps;
  int openmp_per_triplets;
  double *ise, *freqs_at_gp, *g;
  char *g_zero;
  int tp_relative_grid_address[2][24][4][3];

  ise = NULL;
  freqs_at_gp = NULL;
  g = NULL;
  g_zero = NULL;

  num_band0 = band_indices->dims[0];
  num_band = svecs_dims[1] * 3;  /* 3 phonon branches per atom */
  num_band_prod = num_band0 * num_band * num_band;
  num_temps = temperatures->dims[0];
  /* Per-triplet scratch so the parallel loop writes disjoint slices.  */
  ise = (double*)malloc(sizeof(double) * num_triplets * num_temps * num_band0);

  /* Frequencies of the selected bands at the first grid point of the
     triplets (all triplets share the same first grid point here).  */
  freqs_at_gp = (double*)malloc(sizeof(double) * num_band0);
  for (i = 0; i < num_band0; i++) {
    freqs_at_gp[i] = frequencies[triplets[0][0] * num_band +
                                 band_indices->data[i]];
  }

  /* Parallelize over triplets when there are many of them; otherwise the
     inner band loops are parallelized instead (1 - openmp_per_triplets).  */
  if (num_triplets > num_band) {
    openmp_per_triplets = 1;
  } else {
    openmp_per_triplets = 0;
  }

  tpl_set_relative_grid_address(tp_relative_grid_address,
                                relative_grid_address);

/* g and g_zero are private: each thread allocates and frees its own.  */
#pragma omp parallel for schedule(guided) private(g, g_zero) if (openmp_per_triplets)
  for (i = 0; i < num_triplets; i++) {
    g = (double*)malloc(sizeof(double) * 2 * num_band_prod);
    g_zero = (char*)malloc(sizeof(char) * num_band_prod);
    /* Tetrahedron-method integration weights; g_zero flags elements that
       contribute nothing so they can be skipped later.  */
    tpi_get_integration_weight(g,
                               g_zero,
                               freqs_at_gp,
                               num_band0,
                               tp_relative_grid_address,
                               mesh,
                               triplets[i],
                               1,
                               (int(*)[3])grid_address,
                               bz_map,
                               frequencies,
                               num_band,
                               2,
                               1 - openmp_per_triplets);
    get_collision(ise + i * num_temps * num_band0,
                  num_band0,
                  num_band,
                  num_temps,
                  temperatures->data,
                  g,
                  g_zero,
                  frequencies,
                  eigenvectors,
                  triplets[i],
                  weights[i],
                  grid_address,
                  mesh,
                  fc3,
                  is_compact_fc3,
                  shortest_vectors,
                  svecs_dims,
                  multiplicity,
                  masses,
                  p2s_map,
                  s2p_map,
                  band_indices->data,
                  symmetrize_fc3_q,
                  cutoff_frequency,
                  openmp_per_triplets);

    free(g_zero);
    g_zero = NULL;
    free(g);
    g = NULL;
  }

  finalize_ise(imag_self_energy,
               ise,
               grid_address,
               triplets,
               num_triplets,
               num_temps,
               num_band0,
               is_NU);

  free(freqs_at_gp);
  freqs_at_gp = NULL;
  free(ise);
  ise = NULL;
}

/* Same as ppc_get_pp_collision, but with Gaussian smearing (width sigma,
   truncated at sigma * sigma_cutoff) instead of the tetrahedron method.  */
void ppc_get_pp_collision_with_sigma(
  double *imag_self_energy,
  const double sigma,
  const double sigma_cutoff,
  const double *frequencies,
  const lapack_complex_double *eigenvectors,
  const size_t (*triplets)[3],
  const size_t num_triplets,
  const int *weights,
  const int *grid_address,
  const int *mesh,
  const double *fc3,
  const int is_compact_fc3,
  const double *shortest_vectors,
  const int svecs_dims[3],
  const int *multiplicity,
  const double *masses,
  const int *p2s_map,
  const int *s2p_map,
  const Iarray *band_indices,
  const Darray *temperatures,
  const int is_NU,
  const int symmetrize_fc3_q,
  const double cutoff_frequency)
{
  size_t i;
  size_t num_band, num_band0, num_band_prod, num_temps;
  int openmp_per_triplets, const_adrs_shift;
  double cutoff;
  double *ise, *freqs_at_gp, *g;
  char *g_zero;

  ise = NULL;
  freqs_at_gp = NULL;
  g = NULL;
  g_zero = NULL;

  num_band0 = band_indices->dims[0];
  num_band = svecs_dims[1] * 3;
  num_band_prod = num_band0 * num_band * num_band;
  num_temps = temperatures->dims[0];
  const_adrs_shift = num_band_prod;

  ise = (double*)malloc(sizeof(double) * num_triplets * num_temps * num_band0);
  freqs_at_gp = (double*)malloc(sizeof(double) * num_band0);
  for (i = 0; i < num_band0; i++) {
    freqs_at_gp[i] = frequencies[triplets[0][0] * num_band +
                                 band_indices->data[i]];
  }

  if (num_triplets > num_band) {
    openmp_per_triplets = 1;
  } else {
    openmp_per_triplets = 0;
  }

  /* Smearing truncation radius in frequency.  */
  cutoff = sigma * sigma_cutoff;

#pragma omp parallel for schedule(guided) private(g, g_zero) if (openmp_per_triplets)
  for (i = 0; i < num_triplets; i++) {
    g = (double*)malloc(sizeof(double) * 2 * num_band_prod);
    g_zero = (char*)malloc(sizeof(char) * num_band_prod);
    tpi_get_integration_weight_with_sigma(g,
                                          g_zero,
                                          sigma,
                                          cutoff,
                                          freqs_at_gp,
                                          num_band0,
                                          triplets[i],
                                          const_adrs_shift,
                                          frequencies,
                                          num_band,
                                          2,
                                          0);
    get_collision(ise + i * num_temps * num_band0,
                  num_band0,
                  num_band,
                  num_temps,
                  temperatures->data,
                  g,
                  g_zero,
                  frequencies,
                  eigenvectors,
                  triplets[i],
                  weights[i],
                  grid_address,
                  mesh,
                  fc3,
                  is_compact_fc3,
                  shortest_vectors,
                  svecs_dims,
                  multiplicity,
                  masses,
                  p2s_map,
                  s2p_map,
                  band_indices->data,
                  symmetrize_fc3_q,
                  cutoff_frequency,
                  openmp_per_triplets);

    free(g_zero);
    g_zero = NULL;
    free(g);
    g = NULL;
  }

  finalize_ise(imag_self_energy,
               ise,
               grid_address,
               triplets,
               num_triplets,
               num_temps,
               num_band0,
               is_NU);

  free(freqs_at_gp);
  freqs_at_gp = NULL;
  free(ise);
  ise = NULL;
}

static void get_collision(double *ise,
                          const size_t num_band0,
                          const size_t num_band,
                          const size_t num_temps,
                          const double *temperatures,
                          const double *g,
                          const char *g_zero,
                          const double *frequencies,
                          const lapack_complex_double *eigenvectors,
                          const size_t triplet[3],
                          const int weight,
                          const int *grid_address,
                          const int *mesh,
                          const double *fc3,
                          const int is_compact_fc3,
                          const double *shortest_vectors,
                          const int svecs_dims[3],
                          const int *multiplicity,
                          const double *masses,
                          const int *p2s_map,
                          const int *s2p_map,
                          const int *band_indices,
                          const int symmetrize_fc3_q,
                          const double cutoff_frequency,
                          const int openmp_per_triplets)
{
  size_t i;
  size_t num_band_prod, num_g_pos;
  double *fc3_normal_squared;
  int (*g_pos)[4];

  fc3_normal_squared = NULL;
  g_pos = NULL;

  num_band_prod = num_band0 * num_band * num_band;
  fc3_normal_squared = (double*)malloc(sizeof(double) * num_band_prod);
  g_pos = (int(*)[4])malloc(sizeof(int[4]) * num_band_prod);

  for (i = 0; i < num_band_prod; i++) {
    fc3_normal_squared[i] = 0;
  }

  /* Collect only the band-index combinations whose integration weight is
     non-zero; the interaction is evaluated only at those positions.  */
  num_g_pos = ise_set_g_pos(g_pos,
                            num_band0,
                            num_band,
                            g_zero);

  itr_get_interaction_at_triplet(
    fc3_normal_squared,
    num_band0,
    num_band,
    g_pos,
    num_g_pos,
    frequencies,
    eigenvectors,
    triplet,
    grid_address,
    mesh,
    fc3,
    is_compact_fc3,
    shortest_vectors,
    svecs_dims,
    multiplicity,
    masses,
    p2s_map,
    s2p_map,
    band_indices,
    symmetrize_fc3_q,
    cutoff_frequency,
    0,
    0,
    1 - openmp_per_triplets);

  /* g and g + num_band_prod are the two integration-weight channels.  */
  ise_imag_self_energy_at_triplet(
    ise,
    num_band0,
    num_band,
    fc3_normal_squared,
    frequencies,
    triplet,
    weight,
    g,
    g + num_band_prod,
    g_pos,
    num_g_pos,
    temperatures,
    num_temps,
    cutoff_frequency,
    1 - openmp_per_triplets);

  free(fc3_normal_squared);
  fc3_normal_squared = NULL;
  free(g_pos);
  g_pos = NULL;
}

static void finalize_ise(double *imag_self_energy,
                         const double *ise,
                         const int *grid_address,
                         const size_t (*triplets)[3],
                         const size_t num_triplets,
                         const size_t num_temps,
                         const size_t num_band0,
                         const int is_NU)
{
  size_t i, j, k;
  int is_N;

  if (is_NU) {
    /* Output holds two blocks: [0, nT*nb0) for Normal processes and
       [nT*nb0, 2*nT*nb0) for Umklapp processes.  */
    for (i = 0; i < 2 * num_temps * num_band0; i++) {
      imag_self_energy[i] = 0;
    }
    for (i = 0; i < num_triplets; i++) {
      is_N = tpl_is_N(triplets[i], grid_address);
      for (j = 0; j < num_temps; j++) {
        for (k = 0; k < num_band0; k++) {
          if (is_N) {
            imag_self_energy[j * num_band0 + k] +=
              ise[i * num_temps * num_band0 + j * num_band0 + k];
          } else {
            imag_self_energy[num_temps * num_band0 + j * num_band0 + k] +=
              ise[i * num_temps * num_band0 + j * num_band0 + k];
          }
        }
      }
    }
  } else {
    for (i = 0; i < num_temps * num_band0; i++) {
      imag_self_energy[i] = 0;
    }
    for (i = 0; i < num_triplets; i++) {
      for (j = 0; j < num_temps; j++) {
        for (k = 0; k < num_band0; k++) {
          imag_self_energy[j * num_band0 + k] +=
            ise[i * num_temps * num_band0 + j * num_band0 + k];
        }
      }
    }
  }
}
cones.c
#include "scs.h"
#include "cones.h"
#include "scs_blas.h" /* contains BLAS(X) macros and type info */
#include "linalg.h"
#include "util.h"

/* NOTE(review): RETURN appears to be a project macro (presumably expanding
   to `return`, possibly with debug tracing) — defined in a project header
   not visible here; confirm before relying on it.  */
#define CONE_RATE (2)
#define CONE_TOL (1e-8)
#define CONE_THRESH (1e-6)
#define EXP_CONE_MAX_ITERS (100)
#define POW_CONE_MAX_ITERS (20)

#ifdef USE_LAPACK
/* Forward declarations of the LAPACK/BLAS routines used for the SD cone.  */
void BLAS(syevr)(const char *jobz, const char *range, const char *uplo,
                 blas_int *n, scs_float *a, blas_int *lda, scs_float *vl,
                 scs_float *vu, blas_int *il, blas_int *iu, scs_float *abstol,
                 blas_int *m, scs_float *w, scs_float *z, blas_int *ldz,
                 blas_int *isuppz, scs_float *work, blas_int *lwork,
                 blas_int *iwork, blas_int *liwork, blas_int *info);
void BLAS(syr)(const char *uplo, const blas_int *n, const scs_float *alpha,
               const scs_float *x, const blas_int *incx, scs_float *a,
               const blas_int *lda);
void BLAS(scal)(const blas_int *n, const scs_float *sa, scs_float *sx,
                const blas_int *incx);
scs_float BLAS(nrm2)(const blas_int *n, scs_float *x, const blas_int *incx);
#endif

/* Number of entries in the packed lower-triangular representation of an
   s x s symmetric matrix.  */
static scs_int get_sd_cone_size(scs_int s) { RETURN(s * (s + 1)) / 2; }

/*
 * boundaries will contain array of indices of rows of A corresponding to
 * cone boundaries, boundaries[0] is starting index for cones of size strictly
 * larger than 1
 * RETURNs length of boundaries array, boundaries malloc-ed here so should be
 * freed
 */
scs_int get_cone_boundaries(const ScsCone *k, scs_int **boundaries) {
  scs_int i, count = 0;
  scs_int len = 1 + k->qsize + k->ssize + k->ed + k->ep + k->psize;
  scs_int *b = scs_malloc(sizeof(scs_int) * len);
  /* Zero and linear cones are lumped into the first entry.  */
  b[count] = k->f + k->l;
  count += 1;
  if (k->qsize > 0) {
    memcpy(&b[count], k->q, k->qsize * sizeof(scs_int));
  }
  count += k->qsize;
  for (i = 0; i < k->ssize; ++i) {
    b[count + i] = get_sd_cone_size(k->s[i]);
  }
  count += k->ssize;
  /* Exponential and power cones are all 3-dimensional.  */
  for (i = 0; i < k->ep + k->ed; ++i) {
    b[count + i] = 3;
  }
  count += k->ep + k->ed;
  for (i = 0; i < k->psize; ++i) {
    b[count + i] = 3;
  }
  count += k->psize;
  *boundaries = b;
  RETURN len;
}

/* Total number of rows of A covered by all cones in K.  */
scs_int get_full_cone_dims(const ScsCone *k) {
  scs_int i, c = 0;
  if (k->f) {
    c += k->f;
  }
  if (k->l) {
    c += k->l;
  }
  if (k->qsize && k->q) {
    for (i = 0; i < k->qsize; ++i) {
      c += k->q[i];
    }
  }
  if (k->ssize && k->s) {
    for (i = 0; i < k->ssize; ++i) {
      c += get_sd_cone_size(k->s[i]);
    }
  }
  if (k->ed) {
    c += 3 * k->ed;
  }
  if (k->ep) {
    c += 3 * k->ep;
  }
  if (k->p) {
    c += 3 * k->psize;
  }
  RETURN c;
}

/* Sanity-check the cone specification against the data; returns 0 on
   success, -1 (with a printed message) on any inconsistency.  */
scs_int validate_cones(const ScsData *d, const ScsCone *k) {
  scs_int i;
  if (get_full_cone_dims(k) != d->m) {
    scs_printf("cone dimensions %li not equal to num rows in A = m = %li\n",
               (long)get_full_cone_dims(k), (long)d->m);
    RETURN - 1;
  }
  if (k->f && k->f < 0) {
    scs_printf("free cone error\n");
    RETURN - 1;
  }
  if (k->l && k->l < 0) {
    scs_printf("lp cone error\n");
    RETURN - 1;
  }
  if (k->qsize && k->q) {
    if (k->qsize < 0) {
      scs_printf("soc cone error\n");
      RETURN - 1;
    }
    for (i = 0; i < k->qsize; ++i) {
      if (k->q[i] < 0) {
        scs_printf("soc cone error\n");
        RETURN - 1;
      }
    }
  }
  if (k->ssize && k->s) {
    if (k->ssize < 0) {
      scs_printf("sd cone error\n");
      RETURN - 1;
    }
    for (i = 0; i < k->ssize; ++i) {
      if (k->s[i] < 0) {
        scs_printf("sd cone error\n");
        RETURN - 1;
      }
    }
  }
  if (k->ed && k->ed < 0) {
    scs_printf("ep cone error\n");
    RETURN - 1;
  }
  if (k->ep && k->ep < 0) {
    scs_printf("ed cone error\n");
    RETURN - 1;
  }
  if (k->psize && k->p) {
    if (k->psize < 0) {
      scs_printf("power cone error\n");
      RETURN - 1;
    }
    for (i = 0; i < k->psize; ++i) {
      /* Negative entries denote dual power cones; magnitude must be <= 1.  */
      if (k->p[i] < -1 || k->p[i] > 1) {
        scs_printf("power cone error, values must be in [-1,1]\n");
        RETURN - 1;
      }
    }
  }
  RETURN 0;
}

/* Build a short status string (caller frees) and reset the timer.  */
char *get_cone_summary(const ScsInfo *info, ScsConeWork *c) {
  char *str = scs_malloc(sizeof(char) * 64);
  sprintf(str, "\tCones: avg projection time: %1.2es\n",
          c->total_cone_time / (info->iter + 1) / 1e3);
  c->total_cone_time = 0.0;
  RETURN str;
}

/* Free the cone workspace, including any LAPACK scratch buffers.  */
void finish_cone(ScsConeWork *c) {
  DEBUG_FUNC
#ifdef USE_LAPACK
  if (c->Xs) {
    scs_free(c->Xs);
  }
  if (c->Z) {
    scs_free(c->Z);
  }
  if (c->e) {
    scs_free(c->e);
  }
  if (c->work) {
    scs_free(c->work);
  }
  if (c->iwork) {
    scs_free(c->iwork);
  }
#endif
  if (c) {
    scs_free(c);
  }
  RETURN;
}

/* Build a human-readable description of the cone (caller frees).  */
char *get_cone_header(const ScsCone *k) {
  char *tmp = scs_malloc(sizeof(char) * 512);
  scs_int i, soc_vars, soc_blks, sd_vars, sd_blks;
  sprintf(tmp, "Cones:");
  if (k->f) {
    sprintf(tmp + strlen(tmp), "\tprimal zero / dual free vars: %li\n",
            (long)k->f);
  }
  if (k->l) {
    sprintf(tmp + strlen(tmp), "\tlinear vars: %li\n", (long)k->l);
  }
  soc_vars = 0;
  soc_blks = 0;
  if (k->qsize && k->q) {
    soc_blks = k->qsize;
    for (i = 0; i < k->qsize; i++) {
      soc_vars += k->q[i];
    }
    sprintf(tmp + strlen(tmp), "\tsoc vars: %li, soc blks: %li\n",
            (long)soc_vars, (long)soc_blks);
  }
  sd_vars = 0;
  sd_blks = 0;
  if (k->ssize && k->s) {
    sd_blks = k->ssize;
    for (i = 0; i < k->ssize; i++) {
      sd_vars += get_sd_cone_size(k->s[i]);
    }
    sprintf(tmp + strlen(tmp), "\tsd vars: %li, sd blks: %li\n",
            (long)sd_vars, (long)sd_blks);
  }
  if (k->ep || k->ed) {
    sprintf(tmp + strlen(tmp), "\texp vars: %li, dual exp vars: %li\n",
            (long)3 * k->ep, (long)3 * k->ed);
  }
  if (k->psize && k->p) {
    sprintf(tmp + strlen(tmp), "\tprimal + dual power vars: %li\n",
            (long)3 * k->psize);
  }
  RETURN tmp;
}

/* True when every SD block is at most 2x2, in which case projections are
   done analytically and no LAPACK workspace is required.  */
scs_int is_simple_semi_definite_cone(scs_int *s, scs_int ssize) {
  scs_int i;
  for (i = 0; i < ssize; i++) {
    if (s[i] > 2) {
      RETURN 0; /* false */
    }
  }
  RETURN 1; /* true */
}

/* Newton iteration used by the exponential-cone projection; solves the 1-D
   subproblem for a fixed dual variable rho.  */
scs_float exp_newton_one_d(scs_float rho, scs_float y_hat, scs_float z_hat) {
  scs_float t = MAX(-z_hat, 1e-6);
  scs_float f, fp;
  scs_int i;
  for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) {
    f = t * (t + z_hat) / rho / rho - y_hat / rho + log(t / rho) + 1;
    fp = (2 * t + z_hat) / rho / rho + 1 / t;

    t = t - f / fp;

    if (t <= -z_hat) {
      RETURN 0;
    } else if (t <= 0) {
      RETURN z_hat;
    } else if (ABS(f) < CONE_TOL) {
      break;
    }
  }
  RETURN t + z_hat;
}

/* Recover the candidate projection x for a given dual variable rho.  */
void exp_solve_for_x_with_rho(scs_float *v, scs_float *x, scs_float rho) {
  x[2] = exp_newton_one_d(rho, v[1], v[2]);
  x[1] = (x[2] - v[2]) * x[2] / rho;
  x[0] = v[0] - rho;
}

/* Gradient (wrt rho) of the dual function at the candidate x — the
   bisection in proj_exp_cone drives this to zero.  */
scs_float exp_calc_grad(scs_float *v, scs_float *x, scs_float rho) {
  exp_solve_for_x_with_rho(v, x, rho);
  if (x[1] <= 1e-12) {
    RETURN x[0];
  }
  RETURN x[0] + x[1] * log(x[1] / x[2]);
}

/* Bracket the root: grow *ub (doubling from 0.125) until the gradient
   changes sign; *lb trails behind.  */
void exp_get_rho_ub(scs_float *v, scs_float *x, scs_float *ub, scs_float *lb) {
  *lb = 0;
  *ub = 0.125;
  while (exp_calc_grad(v, x, *ub) > 0) {
    *lb = *ub;
    (*ub) *= 2;
  }
}

/* project onto the exponential cone, v has dimension *exactly* 3 */
static scs_int proj_exp_cone(scs_float *v, scs_int iter) {
  scs_int i;
  scs_float ub, lb, rho, g, x[3];
  scs_float r = v[0], s = v[1], t = v[2];
  scs_float tol = CONE_TOL; /* iter < 0 ? CONE_TOL : MAX(CONE_TOL, 1 /
                               POWF((iter + 1), CONE_RATE)); */

  /* v in cl(Kexp) */
  if ((s * exp(r / s) - t <= CONE_THRESH && s > 0) ||
      (r <= 0 && s == 0 && t >= 0)) {
    RETURN 0;
  }

  /* -v in Kexp^* */
  if ((-r < 0 && r * exp(s / r) + exp(1) * t <= CONE_THRESH) ||
      (-r == 0 && -s >= 0 && -t >= 0)) {
    memset(v, 0, 3 * sizeof(scs_float));
    RETURN 0;
  }

  /* special case with analytical solution */
  if (r < 0 && s < 0) {
    v[1] = 0.0;
    v[2] = MAX(v[2], 0);
    RETURN 0;
  }

  /* iterative procedure to find projection, bisects on dual variable: */
  exp_get_rho_ub(v, x, &ub, &lb); /* get starting upper and lower bounds */
  for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) {
    rho = (ub + lb) / 2; /* halfway between upper and lower bounds */
    g = exp_calc_grad(v, x, rho); /* calculates gradient wrt dual var */
    if (g > 0) {
      lb = rho;
    } else {
      ub = rho;
    }
    if (ub - lb < tol) {
      break;
    }
  }
  /* #if EXTRA_VERBOSE > 0
     scs_printf("exponential cone proj iters %i\n", i);
     #endif */
  v[0] = x[0];
  v[1] = x[1];
  v[2] = x[2];
  RETURN 0;
}

/* Size LAPACK scratch for the largest SD block via a syevr workspace
   query (lwork/liwork = -1).  Returns 0 on success, -1 on failure.  */
scs_int set_up_sd_cone_work_space(ScsConeWork *c, const ScsCone *k) {
#ifdef USE_LAPACK
  scs_int i;
  blas_int n_max = 0;
  scs_float eig_tol = 1e-8;
  blas_int neg_one = -1;
  blas_int m = 0;
  blas_int info;
  scs_float wkopt;
#if EXTRA_VERBOSE > 0
#define _STR_EXPAND(tok) #tok
#define _STR(tok) _STR_EXPAND(tok)
  scs_printf("BLAS(func) = '%s'\n", _STR(BLAS(func)));
#endif
  /* eigenvector decomp workspace */
  for (i = 0; i < k->ssize; ++i) {
    if (k->s[i] > n_max) {
      n_max = (blas_int)k->s[i];
    }
  }
  c->Xs = scs_calloc(n_max * n_max, sizeof(scs_float));
  c->Z = scs_calloc(n_max * n_max, sizeof(scs_float));
  c->e = scs_calloc(n_max, sizeof(scs_float));

  /* Workspace query: lwork = -1 makes syevr report optimal sizes in
     wkopt / c->liwork without computing anything.  */
  BLAS(syevr)
  ("Vectors", "All", "Lower", &n_max, c->Xs, &n_max, SCS_NULL, SCS_NULL,
   SCS_NULL, SCS_NULL, &eig_tol, &m, c->e, c->Z, &n_max, SCS_NULL, &wkopt,
   &neg_one, &(c->liwork), &neg_one, &info);

  if (info != 0) {
    scs_printf("FATAL: syevr failure, info = %li\n", (long)info);
    RETURN - 1;
  }
  c->lwork = (blas_int)(wkopt + 0.01); /* 0.01 for int casting safety */
  c->work = scs_malloc(c->lwork * sizeof(scs_float));
  c->iwork = scs_malloc(c->liwork * sizeof(blas_int));

  if (!c->Xs || !c->Z || !c->e || !c->work || !c->iwork) {
    RETURN - 1;
  }
  RETURN 0;
#else
  scs_printf("FATAL: Cannot solve SDPs with > 2x2 matrices without linked "
             "blas+lapack libraries\n");
  scs_printf("Install blas+lapack and re-compile SCS with blas+lapack libray "
             "locations\n");
  RETURN - 1;
#endif
}

/* Allocate the cone workspace; LAPACK scratch is set up only when some SD
   block is larger than 2x2.  Returns SCS_NULL on failure.  */
ScsConeWork *init_cone(const ScsCone *k) {
  ScsConeWork *c = scs_calloc(1, sizeof(ScsConeWork));
#if EXTRA_VERBOSE > 0
  scs_printf("init_cone\n");
#endif
  c->total_cone_time = 0.0;
  if (k->ssize && k->s) {
    if (!is_simple_semi_definite_cone(k->s, k->ssize) &&
        set_up_sd_cone_work_space(c, k) < 0) {
      finish_cone(c);
      RETURN SCS_NULL;
    }
  }
#if EXTRA_VERBOSE > 0
  scs_printf("init_cone complete\n");
#ifdef MATLAB_MEX_FILE
  mexEvalString("drawnow;");
#endif
#endif
  RETURN c;
}

/* Analytic PSD projection for a 2x2 symmetric matrix stored packed as
   [a, b*sqrt(2), d] (scaled off-diagonal).  */
scs_int project_2x2_sdc(scs_float *X) {
  scs_float a, b, d, l1, l2, x1, x2, rad;
  scs_float sqrt2 = SQRTF(2.0);
  a = X[0];
  b = X[1] / sqrt2;
  d = X[2];

  if (ABS(b) < 1e-6) { /* diagonal matrix */
    X[0] = MAX(a, 0);
    X[1] = 0;
    X[2] = MAX(d, 0);
    RETURN 0;
  }

  rad = SQRTF((a - d) * (a - d) + 4 * b * b);
  /* l1 >= l2 always, since rad >= 0 */
  l1 = 0.5 * (a + d + rad);
  l2 = 0.5 * (a + d - rad);

#if EXTRA_VERBOSE > 0
  scs_printf("2x2 SD: a = %4f, b = %4f, (X[1] = %4f, X[2] = %4f), d = %4f, "
             "rad = %4f, l1 = %4f, l2 = %4f\n",
             a, b, X[1], X[2], d, rad, l1, l2);
#endif

  if (l2 >= 0) { /* both eigs positive already */
    RETURN 0;
  }
  if (l1 <= 0) { /* both eigs negative, set to 0 */
    X[0] = 0;
    X[1] = 0;
    X[2] = 0;
    RETURN 0;
  }

  /* l1 pos, l2 neg: keep only the positive eigenpair.  */
  x1 = 1 / SQRTF(1 + (l1 - a) * (l1 - a) / b / b);
  x2 = x1 * (l1 - a) / b;

  X[0] = l1 * x1 * x1;
  X[1] = (l1 * x1 * x2) * sqrt2;
  X[2] = l1 * x2 * x2;
  RETURN 0;
}

/* size of X is get_sd_cone_size(n) */
static scs_int proj_semi_definite_cone(scs_float *X, const scs_int n,
                                       ScsConeWork *c, const scs_int iter) {
/* project onto the positive semi-definite cone */
#ifdef USE_LAPACK
  scs_int i;
  blas_int one = 1;
  blas_int m = 0;
  blas_int nb = (blas_int)n;
  blas_int nb_plus_one = (blas_int)(n + 1);
  blas_int cone_sz = (blas_int)(get_sd_cone_size(n));
  scs_float sqrt2 = SQRTF(2.0);
  scs_float sqrt2Inv = 1.0 / sqrt2;
  scs_float *Xs = c->Xs;
  scs_float *Z = c->Z;
  scs_float *e = c->e;
  scs_float *work = c->work;
  blas_int *iwork = c->iwork;
  blas_int lwork = c->lwork;
  blas_int liwork = c->liwork;
  scs_float eig_tol = CONE_TOL; /* iter < 0 ? CONE_TOL : MAX(CONE_TOL, 1 /
                                   POWF(iter + 1, CONE_RATE)); */
  scs_float zero = 0.0;
  blas_int info;
  scs_float vupper;
#endif
  /* Trivial sizes handled analytically, no LAPACK needed.  */
  if (n == 0) {
    RETURN 0;
  }
  if (n == 1) {
    if (X[0] < 0.0) {
      X[0] = 0.0;
    }
    RETURN 0;
  }
  if (n == 2) {
    RETURN project_2x2_sdc(X);
  }
#ifdef USE_LAPACK
  /* expand lower triangular matrix to full matrix */
  for (i = 0; i < n; ++i) {
    memcpy(&(Xs[i * (n + 1)]), &(X[i * n - ((i - 1) * i) / 2]),
           (n - i) * sizeof(scs_float));
  }
  /* rescale so projection works, and matrix norm preserved
     see http://www.seas.ucla.edu/~vandenbe/publications/mlbook.pdf pg 3 */
  /* scale diags by sqrt(2) */
  BLAS(scal)(&nb, &sqrt2, Xs, &nb_plus_one); /* not n_squared */
  /* max-eig upper bounded by frobenius norm */
  vupper = 1.1 * sqrt2 *
           BLAS(nrm2)(&cone_sz, X,
                      &one); /* mult by factor to make sure is upper bound */
  vupper = MAX(vupper, 0.01);
#if EXTRA_VERBOSE > 0
  print_array(Xs, n * n, "Xs");
  print_array(X, get_sd_cone_size(n), "X");
#endif
  /* Solve eigenproblem, reuse workspaces.  Only eigenvalues in (0, vupper]
     are requested ("VInterval") — the negative part is dropped anyway.  */
  BLAS(syevr)
  ("Vectors", "VInterval", "Lower", &nb, Xs, &nb, &zero, &vupper, SCS_NULL,
   SCS_NULL, &eig_tol, &m, e, Z, &nb, SCS_NULL, work, &lwork, iwork, &liwork,
   &info);
#if EXTRA_VERBOSE > 0
  if (info != 0) {
    scs_printf("WARN: LAPACK syevr error, info = %i\n", info);
  }
  scs_printf("syevr input parameter dump:\n");
  scs_printf("nb = %li\n", (long)nb);
  scs_printf("lwork = %li\n", (long)lwork);
  scs_printf("liwork = %li\n", (long)liwork);
  scs_printf("vupper = %f\n", vupper);
  scs_printf("eig_tol = %e\n", eig_tol);
  print_array(e, m, "e");
  print_array(Z, m * n, "Z");
#endif
  if (info < 0) {
    RETURN - 1;
  }
  /* Rebuild X from the m positive eigenpairs via rank-1 updates.  */
  memset(Xs, 0, n * n * sizeof(scs_float));
  for (i = 0; i < m; ++i) {
    scs_float a = e[i];
    BLAS(syr)("Lower", &nb, &a, &(Z[i * n]), &one, Xs, &nb);
  }
  /* scale diags by 1/sqrt(2) */
  BLAS(scal)(&nb, &sqrt2Inv, Xs, &nb_plus_one); /* not n_squared */
  /* extract just lower triangular matrix */
  for (i = 0; i < n; ++i) {
    memcpy(&(X[i * n - ((i - 1) * i) / 2]), &(Xs[i * (n + 1)]),
           (n - i) * sizeof(scs_float));
  }
#if EXTRA_VERBOSE > 0
  print_array(Xs, n * n, "Xs");
  print_array(X, get_sd_cone_size(n), "X");
#endif
#else
  scs_printf("FAILURE: solving SDP with > 2x2 matrices, but no blas/lapack "
             "libraries were linked!\n");
  scs_printf("SCS will RETURN nonsense!\n");
  scale_array(X, NAN, n);
  RETURN - 1;
#endif
  RETURN 0;
}

/* Helpers for the power-cone projection (Newton on the dual variable r).  */
scs_float pow_calc_x(scs_float r, scs_float xh, scs_float rh, scs_float a) {
  scs_float x = 0.5 * (xh + SQRTF(xh * xh + 4 * a * (rh - r) * r));
  RETURN MAX(x, 1e-12);
}

scs_float pow_calcdxdr(scs_float x, scs_float xh, scs_float rh, scs_float r,
                       scs_float a) {
  RETURN a *(rh - 2 * r) / (2 * x - xh);
}

scs_float pow_calc_f(scs_float x, scs_float y, scs_float r, scs_float a) {
  RETURN POWF(x, a) * POWF(y, (1 - a)) - r;
}

scs_float pow_calc_fp(scs_float x, scs_float y, scs_float dxdr,
                      scs_float dydr, scs_float a) {
  RETURN POWF(x, a) * POWF(y, (1 - a)) * (a * dxdr / x + (1 - a) * dydr / y) -
      1;
}

/* Project v (dimension 3) onto the power cone K_a.  */
void proj_power_cone(scs_float *v, scs_float a) {
  scs_float xh = v[0], yh = v[1], rh = ABS(v[2]);
  scs_float x, y, r;
  scs_int i;

  /* v in K_a */
  if (xh >= 0 && yh >= 0 &&
      CONE_THRESH + POWF(xh, a) * POWF(yh, (1 - a)) >= rh) {
    RETURN;
  }

  /* -v in K_a^*
*/ if (xh <= 0 && yh <= 0 && CONE_THRESH + POWF(-xh, a) * POWF(-yh, 1 - a) >= rh * POWF(a, a) * POWF(1 - a, 1 - a)) { v[0] = v[1] = v[2] = 0; RETURN; } r = rh / 2; for (i = 0; i < POW_CONE_MAX_ITERS; ++i) { scs_float f, fp, dxdr, dydr; x = pow_calc_x(r, xh, rh, a); y = pow_calc_x(r, yh, rh, 1 - a); f = pow_calc_f(x, y, r, a); if (ABS(f) < CONE_TOL) { break; } dxdr = pow_calcdxdr(x, xh, rh, r, a); dydr = pow_calcdxdr(y, yh, rh, r, (1 - a)); fp = pow_calc_fp(x, y, dxdr, dydr, a); r = MAX(r - f / fp, 0); r = MIN(r, rh); } v[0] = x; v[1] = y; v[2] = (v[2] < 0) ? -(r) : (r); } /* outward facing cone projection routine, iter is outer algorithm iteration, if iter < 0 then iter is ignored warm_start contains guess of projection (can be set to SCS_NULL) */ scs_int proj_dual_cone(scs_float *x, const ScsCone *k, ScsConeWork *c, const scs_float *warm_start, scs_int iter) { DEBUG_FUNC scs_int i; scs_int count = (k->f ? k->f : 0); timer cone_timer; #if EXTRA_VERBOSE > 0 timer proj_timer; scs_tic(&proj_timer); #endif scs_tic(&cone_timer); if (k->l) { /* project onto positive orthant */ for (i = count; i < count + k->l; ++i) { if (x[i] < 0.0) { x[i] = 0.0; } /* x[i] = (x[i] < 0.0) ? 
0.0 : x[i]; */ } count += k->l; #if EXTRA_VERBOSE > 0 scs_printf("pos orthant proj time: %1.2es\n", tocq(&proj_timer) / 1e3); scs_tic(&proj_timer); #endif } if (k->qsize && k->q) { /* project onto SOC */ for (i = 0; i < k->qsize; ++i) { if (k->q[i] == 0) { continue; } if (k->q[i] == 1) { if (x[count] < 0.0) { x[count] = 0.0; } } else { scs_float v1 = x[count]; scs_float s = calc_norm(&(x[count + 1]), k->q[i] - 1); scs_float alpha = (s + v1) / 2.0; if (s <= v1) { /* do nothing */ } else if (s <= -v1) { memset(&(x[count]), 0, k->q[i] * sizeof(scs_float)); } else { x[count] = alpha; scale_array(&(x[count + 1]), alpha / s, k->q[i] - 1); } } count += k->q[i]; } #if EXTRA_VERBOSE > 0 scs_printf("SOC proj time: %1.2es\n", tocq(&proj_timer) / 1e3); scs_tic(&proj_timer); #endif } if (k->ssize && k->s) { /* project onto PSD cone */ for (i = 0; i < k->ssize; ++i) { #if EXTRA_VERBOSE > 0 scs_printf("SD proj size %li\n", (long)k->s[i]); #endif if (k->s[i] == 0) { continue; } if (proj_semi_definite_cone(&(x[count]), k->s[i], c, iter) < 0) { RETURN - 1; } count += get_sd_cone_size(k->s[i]); } #if EXTRA_VERBOSE > 0 scs_printf("SD proj time: %1.2es\n", tocq(&proj_timer) / 1e3); scs_tic(&proj_timer); #endif } if (k->ep) { scs_float r, s, t; scs_int idx; /* * exponential cone is not self dual, if s \in K * then y \in K^* and so if K is the primal cone * here we project onto K^*, via Moreau * \Pi_C^*(y) = y + \Pi_C(-y) */ scale_array(&(x[count]), -1, 3 * k->ep); /* x = -x; */ #ifdef _OPENMP #pragma omp parallel for private(r, s, t, idx) #endif for (i = 0; i < k->ep; ++i) { idx = count + 3 * i; r = x[idx]; s = x[idx + 1]; t = x[idx + 2]; proj_exp_cone(&(x[idx]), iter); x[idx] -= r; x[idx + 1] -= s; x[idx + 2] -= t; } count += 3 * k->ep; #if EXTRA_VERBOSE > 0 scs_printf("EP proj time: %1.2es\n", tocq(&proj_timer) / 1e3); scs_tic(&proj_timer); #endif } if (k->ed) { /* exponential cone: */ #ifdef _OPENMP #pragma omp parallel for #endif for (i = 0; i < k->ed; ++i) { proj_exp_cone(&(x[count 
+ 3 * i]), iter); } count += 3 * k->ed; #if EXTRA_VERBOSE > 0 scs_printf("ED proj time: %1.2es\n", tocq(&proj_timer) / 1e3); scs_tic(&proj_timer); #endif } if (k->psize && k->p) { scs_float v[3]; scs_int idx; /* don't use openmp for power cone ifdef _OPENMP pragma omp parallel for private(v, idx) endif */ for (i = 0; i < k->psize; ++i) { idx = count + 3 * i; if (k->p[i] <= 0) { /* dual power cone */ proj_power_cone(&(x[idx]), -k->p[i]); } else { /* primal power cone, using Moreau */ v[0] = -x[idx]; v[1] = -x[idx + 1]; v[2] = -x[idx + 2]; proj_power_cone(v, k->p[i]); x[idx] += v[0]; x[idx + 1] += v[1]; x[idx + 2] += v[2]; } } count += 3 * k->psize; #if EXTRA_VERBOSE > 0 scs_printf("Power cone proj time: %1.2es\n", tocq(&proj_timer) / 1e3); scs_tic(&proj_timer); #endif } /* project onto OTHER cones */ if (c) { c->total_cone_time += tocq(&cone_timer); } RETURN 0; }
/* ==================== file: PoW.c ==================== */
// Copyright (c) 2016-2018 The Ulord Core Foundation #include "PoW.h" #include <stdio.h> #include <stdint.h> #include <string.h> #include <stdlib.h> #include <assert.h> #ifndef MAC_OSX #include <omp.h> #endif #include "my_time.h" #include "common.h" #include "my_rand48_r.h" #include "oneWayFunction.h" // #define SSE_VERSION /* * Step 1: Initialize working memory. */ void initWorkMemory(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, const uint32_t K) { uint32_t i, j; uint8_t a[OUTPUT_LEN], b[OUTPUT_LEN]; funcInfor[0].func(input, inputLen, a); uint64_t randSeed[4] = {0, 0, 0, 0}; #ifndef SSE_VERSION struct my_rand48_data randBuffer[4]; #else struct vrand48_data randBuffer[2]; #endif const uint32_t iterNum = WORK_MEMORY_SIZE >> 5; for (i = 0; i < iterNum; ++i) { if (i % K) { #ifndef SSE_VERSION uint64_t num = 0; for (j = 0; j < 4; ++j) { my_rand64_r(&randBuffer[j], &num); memcpy(b + (j << 3), (uint8_t *)&num, 8*sizeof(uint8_t)); } #else vrand64(b, randBuffer); #endif uint8_t shift_num; uint8_t result[OUTPUT_LEN]; reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8); rrs(b, OUTPUT_LEN, result, shift_num); memcpy(Maddr + (i << 5), result, OUTPUT_LEN*sizeof(uint8_t)); for (j = 0; j < 32; ++j) { a[j] ^= result[j]; } } else { uint8_t t = 0, shift_num = 0; reduce_bit(a, 32, (uint8_t *)&t, 8); t = (t & 0x0f) ^ (t >> 4); reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8); uint8_t a_rrs[INPUT_LEN]; rrs(a, OUTPUT_LEN, a_rrs, shift_num); funcInfor[t].func(a_rrs, 32, a); reduce_bit(a, 8, (uint8_t *)&randSeed[0], 48); reduce_bit(a + 8, 8, (uint8_t *)&randSeed[1], 48); reduce_bit(a + 16, 8, (uint8_t *)&randSeed[2], 48); reduce_bit(a + 24, 8, (uint8_t *)&randSeed[3], 48); #ifndef SSE_VERSION my_seed48_r(randSeed[0], &randBuffer[0]); my_seed48_r(randSeed[1], &randBuffer[1]); my_seed48_r(randSeed[2], &randBuffer[2]); my_seed48_r(randSeed[3], &randBuffer[3]); #else vseed48(randSeed , &randBuffer[0]); vseed48(randSeed + 2, &randBuffer[1]); #endif memcpy(Maddr + (i << 5), a, 
32*sizeof(uint8_t)); } } } /* * Step 2: Modify the working memory contents. */ void modifyWorkMemory(uint8_t *Maddr, const uint32_t L, const uint32_t C, uint8_t *result) { uint32_t i, j; uint8_t a[OUTPUT_LEN], b[64]; funcInfor[0].func(Maddr + WORK_MEMORY_SIZE - 32, 32, a); memcpy(result, a, OUTPUT_LEN*sizeof(uint8_t)); uint64_t r = 0; reduce_bit(a, 32, (uint8_t *)&r, 64); const uint32_t iterNum = L << 6; for (i = 0; i < C; ++i) { uint64_t randSeed = 0; reduce_bit(a, 32, (uint8_t *)&randSeed, 48); struct my_rand48_data randBuffer; my_seed48_r(randSeed, &randBuffer); uint8_t t1, t2, s; uint64_t randNum = 0, base = 0; for (j = 0; j < iterNum; ++j) { my_rand48_r(&randBuffer, &randNum); base = randNum + r; uint64_t offset = 0; reduce_bit((uint8_t *)&r, 8, (uint8_t *)&offset, 8); offset = (offset << 8) + 1; uint64_t addr1 = (base + WORK_MEMORY_SIZE - offset) % WORK_MEMORY_SIZE; uint64_t addr2 = (base + offset) % WORK_MEMORY_SIZE; t1 = Maddr[addr1]; t2 = Maddr[addr2]; s = a[j & 0x1f]; Maddr[addr1] = t2 ^ s; Maddr[addr2] = t1 ^ s; b[j & 0x3f] = t1 ^ t2; r = r + s + t1 + t2; } uint8_t t = 0; reduce_bit((uint8_t *)&r, 8, (uint8_t *)&t, 8); t = (t & 0x0f) ^ (t >> 4); reduce_bit(b, 64, a, 256); uint8_t shift_num = 0; uint64_t ir = r + i; reduce_bit((uint8_t *)&ir, 8, (uint8_t *)&shift_num, 8); uint8_t a_rrs[INPUT_LEN]; rrs(a, OUTPUT_LEN, a_rrs, shift_num); funcInfor[t].func(a_rrs, 32, a); for (j = 0; j < OUTPUT_LEN; ++j) { result[j] ^= a[j]; } } } /* * Step 3: Calculate the final result. 
*/ void calculateFinalResult(uint8_t *Maddr, uint8_t *c, const uint32_t D, uint8_t *result) { uint32_t i = 0, j = 0, k = 0; memcpy(result, c, OUTPUT_LEN*sizeof(uint8_t)); const uint32_t num = (WORK_MEMORY_SIZE >> 5) - 1; uint32_t it = 0; uint8_t result_rrs[OUTPUT_LEN]; while(1) { uint8_t t = 0, shift_num = 0; uint32_t d = 0; reduce_bit(result, 32, (uint8_t *)&t, 8); t = (t & 0x0f) ^ (t >> 4); reduce_bit(result, 32, (uint8_t *)&d, D); ++d; for (j = 0; j < d; ++j) { uint32_t index = i << 5; for (k = 0; k < 32; ++k) { result[k] ^= Maddr[index + k]; } ++i; if (i == num) { it = i + t; reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8); rrs(result, OUTPUT_LEN, result_rrs, shift_num); funcInfor[0].func(result_rrs, 32, result); return; } } it = t + i; reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8); rrs(result, OUTPUT_LEN, result_rrs, shift_num); funcInfor[t].func(result_rrs, 32, result); } } /* * Correctness & Performance test for Proof of work */ void testPowFunction(uint8_t *mess, uint32_t messLen, const int64_t iterNum) { int64_t j; uint32_t inputLen = messLen; uint8_t input[INPUT_LEN], output[OUTPUT_LEN]; memset(input, 0, INPUT_LEN*sizeof(uint8_t)); memcpy(input, mess, messLen*sizeof(char)); // Init all one-way function initOneWayFunction(); uint8_t *Maddr = (uint8_t *)malloc(64 * WORK_MEMORY_SIZE*sizeof(uint8_t)); assert(NULL != Maddr); memset(Maddr, 0, 64 * WORK_MEMORY_SIZE*sizeof(uint8_t)); printf("****************************** Correctness test (PoW function) ******************************\n"); printf("Test message: %s\n", mess); powFunction(input, inputLen, Maddr, output); view_data_u8("PoW", output, OUTPUT_LEN); printf("*********************************************************************************************\n"); /* printf("*************************************************** Performance test (PoW function) ***************************************************\n"); uint8_t *result = (uint8_t *)malloc(iterNum * OUTPUT_LEN * sizeof(uint8_t)); 
assert(NULL != result); memset(result, 0, iterNum * OUTPUT_LEN * sizeof(uint8_t)); uint32_t threadNumArr[] = {1, 4, 8, 12, 16, 20, 24, 32, 48, 64}; uint32_t threadNumTypes = sizeof(threadNumArr) / sizeof(uint32_t); printf(" %-18s", "Algorithm"); for (uint32_t ix = 0; ix < threadNumTypes; ++ix) printf("%12d", threadNumArr[ix]); printf("\n"); printf("00 %-18s\t", "PoW"); for (uint32_t ix = 0; ix < threadNumTypes; ++ix) { omp_set_num_threads(threadNumArr[ix]); double startTime = get_wall_time(); if (threadNumArr[ix] == 1) { for (j = 0; j < iterNum; ++j) { powFunction(input, inputLen, Maddr, result + j * OUTPUT_LEN); } } else { #pragma omp parallel for firstprivate(input), private(j) shared(result) for (j = 0; j < iterNum; ++j) { powFunction(input, inputLen, Maddr + omp_get_thread_num() * WORK_MEMORY_SIZE, result + j * OUTPUT_LEN); } } double endTime = get_wall_time(); double costTime = endTime - startTime; printf("%5.0f bps ", iterNum / costTime); fflush(stdout); // Check result for (j = 0; j < iterNum; j += 1) { if (memcmp(output, result + j * OUTPUT_LEN, OUTPUT_LEN)) { printf("Thread num: %d, j: %ld\n", threadNumArr[ix], j); view_data_u8("output", output, OUTPUT_LEN); view_data_u8("result", result + j * OUTPUT_LEN, OUTPUT_LEN); abort(); } } } printf("\n"); printf("***************************************************************************************************************************************\n"); if (NULL != result) { free(result); result = NULL; } */ if (NULL != Maddr) { free(Maddr); Maddr = NULL; } } #define OUTPUT_BUFFER_SIZE (32 * 1024UL * 1024UL) #define MAX_TEST_INPUT_LEN 140 #define MAX_OUT_FILE_NAME_LEN 25 const char testInputCase[][MAX_TEST_INPUT_LEN] = { "", "HelloWorld", "0123456789" }; void powNistTest(const char *outFileName) { const uint64_t iterNum = 1024UL * 1024UL; // const uint64_t iterNum = 1024UL; uint8_t *outputBuffer = (uint8_t *)malloc(OUTPUT_BUFFER_SIZE * sizeof(uint8_t)); assert(NULL != outputBuffer); memset(outputBuffer, 0, 
OUTPUT_BUFFER_SIZE * sizeof(uint8_t)); uint8_t *Maddr = (uint8_t *)malloc(WORK_MEMORY_SIZE*sizeof(uint8_t)); assert(NULL != Maddr); memset(Maddr, 0, WORK_MEMORY_SIZE*sizeof(uint8_t)); initOneWayFunction(); uint32_t testInputCaseNum = sizeof(testInputCase) / sizeof(const char [MAX_TEST_INPUT_LEN]); for (uint32_t testCaseIx = 0; testCaseIx < testInputCaseNum; ++testCaseIx) { char curOutFileName[MAX_OUT_FILE_NAME_LEN] = ""; sprintf(curOutFileName, "%s-%u.txt", outFileName, testCaseIx); FILE *fp = NULL; if (NULL != (fp = fopen(curOutFileName, "wb"))) { const uint32_t testInputCaseLen = strlen((char *)testInputCase[testCaseIx]); uint8_t input[MAX_TEST_INPUT_LEN]; memset(input, 0, MAX_TEST_INPUT_LEN*sizeof(uint8_t)); memcpy(input, testInputCase[testCaseIx], testInputCaseLen*sizeof(uint8_t)); double startTime = get_wall_time(); powFunction(input, testInputCaseLen, Maddr, outputBuffer); for (uint64_t i = 1, j = 0; i < iterNum; ++i) { memcpy(input, outputBuffer + j, OUTPUT_LEN * sizeof(uint32_t)); j += OUTPUT_LEN; powFunction(input, OUTPUT_LEN, Maddr, outputBuffer + j); /* if (j == OUTPUT_BUFFER_SIZE) { fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp); j = 0; } */ } double endTime = get_wall_time(); double costTime = endTime - startTime; fprintf(stdout, "TestCaseIx: %d, Input: %s, IterNum: %llu, Time: %4.2f, Performance: %5.2f bps\n", testCaseIx, \ testInputCase[testCaseIx], iterNum, costTime, ((double)(iterNum * OUTPUT_LEN)) / costTime); fflush(stdout); fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp); fclose(fp); } else { fprintf(stderr, "Error: Open %s failed!\n", curOutFileName); abort(); } } if (NULL != outputBuffer) { free(outputBuffer); outputBuffer = NULL; } if (NULL != Maddr) { free(Maddr); Maddr = NULL; } } void helloHash(const uint8_t *mess, uint32_t messLen, uint8_t output[OUTPUT_LEN]) { if(messLen != INPUT_LEN) { printf("helloHash:Invalid message length %d\n", messLen); return; } int64_t j; uint32_t 
inputLen =messLen; uint8_t input[INPUT_LEN]; memset(input, 0, INPUT_LEN*sizeof(uint8_t)); memcpy(input, mess, inputLen*sizeof(char)); //operation: input uint8_t *Maddr = (uint8_t *)malloc(WORK_MEMORY_SIZE*sizeof(uint8_t)); //1024*1024*1 assert(NULL != Maddr); memset(Maddr, 0, WORK_MEMORY_SIZE*sizeof(uint8_t)); powFunction(input, inputLen,Maddr, output); //view_data_u8("PoW", output, OUTPUT_LEN); //output if (NULL != Maddr) { free(Maddr); Maddr = NULL; } } int my_rand64_r (struct my_rand48_data *buffer, uint64_t *result) { uint64_t X = buffer->__x; X = (X * buffer->__a + buffer->__c) & 0xffffffffffffULL; buffer->__x = X; buffer->__x = (X * buffer->__a + buffer->__c) & 0xffffffffffffULL; X ^= buffer->__x << 16; *result = X; return 0; } int my_seed48_r (uint64_t seedval, struct my_rand48_data *buffer) { buffer->__x = seedval & 0xffffffffffffULL; buffer->__a = 0x5deece66dULL; buffer->__c = 0xb; return 0; } void powFunction(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, uint8_t *output) { uint8_t c[OUTPUT_LEN]; // Step 1: Initialize working memory. initWorkMemory(input, inputLen, Maddr, 128); // view_data_u8("Maddr", Maddr, OUTPUT_LEN); // Step 2: Modify the working memory contents. modifyWorkMemory(Maddr, 4, WORK_MEMORY_SIZE >> 11, c); // view_data_u8("c", c, OUTPUT_LEN); // Step 3: Calculate the final result. calculateFinalResult(Maddr, c, 8, output); // view_data_u8("output", output, OUTPUT_LEN); } int my_rand48_r (struct my_rand48_data *buffer, uint64_t *result) { *result = (buffer->__x * buffer->__a + buffer->__c) & 0xffffffffffffULL; buffer->__x = *result; return 0; }
/* ==================== file: solver.c ==================== */
#include"SimpleMOC_header.h" /* Efficient version of attenuate fluxes which determines the change in angular * flux along a particular track across a fine axial region and tallies the * contribution to the scalar flux in the fine axial region. This function * assumes a quadratic source, which is calculated on the fly using neighboring * source values. * * This version decomposes the work into many for loops for efficient SIMD * instructions and to reduce register pressure. For a more descriptive * (but less effiient) version of the code in terms of the underlying physics, * see alt_attenuate_fluxes which solves the problem in a more naive, * straightforward manner. */ void attenuate_fluxes( Track * track, bool forward, Source * QSR, Input * I_in, Params * params_in, float ds, float mu, float az_weight, AttenuateVars * A ) { Input I = *I_in; Params params = *params_in; // unload attenuate vars float * restrict q0 = A->q0; float * restrict q1 = A->q1; float * restrict q2 = A->q2; float * restrict sigT = A->sigT; float * restrict tau = A->tau; float * restrict sigT2 = A->sigT2; float * restrict expVal = A->expVal; float * restrict reuse = A->reuse; float * restrict flux_integral = A->flux_integral; float * restrict tally = A->tally; float * restrict t1 = A->t1; float * restrict t2 = A->t2; float * restrict t3 = A->t3; float * restrict t4 = A->t4; // compute fine axial interval spacing float dz = I.height / (I.fai * I.decomp_assemblies_ax * I.cai); // compute z height in cell float zin = track->z_height - dz * ( (int)( track->z_height / dz ) + 0.5f ); // compute fine axial region ID int fine_id = (int) ( track->z_height / dz ) % I.fai; // compute weight (azimuthal * polar) // NOTE: real app would also have volume weight component float weight = track->p_weight * az_weight; float mu2 = mu * mu; // load fine source region flux vector float * FSR_flux = QSR -> fine_flux[fine_id]; if( fine_id == 0 ) { // adjust z height to account for edge zin -= dz; // cycle over energy 
groups #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { // load neighboring sources float y1 = QSR->fine_source[fine_id][g]; float y2 = QSR->fine_source[fine_id+1][g]; float y3 = QSR->fine_source[fine_id+2][g]; // do quadratic "fitting" float c0 = y2; float c1 = (y1 - y3) / (2.f*dz); float c2 = (y1 - 2.f*y2 + y3) / (2.f*dz*dz); // calculate q0, q1, q2 q0[g] = c0 + c1*zin + c2*zin*zin; q1[g] = c1 + 2.f*c2*zin; q2[g] = c2; } } else if ( fine_id == I.fai - 1 ) { // adjust z height to account for edge zin += dz; // cycle over energy groups #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { // load neighboring sources float y1 = QSR->fine_source[fine_id-2][g]; float y2 = QSR->fine_source[fine_id-1][g]; float y3 = QSR->fine_source[fine_id][g]; // do quadratic "fitting" float c0 = y2; float c1 = (y1 - y3) / (2.f*dz); float c2 = (y1 - 2.f*y2 + y3) / (2.f*dz*dz); // calculate q0, q1, q2 q0[g] = c0 + c1*zin + c2*zin*zin; q1[g] = c1 + 2.f*c2*zin; q2[g] = c2; } } else { // cycle over energy groups #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { // load neighboring sources float y1 = QSR->fine_source[fine_id-1][g]; float y2 = QSR->fine_source[fine_id][g]; float y3 = QSR->fine_source[fine_id+1][g]; // do quadratic "fitting" float c0 = y2; float c1 = (y1 - y3) / (2.f*dz); float c2 = (y1 - 2.f*y2 + y3) / (2.f*dz*dz); // calculate q0, q1, q2 q0[g] = c0 + c1*zin + c2*zin*zin; q1[g] = c1 + 2.f*c2*zin; q2[g] = c2; } } // cycle over energy groups #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { // load total cross section sigT[g] = QSR->sigT[g]; // calculate common values for efficiency tau[g] = sigT[g] * ds; sigT2[g] = sigT[g] * sigT[g]; } // cycle over energy groups #ifdef INTEL #pragma simd #elif defined IBM #pragma 
simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) expVal[g] = interpolateTable( params.expTable, tau[g] ); // Flux Integral // Re-used Term #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { reuse[g] = tau[g] * (tau[g] - 2.f) + 2.f * expVal[g] / (sigT[g] * sigT2[g]); } float * psi; if(forward) psi = track->f_psi; else psi = track->b_psi; //#pragma vector nontemporal #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { // add contribution to new source flux flux_integral[g] = (q0[g] * tau[g] + (sigT[g] * psi[g] - q0[g]) * expVal[g]) / sigT2[g] + q1[g] * mu * reuse[g] + q2[g] * mu2 * (tau[g] * (tau[g] * (tau[g] - 3.f) + 6.f) - 6.f * expVal[g]) / (3.f * sigT2[g] * sigT2[g]); } #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { // Prepare tally tally[g] = weight * flux_integral[g]; } #ifdef OPENMP omp_set_lock(QSR->locks + fine_id); #endif #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { FSR_flux[g] += tally[g]; } #ifdef OPENMP omp_unset_lock(QSR->locks + fine_id); #endif // Term 1 #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { t1[g] = q0[g] * expVal[g] / sigT[g]; } // Term 2 #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { t2[g] = q1[g] * mu * (tau[g] - expVal[g]) / sigT2[g]; } // Term 3 #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { t3[g] = q2[g] * mu2 * reuse[g]; } // Term 4 #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { t4[g] = psi[g] * (1.f - expVal[g]); } // Total psi #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( 
int g = 0; g < I.n_egroups; g++) { psi[g] = t1[g] + t2[g] + t3[g] + t4[g]; } } // single direction transport sweep void transport_sweep( Params * params, Input * I ) { if(I->mype==0) printf("Starting transport sweep ...\n"); // calculate the height of a node's domain and of each FSR double node_delta_z = I->height / I->decomp_assemblies_ax; double fine_delta_z = node_delta_z / (I->cai * I->fai); /* loop over tracks (implicitly azimuthal angles, tracks in azimuthal * angles, polar angles, and z stacked rays) */ //print_Input_struct( I ); long segments_processed = 0; #pragma omp parallel default(none) \ shared( I, params, node_delta_z, fine_delta_z ) \ reduction(+ : segments_processed ) { #ifdef OPENMP int thread = omp_get_thread_num(); int nthreads = omp_get_num_threads(); unsigned int seed = time(NULL) * (thread+1); #endif //print_Input_struct( I ); #ifdef PAPI int eventset = PAPI_NULL; int num_papi_events; #pragma omp critical { counter_init(&eventset, &num_papi_events, I); } #endif AttenuateVars A; float * ptr = (float * ) malloc( I->n_egroups * 14 * sizeof(float)); A.q0 = ptr; ptr += I->n_egroups; A.q1 = ptr; ptr += I->n_egroups; A.q2 = ptr; ptr += I->n_egroups; A.sigT = ptr; ptr += I->n_egroups; A.tau = ptr; ptr += I->n_egroups; A.sigT2 = ptr; ptr += I->n_egroups; A.expVal = ptr; ptr += I->n_egroups; A.reuse = ptr; ptr += I->n_egroups; A.flux_integral = ptr; ptr += I->n_egroups; A.tally = ptr; ptr += I->n_egroups; A.t1 = ptr; ptr += I->n_egroups; A.t2 = ptr; ptr += I->n_egroups; A.t3 = ptr; ptr += I->n_egroups; A.t4 = ptr; #pragma omp for schedule( dynamic ) for (long i = 0; i < I->ntracks_2D; i++) { #if TIMING_INFO | 0 // print progress #ifdef OPENMP if(I->mype==0 && thread == 0) { printf("\rAttenuating Tracks... 
(%.0lf%% completed)", (i / ( (double)I->ntracks_2D / (double) nthreads )) / (double) nthreads * 100.0); } #else if( i % 50 == 0) if(I->mype==0) printf("%s%ld%s%ld\n","2D Tracks Completed = ", i," / ", I->ntracks_2D ); #endif #endif // treat positive-z traveling rays first bool pos_z_dir = true; for( int j = 0; j < I->n_polar_angles; j++) { if( j == I->n_polar_angles / 2 ) pos_z_dir = false; float p_angle = params->polar_angles[j]; float mu = cos(p_angle); // start with all z stacked rays int begin_stacked = 0; int end_stacked = I->z_stacked; for( int n = 0; n < params->tracks_2D[i].n_segments; n++) { // calculate distance traveled in cell if segment completed float s_full = params->tracks_2D[i].segments[n].length / sin(p_angle); // allocate varaible for distance traveled in an FSR float ds = 0; // loop over remaining z-stacked rays for( int k = begin_stacked; k < end_stacked; k++) { // initialize s to full length float s = s_full; // select current track Track * track = &params->tracks[i][j][k]; // set flag for completeion of segment bool seg_complete = false; // calculate interval int curr_interval; if( pos_z_dir) curr_interval = get_pos_interval(track->z_height, fine_delta_z); else curr_interval = get_neg_interval(track->z_height, fine_delta_z); while( !seg_complete ) { // flag to reset z position bool reset = false; /* calculate new height based on s * (distance traveled in FSR) */ float z = track->z_height + s * cos(p_angle); // check if still in same FSR (fine axial interval) int new_interval; if( pos_z_dir ) new_interval = get_pos_interval(z, fine_delta_z); else new_interval = get_neg_interval(z, fine_delta_z); if( new_interval == curr_interval ) { seg_complete = true; ds = s; } // otherwise, we need to recalculate distances else { // correct z if( pos_z_dir ) { curr_interval++; z = fine_delta_z * (float) curr_interval; } else{ curr_interval--; z = fine_delta_z * (float) curr_interval; } // calculate distance travelled in FSR (ds) ds = (z - track->z_height) / 
cos(p_angle); // update track length remaining s -= ds; /* check remaining track length to protect * against potential roundoff errors */ if( s <= 0 ) seg_complete = true; // check if out of bounds or track complete if( z <= 0 || z >= node_delta_z ) { // mark segment as completed seg_complete = true; // remember to no longer treat this track if ( pos_z_dir ) end_stacked--; else begin_stacked++; // reset z height reset = true; } } // pick a random FSR (cache miss expected) #ifdef OPENMP long QSR_id = rand_r(&seed) % I->n_source_regions_per_node; #else long QSR_id = rand() % I->n_source_regions_per_node; #endif /* update sources and fluxes from attenuation * over FSR */ if( I->axial_exp == 2 ) { attenuate_fluxes( track, true, &params->sources[QSR_id], I, params, ds, mu, params->tracks_2D[i].az_weight, &A ); segments_processed++; } else if( I->axial_exp == 0 ) { attenuate_FSR_fluxes( track, true, &params->sources[QSR_id], I, params, ds, mu, params->tracks_2D[i].az_weight, &A ); segments_processed++; } else { printf("Error: invalid axial expansion order"); printf("\n Please input 0 or 2\n"); exit(1); } // update with new z height or reset if finished if( n == params->tracks_2D[i].n_segments - 1 || reset) { if( pos_z_dir) track->z_height = I->axial_z_sep * k; else track->z_height = I->axial_z_sep * (k+1); } else track->z_height = z; } } } } } #ifdef OPENMP if(thread == 0 && I->mype==0) printf("\n"); #endif #ifdef PAPI if( thread == 0 ) { printf("\n"); border_print(); center_print("PAPI COUNTER RESULTS", 79); border_print(); printf("Count \tSmybol \tDescription\n"); } { #pragma omp barrier } counter_stop(&eventset, num_papi_events, I); #endif } I->segments_processed = segments_processed; return; } // run one full transport sweep, return k void two_way_transport_sweep( Params * params, Input * I ) { if(I->mype==0) printf("Starting transport sweep ...\n"); // calculate the height of a node's domain and of each FSR double node_delta_z = I->height / I->decomp_assemblies_ax; 
int num_intervals = (I->cai * I->fai); double fine_delta_z = node_delta_z / num_intervals; /* loop over tracks (implicitly azimuthal angles, tracks in azimuthal * angles, polar angles, and z stacked rays) */ long segments_processed = 0; #pragma omp parallel default(none) \ shared( I, params, node_delta_z, fine_delta_z, num_intervals ) \ reduction(+ : segments_processed ) { #ifdef OPENMP int thread = omp_get_thread_num(); int nthreads = omp_get_num_threads(); unsigned int seed = time(NULL) * (thread+1); #endif //print_Input_struct( I ); #ifdef PAPI int eventset = PAPI_NULL; int num_papi_events; #pragma omp critical { counter_init(&eventset, &num_papi_events, I); } #endif AttenuateVars A; float * ptr = (float * ) malloc( I->n_egroups * 14 * sizeof(float)); A.q0 = ptr; ptr += I->n_egroups; A.q1 = ptr; ptr += I->n_egroups; A.q2 = ptr; ptr += I->n_egroups; A.sigT = ptr; ptr += I->n_egroups; A.tau = ptr; ptr += I->n_egroups; A.sigT2 = ptr; ptr += I->n_egroups; A.expVal = ptr; ptr += I->n_egroups; A.reuse = ptr; ptr += I->n_egroups; A.flux_integral = ptr; ptr += I->n_egroups; A.tally = ptr; ptr += I->n_egroups; A.t1 = ptr; ptr += I->n_egroups; A.t2 = ptr; ptr += I->n_egroups; A.t3 = ptr; ptr += I->n_egroups; A.t4 = ptr; #pragma omp for schedule( dynamic ) for (long i = 0; i < I->ntracks_2D; i++) { // print progress #ifdef OPENMP if(I->mype==0 && thread == 0) { printf("\rAttenuating Tracks... 
(%.0lf%% completed)", (i / ( (double)I->ntracks_2D / (double) nthreads )) / (double) nthreads * 100.0); } #else if( i % 50 == 0) if(I->mype==0) printf("%s%ld%s%ld\n","2D Tracks Completed = ", i," / ", I->ntracks_2D ); #endif // allocate arrays for segment storage FIXME double ** seg_dist = malloc( I->z_stacked * sizeof(double *) ); Source *** seg_src = malloc( I->z_stacked * sizeof(Source**) ); int * seg_idx = malloc( I->z_stacked * sizeof(int) ); int * seg_size = malloc( I->z_stacked * sizeof(int) ); // fill matrix with arrays FIXME for( int k = 0; k < I->z_stacked; k++) { seg_size[k] = 2 * I->segments_per_track; seg_dist[k] = malloc( seg_size[k] * sizeof(double) ); seg_src[k] = malloc( seg_size[k] * sizeof(Source *) ); seg_idx[k] = 0; } // treat positive-z traveling rays first bool pos_z_dir = true; for( int j = 0; j < I->n_polar_angles; j++) { if( j == I->n_polar_angles / 2 ) pos_z_dir = false; float p_angle = params->polar_angles[j]; float mu = cos(p_angle); // start with all z stacked rays int begin_stacked = 0; int end_stacked = I->z_stacked; // reset segment indexes for( int k = 0; k < I->z_stacked; k++) seg_idx[k] = 0; for( int n = 0; n < params->tracks_2D[i].n_segments; n++) { // calculate distance traveled in cell if segment completed float s_full = params->tracks_2D[i].segments[n].length / sin(p_angle); // allocate variable for distance traveled in an FSR float ds = 0; // loop over remaining z-stacked rays int tracks_completed = 0; for( int k = begin_stacked; k < end_stacked; k++) { // select current track Track * track = &params->tracks[i][j][k]; // determine current axial interval (cast the quotient, not z_height: the cast binds tighter than '/', so casting the operand truncated z_height before dividing) int interval = (int) ( track->z_height / fine_delta_z ); // calculate distance to domain boundary float bound_dist; if( pos_z_dir) bound_dist = (node_delta_z - track->z_height) / mu; else bound_dist = -track->z_height / mu; // determine track length float s; if( s_full < bound_dist ) s = s_full; else { // note completion of track s = bound_dist; tracks_completed++; } // set flag
for completeion of segment bool seg_complete = false; while( !seg_complete ) { // initialize tracking variables long QSR_id = interval + num_intervals * n; float ds; float z; // calculate z height of next fine axial interval float fai_z_height; if( pos_z_dir ) fai_z_height = (interval + 1) * fine_delta_z ; else fai_z_height = interval * fine_delta_z; // calculate z distance to next fine axial interval float z_dist_to_fai = fai_z_height - track->z_height; /* calculate total distance (s) to fine axial * interval */ float s_dist_to_fai = z_dist_to_fai / mu; // determine if a fine axial interval is crossed if( s_dist_to_fai < s ) { if( pos_z_dir ) interval++; else interval--; ds = s_dist_to_fai; z = track->z_height + z_dist_to_fai; } else { ds = s; z = track->z_height + s * mu; } /* shorten remaining segment length and check if * completed (accounting for potential roundoff) */ s -= ds; if( s <= 0 || interval < 0 || interval >= num_intervals) seg_complete = true; // pick a random FSR (cache miss expected) #ifdef OPENMP QSR_id = rand_r(&seed) % I->n_source_regions_per_node; #else QSR_id = rand() % I->n_source_regions_per_node; #endif /* update sources and fluxes from attenuation * over FSR */ if( I->axial_exp == 2 ) { attenuate_fluxes( track, true, &params->sources[QSR_id], I, params, ds, mu, params->tracks_2D[i].az_weight, &A ); segments_processed++; } else if( I->axial_exp == 0 ) attenuate_FSR_fluxes( track, true, &params->sources[QSR_id], I, params, ds, mu, params->tracks_2D[i].az_weight, &A ); else { printf("Error: invalid axial expansion order"); printf("\n Please input 0 or 2\n"); exit(1); } // update track height track->z_height = z; // save segment length and source FIXME seg_dist[k][seg_idx[k]] = ds; seg_src[k][seg_idx[k]] = &params->sources[QSR_id]; seg_idx[k]++; // check if array needs to grow FIXME if( seg_idx[k] >= seg_size[k] ) { seg_size[k] *= 2; seg_dist[k] = (double *) realloc( seg_dist[k], seg_size[k] * sizeof(double) ); seg_src[k] = (Source **) 
realloc( seg_src[k], seg_size[k] * sizeof(Source *) ); } } } if(pos_z_dir) end_stacked -= tracks_completed; else begin_stacked += tracks_completed; } // loop over all z stacked rays again for( int k = 0; k < I->z_stacked; k++ ) { for( int n = seg_idx[k]-1; n >= 0; n--) { // load distance float ds = seg_dist[k][n]; // select current track Track * track = &params->tracks[i][j][k]; // update sources and fluxes from attenuation over FSR if( I->axial_exp == 2 ) { attenuate_fluxes( track, false, seg_src[k][n], I, params, ds, -mu, params->tracks_2D[i].az_weight, &A ); segments_processed++; } else if( I->axial_exp == 0 ) attenuate_FSR_fluxes( track, false, seg_src[k][n], I, params, ds, -mu, params->tracks_2D[i].az_weight, &A ); // update z height track->z_height -= ds * mu; } } /* Update all tracks with correct starting z location again * NOTE: this is only here to acocunt for roundoff error */ for( int k = 0; k < I->z_stacked; k++) { Track * track = &params->tracks[i][j][k]; if( pos_z_dir) track->z_height = I->axial_z_sep * k; else track->z_height = I->axial_z_sep * (k+1); } } // free memory for( int k = 0; k < I->z_stacked; k++) { free(seg_dist[k]); free(seg_src[k]); } free(seg_dist); free(seg_src); free(seg_idx); free(seg_size); } #ifdef OPENMP if(thread == 0 && I->mype==0) printf("\n"); #endif #ifdef PAPI if( thread == 0 ) { printf("\n"); border_print(); center_print("PAPI COUNTER RESULTS", 79); border_print(); printf("Count \tSmybol \tDescription\n"); } { #pragma omp barrier } counter_stop(&eventset, num_papi_events, I); #endif } //printf("Number of segments processed: %ld\n", segments_processed); I->segments_processed = segments_processed; return; } /* returns integer number for axial interval for tracks traveling in the * positive direction */ int get_pos_interval( float z, float dz) { int interval = (int) (z/dz); return interval; } /* returns integer number for axial interval for tracks traveling in the * negative direction */ int get_neg_interval( float z, float 
dz) { int interval = (int) ( ceilf( z / dz ) ); return interval; } int calc_next_fai( float z, float dz, bool pos_dir) { int interval = z/dz; float lower_z = dz * (float) interval; if(pos_dir) return interval + 1; else return interval; } /* Determines the change in angular flux along a particular track across a fine * axial region and tallies the contribution to the scalar flux in the fine * axial region. This function assumes a quadratic source, which is calculated * on the fly using neighboring source values. * * This legacy function is unused since it is less efficient than the current * attenuate_fluxes function. However, it provides a more straightforward * description of the underlying physical problem. */ void alt_attenuate_fluxes( Track * track, bool forward, Source * QSR, Input * I, Params * params, float ds, float mu, float az_weight ) { // compute fine axial interval spacing float dz = I->height / (I->fai * I->decomp_assemblies_ax * I->cai); // compute z height in cell float zin = track->z_height - dz * ( (int)( track->z_height / dz ) + 0.5 ); // compute fine axial region ID int fine_id = (int) ( track->z_height / dz ) % I->fai; // compute weight (azimuthal * polar) // NOTE: real app would also have volume weight component float weight = track->p_weight * az_weight; float mu2 = mu * mu; // load fine source region flux vector float * FSR_flux = QSR -> fine_flux[fine_id]; // cycle over energy groups for( int g = 0; g < I->n_egroups; g++) { // load total cross section float sigT = QSR->sigT[g]; // define source parameters float q0, q1, q2; // calculate source components if( fine_id == 0 ) { // load neighboring sources float y2 = QSR->fine_source[fine_id][g]; float y3 = QSR->fine_source[fine_id+1][g]; // do linear "fitting" float c0 = y2; float c1 = (y3 - y2) / dz; // calculate q0, q1, q2 q0 = c0 + c1*zin; q1 = c1; q2 = 0; } else if( fine_id == I->fai - 1 ) { // load neighboring sources float y1 = QSR->fine_source[fine_id-1][g]; float y2 = 
QSR->fine_source[fine_id][g]; // do linear "fitting" float c0 = y2; float c1 = (y2 - y1) / dz; // calculate q0, q1, q2 q0 = c0 + c1*zin; q1 = c1; q2 = 0; } else { // load neighboring sources float y1 = QSR->fine_source[fine_id-1][g]; float y2 = QSR->fine_source[fine_id][g]; float y3 = QSR->fine_source[fine_id+1][g]; // do quadratic "fitting" float c0 = y2; float c1 = (y1 - y3) / (2*dz); float c2 = (y1 - 2*y2 + y3) / (2*dz*dz); // calculate q0, q1, q2 q0 = c0 + c1*zin + c2*zin*zin; q1 = c1 + 2*c2*zin; q2 = c2; } // calculate common values for efficiency float tau = sigT * ds; float sigT2 = sigT * sigT; // compute exponential ( 1 - exp(-x) ) using table lookup float expVal = interpolateTable( params->expTable, tau ); // load correct angular flux vector float * psi; if(forward) psi = track->f_psi; else psi = track->b_psi; // add contribution to new source flux float flux_integral = (q0 * tau + (sigT * psi[g] - q0) * expVal) / sigT2 + q1 * mu * (tau * (tau - 2) + 2 * expVal) / (sigT * sigT2) + q2 * mu2 * (tau * (tau * (tau - 3) + 6) - 6 * expVal) / (3 * sigT2 * sigT2); #pragma omp atomic FSR_flux[g] += weight * flux_integral; // update angular flux psi[g] = psi[g] * (1.0 - expVal) + q0 * expVal / sigT + q1 * mu * (tau - expVal) / sigT2 + q2 * mu2 * (tau * (tau - 2) + 2 * expVal) / (sigT2 * sigT); } } /* Determines the change in angular flux along a particular track across a fine * axial region and tallies the contribution to the scalar flux in the fine * axial region. This function assumes a constant source. 
*/ void attenuate_FSR_fluxes( Track * track, bool forward, Source * FSR, Input * I, Params * params_in, float ds, float mu, float az_weight, AttenuateVars *A) { // upack attenuate vars struct float * restrict tally = A->tally; float * restrict expVal = A->expVal; float * restrict sigT = A->sigT; float * restrict tau = A->tau; Params params = * params_in; // compute fine axial interval spacing float dz = I->height / (I->fai * I->decomp_assemblies_ax * I->cai); // compute z height in cell float zin = track->z_height - dz * ( (int)( track->z_height / dz ) + 0.5f ); // compute fine axial region ID int fine_id = (int) ( track->z_height / dz ) % I->fai; // compute weight (azimuthal * polar) // NOTE: real app would also have volume weight component float weight = track->p_weight * az_weight * mu; // load fine source region flux vector float * FSR_flux = FSR -> fine_flux[fine_id]; // cycle over energy groups #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I->n_egroups; g++) { // load total cross section sigT[g] = FSR->sigT[g]; tau[g] = sigT[g] * ds; } // compute exponential ( 1 - exp(-x) ) using table lookup #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for(int g = 0; g < I->n_egroups; g++) { expVal[g] = interpolateTable( params.expTable, tau[g] ); } float * psi; if(forward) psi = track->f_psi; else psi = track->b_psi; #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I->n_egroups; g++) { // compute angular flux attenuation float q = FSR->fine_source[fine_id][g] / sigT[g]; float delta_psi = (psi[g] - q) * expVal[g]; // add contribution to new source flux tally[g] = weight * delta_psi; // update angular flux psi[g] -= delta_psi; } #ifdef OPENMP omp_set_lock(&FSR->locks[fine_id]); #endif #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I->n_egroups; g++) { FSR_flux[g] += tally[g]; } #ifdef OPENMP 
omp_unset_lock(&FSR->locks[fine_id]); #endif } /* Renormalizes scalar and angular flux for next transport sweep iteration. * Calculation requires multiple pair-wise sums and a reduction accross all * nodes. */ void renormalize_flux( Params params, Input I, CommGrid grid ) { if( I.mype == 0 ) printf("Renormalizing Flux...\n"); float node_fission_rate = 0; #ifdef OPENMP #pragma omp parallel default(none) shared(params, I, grid) \ reduction(+ : node_fission_rate) { #endif // tally total fission rate (pair-wise sum) float * fission_rates = malloc( I.n_source_regions_per_node * sizeof(float) ); float * fine_fission_rates = malloc( I.fai * sizeof(float) ); float * g_fission_rates = malloc( I.n_egroups * sizeof(float) ); // accumulate total fission rate on node domain #pragma omp for schedule(dynamic) for( int i = 0; i < I.n_source_regions_per_node; i++) { Source src = params.sources[i]; for( int j = 0; j < I.fai; j++) { for( int g = 0; g < I.n_egroups; g++) g_fission_rates[g] = src.fine_flux[j][g] * src.vol * src.XS[g][0]; fine_fission_rates[j] = pairwise_sum( g_fission_rates, I.n_egroups ); } fission_rates[i] = pairwise_sum( fine_fission_rates, I.fai ); } node_fission_rate = pairwise_sum(fission_rates, I.n_source_regions_per_node); // free allocated memory free(fission_rates); free(fine_fission_rates); free(g_fission_rates); #ifdef OPENMP } #endif #ifdef MPI // accumulate total fission rate by MPI Allreduce float total_fission_rate = 0; MPI_Barrier(grid.cart_comm_3d); MPI_Allreduce( &node_fission_rate, // Send Buffer &total_fission_rate, // Receive Buffer 1, // Element Count MPI_FLOAT, // Element Type MPI_SUM, // Reduciton Operation Type grid.cart_comm_3d ); // MPI Communicator MPI_Barrier(grid.cart_comm_3d); #else float total_fission_rate = node_fission_rate; #endif // normalize fluxes by fission reaction rate float norm_factor = 1.0 / total_fission_rate; #pragma omp parallel for default(none) \ shared(I, params) private(norm_factor) schedule(dynamic) for( int i = 0; i 
< I.n_source_regions_per_node; i++) { Source * src = &params.sources[i]; float adjust = norm_factor * 4 * M_PI * I.fai / src->vol; for( int k = 0; k < I.fai; k++) for( int g = 0; g < I.n_egroups; g++) src->fine_flux[k][g] *= adjust; } // normalize boundary fluxes by same factor #pragma omp parallel for default(none) \ shared(I, params) private(norm_factor) schedule(dynamic) for( long i = 0; i < I.ntracks_2D; i++) for( int j = 0; j < I.n_polar_angles; j++) for( int k = 0; k < I.z_stacked; k++) for( int g = 0; g < I.n_egroups; g++) { params.tracks[i][j][k].f_psi[g] *= norm_factor; params.tracks[i][j][k].b_psi[g] *= norm_factor; } if( I.mype == 0 ) printf("Renormalizing Flux Complete.\n"); return; } /* Updates sources for next iteration by computing scattering and fission * components. Calculation includes multiple pair-wise sums and reductions * accross all nodes */ float update_sources( Params params, Input I, float keff ) { // source residual float residual; // calculate inverse multiplication facotr for efficiency float inverse_k = 1.0 / keff; // allocate residual arrays float * group_res = (float *) malloc(I.n_egroups * sizeof(float)); float * fine_res = (float *) malloc(I.fai * sizeof(float)); float * residuals = (float *) malloc(I.n_source_regions_per_node * sizeof(float)); // allocate arrays for summation float * fission_rates = malloc(I.n_egroups * sizeof(float)); float * scatter_rates = malloc(I.n_egroups * sizeof(float)); // cycle through all coarse axial intervals to update source for( long i = 0; i < I.n_source_regions_per_node; i++) { Source src = params.sources[i]; // cycle thorugh all fine axial regions to calculate new source for( int j = 0; j < I.fai; j++) { // calculate total fission source and scattering source float fission_source; float scatter_source; // compute total fission source for( int g = 0; g < I.n_egroups; g++ ) fission_rates[g] = src.fine_flux[j][g] * src.XS[g][0]; fission_source = pairwise_sum( fission_rates, (long) I.n_egroups); // 
normalize fission source by multiplication factor fission_source *= inverse_k; // compute scattering and new total source for each group for( int g = 0; g < I.n_egroups; g++ ) { for( int g2 = 0; g2 < I.n_egroups; g2++ ) { // compute scatter source originating from g2 -> g scatter_rates[g2] = src.scattering_matrix[g][g2] * src.fine_flux[j][g2]; } scatter_source = pairwise_sum(scatter_rates, (long) I.n_egroups); // compuate new total source float chi = src.XS[g][2]; // calculate new fine source float newSrc = (fission_source * chi + scatter_source) / (4.0 * M_PI); // calculate residual float oldSrc = src.fine_source[j][g]; group_res[g] = (newSrc - oldSrc) * (newSrc - oldSrc) / (oldSrc * oldSrc); /* calculate new source in fine axial interval assuming * isotropic source components */ src.fine_source[j][g] = newSrc; } fine_res[j] = pairwise_sum(group_res, (long) I.n_egroups); } residuals[i] = pairwise_sum(fine_res, (long) I.fai); } // calculate source residual residual = pairwise_sum(residuals, I.n_source_regions_per_node); // free memory free(fission_rates); free(scatter_rates); free(group_res); free(fine_res); free(residuals); // NOTE: See code around line 600 of CPUSolver.cpp in ClosedMOC/ OpenMOC return residual; } /* Computes globall k-effective using multiple pair-wise summations and finally * a reduction accross all nodes */ float compute_keff(Params params, Input I, CommGrid grid) { // allocate temporary memory float * sigma = malloc( I.n_egroups * sizeof(float) ); float * group_rates = malloc( I.n_egroups * sizeof(float) ); float * fine_rates = malloc( I.fai * sizeof(float) ); float * QSR_rates = malloc( I.n_source_regions_per_node * sizeof(float) ); /////////////////////////////////////////////////////////////////////////// // compute total absorption rate, looping over source regions for( long i = 0; i < I.n_source_regions_per_node; i++) { // load absorption XS data Source src = params.sources[i]; for( int g = 0; g < I.n_egroups; g++) sigma[g] = 
src.XS[g][1]; for( int j = 0; j < I.fai; j++ ) { // calculate absorption rates float * fine_flux = src.fine_flux[j]; for( int g = 0; g < I.n_egroups; g++) group_rates[g] = sigma[g] * fine_flux[g]; // sum absorption over all energy groups fine_rates[j] = pairwise_sum( group_rates, (long) I.n_egroups ); } // sum absorption over all fine axial intervals QSR_rates[i] = pairwise_sum( fine_rates, (long) I.fai ); } // sum absorption over all source regions in a node float node_abs = pairwise_sum( QSR_rates, I.n_source_regions_per_node); /////////////////////////////////////////////////////////////////////////// // compute total absorption rate, looping over source regions for( long i = 0; i < I.n_source_regions_per_node; i++) { // load nuSigmaF XS data Source src = params.sources[i]; for( int g = 0; g < I.n_egroups; g++) sigma[g] = src.XS[g][0]; for( int j = 0; j < I.fai; j++ ) { // calculate absorption rates float * fine_flux = src.fine_flux[j]; for( int g = 0; g < I.n_egroups; g++) group_rates[g] = sigma[g] * fine_flux[g]; // sum fission over all energy groups fine_rates[j] = pairwise_sum( group_rates, (long) I.n_egroups ); } // sum fission over all fine axial intervals QSR_rates[i] = pairwise_sum( fine_rates, (long) I.fai ); } // sum fission over all source regions in a node float node_fission = pairwise_sum( QSR_rates, I.n_source_regions_per_node); /////////////////////////////////////////////////////////////////////////// // MPi Reduction float tot_abs = 0; float tot_fission = 0; float leakage = 0; #ifdef MPI // Total Absorption Reduction MPI_Reduce( &node_abs, // Send Buffer &tot_abs, // Receive Buffer 1, // Element Count MPI_FLOAT, // Element Type MPI_SUM, // Reduciton Operation Type 0, // Master Rank grid.cart_comm_3d ); // MPI Communicator // Total Fission Reduction MPI_Reduce( &node_fission, // Send Buffer &tot_fission, // Receive Buffer 1, // Element Count MPI_FLOAT, // Element Type MPI_SUM, // Reduciton Operation Type 0, // Master Rank grid.cart_comm_3d ); // 
MPI Communicator // Total Leakage Reduction MPI_Reduce( params.leakage, // Send Buffer &leakage, // Receive Buffer 1, // Element Count MPI_FLOAT, // Element Type MPI_SUM, // Reduciton Operation Type 0, // Master Rank grid.cart_comm_3d ); // MPI Communicator MPI_Barrier(grid.cart_comm_3d); // calculate keff float keff = tot_fission/ (tot_abs + leakage); #else float keff = node_fission / (node_abs + *params.leakage); #endif /////////////////////////////////////////////////////////////////////////// // free memory free(sigma); free(group_rates); free(fine_rates); free(QSR_rates); return keff; } /* Interpolates a formed exponential table to compute ( 1- exp(-x) ) * at the desired x value */ float interpolateTable( Table table, float x) { // check to ensure value is in domain if( x > table.maxVal ) return 1.0f; else { int interval = (int) ( x / table.dx + 0.5f * table.dx ); /* if( interval >= table.N || interval < 0) { printf( "Interval = %d\n", interval); printf( "N = %d\n", table.N); printf( "x = %f\n", x); printf( "dx = %f\n", table.dx); exit(1); } */ float slope = table.values[ 2 * interval ]; float intercept = table.values[ 2 * interval + 1 ]; float val = slope * x + intercept; return val; } }
ast-dump-openmp-distribute.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp distribute for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp distribute for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp distribute collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp distribute collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp distribute collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-distribute.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:4:1, col:23> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | 
|-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:10:1, col:23> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt 
{{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:17:1, col:35> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:24, col:34> // CHECK-NEXT: | | `-ConstantExpr 
{{.*}} <col:33> 'int' // CHECK-NEXT: | | |-value: Int 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:33> 'int' 1 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var 
{{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:24:1, col:35> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:24, col:34> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:33> 'int' // CHECK-NEXT: | | |-value: Int 2 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:33> 'int' 2 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr 
{{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} 
<col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPDistributeDirective {{.*}} <line:31:1, col:35> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:24, col:34> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:33> 'int' // CHECK-NEXT: | |-value: Int 2 // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:33> 'int' 2 // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' 
lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue 
ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
omp_flush.c
#include <stdio.h> #include <unistd.h> #include <omp.h> #include "omp_testsuite.h" #include "omp_my_sleep.h" int check_omp_flush (FILE * logFile) { int result1 = 0; int result2 = 0; int dummy; #pragma omp parallel { int rank; rank = omp_get_thread_num (); #pragma omp barrier if (rank == 1) { result2 = 3; #pragma omp flush(result2) dummy = result2; } if (rank == 0) { my_sleep (1.); #pragma omp flush(result2) result1 = result2; } } return ((result1 == result2) && (result2 == dummy) && (result2 == 3)); } int crosscheck_omp_flush (FILE * logFile) { int result1 = 0; int result2 = 0; int dummy; #pragma omp parallel { int rank; rank = omp_get_thread_num (); #pragma omp barrier if (rank == 1) { result2 = 3; /* #pragma omp flush(result2) */ dummy = result2; } if (rank == 0) { my_sleep (1.); /* #pragma omp flush(result2) */ result1 = result2; } } return ((result1 == result2) && (result2 == dummy) && (result2 == 3)); }
RCCE_lib.h
//
// Copyright 2010 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// RCCE_lib.h: internal declarations shared by the RCCE library sources
// (macros, MPB pointer typedefs, library-global state, and prototypes).
#ifndef RCCE_LIB_H
#define RCCE_LIB_H
#include "RCCE.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <string.h>
/* PAD32byte is used to compute a cacheline padded length of n (input) bytes */
#define PAD32byte(n) ((n)%32==0 ? (n) : (n) + 32 - (n)%32)
//#define BITSPERCHAR 8
// location codes used when classifying source/target buffers of a transfer
#define BOTH_IN_COMM_BUFFER 12
#define SOURCE_IN_PRIVATE_MEMORY 34
#define TARGET_IN_PRIVATE_MEMORY 56
// combined (op, type) reduction codes: op index + RCCE_NUM_OPS * type index
#define RCCE_SUM_INT (RCCE_SUM+(RCCE_NUM_OPS)*(RCCE_INT))
#define RCCE_SUM_LONG (RCCE_SUM+(RCCE_NUM_OPS)*(RCCE_LONG))
#define RCCE_SUM_FLOAT (RCCE_SUM+(RCCE_NUM_OPS)*(RCCE_FLOAT))
#define RCCE_SUM_DOUBLE (RCCE_SUM+(RCCE_NUM_OPS)*(RCCE_DOUBLE))
#define RCCE_MAX_INT (RCCE_MAX+(RCCE_NUM_OPS)*(RCCE_INT))
#define RCCE_MAX_LONG (RCCE_MAX+(RCCE_NUM_OPS)*(RCCE_LONG))
#define RCCE_MAX_FLOAT (RCCE_MAX+(RCCE_NUM_OPS)*(RCCE_FLOAT))
#define RCCE_MAX_DOUBLE (RCCE_MAX+(RCCE_NUM_OPS)*(RCCE_DOUBLE))
#define RCCE_MIN_INT (RCCE_MIN+(RCCE_NUM_OPS)*(RCCE_INT))
#define RCCE_MIN_LONG (RCCE_MIN+(RCCE_NUM_OPS)*(RCCE_LONG))
#define RCCE_MIN_FLOAT (RCCE_MIN+(RCCE_NUM_OPS)*(RCCE_FLOAT))
#define RCCE_MIN_DOUBLE (RCCE_MIN+(RCCE_NUM_OPS)*(RCCE_DOUBLE))
#define RCCE_PROD_INT (RCCE_PROD+(RCCE_NUM_OPS)*(RCCE_INT))
#define RCCE_PROD_LONG (RCCE_PROD+(RCCE_NUM_OPS)*(RCCE_LONG))
#define RCCE_PROD_FLOAT (RCCE_PROD+(RCCE_NUM_OPS)*(RCCE_FLOAT))
#define RCCE_PROD_DOUBLE (RCCE_PROD+(RCCE_NUM_OPS)*(RCCE_DOUBLE))
// sentinel values marking whether the communicator was initialized
#define RCCE_COMM_INITIALIZED 45328976
#define RCCE_COMM_NOT_INITIALIZED -45328976
// auxiliary MPB pointer type
typedef volatile unsigned int* t_vintp;
// Also need dereferenced types
typedef volatile unsigned char t_vchar;
typedef volatile unsigned int t_vint;
// node of the circular linked list used by the MPB memory allocator
typedef struct rcce_block {
  t_vcharp space;           // pointer to space for data in block
  size_t free_size;         // actual free space in block (0 or whole block)
  struct rcce_block *next;  // pointer to next block in circular linked list
} RCCE_BLOCK;
#ifdef SINGLEBITFLAGS
// bookkeeping for packing multiple one-bit flags into one MPB cache line
typedef struct rcce_flag_line {
  char flag[RCCE_LINE_SIZE];
  t_vcharp line_address;
  int members;
  struct rcce_flag_line *next;
} RCCE_FLAG_LINE;
#endif
typedef struct {
  RCCE_BLOCK *tail;         // "last" block in linked list of blocks
} RCCE_BLOCK_S;
// library-global state (per-core; made threadprivate below under OpenMP)
#ifndef GORY
extern RCCE_FLAG RCCE_sent_flag[RCCE_MAXNP];
extern RCCE_FLAG RCCE_ready_flag[RCCE_MAXNP];
extern t_vcharp RCCE_buff_ptr;
extern size_t RCCE_chunk;
extern t_vcharp RCCE_flags_start;
#endif
extern t_vcharp RCCE_comm_buffer[RCCE_MAXNP];
extern int RCCE_NP;
extern int RCCE_BUFF_SIZE;
#ifndef COPPERRIDGE
extern omp_lock_t RCCE_corelock[RCCE_MAXNP];
extern t_vchar RC_comm_buffer[RCCE_MAXNP*RCCE_BUFF_SIZE_MAX];
#endif
extern int RC_MY_COREID;
extern int RC_COREID[RCCE_MAXNP];
extern double RC_REFCLOCKGHZ;
extern int RCCE_IAM;
extern int RCCE_debug_synch;
extern int RCCE_debug_comm;
extern int RCCE_debug_debug;
extern int RCCE_debug_RPC;
#ifdef SINGLEBITFLAGS
extern RCCE_FLAG_LINE RCCE_flags;
extern int WORDSIZE;
extern int LEFTMOSTBIT;
RCCE_FLAG_STATUS RCCE_bit_value(t_vcharp, int);
RCCE_FLAG_STATUS RCCE_flip_bit_value(t_vcharp, int);
int RCCE_write_bit_value(t_vcharp, int, RCCE_FLAG_STATUS);
#endif
extern int RCCE_comm_init_val;
// internal helper prototypes
void RCCE_malloc_init(t_vcharp, size_t);
int RCCE_qsort(char *, size_t, size_t, int (*)(const void*, const void*));
int id_compare(const void *, const void *);
int RCCE_probe(RCCE_FLAG);
int RCCE_error_return(int, int);
void RC_cache_invalidate(void);
int RCCE_acquire_lock(int);
int RCCE_release_lock(int);
int RCCE_global_color(int, void *);
t_vcharp RC_COMM_BUFFER_START(int);
#ifndef GORY
// simplified (non-"gory") API: MPB allocation, put/get, and flag handling
t_vcharp RCCE_malloc(size_t);
t_vcharp RCCE_malloc_request(size_t, size_t *);
void RCCE_free(t_vcharp);
int RCCE_put(t_vcharp, t_vcharp, int, int);
int RCCE_get(t_vcharp, t_vcharp, int, int);
int RCCE_wait_until(RCCE_FLAG, RCCE_FLAG_STATUS);
int RCCE_flag_alloc(RCCE_FLAG *);
int RCCE_flag_free(RCCE_FLAG *);
int RCCE_flag_write(RCCE_FLAG *, RCCE_FLAG_STATUS, int);
int RCCE_flag_read(RCCE_FLAG, RCCE_FLAG_STATUS *, int);
#endif
#ifdef _OPENMP
// under the OpenMP emulation each "core" is a thread, so all per-core
// globals above must be threadprivate
#pragma omp threadprivate (RC_COREID, RC_MY_COREID, RC_REFCLOCKGHZ)
#pragma omp threadprivate (RCCE_comm_buffer)
#pragma omp threadprivate (RCCE_BUFF_SIZE)
#pragma omp threadprivate (RCCE_IAM, RCCE_NP)
#pragma omp threadprivate (RCCE_debug_synch, RCCE_debug_comm, RCCE_debug_debug)
#ifdef SINGLEBITFLAGS
#pragma omp threadprivate (RCCE_flags, WORDSIZE, LEFTMOSTBIT)
#endif
#ifndef GORY
#pragma omp threadprivate (RCCE_sent_flag, RCCE_ready_flag)
#pragma omp threadprivate (RCCE_buff_ptr, RCCE_chunk)
#pragma omp threadprivate (RCCE_flags_start)
#endif
#endif
#endif
v2gamma.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include "compearth.h" /*! * @brief Computes lune longitude from point v. * * @param[in] n Number of points in array. * @param[in] v v in rectilinear space s.t. * \f$ v \in [-1/3, 1/3] \f$. * This is an array of dimension [n]. * * @param[out] gamma Lune longitude * \f$ \gamma \in [-\pi/6, \pi/6 \f$. * This is an array of dimension [n]. * * @date 2016 - Ben Baker converted Carl Tape's v2gamma.m to C * * @copyright MIT * */ void compearth_v2gamma(const int n, const double *__restrict__ v, double *__restrict__ gamma) { int i; const double third = 1.0/3.0; #pragma omp simd for (i=0; i<n; i++) { gamma[i] = third*asin(3.0*v[i]); } return; }
variation_distance.h
/* * Created on: Oct 19, 2017 * Author: Steffen Rechner <steffen.rechner@informatik.uni-halle.de> * * This file is part of the marathon software. * * Copyright (c) 2016, Steffen Rechner * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef MARATHON_VARIATION_DISTANCE_H_ #define MARATHON_VARIATION_DISTANCE_H_ #include "transition_matrix.h" namespace marathon { /** * Calculate the variation distance of the two vectors p1 and p2. * @tparam T One of the following: float, double, Rational. * @param p1 Vector of length n. * @param p2 Vector of length n. * @return Variation Distance between p1 and p2. */ template<class T> T variationDistance(const std::vector<T> &p1, const std::vector<T> &p2) { if (p1.size() != p2.size()) throw std::runtime_error("Error! 
Vectors of unequal length!"); T sum(0); for (size_t j = 0; j < p1.size(); j++) { T x = p1[j] - p2[j]; if (x >= T(0)) sum += x; else sum -= x; } return sum / T(2); } /** * Calculate the total variation distance of matrix P */ template<typename T> T totalVariationDistance(const TransitionMatrix<T> &P, const std::vector<T> &pi) { const size_t omega = P.getDimension(); T max(0); #pragma omp parallel for if(omega > 100) for (size_t i = 0; i < omega; i++) { T sum(0); for (size_t j = 0; j < omega; j++) { T pij = P.get(i, j); T x = pij - pi[j]; if (x >= 0) sum += x; else sum -= x; } #pragma omp critical max = sum > max ? sum : max; } return max / T(2); } /** * Computes the variation distance of two given normalized histograms. * @param hist0 The first histogram. * @param hist1 The second histogram. * @param bins Number of bins. * @return The variation distance value. */ template<class T1, typename T2 = double> T2 variationDistance( const std::unordered_map<T1, T2> &hist0, const std::unordered_map<T1, T2> &hist1, const int bins = INT_MAX ) { std::unordered_map<T1, T2> hist_temp(hist0); hist_temp.insert(hist1.begin(), hist1.end()); // determine number of bins const int k = std::min((int) hist_temp.size(), bins); // error handling if (k == 0) return -1; else if (k == 1) return 0; // determine minimum and maximum T2 min, max; min = max = hist_temp.begin()->first; for (const auto &kv : hist_temp) { if (kv.first < min) min = kv.first; if (kv.first > max) max = kv.first; } // Distribution each observation to one of k bins of same size. 
// constant required for calculation of bin id const T2 z = (max - min) * T2(10001) / T2(10000); // build vectors of length k std::vector<T2> vec0(k); std::vector<T2> vec1(k); for (const auto &p : hist_temp) { // calculate bin in which p must be inserted const int bin_id = (p.first - min) * k / z; //std::cout << "bin(" << p.first << ")=" << bin_id << std::endl; auto v0 = hist0.find(p.first); auto v1 = hist1.find(p.first); if (v0 != hist0.end()) { vec0[bin_id] += v0->second; } if (v1 != hist1.end()) { vec1[bin_id] += v1->second; } } /*std::cout << "bin0:" << std::endl; for (int i = 0; i < k; i++) std::cout << vec0[i] << std::endl; std::cout << "bin1:" << std::endl; for (int i = 0; i < k; i++) std::cout << vec1[i] << std::endl;*/ return marathon::variationDistance<T2>(vec0, vec1); } } #endif //MARATHON_VARIATION_DISTANCE_H_
GB_unaryop__identity_uint64_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint64_fp64
// op(A') function:  GB_tran__identity_uint64_fp64

// C type:   uint64_t
// A type:   double
// cast:     uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64)
// unaryop:  cij = aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting (double -> uint64_t; GB_CAST_UNSIGNED is defined in GB.h and
// presumably handles NaN / out-of-range values — see GB.h for its contract)
#define GB_CASTING(z, aij) \
    uint64_t z ; GB_CAST_UNSIGNED(z,aij,64) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_uint64_fp64
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    double *Ax,
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // embarrassingly parallel: one independent cast per entry
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_uint64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is textually included; the GB_* macros above
    // configure it for this (identity, uint64, fp64) instance
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
coordinate_transformation_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: // // #ifndef KRATOS_COORDINATE_TRANSFORMATION_UTILITIES_H #define KRATOS_COORDINATE_TRANSFORMATION_UTILITIES_H // system includes // external includes #include "boost/numeric/ublas/matrix_proxy.hpp" // kratos includes #include "includes/define.h" #include "includes/node.h" #include "containers/variable.h" #include "geometries/geometry.h" namespace Kratos { ///@addtogroup KratosCore ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// A utility to rotate the local contributions of certain nodes to the system matrix, which is required to apply slip conditions in arbitrary directions. template<class TLocalMatrixType, class TLocalVectorType, class TValueType> class CoordinateTransformationUtils { public: ///@name Type Definitions ///@{ /// Pointer definition of CoordinateTransformationUtils KRATOS_CLASS_POINTER_DEFINITION(CoordinateTransformationUtils); typedef Node<3> NodeType; typedef Geometry< Node<3> > GeometryType; // typedef boost::numeric::ublas::matrix_row<TLocalMatrixType> LocalRowType; // // typedef boost::numeric::ublas::matrix_range<TLocalMatrixType> MatrixBlockType; ///@} ///@name Life Cycle ///@{ /// Constructor. /** @param DomainSize Number of space dimensions (2 or 3) * @param NumRowsPerNode Number of matrix or vector rows associated to each node. Velocity DOFs are assumed to be the first mDomainSize rows in each block of rows. * @param rVariable Kratos variable used to flag nodes where local system contributions will be rotated. All nodes with rVariable != Zero will be rotated. * @param Zero The zero value for the variable. 
*/ CoordinateTransformationUtils(const unsigned int DomainSize, const unsigned int NumRowsPerNode, const Variable<TValueType>& rVariable, const TValueType Zero): mDomainSize(DomainSize), mBlockSize(NumRowsPerNode), mrFlagVariable(rVariable), mZero(Zero) {} /// Constructor. /** @param DomainSize Number of space dimensions (2 or 3) * @param NumRowsPerNode Number of matrix or vector rows associated to each node. Velocity DOFs are assumed to be the first mDomainSize rows in each block of rows. * @param rVariable Kratos variable used to flag nodes where local system contributions will be rotated. All nodes with rVariable != Zero will be rotated. */ CoordinateTransformationUtils(const unsigned int DomainSize, const unsigned int NumRowsPerNode, const Variable<TValueType> &rVariable): mDomainSize(DomainSize), mBlockSize(NumRowsPerNode), mrFlagVariable(rVariable), mZero(TValueType()) {} /// Destructor. virtual ~CoordinateTransformationUtils() {} ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /// Rotate the local system contributions so that they are oriented with each node's normal. 
/** @param rLocalMatrix Local system matrix @param rLocalVector Local RHS vector @param rGeometry A reference to the element's (or condition's) geometry */ virtual void Rotate(TLocalMatrixType& rLocalMatrix, TLocalVectorType& rLocalVector, GeometryType& rGeometry) const { if(mBlockSize != mDomainSize) //Monolithic case { if(mDomainSize == 2) RotateAux<2,3>(rLocalMatrix,rLocalVector,rGeometry); if(mDomainSize == 3) RotateAux<3,4>(rLocalMatrix,rLocalVector,rGeometry); } else //fractional step case { if(mDomainSize == 2) RotateAuxPure<2>(rLocalMatrix,rLocalVector,rGeometry); if(mDomainSize == 3) RotateAuxPure<3>(rLocalMatrix,rLocalVector,rGeometry); } } /// RHS only version of Rotate virtual void Rotate(TLocalVectorType& rLocalVector, GeometryType& rGeometry) const { //const unsigned int LocalSize = rLocalVector.size(); // We expect this to work both with elements (4 nodes) and conditions (3 nodes) unsigned int Index = 0; if (rLocalVector.size() > 0) { if(mBlockSize != mDomainSize) //Monolithic case { for(unsigned int j = 0; j < rGeometry.PointsNumber(); ++j) { if( this->IsSlip(rGeometry[j]) ) { if(mDomainSize == 3) { array_1d<double,4> aux,aux1; BoundedMatrix<double,4,4> rRot; LocalRotationOperator3D<4>(rRot,rGeometry[j]); for(unsigned int k=0; k<4; k++) aux[k] = rLocalVector[j*mBlockSize+k]; noalias(aux1) = prod(rRot,aux); for(unsigned int k=0; k<4; k++) rLocalVector[j*mBlockSize+k] = aux1[k]; } else { array_1d<double,3> aux,aux1; BoundedMatrix<double,3,3> rRot; LocalRotationOperator2D<3>(rRot,rGeometry[j]); for(unsigned int k=0; k<3; k++) { aux[k] = rLocalVector[j*mBlockSize+k]; } noalias(aux1) = prod(rRot,aux); for(unsigned int k=0; k<3; k++) rLocalVector[j*mBlockSize+k] = aux1[k]; } } Index += mBlockSize; } } else //fractional step case { for(unsigned int j = 0; j < rGeometry.PointsNumber(); ++j) { if( this->IsSlip(rGeometry[j]) ) { if(mDomainSize == 3) { array_1d<double,3> aux,aux1; BoundedMatrix<double,3,3> rRot; LocalRotationOperatorPure(rRot,rGeometry[j]); 
for(unsigned int k=0; k<3; k++) aux[k] = rLocalVector[j*mBlockSize+k]; noalias(aux1) = prod(rRot,aux); for(unsigned int k=0; k<3; k++) rLocalVector[j*mBlockSize+k] = aux1[k]; } else { array_1d<double,2> aux,aux1; BoundedMatrix<double,2,2> rRot; LocalRotationOperatorPure(rRot,rGeometry[j]); for(unsigned int k=0; k<2; k++) aux[k] = rLocalVector[j*mBlockSize+k]; noalias(aux1) = prod(rRot,aux); for(unsigned int k=0; k<2; k++) rLocalVector[j*mBlockSize+k] = aux1[k]; } } Index += mBlockSize; } } } } /// Apply slip boundary conditions to the rotated local contributions. /** This function takes the local system contributions rotated so each node's velocities are expressed using a base oriented with its normal and imposes that the normal velocity is equal to the mesh velocity in the normal direction. */ virtual void ApplySlipCondition(TLocalMatrixType& rLocalMatrix, TLocalVectorType& rLocalVector, GeometryType& rGeometry) const { const unsigned int LocalSize = rLocalVector.size(); // We expect this to work both with elements (4 nodes) and conditions (3 nodes) if (LocalSize > 0) { for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode) { if( this->IsSlip(rGeometry[itNode])) { // We fix the first dof (normal velocity) for each rotated block unsigned int j = itNode * mBlockSize; //const double k = rLocalMatrix(j,j)+rLocalMatrix(j,j+1)+rLocalMatrix(j,j+2); // If the mesh is moving, we must impose v_normal = vmesh_normal array_1d<double,3> VMesh = rGeometry[itNode].FastGetSolutionStepValue(MESH_VELOCITY); VMesh -= rGeometry[itNode].FastGetSolutionStepValue(VELOCITY); array_1d<double,3> rN = rGeometry[itNode].FastGetSolutionStepValue(NORMAL); this->Normalize(rN); for( unsigned int i = 0; i < j; ++i)// Skip term (i,i) { rLocalMatrix(i,j) = 0.0; rLocalMatrix(j,i) = 0.0; } for( unsigned int i = j+1; i < LocalSize; ++i) { rLocalMatrix(i,j) = 0.0; rLocalMatrix(j,i) = 0.0; } rLocalVector(j) = inner_prod(rN,VMesh); rLocalMatrix(j,j) = 1.0; } } } } /// RHS only version 
of ApplySlipCondition virtual void ApplySlipCondition(TLocalVectorType& rLocalVector, GeometryType& rGeometry) const { if (rLocalVector.size() > 0) { for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode) { if( this->IsSlip(rGeometry[itNode]) ) { // We fix the first dof (normal velocity) for each rotated block unsigned int j = itNode * mBlockSize; // If the mesh is moving, we must impose v_normal = vmesh_normal array_1d<double,3> VMesh = rGeometry[itNode].FastGetSolutionStepValue(MESH_VELOCITY); VMesh -= rGeometry[itNode].FastGetSolutionStepValue(VELOCITY); array_1d<double,3> rN = rGeometry[itNode].FastGetSolutionStepValue(NORMAL); this->Normalize(rN); rLocalVector[j] = inner_prod(rN,VMesh); } } } } /// Transform nodal velocities to the rotated coordinates (aligned with each node's normal) virtual void RotateVelocities(ModelPart& rModelPart) const { TLocalVectorType Vel(mDomainSize); TLocalVectorType Tmp(mDomainSize); ModelPart::NodeIterator it_begin = rModelPart.NodesBegin(); #pragma omp parallel for firstprivate(Vel,Tmp) for(int iii=0; iii<static_cast<int>(rModelPart.Nodes().size()); iii++) { ModelPart::NodeIterator itNode = it_begin+iii; if( this->IsSlip(*itNode) ) { //this->RotationOperator<TLocalMatrixType>(Rotation,); if(mDomainSize == 3) { BoundedMatrix<double,3,3> rRot; LocalRotationOperatorPure(rRot,*itNode); array_1d<double,3>& rVelocity = itNode->FastGetSolutionStepValue(VELOCITY); for(unsigned int i = 0; i < 3; i++) Vel[i] = rVelocity[i]; noalias(Tmp) = prod(rRot,Vel); for(unsigned int i = 0; i < 3; i++) rVelocity[i] = Tmp[i]; } else { BoundedMatrix<double,2,2> rRot; LocalRotationOperatorPure(rRot,*itNode); array_1d<double,3>& rVelocity = itNode->FastGetSolutionStepValue(VELOCITY); for(unsigned int i = 0; i < 2; i++) Vel[i] = rVelocity[i]; noalias(Tmp) = prod(rRot,Vel); for(unsigned int i = 0; i < 2; i++) rVelocity[i] = Tmp[i]; } } } } /// Transform nodal velocities from the rotated system to the original one virtual void 
RecoverVelocities(ModelPart& rModelPart) const { TLocalVectorType Vel(mDomainSize); TLocalVectorType Tmp(mDomainSize); ModelPart::NodeIterator it_begin = rModelPart.NodesBegin(); #pragma omp parallel for firstprivate(Vel,Tmp) for(int iii=0; iii<static_cast<int>(rModelPart.Nodes().size()); iii++) { ModelPart::NodeIterator itNode = it_begin+iii; if( this->IsSlip(*itNode) ) { if(mDomainSize == 3) { BoundedMatrix<double,3,3> rRot; LocalRotationOperatorPure(rRot,*itNode); array_1d<double,3>& rVelocity = itNode->FastGetSolutionStepValue(VELOCITY); for(unsigned int i = 0; i < 3; i++) Vel[i] = rVelocity[i]; noalias(Tmp) = prod(trans(rRot),Vel); for(unsigned int i = 0; i < 3; i++) rVelocity[i] = Tmp[i]; } else { BoundedMatrix<double,2,2> rRot; LocalRotationOperatorPure(rRot,*itNode); array_1d<double,3>& rVelocity = itNode->FastGetSolutionStepValue(VELOCITY); for(unsigned int i = 0; i < 2; i++) Vel[i] = rVelocity[i]; noalias(Tmp) = prod(trans(rRot),Vel); for(unsigned int i = 0; i < 2; i++) rVelocity[i] = Tmp[i]; } } } } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. virtual std::string Info() const { std::stringstream buffer; buffer << "CoordinateTransformationUtils"; return buffer.str(); } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const { rOStream << "CoordinateTransformationUtils"; } /// Print object's data. 
virtual void PrintData(std::ostream& rOStream) const {}

///@}
///@name Friends
///@{

///@}

protected:
///@name Protected static Member Variables
///@{

///@}
///@name Protected member Variables
///@{

///@}
///@name Protected Operators
///@{

///@}
///@name Protected Operations
///@{

/// Rotate a nodal-block local system to each slip node's (normal,tangent) frame.
/** For every node of rGeometry flagged as slip, the TBlockSize x TBlockSize
 *  matrix blocks are transformed in place: R_i * A_ij * R_j^T when both the
 *  row node i and column node j are rotated, R_i * A_ij (row only) or
 *  A_ij * R_j^T (column only) otherwise; the RHS block becomes R_i * b_i.
 *  @param rLocalMatrix local LHS matrix (NumBlocks*TBlockSize square)
 *  @param rLocalVector local RHS vector, same block layout
 *  @param rGeometry nodes owning the blocks, in block order
 */
template<unsigned int TDim, unsigned int TBlockSize, unsigned int TSkip = 0>
void RotateAux(TLocalMatrixType& rLocalMatrix,
               TLocalVectorType& rLocalVector,
               GeometryType& rGeometry) const
{
    const unsigned int LocalSize = rLocalVector.size();

    unsigned int Index = 0;
    int rotations_needed = 0;
    const unsigned int NumBlocks = LocalSize / TBlockSize;
    DenseVector<bool> NeedRotation( NumBlocks, false);

    // One rotation operator per node; only slip-node entries are filled in.
    std::vector< BoundedMatrix<double,TBlockSize,TBlockSize> > rRot(NumBlocks);
    for(unsigned int j = 0; j < NumBlocks; ++j)
    {
        if( this->IsSlip(rGeometry[j]) )
        {
            NeedRotation[j] = true;
            rotations_needed++;

            if (TDim == 2)
                LocalRotationOperator2D<TBlockSize,TSkip>(rRot[j],rGeometry[j]);
            else
                LocalRotationOperator3D<TBlockSize,TSkip>(rRot[j],rGeometry[j]);
        }

        Index += TBlockSize; // NOTE(review): Index is updated but never read.
    }

    if(rotations_needed > 0)
    {
        BoundedMatrix<double,TBlockSize,TBlockSize> mat_block, tmp;
        array_1d<double,TBlockSize> aux, aux1;

        for(unsigned int i=0; i<NumBlocks; i++)
        {
            if(NeedRotation[i] == true)
            {
                for(unsigned int j=0; j<NumBlocks; j++)
                {
                    if(NeedRotation[j] == true)
                    {
                        // Both nodes rotated: A_ij -> R_i * A_ij * R_j^T
                        ReadBlockMatrix<TBlockSize>(mat_block, rLocalMatrix, i*TBlockSize, j*TBlockSize);
                        noalias(tmp) = prod(mat_block,trans(rRot[j]));
                        noalias(mat_block) = prod(rRot[i],tmp);
                        WriteBlockMatrix<TBlockSize>(mat_block, rLocalMatrix, i*TBlockSize, j*TBlockSize);
                    }
                    else
                    {
                        // Row node only: A_ij -> R_i * A_ij
                        ReadBlockMatrix<TBlockSize>(mat_block, rLocalMatrix, i*TBlockSize, j*TBlockSize);
                        noalias(tmp) = prod(rRot[i],mat_block);
                        WriteBlockMatrix<TBlockSize>(tmp, rLocalMatrix, i*TBlockSize, j*TBlockSize);
                    }
                }

                // RHS block: b_i -> R_i * b_i
                for(unsigned int k=0; k<TBlockSize; k++)
                    aux[k] = rLocalVector[i*TBlockSize+k];

                noalias(aux1) = prod(rRot[i],aux);

                for(unsigned int k=0; k<TBlockSize; k++)
                    rLocalVector[i*TBlockSize+k] = aux1[k];
            }
            else
            {
                for(unsigned int j=0; j<NumBlocks; j++)
                {
                    if(NeedRotation[j] == true)
                    {
                        // Column node only: A_ij -> A_ij * R_j^T
                        ReadBlockMatrix<TBlockSize>(mat_block, rLocalMatrix, i*TBlockSize, j*TBlockSize);
                        noalias(tmp) = prod(mat_block,trans(rRot[j]));
                        WriteBlockMatrix<TBlockSize>(tmp, rLocalMatrix, i*TBlockSize, j*TBlockSize);
                    }
                }
            }
        }
    }
}

//to be used when there is only velocity (no additional pressure or other var block)
/// Same block rotation as RotateAux, for systems holding only velocity Dofs.
/** Uses the runtime mBlockSize as block stride and the TDim x TDim "pure"
 *  rotation operators (no identity padding, no skipped rows).
 */
template<unsigned int TDim>
void RotateAuxPure(TLocalMatrixType& rLocalMatrix,
                   TLocalVectorType& rLocalVector,
                   GeometryType& rGeometry) const
{
    const unsigned int LocalSize = rLocalVector.size();

    unsigned int Index = 0;
    int rotations_needed = 0;
    const unsigned int NumBlocks = LocalSize / mBlockSize;
    DenseVector<bool> NeedRotation( NumBlocks, false);

    std::vector< BoundedMatrix<double,TDim,TDim> > rRot(NumBlocks);
    for(unsigned int j = 0; j < NumBlocks; ++j)
    {
        if( this->IsSlip(rGeometry[j]) )
        {
            NeedRotation[j] = true;
            rotations_needed++;

            LocalRotationOperatorPure(rRot[j],rGeometry[j]);
        }

        Index += mBlockSize; // NOTE(review): Index is updated but never read.
    }

    if(rotations_needed > 0)
    {
        BoundedMatrix<double,TDim,TDim> mat_block, tmp;
        array_1d<double,TDim> aux, aux1;

        for(unsigned int i=0; i<NumBlocks; i++)
        {
            if(NeedRotation[i] == true)
            {
                for(unsigned int j=0; j<NumBlocks; j++)
                {
                    if(NeedRotation[j] == true)
                    {
                        // Both nodes rotated: A_ij -> R_i * A_ij * R_j^T
                        ReadBlockMatrix<TDim>(mat_block, rLocalMatrix, i*mBlockSize, j*mBlockSize);
                        noalias(tmp) = prod(mat_block,trans(rRot[j]));
                        noalias(mat_block) = prod(rRot[i],tmp);
                        WriteBlockMatrix<TDim>(mat_block, rLocalMatrix, i*mBlockSize, j*mBlockSize);
                    }
                    else
                    {
                        // Row node only: A_ij -> R_i * A_ij
                        ReadBlockMatrix<TDim>(mat_block, rLocalMatrix, i*mBlockSize, j*mBlockSize);
                        noalias(tmp) = prod(rRot[i],mat_block);
                        WriteBlockMatrix<TDim>(tmp, rLocalMatrix, i*mBlockSize, j*mBlockSize);
                    }
                }

                // RHS block: b_i -> R_i * b_i
                for(unsigned int k=0; k<TDim; k++)
                    aux[k] = rLocalVector[i*mBlockSize+k];

                noalias(aux1) = prod(rRot[i],aux);

                for(unsigned int k=0; k<TDim; k++)
                    rLocalVector[i*mBlockSize+k] = aux1[k];
            }
            else
            {
                for(unsigned int j=0; j<NumBlocks; j++)
                {
                    if(NeedRotation[j] == true)
                    {
                        // Column node only: A_ij -> A_ij * R_j^T
                        ReadBlockMatrix<TDim>(mat_block, rLocalMatrix, i*mBlockSize, j*mBlockSize);
                        noalias(tmp) = prod(mat_block,trans(rRot[j]));
                        WriteBlockMatrix<TDim>(tmp, rLocalMatrix, i*mBlockSize, j*mBlockSize);
                    }
                }
            }
        }
    }
}

/// Build the 2D rotation block for a node, embedded in a TBlockSize identity.
/** Rows TSkip and TSkip+1 hold (normal, tangent); the tangent is chosen as
 *  (-n_y, n_x) so the basis is right-handed. The NORMAL nodal value is
 *  normalized here, so it does not need to be a unit vector.
 */
template<unsigned int TBlockSize, unsigned int TSkip = 0>
void LocalRotationOperator2D(BoundedMatrix<double,TBlockSize,TBlockSize>& rRot,
                             GeometryType::PointType& rThisPoint) const
{
    noalias(rRot) = IdentityMatrix(TBlockSize);

    // Get the normal evaluated at the node
    const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL);
    double aux = rNormal[0]*rNormal[0] + rNormal[1]*rNormal[1];
    aux = sqrt(aux);

    rRot(TSkip  ,TSkip  ) = rNormal[0]/aux;
    rRot(TSkip  ,TSkip+1) = rNormal[1]/aux;
    rRot(TSkip+1,TSkip  ) = -rNormal[1]/aux;
    rRot(TSkip+1,TSkip+1) = rNormal[0]/aux;
}

/// Build the 3D rotation block for a node, embedded in a TBlockSize identity.
/** Row TSkip is the unit normal; the two tangents are obtained by projecting
 *  a cartesian axis onto the tangent plane and taking the cross product.
 */
template<unsigned int TBlockSize, unsigned int TSkip = 0>
void LocalRotationOperator3D(BoundedMatrix<double,TBlockSize,TBlockSize>& rRot,
                             GeometryType::PointType& rThisPoint) const
{
    noalias(rRot) = IdentityMatrix(TBlockSize);

    // Get the normal evaluated at the node
    const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL);
    double aux = rNormal[0]*rNormal[0] + rNormal[1]*rNormal[1] + rNormal[2]*rNormal[2];
    aux = sqrt(aux);

    rRot(TSkip,TSkip  ) = rNormal[0]/aux;
    rRot(TSkip,TSkip+1) = rNormal[1]/aux;
    rRot(TSkip,TSkip+2) = rNormal[2]/aux;

    // Define the new coordinate system, where the first vector is aligned with the normal
    // To choose the remaining two vectors, we project the first component of the cartesian base to the tangent plane
    array_1d<double,3> rT1;
    rT1(0) = 1.0;
    rT1(1) = 0.0;
    rT1(2) = 0.0;
    double dot = rRot(TSkip,TSkip);//this->Dot(rN,rT1);

    // It is possible that the normal is aligned with (1,0,0), resulting in norm(rT1) = 0
    // If this is the case, repeat the procedure using (0,1,0)
    if ( fabs(dot) > 0.99 )
    {
        rT1(0) = 0.0;
        rT1(1) = 1.0;
        rT1(2) = 0.0;

        dot = rRot(TSkip,TSkip+1); //this->Dot(rN,rT1);
    }

    // calculate projection and normalize
    rT1[0] -= dot*rRot(TSkip,TSkip);
    rT1[1] -= dot*rRot(TSkip,TSkip+1);
    rT1[2] -= dot*rRot(TSkip,TSkip+2);
    this->Normalize(rT1);

    rRot(TSkip+1,TSkip  ) = rT1[0];
    rRot(TSkip+1,TSkip+1) = rT1[1];
    rRot(TSkip+1,TSkip+2) = rT1[2];

    // The third base component is chosen as N x T1, which is normalized by construction
    rRot(TSkip+2,TSkip  ) = rRot(TSkip,TSkip+1)*rT1[2] - rRot(TSkip,TSkip+2)*rT1[1];
    rRot(TSkip+2,TSkip+1) = rRot(TSkip,TSkip+2)*rT1[0] - rRot(TSkip,TSkip  )*rT1[2];
    rRot(TSkip+2,TSkip+2) = rRot(TSkip,TSkip  )*rT1[1] - rRot(TSkip,TSkip+1)*rT1[0];
}

/// Build the plain 3x3 rotation matrix (normal, tangent, tangent) for a node.
/** Same construction as LocalRotationOperator3D but without the identity
 *  padding; used for velocity-only blocks.
 */
void LocalRotationOperatorPure(BoundedMatrix<double,3,3>& rRot,
                               GeometryType::PointType& rThisPoint) const
{
    // Get the normal evaluated at the node
    const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL);
    double aux = rNormal[0]*rNormal[0] + rNormal[1]*rNormal[1] + rNormal[2]*rNormal[2];
    aux = sqrt(aux);

    rRot(0,0) = rNormal[0]/aux;
    rRot(0,1) = rNormal[1]/aux;
    rRot(0,2) = rNormal[2]/aux;

    // Define the new coordinate system, where the first vector is aligned with the normal
    // To choose the remaining two vectors, we project the first component of the cartesian base to the tangent plane
    array_1d<double,3> rT1;
    rT1(0) = 1.0;
    rT1(1) = 0.0;
    rT1(2) = 0.0;
    double dot = rRot(0,0);//this->Dot(rN,rT1);

    // It is possible that the normal is aligned with (1,0,0), resulting in norm(rT1) = 0
    // If this is the case, repeat the procedure using (0,1,0)
    if ( fabs(dot) > 0.99 )
    {
        rT1(0) = 0.0;
        rT1(1) = 1.0;
        rT1(2) = 0.0;

        dot = rRot(0,1); //this->Dot(rN,rT1);
    }

    // calculate projection and normalize
    rT1[0] -= dot*rRot(0,0);
    rT1[1] -= dot*rRot(0,1);
    rT1[2] -= dot*rRot(0,2);
    this->Normalize(rT1);

    rRot(1,0) = rT1[0];
    rRot(1,1) = rT1[1];
    rRot(1,2) = rT1[2];

    // The third base component is chosen as N x T1, which is normalized by construction
    rRot(2,0) = rRot(0,1)*rT1[2] - rRot(0,2)*rT1[1];
    rRot(2,1) = rRot(0,2)*rT1[0] - rRot(0,0)*rT1[2];
    rRot(2,2) = rRot(0,0)*rT1[1] - rRot(0,1)*rT1[0];
}

/// Build the plain 2x2 rotation matrix (normal, tangent) for a node.
void LocalRotationOperatorPure(BoundedMatrix<double,2,2>& rRot,
                               GeometryType::PointType& rThisPoint) const
{
    // Get the normal evaluated at the node
    const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL);
    double aux = rNormal[0]*rNormal[0] + rNormal[1]*rNormal[1];
    aux = sqrt(aux);

    rRot(0,0) = rNormal[0]/aux;
    rRot(0,1) = rNormal[1]/aux;
    rRot(1,0) = -rNormal[1]/aux;
    rRot(1,1) = rNormal[0]/aux;
}

/// True if the node carries the slip flag (flag variable differs from mZero).
bool IsSlip(const Node<3>& rNode) const
{
    return rNode.FastGetSolutionStepValue(mrFlagVariable) != mZero;
}

/// Normalize a vector.
/**
 * @param rThis the vector
 * @return Original norm of the input vector
 */
template< class TVectorType >
double Normalize(TVectorType& rThis) const
{
    double Norm = 0;
    for(typename TVectorType::iterator iComponent = rThis.begin(); iComponent < rThis.end(); ++iComponent)
        Norm += (*iComponent)*(*iComponent);
    Norm = sqrt(Norm);
    // NOTE(review): divides by Norm without a zero check -- callers must not
    // pass a zero vector.
    for(typename TVectorType::iterator iComponent = rThis.begin(); iComponent < rThis.end(); ++iComponent)
        *iComponent /= Norm;
    return Norm;
}

///@}
///@name Protected  Access
///@{

unsigned int GetDomainSize() const
{
    return mDomainSize;
}

unsigned int GetBlockSize() const
{
    return mBlockSize;
}

///@}
///@name Protected Inquiry
///@{

///@}
///@name Protected LifeCycle
///@{

///@}

private:
///@name Static Member Variables
///@{

///@}
///@name Member Variables
///@{

/// Number of spatial dimensions
const unsigned int mDomainSize;

/// Number of matrix or vector rows associated to each node.
/** @note Velocity Dofs are assumed to be the first mDomainSize rows. */
const unsigned int mBlockSize;

// Nodal variable compared against mZero to decide whether a node is slip.
const Variable<TValueType>& mrFlagVariable;

// Reference "not slip" value for mrFlagVariable.
const TValueType mZero;

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

//        /// Compute a rotation matrix to transform values from the cartesian base to one oriented with the node's normal
//        /**
//         * The normal is read from solution step data NORMAL. Use NormalCalculationUtils::CalculateOnSimplex to
//         * obtain and store the nodal normal from the normals of the model's conditions.
// * @param rRot The rotation matrix (output) // * @param rThisPoint The point used to orient the new coordinate system. // * @see NormalCalculationUtils // */ // template<class TMatrixType> // void RotationOperator(TMatrixType& rRot, // GeometryType::PointType& rThisPoint) const // { // typedef boost::numeric::ublas::matrix_row<TMatrixType> ThisRowType; // // Get the normal evaluated at the node // const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL); // // if(mDomainSize == 3) // { // // Define the new coordinate system, where the first vector is aligned with the normal // ThisRowType rN(rRot,0); // for( unsigned int i = 0; i < 3; ++i) // rN[i] = rNormal[i]; // this->Normalize(rN); // // // To choose the remaining two vectors, we project the first component of the cartesian base to the tangent plane // ThisRowType rT1(rRot,1); // rT1(0) = 1.0; // rT1(1) = 0.0; // rT1(2) = 0.0; // // double dot = this->Dot(rN,rT1); // // // It is possible that the normal is aligned with (1,0,0), resulting in norm(rT1) = 0 // // If this is the case, repeat the procedure using (0,1,0) // if ( fabs(dot) > 0.99 ) // { // rT1(0) = 0.0; // rT1(1) = 1.0; // rT1(2) = 0.0; // // dot = this->Dot(rN,rT1); // } // // // calculate projection and normalize // rT1 -= dot * rN; // this->Normalize(rT1); // // // The third base component is choosen as N x T1, which is normalized by construction // ThisRowType rT2(rRot,2); // rT2(0) = rN(1)*rT1(2) - rN(2)*rT1(1); // rT2(1) = rN(2)*rT1(0) - rN(0)*rT1(2); // rT2(2) = rN(0)*rT1(1) - rN(1)*rT1(0); // } // else //if(mDomainSize == 2) // { // /* The basis for the new coordinate system is (normal,tangent) // Tangent vector is chosen (-normal_y, normal_x) so that the resulting base // is right-handed. 
// */ // ThisRowType rN(rRot,0); // ThisRowType rT(rRot,1); // // rN[0] = rNormal[0]; // rN[1] = rNormal[1]; // this->Normalize(rN); // rT[0] = -rN[1]; // rT[1] = rN[0]; // } // // } template< class TVectorType > double Dot(const TVectorType& rV1, const TVectorType& rV2) const { double dot = 0.0; for( typename TVectorType::const_iterator iV1 = rV1.begin(),iV2 = rV2.begin(); iV1 != rV1.end(); ++iV1, ++iV2) { dot += (*iV1) * (*iV2); } return dot; } /// Transform a local contribution from cartesian coordinates to rotated ones // void ApplyRotation(TLocalMatrixType& rMatrix, // const TLocalMatrixType& rRotation) const // { // // compute B = R*A*transpose(R) // const unsigned int LocalSize = rMatrix.size1(); // const unsigned int NumBlocks = LocalSize / mBlockSize; // //TLocalMatrixType Tmp = ZeroMatrix(LocalSize,LocalSize); // /* // for (unsigned int iBlock = 0; iBlock < NumBlocks; iBlock++) // { // for (unsigned int jBlock = 0; jBlock < NumBlocks; jBlock++) // { // for (unsigned int i = iBlock*mBlockSize; i < (iBlock+1)*mBlockSize; i++) // { // for(unsigned int j = jBlock*mBlockSize; j < (jBlock+1)*mBlockSize; j++) // { // double& tij = Tmp(i,j); // for(unsigned int k = iBlock*mBlockSize; k < (iBlock+1)*mBlockSize; k++) // { // for(unsigned int l = jBlock*mBlockSize; l < (jBlock+1)*mBlockSize; l++) // { // tij += rRotation(i,k)*rMatrix(k,l)*rRotation(j,l); // } // } // } // } // } // }*/ // // Matrix Tmp = prod(rMatrix,trans(rRotation)); // noalias(rMatrix) = prod(rRotation,Tmp); // // // noalias(rMatrix) = Tmp; // } //auxiliary functions template< unsigned int TBlockSize > void ReadBlockMatrix( BoundedMatrix<double,TBlockSize, TBlockSize>& block, const Matrix& origin, const unsigned int Ibegin, const unsigned int Jbegin) const { for(unsigned int i=0; i<TBlockSize; i++) { for(unsigned int j=0; j<TBlockSize; j++) { block(i,j) = origin(Ibegin+i, Jbegin+j); } } } template< unsigned int TBlockSize > void WriteBlockMatrix( const BoundedMatrix<double,TBlockSize, 
TBlockSize>& block, Matrix& destination, const unsigned int Ibegin, const unsigned int Jbegin) const { for(unsigned int i=0; i<TBlockSize; i++) { for(unsigned int j=0; j<TBlockSize; j++) { destination(Ibegin+i, Jbegin+j) = block(i,j); } } } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. CoordinateTransformationUtils& operator=(CoordinateTransformationUtils const& rOther) {} /// Copy constructor. CoordinateTransformationUtils(CoordinateTransformationUtils const& rOther) {} ///@} }; ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// input stream function template<class TLocalMatrixType, class TLocalVectorType, class TValueType> inline std::istream& operator >>(std::istream& rIStream, CoordinateTransformationUtils<TLocalMatrixType, TLocalVectorType, TValueType>& rThis) { return rIStream; } /// output stream function template<class TLocalMatrixType, class TLocalVectorType, class TValueType> inline std::ostream& operator <<(std::ostream& rOStream, const CoordinateTransformationUtils<TLocalMatrixType, TLocalVectorType, TValueType>& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} ///@} addtogroup block } #endif // KRATOS_COORDINATE_TRANSFORMATION_UTILITIES_H
calculate_embedded_signed_distance_to_3d_skin_process.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Pooyan Dadvand // Ruben Zorrilla // Daniel Baumgaertner // Johannes Wolf // #if !defined(KRATOS_CALCULATE_EMBEDDED_SIGNED_DISTANCE_TO_3D_SKIN_PROCESS_H_INCLUDED ) #define KRATOS_CALCULATE_EMBEDDED_SIGNED_DISTANCE_TO_3D_SKIN_PROCESS_H_INCLUDED // System includes #include <string> #include <iostream> #include <algorithm> // External includes #include "includes/kratos_flags.h" // Project includes #include "includes/define.h" #include "processes/process.h" #include "includes/kratos_flags.h" #include "includes/element.h" #include "includes/model_part.h" #include "geometries/geometry_data.h" #include "utilities/openmp_utils.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// Short class definition. class CalculateEmbeddedSignedDistanceTo3DSkinProcess : public Process { public: ///@name Type Definitions ///@{ ///@} ///@name Pointer Definitions /// Pointer definition of CalculateEmbeddedSignedDistanceTo3DSkinProcess KRATOS_CLASS_POINTER_DEFINITION(CalculateEmbeddedSignedDistanceTo3DSkinProcess); ///@} ///@name Life Cycle ///@{ CalculateEmbeddedSignedDistanceTo3DSkinProcess(ModelPart& rThisModelPartStruc, ModelPart& rThisModelPartFluid, bool DiscontinuousDistance = false) : mrSkinModelPart(rThisModelPartStruc), mrFluidModelPart(rThisModelPartFluid), mDiscontinuousDistance(DiscontinuousDistance) { } /// Destructor. 
~CalculateEmbeddedSignedDistanceTo3DSkinProcess() override { } ///@} ///@name Operators ///@{ void operator()() { Execute(); } ///@} ///@name Operations ///@{ void Execute() override { // Create a pointer to the discontinuous or continuos distance calculation process CalculateDiscontinuousDistanceToSkinProcess<3>::Pointer pdistance_calculator; if(mDiscontinuousDistance) { pdistance_calculator = CalculateDiscontinuousDistanceToSkinProcess<3>::Pointer( new CalculateDiscontinuousDistanceToSkinProcess<3>(mrFluidModelPart, mrSkinModelPart)); } else { pdistance_calculator = CalculateDiscontinuousDistanceToSkinProcess<3>::Pointer( new CalculateDistanceToSkinProcess<3>(mrFluidModelPart, mrSkinModelPart)); } // Call the distance calculator methods pdistance_calculator->Initialize(); pdistance_calculator->FindIntersections(); pdistance_calculator->CalculateDistances(pdistance_calculator->GetIntersections()); // TODO: Raycasting // Distance positive and negative peak values correction this->PeakValuesCorrection(); //TODO: Check the correct behaviour of this method once the raycasting has been implemented // Compute the embedded velocity this->CalculateEmbeddedVelocity(pdistance_calculator->GetIntersections()); // Call the distance calculation Clear() to delete the intersection data pdistance_calculator->Clear(); } void Clear() override { } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "CalculateEmbeddedSignedDistanceTo3DSkinProcess"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << "CalculateEmbeddedSignedDistanceTo3DSkinProcess"; } /// Print object's data. 
void PrintData(std::ostream& rOStream) const override { } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ void CalculateEmbeddedVelocity(std::vector<PointerVector<GeometricalObject>>& rIntersectedObjects) { const array_1d<double, 3> aux_zero = ZeroVector(3); // #pragma omp parallel for firstprivate(aux_zero) for (int k = 0; k < static_cast<int>(mrFluidModelPart.NumberOfElements()); ++k) { ModelPart::ElementsContainerType::iterator itFluidElement = mrFluidModelPart.ElementsBegin() + k; const PointerVector<GeometricalObject>& intersected_skin_elems = rIntersectedObjects[k]; // Initialize the element EMBEDDED_VELOCITY itFluidElement->SetValue(EMBEDDED_VELOCITY, aux_zero); // Accumulate the VELOCITY from all the structure conditions that intersect the element unsigned int intersection_counter = 0; for(auto itSkinElement : intersected_skin_elems) { array_1d<double,3> emb_vel = (itSkinElement.GetGeometry()[0]).GetSolutionStepValue(VELOCITY); emb_vel += (itSkinElement.GetGeometry()[1]).GetSolutionStepValue(VELOCITY); emb_vel += (itSkinElement.GetGeometry()[2]).GetSolutionStepValue(VELOCITY); itFluidElement->GetValue(EMBEDDED_VELOCITY) += emb_vel/3; intersection_counter++; } // Set the EMBEDDED_VELOCITY as the average of the accumulated values if (intersection_counter!=0) { itFluidElement->GetValue(EMBEDDED_VELOCITY) /= intersection_counter; } } } void PeakValuesCorrection() { // Obtain the maximum and minimum distance values to be set double max_distance, min_distance; this->SetMaximumAndMinimumDistanceValues(max_distance, min_distance); // Bound the distance value in the non splitted nodes block_for_each(mrFluidModelPart.Nodes(), [&](Node<3>& rNode){ if(rNode.IsNot(TO_SPLIT)) { double& rnode_distance = rNode.FastGetSolutionStepValue(DISTANCE); rnode_distance = (rnode_distance > 0.0) ? 
max_distance : min_distance; } }); } void SetMaximumAndMinimumDistanceValues(double& max_distance, double& min_distance) { // Flag the nodes belonging to the splitted elements for (int k = 0; k < static_cast<int>(mrFluidModelPart.NumberOfElements()); ++k) { ModelPart::ElementsContainerType::iterator itFluidElement = mrFluidModelPart.ElementsBegin() + k; if(itFluidElement->Is(TO_SPLIT)) { Geometry<Node<3>>& rGeom = itFluidElement->GetGeometry(); for (unsigned int i=0; i<rGeom.size(); ++i) { rGeom[i].Set(TO_SPLIT, true); } } } // Obtain the maximum and minimum nodal distance values of the nodes flagged as TO_SPLIT const unsigned int num_threads = ParallelUtilities::GetNumThreads(); OpenMPUtils::PartitionVector nodes_partition; OpenMPUtils::DivideInPartitions(mrFluidModelPart.NumberOfNodes(), num_threads, nodes_partition); std::vector<double> max_distance_vect(num_threads, 1.0); std::vector<double> min_distance_vect(num_threads, 1.0); #pragma omp parallel shared(max_distance_vect, min_distance_vect) { const int k = OpenMPUtils::ThisThread(); ModelPart::NodeIterator nodes_begin = mrFluidModelPart.NodesBegin() + nodes_partition[k]; ModelPart::NodeIterator nodes_end = mrFluidModelPart.NodesBegin() + nodes_partition[k+1]; double max_local_distance = 1.0; double min_local_distance = 1.0; for( ModelPart::NodeIterator itFluidNode = nodes_begin; itFluidNode != nodes_end; ++itFluidNode) { if(itFluidNode->Is(TO_SPLIT)) { const double node_distance = itFluidNode->FastGetSolutionStepValue(DISTANCE); max_local_distance = (node_distance>max_local_distance) ? node_distance : max_local_distance; min_local_distance = (node_distance<min_local_distance) ? 
node_distance : min_local_distance; } } max_distance_vect[k] = max_local_distance; min_distance_vect[k] = min_local_distance; } // Reduce to maximum and minimum the thread results // Note that MSVC14 does not support max reductions, which are part of OpenMP 3.1 max_distance = max_distance_vect[0]; min_distance = min_distance_vect[0]; for (unsigned int k = 1; k < num_threads; k++) { max_distance = (max_distance > max_distance_vect[k]) ? max_distance : max_distance_vect[k]; min_distance = (min_distance < min_distance_vect[k]) ? min_distance : min_distance_vect[k]; } } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ModelPart& mrSkinModelPart; ModelPart& mrFluidModelPart; bool mDiscontinuousDistance; ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. CalculateEmbeddedSignedDistanceTo3DSkinProcess& operator=(CalculateEmbeddedSignedDistanceTo3DSkinProcess const& rOther); /// Copy constructor. //CalculateEmbeddedSignedDistanceTo3DSkinProcess(CalculateEmbeddedSignedDistanceTo3DSkinProcess const& rOther); ///@} }; // Class CalculateEmbeddedSignedDistanceTo3DSkinProcess ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// input stream function inline std::istream& operator >> (std::istream& rIStream, CalculateEmbeddedSignedDistanceTo3DSkinProcess& rThis); /// output stream function inline std::ostream& operator << (std::ostream& rOStream, const CalculateEmbeddedSignedDistanceTo3DSkinProcess& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} } // namespace Kratos. #endif // KRATOS_CALCULATE_EMBEDDED_SIGNED_DISTANCE_TO_3D_SKIN_PROCESS_H_INCLUDED defined
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 8; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }