VolumetricAveragePooling.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/VolumetricAveragePooling.c"
#else

static inline void THNN_(VolumetricAveragePooling_shapeCheck)(
                         THNNState *state,
                         THTensor *input,
                         THTensor *gradOutput,
                         int kT, int kW, int kH,
                         int dT, int dW, int dH,
                         int padT, int padW, int padH,
                         bool ceil_mode)
{
  long nslices;
  long itime;
  long iheight;
  long iwidth;
  long otime;
  long oheight;
  long owidth;
  int ndim = input->nDimension;
  int dimN = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  if (input->nDimension == 5)
  {
    dimN++;
    dimt++;
    dimh++;
    dimw++;
  }

  THArgCheck(kT > 0 && kW > 0 && kH > 0, 5,
             "kernel size should be greater than zero, but got kT: %d kH: %d kW: %d",
             kT, kH, kW);
  THArgCheck(dT > 0 && dW > 0 && dH > 0, 8,
             "stride should be greater than zero, but got dT: %d dH: %d dW: %d",
             dT, dH, dW);
  THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input,
                "4D or 5D (batch mode) tensor expected for input, but got: %s");

  THArgCheck(input->size[dimw] >= kW && input->size[dimh] >= kH
             && input->size[dimt] >= kT, 2,
             "input image (T: %d H: %d W: %d) smaller than "
             "kernel size (kT: %d kH: %d kW: %d)",
             input->size[dimt], input->size[dimh], input->size[dimw],
             kT, kH, kW);

  // The second THArgCheck argument is the argument number; here it is the index of padH.
  THArgCheck(kT/2 >= padT && kW/2 >= padW && kH/2 >= padH, 11,
             "pad should not be greater than half of kernel size, but got "
             "padT = %d, padW = %d, padH = %d, kT = %d, kW = %d, kH = %d",
             padT, padW, padH, kT, kW, kH);

  /* sizes */
  nslices = input->size[dimN];
  itime   = input->size[dimt];
  iheight = input->size[dimh];
  iwidth  = input->size[dimw];

  if (ceil_mode)
  {
    otime   = (long)(ceil((float)(itime   - kT + 2*padT) / dT)) + 1;
    oheight = (long)(ceil((float)(iheight - kH + 2*padH) / dH)) + 1;
    owidth  = (long)(ceil((float)(iwidth  - kW + 2*padW) / dW)) + 1;
  }
  else
  {
    otime   = (long)(floor((float)(itime   - kT + 2*padT) / dT)) + 1;
    oheight = (long)(floor((float)(iheight - kH + 2*padH) / dH)) + 1;
    owidth  = (long)(floor((float)(iwidth  - kW + 2*padW) / dW)) + 1;
  }

  if (padT || padW || padH)
  {
    // ensure that the last pooling window starts inside the image;
    // needed to avoid problems in ceil mode
    if ((otime   - 1)*dT >= itime   + padT)
      --otime;
    if ((oheight - 1)*dH >= iheight + padH)
      --oheight;
    if ((owidth  - 1)*dW >= iwidth  + padW)
      --owidth;
  }

  if (otime < 1 || owidth < 1 || oheight < 1)
    THError("Given input size: (%dx%dx%dx%d). "
            "Calculated output size: (%dx%dx%dx%d). Output size is too small",
            nslices, itime, iheight, iwidth,
            nslices, otime, oheight, owidth);

  if (gradOutput != NULL)
  {
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimN, nslices);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, otime);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, oheight);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, owidth);
  }
}

static void THNN_(VolumetricAveragePooling_updateOutput_frame)(
          real *input_p,
          real *output_p,
          long nslices,
          long itime, long iwidth, long iheight,
          long otime, long owidth, long oheight,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int padT, int padW, int padH,
          bool count_include_pad)
{
  long k;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    long i, j, ti;

    /* local pointers */
    real *ip = input_p  + k * itime * iwidth * iheight;
    real *op = output_p + k * otime * owidth * oheight;
    for (i = 0; i < otime * oheight * owidth; ++i)
      *(op + i) = 0;

    /* loop over output */
    for (ti = 0; ti < otime; ti++)
    {
      for (i = 0; i < oheight; i++)
      {
        for (j = 0; j < owidth; j++)
        {
          /* compute pool range */
          long tstart = ti * dT - padT;
          long hstart = i  * dH - padH;
          long wstart = j  * dW - padW;
          long tend = fminf(tstart + kT, itime   + padT);
          long hend = fminf(hstart + kH, iheight + padH);
          long wend = fminf(wstart + kW, iwidth  + padW);
          long pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
          tstart = fmaxf(tstart, 0);
          hstart = fmaxf(hstart, 0);
          wstart = fmaxf(wstart, 0);
          tend = fminf(tend, itime);
          hend = fminf(hend, iheight);
          wend = fminf(wend, iwidth);

          long divide_factor;
          if (count_include_pad)
            divide_factor = pool_size;
          else
            divide_factor = (tend - tstart) * (hend - hstart) * (wend - wstart);

          /* compute local sum */
          real sum = 0.0;
          long x, y, z;
          for (z = tstart; z < tend; z++)
          {
            for (y = hstart; y < hend; y++)
            {
              for (x = wstart; x < wend; x++)
              {
                sum += *(ip + z * iwidth * iheight + y * iwidth + x);
              }
            }
          }

          /* set output to local average */
          *op++ += sum / divide_factor;
        }
      }
    }
  }
}

void THNN_(VolumetricAveragePooling_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int padT, int padW, int padH,
          bool ceil_mode,
          bool count_include_pad)
{
  long nslices;
  long itime;
  long iheight;
  long iwidth;
  long otime;
  long oheight;
  long owidth;
  real *input_data;
  real *output_data;

  THNN_(VolumetricAveragePooling_shapeCheck)(
        state, input, NULL, kT, kW, kH,
        dT, dW, dH, padT, padW, padH, ceil_mode);

  int dimN = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  if (input->nDimension == 5)
  {
    dimN++;
    dimt++;
    dimh++;
    dimw++;
  }

  /* sizes */
  nslices = input->size[dimN];
  itime   = input->size[dimt];
  iheight = input->size[dimh];
  iwidth  = input->size[dimw];
  if (ceil_mode)
  {
    otime   = (long)(ceil((float)(itime   - kT + 2*padT) / dT)) + 1;
    oheight = (long)(ceil((float)(iheight - kH + 2*padH) / dH)) + 1;
    owidth  = (long)(ceil((float)(iwidth  - kW + 2*padW) / dW)) + 1;
  }
  else
  {
    otime   = (long)(floor((float)(itime   - kT + 2*padT) / dT)) + 1;
    oheight = (long)(floor((float)(iheight - kH + 2*padH) / dH)) + 1;
    owidth  = (long)(floor((float)(iwidth  - kW + 2*padW) / dW)) + 1;
  }
  if (padT || padH || padW)
  {
    // ensure that the last pooling window starts inside the image;
    // needed to avoid problems in ceil mode
    if ((otime   - 1)*dT >= itime   + padT)
      --otime;
    if ((oheight - 1)*dH >= iheight + padH)
      --oheight;
    if ((owidth  - 1)*dW >= iwidth  + padW)
      --owidth;
  }

  /* get contiguous input */
  input = THTensor_(newContiguous)(input);

  if (input->nDimension == 4) /* non-batch mode */
  {
    /* resize output */
    THTensor_(resize4d)(output, nslices, otime, oheight, owidth);

    input_data  = THTensor_(data)(input);
    output_data = THTensor_(data)(output);

    THNN_(VolumetricAveragePooling_updateOutput_frame)(
      input_data, output_data, nslices,
      itime, iwidth, iheight,
      otime, owidth, oheight,
      kT, kW, kH,
      dT, dW, dH,
      padT, padW, padH,
      count_include_pad
    );
  }
  else /* batch mode */
  {
    long p;
    long nBatch = input->size[0];

    long istride = nslices * itime * iwidth * iheight;
    long ostride = nslices * otime * owidth * oheight;

    /* resize output */
    THTensor_(resize5d)(output, nBatch, nslices, otime, oheight, owidth);

    input_data  = THTensor_(data)(input);
    output_data = THTensor_(data)(output);

#pragma omp parallel for private(p)
    for (p = 0; p < nBatch; p++)
    {
      THNN_(VolumetricAveragePooling_updateOutput_frame)(
        input_data + p * istride, output_data + p * ostride, nslices,
        itime, iwidth, iheight,
        otime, owidth, oheight,
        kT, kW, kH,
        dT, dW, dH,
        padT, padW, padH,
        count_include_pad
      );
    }
  }

  /* cleanup */
  THTensor_(free)(input);
}

static void THNN_(VolumetricAveragePooling_updateGradInput_frame)(
          real *gradInput_p,
          real *gradOutput_p,
          long nslices,
          long itime, long iwidth, long iheight,
          long otime, long owidth, long oheight,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int padT, int padW, int padH,
          bool count_include_pad)
{
  long k;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    long i, j, ti;

    /* local pointers */
    real *ip = gradInput_p  + k * itime * iwidth * iheight;
    real *op = gradOutput_p + k * otime * owidth * oheight;
    for (i = 0; i < itime*iwidth*iheight; i++)
      *(ip + i) = 0;

    /* loop over output */
    for (ti = 0; ti < otime; ti++)
    {
      for (i = 0; i < oheight; i++)
      {
        for (j = 0; j < owidth; j++)
        {
          long tstart = ti * dT - padT;
          long hstart = i  * dH - padH;
          long wstart = j  * dW - padW;
          long tend = fminf(tstart + kT, itime   + padT);
          long hend = fminf(hstart + kH, iheight + padH);
          long wend = fminf(wstart + kW, iwidth  + padW);
          long pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
          tstart = fmaxf(tstart, 0);
          hstart = fmaxf(hstart, 0);
          wstart = fmaxf(wstart, 0);
          tend = fminf(tend, itime);
          hend = fminf(hend, iheight);
          wend = fminf(wend, iwidth);

          long divide_factor;
          if (count_include_pad)
            divide_factor = pool_size;
          else
            divide_factor = (tend - tstart) * (hend - hstart) * (wend - wstart);

          /* scatter gradients out to footprint */
          real val = *op++;

          long x, y, z;
          for (z = tstart; z < tend; z++)
          {
            for (y = hstart; y < hend; y++)
            {
              for (x = wstart; x < wend; x++)
              {
                *(ip + z * iheight * iwidth + y * iwidth + x) += val / divide_factor;
              }
            }
          }
        }
      }
    }
  }
}

void THNN_(VolumetricAveragePooling_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int padT, int padW, int padH,
          bool ceil_mode,
          bool count_include_pad)
{
  long nslices;
  long itime;
  long iheight;
  long iwidth;
  long otime;
  long oheight;
  long owidth;
  real *gradInput_data;
  real *gradOutput_data;

  int dimN = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  THNN_(VolumetricAveragePooling_shapeCheck)(
        state, input, gradOutput, kT, kW, kH,
        dT, dW, dH, padT, padW, padH, ceil_mode);

  /* get contiguous gradOutput */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  if (input->nDimension == 5)
  {
    dimN++;
    dimt++;
    dimh++;
    dimw++;
  }

  /* sizes */
  nslices = input->size[dimN];
  itime   = input->size[dimt];
  iheight = input->size[dimh];
  iwidth  = input->size[dimw];
  otime   = gradOutput->size[dimt];
  oheight = gradOutput->size[dimh];
  owidth  = gradOutput->size[dimw];

  /* get raw pointers */
  gradInput_data  = THTensor_(data)(gradInput);
  gradOutput_data = THTensor_(data)(gradOutput);

  /* backprop */
  if (input->nDimension == 4) /* non-batch mode */
  {
    THNN_(VolumetricAveragePooling_updateGradInput_frame)(
      gradInput_data, gradOutput_data, nslices,
      itime, iwidth, iheight,
      otime, owidth, oheight,
      kT, kW, kH,
      dT, dW, dH,
      padT, padW, padH,
      count_include_pad
    );
  }
  else /* batch mode */
  {
    long p;
    long nBatch = input->size[0];

    long istride = nslices * itime * iwidth * iheight;
    long ostride = nslices * otime * owidth * oheight;

#pragma omp parallel for private(p)
    for (p = 0; p < nBatch; p++)
    {
      THNN_(VolumetricAveragePooling_updateGradInput_frame)(
        gradInput_data + p * istride, gradOutput_data + p * ostride, nslices,
        itime, iwidth, iheight,
        otime, owidth, oheight,
        kT, kW, kH,
        dT, dW, dH,
        padT, padW, padH,
        count_include_pad
      );
    }
  }

  /* cleanup */
  THTensor_(free)(gradOutput);
}

#endif
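The following standalone C++ sketch (not part of the THNN sources; `pooledExtent` is a hypothetical name) isolates the output-extent formula used by the shape check above for a single spatial dimension, including the padding correction that keeps the last pooling window inside the image:

#include <cmath>
#include <cstdio>

long pooledExtent(long inSize, int k, int d, int pad, bool ceil_mode)
{
    float span = (float)(inSize - k + 2 * pad);
    long out = (long)(ceil_mode ? std::ceil(span / d) : std::floor(span / d)) + 1;
    // with padding, ensure the last pooling window starts inside the image
    // (needed to avoid problems in ceil mode)
    if (pad && (out - 1) * d >= inSize + pad)
        --out;
    return out;
}

int main()
{
    // inSize = 6, k = 3, d = 2, pad = 0: floor mode gives 2, ceil mode gives 3
    std::printf("%ld %ld\n", pooledExtent(6, 3, 2, 0, false), pooledExtent(6, 3, 2, 0, true));
    return 0;
}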
GB_unop_transpose.c
//------------------------------------------------------------------------------
// GB_unop_transpose: C=op(cast(A')), transpose, typecast, and apply op
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

{

    // Ax unused for some uses of this template
    #include "GB_unused.h"

    //--------------------------------------------------------------------------
    // get A and C
    //--------------------------------------------------------------------------

    const GB_ATYPE *GB_RESTRICT Ax = (GB_ATYPE *) A->x ;
    GB_CTYPE *GB_RESTRICT Cx = (GB_CTYPE *) C->x ;

    //--------------------------------------------------------------------------
    // C = op (cast (A'))
    //--------------------------------------------------------------------------

    if (Workspaces == NULL)
    {

        //----------------------------------------------------------------------
        // A and C are both full or both bitmap
        //----------------------------------------------------------------------

        // A is avlen-by-avdim; C is avdim-by-avlen
        int64_t avlen = A->vlen ;
        int64_t avdim = A->vdim ;
        int64_t anz = avlen * avdim ;
        const int8_t *GB_RESTRICT Ab = A->b ;
        int8_t *GB_RESTRICT Cb = C->b ;
        ASSERT ((Cb == NULL) == (Ab == NULL)) ;

        // TODO: it would be faster to iterate by tiles, not rows/columns, for
        // large matrices, but in most cases A and C will be tall-and-thin or
        // short-and-fat.

        int tid ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (tid = 0 ; tid < nthreads ; tid++)
        {
            int64_t pC_start, pC_end ;
            GB_PARTITION (pC_start, pC_end, anz, tid, nthreads) ;
            if (Ab == NULL)
            {
                // A and C are both full
                for (int64_t pC = pC_start ; pC < pC_end ; pC++)
                {
                    // get i and j of the entry C(i,j)
                    // i = (pC % avdim) ;
                    // j = (pC / avdim) ;
                    // find the position of the entry A(j,i)
                    // pA = j + i * avlen
                    // Cx [pC] = op (Ax [pA])
                    GB_CAST_OP (pC, ((pC / avdim) + (pC % avdim) * avlen)) ;
                }
            }
            else
            {
                // A and C are both bitmap
                for (int64_t pC = pC_start ; pC < pC_end ; pC++)
                {
                    // get i and j of the entry C(i,j)
                    // i = (pC % avdim) ;
                    // j = (pC / avdim) ;
                    // find the position of the entry A(j,i)
                    // pA = j + i * avlen
                    int64_t pA = ((pC / avdim) + (pC % avdim) * avlen) ;
                    int8_t cij_exists = Ab [pA] ;
                    Cb [pC] = cij_exists ;
                    if (cij_exists)
                    {
                        // Cx [pC] = op (Ax [pA])
                        GB_CAST_OP (pC, pA) ;
                    }
                }
            }
        }
    }
    else
    {

        //----------------------------------------------------------------------
        // A is sparse or hypersparse; C is sparse
        //----------------------------------------------------------------------

        const int64_t *GB_RESTRICT Ap = A->p ;
        const int64_t *GB_RESTRICT Ah = A->h ;
        const int64_t *GB_RESTRICT Ai = A->i ;
        const int64_t anvec = A->nvec ;
        int64_t *GB_RESTRICT Ci = C->i ;

        if (nthreads == 1)
        {

            //------------------------------------------------------------------
            // sequential method
            //------------------------------------------------------------------

            int64_t *GB_RESTRICT workspace = Workspaces [0] ;
            for (int64_t k = 0 ; k < anvec ; k++)
            {
                // iterate over the entries in A(:,j)
                int64_t j = GBH (Ah, k) ;
                int64_t pA_start = Ap [k] ;
                int64_t pA_end = Ap [k+1] ;
                for (int64_t pA = pA_start ; pA < pA_end ; pA++)
                {
                    // C(j,i) = A(i,j)
                    int64_t i = Ai [pA] ;
                    int64_t pC = workspace [i]++ ;
                    Ci [pC] = j ;
                    // Cx [pC] = op (Ax [pA])
                    GB_CAST_OP (pC, pA) ;
                }
            }
        }
        else if (nworkspaces == 1)
        {

            //------------------------------------------------------------------
            // atomic method
            //------------------------------------------------------------------

            int64_t *GB_RESTRICT workspace = Workspaces [0] ;
            int tid ;
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (tid = 0 ; tid < nthreads ; tid++)
            {
                for (int64_t k = A_slice [tid] ; k < A_slice [tid+1] ; k++)
                {
                    // iterate over the entries in A(:,j)
                    int64_t j = GBH (Ah, k) ;
                    int64_t pA_start = Ap [k] ;
                    int64_t pA_end = Ap [k+1] ;
                    for (int64_t pA = pA_start ; pA < pA_end ; pA++)
                    {
                        // C(j,i) = A(i,j)
                        int64_t i = Ai [pA] ;
                        // do this atomically: pC = workspace [i]++
                        int64_t pC ;
                        GB_ATOMIC_CAPTURE_INC64 (pC, workspace [i]) ;
                        Ci [pC] = j ;
                        // Cx [pC] = op (Ax [pA])
                        GB_CAST_OP (pC, pA) ;
                    }
                }
            }
        }
        else
        {

            //------------------------------------------------------------------
            // non-atomic method
            //------------------------------------------------------------------

            int tid ;
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (tid = 0 ; tid < nthreads ; tid++)
            {
                int64_t *GB_RESTRICT workspace = Workspaces [tid] ;
                for (int64_t k = A_slice [tid] ; k < A_slice [tid+1] ; k++)
                {
                    // iterate over the entries in A(:,j)
                    int64_t j = GBH (Ah, k) ;
                    int64_t pA_start = Ap [k] ;
                    int64_t pA_end = Ap [k+1] ;
                    for (int64_t pA = pA_start ; pA < pA_end ; pA++)
                    {
                        // C(j,i) = A(i,j)
                        int64_t i = Ai [pA] ;
                        int64_t pC = workspace [i]++ ;
                        Ci [pC] = j ;
                        // Cx [pC] = op (Ax [pA])
                        GB_CAST_OP (pC, pA) ;
                    }
                }
            }
        }
    }
}
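A standalone C++ sketch (assumed names; not the GraphBLAS API) of the dense index mapping used in the full/bitmap branch above: A is avlen-by-avdim in column-major order, C = A' is avdim-by-avlen, and for each output position pC we have i = pC % avdim, j = pC / avdim, with A(j,i) at pA = j + i*avlen.

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    const std::int64_t avlen = 2, avdim = 3;      // A is 2-by-3, so C is 3-by-2
    std::vector<double> Ax = {1, 2, 3, 4, 5, 6};  // A in column-major order
    std::vector<double> Cx(avlen * avdim);
    for (std::int64_t pC = 0; pC < avlen * avdim; pC++)
    {
        std::int64_t pA = (pC / avdim) + (pC % avdim) * avlen;
        Cx[pC] = Ax[pA];                          // stands in for GB_CAST_OP
    }
    for (double c : Cx)
        std::printf("%g ", c);                    // prints 1 3 5 2 4 6
    std::printf("\n");
    return 0;
}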
FindStartIndexWorklet.h
//============================================================================
//  Copyright (c) Kitware, Inc.
//  All rights reserved.
//  See LICENSE.txt for details.
//  This software is distributed WITHOUT ANY WARRANTY; without even
//  the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
//  PURPOSE.  See the above copyright notice for more information.
//
//  Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
//  Copyright 2014 UT-Battelle, LLC.
//  Copyright 2014 Los Alamos National Security.
//
//  Under the terms of Contract DE-NA0003525 with NTESS,
//  the U.S. Government retains certain rights in this software.
//
//  Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National
//  Laboratory (LANL), the U.S. Government retains certain rights in
//  this software.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy).  All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
//     list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
//     Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
//  This code is an extension of the algorithm presented in the paper:
//  Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
//  Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
//  Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
//  (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================

#ifndef vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_find_start_index_worklet_h
#define vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_find_start_index_worklet_h

#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>

namespace vtkm
{
namespace worklet
{
namespace contourtree_augmented
{
namespace mesh_dem_contourtree_mesh_inc
{

class FindStartIndexWorklet : public vtkm::worklet::WorkletMapField
{
public:
  typedef void ControlSignature(WholeArrayIn neighbours,       // (input) neighbours
                                WholeArrayIn arcs,             // (input) arcs
                                WholeArrayOut firstNeighbour); // (output) firstNeighbours
  typedef void ExecutionSignature(_1, InputIndex, _2, _3);
  typedef _1 InputDomain;

  // Default constructor
  VTKM_EXEC_CONT
  FindStartIndexWorklet() {}

  template <typename InFieldPortalType, typename OutFieldPortalType>
  VTKM_EXEC void operator()(const InFieldPortalType& neighboursPortal,
                            vtkm::Id sortedArcNo,
                            const InFieldPortalType& arcsPortal,
                            const OutFieldPortalType& firstNeighbourPortal) const
  {
    if (sortedArcNo > 0)
    {
      vtkm::Id prevFrom = (neighboursPortal.Get(sortedArcNo - 1) % 2 == 0)
        ? neighboursPortal.Get(sortedArcNo - 1) / 2
        : maskedIndex(arcsPortal.Get(neighboursPortal.Get(sortedArcNo - 1) / 2));
      vtkm::Id currFrom = (neighboursPortal.Get(sortedArcNo) % 2 == 0)
        ? neighboursPortal.Get(sortedArcNo) / 2
        : maskedIndex(arcsPortal.Get(neighboursPortal.Get(sortedArcNo) / 2));
      if (currFrom != prevFrom)
      {
        firstNeighbourPortal.Set(currFrom, sortedArcNo);
      }
    }
    else // sortedArcNo == 0
    {
      firstNeighbourPortal.Set(0, 0);
    }

    // In serial this worklet implements the following operation
    // for (indexVector::size_type sortedArcNo = 1; sortedArcNo < neighbours.size(); ++sortedArcNo)
    // {
    //   indexType prevFrom = (neighbours[sortedArcNo-1] % 2 == 0) ? neighbours[sortedArcNo-1]/2 : maskedIndex(arcs[neighbours[sortedArcNo-1]/2]);
    //   indexType currFrom = (neighbours[sortedArcNo  ] % 2 == 0) ? neighbours[sortedArcNo  ]/2 : maskedIndex(arcs[neighbours[sortedArcNo  ]/2]);
    //   if (currFrom != prevFrom)
    //   {
    //     assert(currFrom < firstNeighbour.size());
    //     firstNeighbour[currFrom] = sortedArcNo;
    //   }
    // }
  }
}; // FindStartIndexWorklet

} // namespace mesh_dem_contourtree_mesh_inc
} // namespace contourtree_augmented
} // namespace worklet
} // namespace vtkm

#endif
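A runnable C++ sketch of the serial operation shown in the comment above, using std::vector in place of the VTK-m array portals. Even `neighbours` entries encode a from-vertex directly (value/2); the odd branch and maskedIndex() are stubbed out here, so this demo only uses even-encoded entries:

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

using indexType = std::int64_t;

void findStartIndices(const std::vector<indexType>& neighbours,
                      std::vector<indexType>& firstNeighbour)
{
    if (neighbours.empty())
        return;
    firstNeighbour[0] = 0; // the sortedArcNo == 0 case of the worklet
    for (std::size_t n = 1; n < neighbours.size(); ++n)
    {
        indexType prevFrom = neighbours[n - 1] / 2; // even-entry branch only
        indexType currFrom = neighbours[n] / 2;
        if (currFrom != prevFrom)
        {
            assert(currFrom < (indexType)firstNeighbour.size());
            firstNeighbour[currFrom] = (indexType)n;
        }
    }
}

int main()
{
    std::vector<indexType> neighbours = {0, 2, 2, 4}; // from-vertices 0, 1, 1, 2
    std::vector<indexType> firstNeighbour(3, 0);
    findStartIndices(neighbours, firstNeighbour);
    for (indexType f : firstNeighbour)
        std::printf("%lld ", (long long)f); // prints 0 1 3
    std::printf("\n");
    return 0;
}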
NDArray.h
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #ifndef NDARRAY_H #define NDARRAY_H #include <initializer_list> #include <functional> #include <shape.h> #include "NativeOpExcutioner.h" #include <memory/Workspace.h> #include <indexing/NDIndex.h> #include <indexing/IndicesList.h> #include <graph/Intervals.h> #include <array/DataType.h> #include <stdint.h> #include <array/ArrayOptions.h> #include <array/ArrayType.h> #include <array/ResultSet.h> namespace nd4j { template<typename T> class ND4J_EXPORT NDArray; ND4J_EXPORT NDArray<float> operator-(const float, const NDArray<float>&); ND4J_EXPORT NDArray<float16> operator-(const float16, const NDArray<float16>&); ND4J_EXPORT NDArray<double> operator-(const double, const NDArray<double>&); ND4J_EXPORT NDArray<float> operator+(const float, const NDArray<float>&); ND4J_EXPORT NDArray<float16> operator+(const float16, const NDArray<float16>&); ND4J_EXPORT NDArray<double> operator+(const double, const NDArray<double>&); template<typename T> NDArray<T> mmul(const NDArray<T>&, const NDArray<T>&); template<typename T> class NDArray { protected: /** * if true then array doesn't own buffer and simply points to another's buffer */ bool _isView = false; /** * pointer on flattened data array in memory */ T *_buffer = nullptr; /** * contains shape info: matrix rank, numbers of elements per each dimension, dimensions strides, element-wise-stride, c-like or fortan-like order */ Nd4jLong *_shapeInfo = nullptr; /** * pointer on externally allocated memory where _buffer and _shapeInfo are stored */ nd4j::memory::Workspace* _workspace = nullptr; /** * alternative buffers for special computational devices (like GPUs for CUDA) */ T* _bufferD = nullptr; Nd4jLong *_shapeInfoD = nullptr; /** * indicates whether user allocates memory for _buffer/_shapeInfo by himself, in opposite case the memory must be allocated from outside */ bool _isShapeAlloc = false; bool _isBuffAlloc = false; /** * Field to store cached length */ Nd4jLong _length = -1L; /** * type of array elements */ DataType _dataType = DataType_FLOAT; std::string toStringValue(T value); public: static NDArray<T>* createEmpty(nd4j::memory::Workspace* workspace = nullptr); static NDArray<T>* valueOf(const std::initializer_list<Nd4jLong>& shape, const T value, const char order = 'c'); static NDArray<T>* valueOf(const std::vector<Nd4jLong>& shape, const T value, const char order = 'c'); static NDArray<T>* linspace(const T from, const T to, const Nd4jLong numElements); static NDArray<T>* scalar(const T value); /** * default constructor, do not allocate memory, memory for array is passed from outside */ NDArray(T *buffer = nullptr, Nd4jLong* shapeInfo = nullptr, nd4j::memory::Workspace* workspace = nullptr); NDArray(std::initializer_list<Nd4jLong> shape, nd4j::memory::Workspace* workspace = nullptr); /** * Constructor for scalar 
NDArray */ NDArray(T scalar); /** * copy constructor */ NDArray(const NDArray<T>& other); /** * move constructor */ NDArray(NDArray<T>&& other) noexcept; #ifndef __JAVACPP_HACK__ // this method only available out of javacpp /** * This constructor creates vector of T * * @param values */ NDArray(std::initializer_list<T> values, nd4j::memory::Workspace* workspace = nullptr); NDArray(std::vector<T> &values, nd4j::memory::Workspace* workspace = nullptr); #endif /** * constructor, create empty array stored at given workspace */ NDArray(nd4j::memory::Workspace* workspace); /** * this constructor creates new NDArray with shape matching "other" array, do not copy "other" elements into new array */ NDArray(const NDArray<T> *other, const bool copyStrides = false, nd4j::memory::Workspace* workspace = nullptr); /** * constructor creates new NDArray using shape information from "shapeInfo", set all elements in new array to be zeros, if copyStrides is true then use stride values from "shapeInfo", else calculate strides independently */ NDArray(const Nd4jLong* shapeInfo, const bool copyStrides = false, nd4j::memory::Workspace* workspace = nullptr); /** * this constructor creates new array using shape information contained in vector argument */ NDArray(const char order, const std::vector<Nd4jLong> &shape, nd4j::memory::Workspace* workspace = nullptr); /** * This constructor creates new array with elements copied from data and using shape information stored in shape * * PLEASE NOTE: data will be copied AS IS, without respect to specified order. You must ensure order match here. */ NDArray(const char order, const std::vector<Nd4jLong> &shape, const std::vector<T> &data, nd4j::memory::Workspace* workspace = nullptr); /** * this constructor creates new array using given buffer (without memory allocating) and shape information stored in shape */ NDArray(T *buffer, const char order, const std::vector<Nd4jLong> &shape , nd4j::memory::Workspace* workspace = nullptr); /** * copy assignment operator */ NDArray<T>& operator=(const NDArray<T>& other); /** * move assignment operator */ NDArray<T>& operator=(NDArray<T>&& other) noexcept; /** * assignment operator, assigns the same scalar to all array elements */ NDArray<T>& operator=(const T scalar); /** * operators for memory allocation and deletion */ void* operator new(size_t i); void operator delete(void* p); /** * method replaces existing buffer/shapeinfo, AND releases original pointers (if releaseExisting TRUE) */ void replacePointers(T *buffer, Nd4jLong *shapeInfo, const bool releaseExisting = true); /** * create a new array by replicating current array by repeats times along given dimension * dimension - dimension along which to repeat elements * repeats - number of repetitions */ NDArray<T>* repeat(int dimension, const std::vector<Nd4jLong>& repeats) const; /** * This method returns quantized copy of given array * * @param array * @return */ static NDArray<T> quantize(NDArray<T> &array); /** * This method returns quantized copy of given array * * @param array * @return */ static NDArray<T>* quantize(NDArray<T> *array); /** * fill target array by repeating current array * dimension - dimension along which to repeat elements */ void repeat(int dimension, NDArray<T>& target) const; /** * return _dataType; */ DataType dataType() const; /** * creates array which is view of this array */ NDArray<T>* getView(); /** * creates array which points on certain sub-range of this array, sub-range is defined by given indices */ NDArray<T> *subarray(IndicesList& indices) 
const; NDArray<T> *subarray(IndicesList& indices, std::vector<Nd4jLong>& strides) const; NDArray<T>* subarray(const std::initializer_list<NDIndex*>& idx) const; NDArray<T>* subarray(const Intervals& idx) const; /** * cast array elements to given dtype */ NDArray<T>* cast(DataType dtype); void cast(NDArray<T>* target, DataType dtype); /** * returns _workspace */ nd4j::memory::Workspace* getWorkspace() const { return _workspace; } /** * returns _buffer */ T* getBuffer() const; T* buffer(); /** * returns _shapeInfo */ Nd4jLong* shapeInfo(); Nd4jLong* getShapeInfo() const; /** * if _bufferD==nullptr return _buffer, else return _bufferD */ T* specialBuffer(); /** * Returns True if it's legally empty NDArray, or false otherwise * @return */ FORCEINLINE bool isEmpty() const; /** * if _shapeInfoD==nullptr return _shapeInfo, else return _shapeInfoD */ Nd4jLong* specialShapeInfo(); /** * set values for _bufferD and _shapeInfoD */ void setSpecialBuffers(T * buffer, Nd4jLong *shape); /** * permutes (in-place) the dimensions in array according to "dimensions" array */ bool permutei(const std::initializer_list<int>& dimensions); bool permutei(const std::vector<int>& dimensions); bool permutei(const int* dimensions, const int rank); bool permutei(const std::initializer_list<Nd4jLong>& dimensions); bool permutei(const std::vector<Nd4jLong>& dimensions); bool permutei(const Nd4jLong* dimensions, const int rank); bool isFinite(); bool hasNaNs(); bool hasInfs(); /** * permutes the dimensions in array according to "dimensions" array, new array points on _buffer of this array */ NDArray<T>* permute(const std::initializer_list<int>& dimensions) const; NDArray<T>* permute(const std::vector<int>& dimensions) const; NDArray<T>* permute(const int* dimensions, const int rank) const; void permute(const int* dimensions, const int rank, NDArray<T>& target) const; void permute(const std::vector<int>& dimensions, NDArray<T>& target) const; NDArray<T>* permute(const std::initializer_list<Nd4jLong>& dimensions) const; NDArray<T>* permute(const std::vector<Nd4jLong>& dimensions) const; NDArray<T>* permute(const Nd4jLong* dimensions, const int rank) const; void permute(const Nd4jLong* dimensions, const int rank, NDArray<T>& target) const; void permute(const std::vector<Nd4jLong>& dimensions, NDArray<T>& target) const; /** * This method streamlines given view or permuted array, and reallocates buffer */ void streamline(char order = 'a'); /** * check whether array is contiguous in memory */ bool isContiguous(); /** * prints information about array shape * msg - message to print out */ void printShapeInfo(const char * msg = nullptr) const; /** * prints buffer elements * msg - message to print out * limit - number of array elements to print out */ void printBuffer(const char* msg = nullptr, Nd4jLong limit = -1); /** * prints buffer elements, takes into account offset between elements (element-wise-stride) * msg - message to print out * limit - number of array elements to print out */ void printIndexedBuffer(const char* msg = nullptr, Nd4jLong limit = -1) const; std::string asIndexedString(Nd4jLong limit = -1); std::string asString(Nd4jLong limit = -1); /** * this method assigns values of given array to this one */ void assign(const NDArray<T>* other); /** * this method assigns values of given array to this one */ void assign(const NDArray<T>& other); /** * this method assigns given value to all elements in array */ void assign(const T value); /** * returns new copy of this array, optionally in different order */ NDArray<T> 
*dup(const char newOrder = 'a'); /** * returns sum of all elements of array */ T sumNumber() const; /** * returns mean number of array */ T meanNumber() const; /** * This method explicitly enforces new shape for this NDArray, old shape/stride information is lost */ void enforce(const std::initializer_list<Nd4jLong> &dimensions, char order = 'a'); void enforce(std::vector<Nd4jLong> &dimensions, char order = 'a'); /** * calculates sum along dimension(s) in this array and save it to created reduced array * dimensions - array of dimensions to calculate sum over * keepDims - if true then put unities in place of reduced dimensions */ NDArray<T> *sum(const std::vector<int> &dimensions) const; /** * method reduces array by excluding its shapes along dimensions present in given dimensions vector, result is stored in new array to be returned * dimensions - array of dimensions to reduce along * keepDims - if true then put unities in place of reduced dimensions */ template<typename OpName> NDArray<T>* reduceAlongDimension(const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; template<typename OpName> NDArray<T>* reduceAlongDimension(const std::initializer_list<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; template<typename OpName> NDArray<T> reduceAlongDims(const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; /** * method reduces array by excluding its shapes along dimensions present in given dimensions vector * target - where to save result of reducing * dimensions - array of dimensions to reduce along * keepDims - if true then put unities in place of reduced dimensions * extras - extra parameters */ template<typename OpName> void reduceAlongDimension(NDArray<T>* target, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false, T *extras = nullptr) const; /** * return variance of array elements set * biasCorrected - if true bias correction will be applied */ template<typename OpName> T varianceNumber(bool biasCorrected = true); /** * apply scalar operation to array * extraParams - extra parameters for operation */ template<typename OpName> T reduceNumber(T *extraParams = nullptr) const; /** * returns element index which corresponds to some condition imposed by operation * extraParams - extra parameters for operation */ template<typename OpName> Nd4jLong indexReduceNumber(T *extraParams = nullptr); /** * returns index of max element in a given array (optionally: along given dimension(s)) * dimensions - optional vector with dimensions */ Nd4jLong argMax(std::initializer_list<int> dimensions = {}); /** * apply OpName transformation directly to array * extraParams - extra parameters for operation */ template<typename OpName> void applyTransform(T *extraParams = nullptr); /** * apply OpName transformation to array and store result in target * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyTransform(NDArray<T> *target, T *extraParams = nullptr); /** * apply OpName transformation to this array and store result in new array being returned * extraParams - extra parameters for operation */ template<typename OpName> NDArray<T> transform(T *extraParams = nullptr) const; /** * apply pairwise OpName transformation based on "this" and "other" arras elements, store result in this array * other - second array necessary for pairwise operation * 
extraParams - extra parameters for operation */ template<typename OpName> void applyPairwiseTransform(NDArray<T> *other, T *extraParams); /** * apply pairwise OpName transformation based on "this" and "other" arras elements, store result in target array * other - second array necessary for pairwise operation * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyPairwiseTransform(NDArray<T> *other, NDArray<T> *target, T *extraParams); /** * apply operation which requires broadcasting, broadcast a smaller array (tad) along bigger one (this) * tad - array to broadcast * dimensions - dimensions array to broadcast along * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyBroadcast(std::initializer_list<int> dimensions, const NDArray<T>* tad, NDArray<T>* target = nullptr, T* extraArgs = nullptr); template <typename OpName> void applyBroadcast(std::vector<int> &dimensions, const NDArray<T> *tad, NDArray<T> *target = nullptr, T *extraArgs = nullptr); /** * apply operation which requires broadcasting, broadcast one tensor along another, also this method checks the possibility of broadcasting * other - input array * extraParams - extra parameters for operation */ template <typename OpName> NDArray<T> applyTrueBroadcast(const NDArray<T>& other, T *extraArgs = nullptr) const; template <typename OpName> NDArray<T>* applyTrueBroadcast(const NDArray<T>* other, T *extraArgs = nullptr) const; /** * apply operation which requires broadcasting, broadcast one tensor along another, also this method checks the possibility of broadcasting * other - input array * target - where to store result * checkTargetShape - if true check whether target shape is suitable for broadcasting * extraParams - extra parameters for operation */ template <typename OpName> void applyTrueBroadcast(const NDArray<T>* other, NDArray<T>* target, const bool checkTargetShape = true, T *extraArgs = nullptr) const; /** * apply a scalar operation to an array * scalar - input scalar * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyScalar(T scalar, NDArray<T>* target = nullptr, T *extraParams = nullptr) const; /** * apply a scalar operation to an array * scalar - input array which is simple scalar * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyScalar(NDArray<T>& scalar, NDArray<T>* target = nullptr, T *extraParams = nullptr) const; #ifndef __JAVACPP_HACK__ /** * apply operation "func" to an array * func - what operation to apply * target - where to store result */ void applyLambda(const std::function<T(T)>& func, NDArray<T>* target = nullptr); void applyIndexedLambda(const std::function<T(Nd4jLong, T)>& func, NDArray<T>* target = nullptr); /** * apply pairwise operation "func" to an array * other - input array * func - what pairwise operation to apply * target - where to store result */ void applyPairwiseLambda(const NDArray<T>* other, const std::function<T(T, T)>& func, NDArray<T>* target = nullptr); void applyIndexedPairwiseLambda(NDArray<T>* other, const std::function<T(Nd4jLong, T, T)>& func, NDArray<T>* target = nullptr); void applyTriplewiseLambda(NDArray<T>* second, NDArray<T> *third, const std::function<T(T, T, T)>& func, NDArray<T>* target = nullptr); #endif /** * apply OpName random operation to array * buffer - pointer on RandomBuffer * y - optional input array * z 
- optional input array * extraArgs - extra parameters for operation */ template<typename OpName> void applyRandom(nd4j::random::RandomBuffer *buffer, NDArray<T>* y = nullptr, NDArray<T>* z = nullptr, T* extraArgs = nullptr); /** * apply transpose operation to the copy of this array, that is this array remains unaffected */ NDArray<T>* transpose() const; NDArray<T> transp() const; /** * perform transpose operation and store result in target, this array remains unaffected * target - where to store result */ void transpose(NDArray<T>& target) const; /** * apply in-place transpose operation to this array, so this array becomes transposed */ void transposei(); /** * return array pointing on certain range of this array * index - the number of array to be returned among set of possible arrays * dimensions - array of dimensions to point on */ NDArray<T>* tensorAlongDimension(Nd4jLong index, const std::initializer_list<int>& dimensions) const; NDArray<T>* tensorAlongDimension(Nd4jLong index, const std::vector<int>& dimensions) const; /** * returns the number of arrays pointing on specified dimension(s) * dimensions - array of dimensions to point on */ Nd4jLong tensorsAlongDimension(const std::initializer_list<int> dimensions) const ; Nd4jLong tensorsAlongDimension(const std::vector<int>& dimensions) const ; /** * returns true if elements of two arrays are equal to within given epsilon value * other - input array to compare * eps - epsilon, this value defines the precision of elements comparison */ bool equalsTo(const NDArray<T> *other, T eps = (T) 1e-5f) const; bool equalsTo(NDArray<T> &other, T eps = (T) 1e-5f) const; /** * add given row vector to all rows of this array * row - row vector to add */ void addiRowVector(const NDArray<T> *row); /** * add given row vector to all rows of this array, store result in target * row - row vector to add * target - where to store result */ void addRowVector(const NDArray<T> *row, NDArray<T>* target) const; /** * subtract given row vector from all rows of this array, store result in target * row - row vector to subtract * target - where to store result */ void subRowVector(const NDArray<T> *row, NDArray<T>* target) const; /** * multiply all rows of this array on given row vector, store result in target * row - row vector to multiply on * target - where to store result */ void mulRowVector(const NDArray<T> *row, NDArray<T>* target) const; /** * divide all rows of this array on given row vector, store result in target * row - row vector to divide on * target - where to store result */ void divRowVector(const NDArray<T> *row, NDArray<T>* target) const; /** * add given column vector to all columns of this array, store result in target * column - column vector to add * target - where to store result */ void addColumnVector(const NDArray<T> *column, NDArray<T>* target) const; /** * add given column vector to all columns of this array, this array becomes affected (in-place operation) * column - column vector to add */ void addiColumnVector(const NDArray<T> *column); /** * multiply all columns of this array on given column vector, this array becomes affected (in-place operation) * column - column vector to multiply on */ void muliColumnVector(const NDArray<T> *column); /** * returns number of bytes used by _buffer & _shapeInfo */ Nd4jLong memoryFootprint(); /** * these methods suited for FlatBuffers use */ std::vector<T> getBufferAsVector(); std::vector<Nd4jLong> getShapeAsVector(); std::vector<Nd4jLong> getShapeInfoAsVector(); std::vector<int64_t> 
getShapeInfoAsFlatVector(); /** * set new order and shape in case of suitable array length (in-place operation) * order - order to set * shape - shape to set * * if there was permute applied before or there are weird strides, then new buffer is allocated for array */ bool reshapei(const char order, const std::initializer_list<Nd4jLong>& shape); bool reshapei(const char order, const std::vector<Nd4jLong>& shape); bool reshapei(const std::initializer_list<Nd4jLong>& shape); bool reshapei(const std::vector<Nd4jLong>& shape); /** * creates new array with corresponding order and shape, new array will point on _buffer of this array * order - order to set * shape - shape to set * * if permute have been applied before or there are weird strides, then new buffer is allocated for new array */ NDArray<T>* reshape(const char order, const std::vector<Nd4jLong>& shape) const; /** * calculate strides and set given order * order - order to set */ void updateStrides(const char order); /** * change an array by repeating it the number of times given by reps (in-place operation) * repeats - contains numbers of repetitions */ void tilei(const std::vector<Nd4jLong>& repeats); /** * returns new array which is created by repeating of this array the number of times given by reps * repeats - contains numbers of repetitions */ NDArray<T> tile(const std::vector<Nd4jLong>& repeats) const; /** * change an array by repeating it the number of times given by reps (in-place operation) * repeats - contains numbers of repetitions * target - where to store result */ void tile(const std::vector<Nd4jLong>& repeats, NDArray<T>& target) const; /** * change an array by repeating it the number of times to acquire the new shape which is the same as target shape * target - where to store result */ void tile(NDArray<T>& target) const; /** * returns an array which is result of broadcasting of this and other arrays * other - input array */ NDArray<T>* broadcast(const NDArray<T>& other); /** * check whether array's rows (arg=0) or columns (arg=1) create orthogonal basis * arg - 0 -> row, 1 -> column */ bool hasOrthonormalBasis(const int arg); /** * check whether array is identity matrix */ bool isIdentityMatrix(); /** * check whether array is unitary matrix */ bool isUnitary(); /** * reduces dimensions in this array relying on index operation OpName * dimensions - vector of dimensions to reduce along * extraArgs - extra parameters for operation */ template<typename OpName> NDArray<T>* applyIndexReduce(const std::vector<int>& dimensions, const T *extraParams = nullptr) const; /** * reduces dimensions in array relying on index operation OpName * target - where to store result * dimensions - vector of dimensions to reduce along * extraArgs - extra parameters for operation */ template<typename OpName> void applyIndexReduce(const NDArray<T>* target, const std::vector<int>& dimensions, const T *extraParams = nullptr) const; /** * apply reduce3 operation OpName to this and other array, return result in new output array * other - input array * extraArgs - extra parameters for operation */ template<typename OpName> NDArray<T>* applyReduce3(const NDArray<T>* other, const T* extraParams = nullptr) const; /** * apply reduce3 operation OpName to this and other array, return result in new output array * other - input array * dimensions - vector of dimensions to reduce along (tads not axis) * extraArgs - extra parameters for operation */ template<typename OpName> NDArray<T>* applyAllReduce3(const NDArray<T>* other, const std::vector<int>& dimensions, 
const T* extraParams = nullptr) const; /** * apply reduce3 (exec) operation OpName to this and other array, return result in new output array * other - input array * dimensions - vector of dimensions to reduce along (same as reduceAlongDimension) * extraArgs - extra parameters for operation */ template<typename OpName> NDArray<T>* applyReduce3(const NDArray<T>* other, const std::vector<int>& dimensions, const T* extraParams = nullptr) const; /** * returns variance along given dimensions * biasCorrected - if true bias correction will be applied * dimensions - vector of dimensions to calculate variance along */ template<typename OpName> NDArray<T>* varianceAlongDimension(const bool biasCorrected, const std::vector<int>& dimensions) const; template<typename OpName> NDArray<T>* varianceAlongDimension(const bool biasCorrected, const std::initializer_list<int>& dimensions) const; template<typename OpName> void varianceAlongDimension(const NDArray<T>* target, const bool biasCorrected, const std::vector<int>& dimensions); template<typename OpName> void varianceAlongDimension(const NDArray<T>* target, const bool biasCorrected, const std::initializer_list<int>& dimensions); /** * operator returns subarray with buffer pointing at this->_buffer with offset defined by given intervals * idx - intervals of indexes which define the subarrays to point on, idx has form {dim0Start,dim0End, dim1Start,dim1End, ....} and length (2 * this->rankOf()) * when (dimStart == dimEnd) then whole range will be used for current dimension * keepUnitiesInShape - if false then eliminate unities from resulting array shape, for example {1,a,1,b} -> {a,b} */ NDArray<T> operator()(const std::vector<Nd4jLong>& idx, bool keepUnitiesInShape = false) const; /** * evaluates subarray with buffer pointing at this->_buffer and offset defined by given sequential index subArrIdx and dimensions in dimsToExclude * subArrIdx - index of current sub-array * dimsToExclude - MUST BE SORTED, dimensions to evaluate sub-array along, i.e. when shape is [2,3,4,5] and dimsToExclude={0,2}, then there will be 8 sub-arrays with shape [3,5], and subArrIdx must be in range [0,7] * if dimsToExclude is empty then idxRanges containing all zeros (means whole array) will be returned. 
*/ NDArray<T> operator()(const Nd4jLong subArrIdx, const std::vector<int>& dimsToExclude, bool keepUnitiesInShape = false) const; /** * addition operator: array + other * other - input array to add */ NDArray<T> operator+(const NDArray<T>& other) const; /** * addition operator: array + scalar * scalar - input scalar to add */ NDArray<T> operator+(const T scalar) const; /** * friend functions which implement addition operator: scalar + array * scalar - input scalar to add */ friend NDArray<float> nd4j::operator+(const float scalar, const NDArray<float>& arr); friend NDArray<float16> nd4j::operator+(const float16 scalar, const NDArray<float16>& arr); friend NDArray<double> nd4j::operator+(const double scalar, const NDArray<double>& arr); /** * addition unary operator array += other * other - input array to add */ void operator+=(const NDArray<T>& other); /** * subtraction unary operator array -= other * other - input array to add */ void operator-=(const NDArray<T>& other); void operator+=(const T other); void operator-=(const T other); /** * subtraction operator: array - other * other - input array to subtract */ NDArray<T> operator-(const NDArray<T>& other) const; /** * subtraction operator: array - scalar * scalar - input scalar to subtract */ NDArray<T> operator-(const T& scalar) const; /** * negative operator, it changes sign of all array elements on opposite */ NDArray<T> operator-() const; /** * friend functions which implement subtraction operator: scalar - array * scalar - input scalar to subtract */ friend NDArray<float> nd4j::operator-(const float scalar, const NDArray<float>& arr); friend NDArray<float16> nd4j::operator-(const float16 scalar, const NDArray<float16>& arr); friend NDArray<double> nd4j::operator-(const double scalar, const NDArray<double>& arr); /** * pairwise multiplication operator: array * other * other - input array to multiply on */ NDArray<T> operator*(const NDArray<T>& other) const; /** * multiplication operator: array * scalar * scalar - input scalar to multiply on */ NDArray<T> operator*(const T scalar) const; /** * pairwise multiplication unary operator array *= other * other - input array to multiply on */ void operator*=(const NDArray<T>& other); /** * multiplication unary operator array *= scalar * scalar - input scalar to multiply on */ void operator*=(const T scalar); /** * pairwise division operator: array / other * other - input array to divide on */ NDArray<T> operator/(const NDArray<T>& other) const; /** * division operator: array / scalar * scalar - input scalar to divide each array element on */ NDArray<T> operator/(const T scalar) const; /** * pairwise division unary operator: array /= other * other - input array to divide on */ void operator/=(const NDArray<T>& other); /** * division unary operator: array /= scalar * scalar - input scalar to divide on */ void operator/=(const T scalar); /** * friend function which implements mathematical multiplication of two arrays * left - input array * right - input array */ friend NDArray<T> mmul<>(const NDArray<T>& left, const NDArray<T>& right); /** * this method assigns elements of other array to the subarray of this array defined by given intervals * other - input array to assign elements from * idx - intervals of indexes which define the subarray */ void assign(const NDArray<T>& other, const Intervals& idx); /** * return vector containing _buffer as flat binary array */ std::vector<int8_t> asByteVector(); /** * makes array to be identity matrix (not necessarily square), that is set all diagonal 
elements = 1, rest = 0 */ void setIdentity(); /** * swaps the contents of tow arrays, * PLEASE NOTE: method doesn't take into account the shapes of arrays, shapes may be different except one condition: arrays lengths must be the same */ void swapUnsafe(NDArray<T>& other); /** * return vector with buffer which points on corresponding diagonal elements of array * type - means of vector to be returned: column ('c') or row ('r') */ NDArray<T>* diagonal(const char type ) const; /** * fill matrix with given value starting from specified diagonal in given direction, works only with 2D matrix * * diag - diagonal starting from matrix is filled. * diag = 0 corresponds to main diagonal, * diag < 0 below main diagonal * diag > 0 above main diagonal * direction - in what direction to fill matrix. There are 2 possible directions: * 'u' - fill up, mathematically this corresponds to lower triangular matrix * 'l' - fill down, mathematically this corresponds to upper triangular matrix */ void setValueInDiagMatrix(const T& value, const int diag, const char direction); /** * change an array by repeating it the number of times in order to acquire new shape equal to the input shape * * shape - contains new shape to broadcast array to * target - optional argument, if target != nullptr the resulting array will be placed in target, in opposite case tile operation is done in place */ void tileToShape(const std::vector<Nd4jLong>& shape, NDArray<T>* target = nullptr); void tileToShape(const std::initializer_list<Nd4jLong>& shape, NDArray<T>* target = nullptr); template <typename N> NDArray<N>* asT(); /** * calculates the trace of an array, that is sum of elements on main diagonal = sum array[i, i, i, ...] */ T getTrace() const; /** * fill array linearly as follows: arr[0] = from, arr[1] = from+step, arr[2] = from+2*step, ... 
*/ void linspace(const T from, const T step = 1.0f); NDArray<T>* createUninitialized() const; ResultSet<T>* multipleTensorsAlongDimension(const std::vector<int>& indices, const std::vector<int>& dimensions) const; ResultSet<T>* allTensorsAlongDimension(const std::vector<int>& dimensions) const; ResultSet<T>* allTensorsAlongDimension(const std::initializer_list<int>& dimensions) const; ResultSet<T>* allExamples()const ; template <typename OpName> void saveResultOfBroadcast(const NDArray<T>& x, const NDArray<T>& y, const bool checkThisShape = false); /** * default destructor */ ~NDArray() noexcept; /** * set _shapeInfo */ FORCEINLINE void setShapeInfo(Nd4jLong *shapeInfo); /** * set _buffer */ FORCEINLINE void setBuffer(T* buffer); /** * set _isBuffAlloc and _isShapeAlloc */ FORCEINLINE void triggerAllocationFlag(bool bufferAllocated, bool shapeAllocated); /** * returns the value of "dim" dimension */ Nd4jLong sizeAt(const int dim) const; /** * returns order of array */ FORCEINLINE char ordering() const; /** * return _isView */ FORCEINLINE bool isView(); /** * returns shape portion of shapeInfo */ FORCEINLINE Nd4jLong* shapeOf() const; /** * returns strides portion of shapeInfo */ FORCEINLINE Nd4jLong* stridesOf() const; /** * returns rank of array */ FORCEINLINE int rankOf() const; /** * returns length of array */ FORCEINLINE Nd4jLong lengthOf() const; /** * returns number of rows in array */ FORCEINLINE Nd4jLong rows() const; /** * returns number of columns in array */ FORCEINLINE Nd4jLong columns() const; /** * returns size of array elements type */ FORCEINLINE int sizeOfT() const; /** * returns element-wise-stride */ FORCEINLINE Nd4jLong ews() const; // returns true if arrays have same shape FORCEINLINE bool isSameShape(const NDArray<T> *other) const; FORCEINLINE bool isSameShape(NDArray<T> &other) const; FORCEINLINE bool isSameShape(const std::initializer_list<Nd4jLong>& shape) const; FORCEINLINE bool isSameShape(const std::vector<Nd4jLong>& shape) const; /** * returns true if these two NDArrays have same rank, dimensions, strides, ews and order */ FORCEINLINE bool isSameShapeStrict(const NDArray<T> *other) const; /** * returns true if buffer && shapeInfo were defined (non nullptr) */ FORCEINLINE bool nonNull() const; /** * returns array element with given index from linear buffer * i - element index in array */ FORCEINLINE T getScalar(const Nd4jLong i) const; /** * returns array element with given index, takes into account offset between elements (element-wise-stride) * i - element index in array */ FORCEINLINE T getIndexedScalar(const Nd4jLong i) const; /** * returns element with given indexes from 2D array * i - number of row * j - number of column */ FORCEINLINE T getScalar(const Nd4jLong i, const Nd4jLong j) const; /** * returns element with given indexes from 3D array * i - height * j - width * k - depth */ FORCEINLINE T getScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const; /** * assigns given scalar to array element by given index, takes into account offset between elements (element-wise-stride) * i - element index in array * value - scalar value to assign */ FORCEINLINE void putIndexedScalar(const Nd4jLong i, const T value); /** * assigns given scalar to array element by given index, regards array buffer as linear * i - element index in array * value - scalar value to assign */ FORCEINLINE void putScalar(const Nd4jLong i, const T value); /** * assigns given scalar to 2D array element by given indexes * i - number of row * j - number of row * value - scalar value 
to assign */ FORCEINLINE void putScalar(const Nd4jLong i, const Nd4jLong j, const T value); /** * assigns given scalar to 3D array element by given indexes * i - height * j - width * k - depth * value - scalar value to assign */ FORCEINLINE void putScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k, const T value); /** * returns true if array is 2D */ FORCEINLINE bool isMatrix() const; /** * returns true if array is vector */ FORCEINLINE bool isVector() const; /** * returns true if array is column vector */ FORCEINLINE bool isColumnVector() const; /** * returns true if array is row vector */ FORCEINLINE bool isRowVector() const; /** * returns true if array is scalar */ FORCEINLINE bool isScalar() const; /** * inline accessing operator for matrix, i - absolute index */ FORCEINLINE T operator()(const Nd4jLong i) const; /** * inline modifying operator for matrix, i - absolute index */ FORCEINLINE T& operator()(const Nd4jLong i); /** * inline accessing operator for 2D array, i - row, j - column */ FORCEINLINE T operator()(const Nd4jLong i, const Nd4jLong j) const; /** * inline modifying operator for 2D array, i - row, j - column */ FORCEINLINE T& operator()(const Nd4jLong i, const Nd4jLong j); /** * inline accessing operator for 3D array, i - height, j - width, k - depth */ FORCEINLINE T operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const; /** * inline modifying operator for 3D array, i - height, j - width, k - depth */ FORCEINLINE T& operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k); /** * inline modifying operator for 4D array, t, u, v, w - indexes along the four dimensions */ FORCEINLINE T& operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w); /** * inline accessing operator for 4D array, t, u, v, w - indexes along the four dimensions */ FORCEINLINE T operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w) const; /** * inline modifying operator for ND array * idx - array with corresponding indexes, for example {2,10,0,5,...,8}, number of indexes should be equal to array rank */ FORCEINLINE T& operator()(const Nd4jLong* idx); /** * inline accessing operator for ND array * idx - array with corresponding indexes, for example {2,10,0,5,...,8}, number of indexes should be equal to array rank */ FORCEINLINE T operator()(const Nd4jLong* idx) const; template <typename T2> FORCEINLINE std::vector<T2> asVectorT(); FORCEINLINE bool isAttached(); NDArray<T>* detach(); FORCEINLINE bool operator == (const NDArray<T> &other) const; }; ////////////////////////////////////////////////////////////////////////// ///// IMPLEMENTATION OF INLINE METHODS ///// ////////////////////////////////////////////////////////////////////////// template <typename T> template <typename T2> std::vector<T2> NDArray<T>::asVectorT() { std::vector<T2> result(this->lengthOf()); #pragma omp parallel for simd for (int e = 0; e < this->lengthOf(); e++) result[e] = static_cast<T2>(this->getIndexedScalar(e)); return result; } template<typename T> bool NDArray<T>::isAttached() { return this->_workspace != nullptr; } ////////////////////////////////////////////////////////////////////////// template<typename T> void NDArray<T>::setShapeInfo(Nd4jLong *shapeInfo) { if(_isShapeAlloc && _workspace == nullptr) delete []_shapeInfo; _shapeInfo = shapeInfo; _isShapeAlloc = false; if (shapeInfo != nullptr) this->_length = shape::length(shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> void NDArray<T>::setBuffer(T*
buffer) { if(_isBuffAlloc && _workspace == nullptr) delete []_buffer; _buffer = buffer; _isBuffAlloc = false; } ////////////////////////////////////////////////////////////////////////// template<typename T> void NDArray<T>::triggerAllocationFlag(bool bufferAllocated, bool shapeAllocated) { _isBuffAlloc = bufferAllocated; _isShapeAlloc = shapeAllocated; } ////////////////////////////////////////////////////////////////////////// template<typename T> char NDArray<T>::ordering() const { return shape::order(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isView() { return _isView; } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong* NDArray<T>::shapeOf() const { return shape::shapeOf(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong* NDArray<T>::stridesOf() const { return shape::stride(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> int NDArray<T>::rankOf() const { if (isEmpty()) return 0; return shape::rank(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong NDArray<T>::lengthOf() const { return _length; } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong NDArray<T>::rows() const { if (this->rankOf() == 1) return 1; if (this->rankOf() > 2) throw std::runtime_error("Array with rank > 2 can't have rows"); return shapeOf()[0]; } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong NDArray<T>::columns() const { if (this->rankOf() == 1) return this->lengthOf(); if (this->rankOf() > 2) throw std::runtime_error("Array with rank > 2 can't have columns"); return shapeOf()[1]; } ////////////////////////////////////////////////////////////////////////// template<typename T> int NDArray<T>::sizeOfT() const { return sizeof(T); } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong NDArray<T>::ews() const { if (this->isEmpty() || this->rankOf() == 0) return 1; return shape::elementWiseStride(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::nonNull() const { if (isEmpty()) return true; return this->_buffer != nullptr && this->_shapeInfo != nullptr; } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isMatrix() const { if (isEmpty()) return false; return shape::isMatrix(this->_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isVector() const { if (isEmpty()) return false; return !isScalar() && shape::isVector(this->_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isColumnVector() const { if (isEmpty()) return false; return !isScalar() && shape::isColumnVector(this->_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isRowVector() const { if (isEmpty()) return false; // 1D edge case if (shape::rank(this->_shapeInfo) == 1) return true; return !isScalar() && shape::isRowVector(this->_shapeInfo); } 
////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isScalar() const { return shape::isScalar(this->_shapeInfo); } ////////////////////////////////////////////////////////////////////////// // accessing operator for matrix, i - absolute index template<typename T> T NDArray<T>::operator()(const Nd4jLong i) const { if (i >= shape::length(_shapeInfo)) throw std::invalid_argument("NDArray::operator(i): input index is out of array length !"); auto ews = shape::elementWiseStride(_shapeInfo); char order = ordering(); if(ews == 1 && order == 'c') return _buffer[i]; else if(ews > 1 && order == 'c') return _buffer[i*ews]; else { Nd4jLong idx[MAX_RANK]; shape::ind2subC(rankOf(), shapeOf(), i, idx); Nd4jLong offset = shape::getOffset(0, shapeOf(), stridesOf(), idx, rankOf()); return _buffer[offset]; } } ////////////////////////////////////////////////////////////////////////// // modifying operator for matrix, i - absolute index template<typename T> T& NDArray<T>::operator()(const Nd4jLong i) { if (i >= shape::length(_shapeInfo)) throw std::invalid_argument("NDArray::operator(i): input index is out of array length !"); auto ews = shape::elementWiseStride(_shapeInfo); auto order = ordering(); if(ews == 1 && order == 'c') return _buffer[i]; else if(ews > 1 && order == 'c') return _buffer[i*ews]; else { Nd4jLong idx[MAX_RANK]; shape::ind2subC(rankOf(), shapeOf(), i, idx); auto offset = shape::getOffset(0, shapeOf(), stridesOf(), idx, rankOf()); return _buffer[offset]; } } ////////////////////////////////////////////////////////////////////////// // accessing operator for 2D matrix, i - row, j - column template<typename T> T NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j) const { if (rankOf() != 2 || i >= shapeOf()[0] || j >= shapeOf()[1]) throw std::invalid_argument("NDArray::operator(i,j): one of input indexes is out of array length or rank!=2 !"); Nd4jLong coords[2] = {i, j}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } ////////////////////////////////////////////////////////////////////////// // modifying operator for 2D matrix, i - row, j - column template<typename T> T& NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j) { if (rankOf() != 2 || i >= shapeOf()[0] || j >= shapeOf()[1]) throw std::invalid_argument("NDArray::operator(i,j): one of input indexes is out of array length or rank!=2 !"); Nd4jLong coords[2] = {i, j}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } ////////////////////////////////////////////////////////////////////////// // accessing operator for 3D array, i - height, j - width, k - depth template<typename T> T NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const { if (rankOf() != 3 || i >= shapeOf()[0] || j >= shapeOf()[1] || k >= shapeOf()[2]) throw std::invalid_argument("NDArray::operator(i,j,k): one of input indexes is out of array length or rank!=3 !"); Nd4jLong coords[3] = {i, j, k}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } ////////////////////////////////////////////////////////////////////////// // modifying operator for 3D array template<typename T> T& NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) { if (rankOf() != 3 || i >= shapeOf()[0] || j >= shapeOf()[1] || k >= shapeOf()[2]) throw std::invalid_argument("NDArray::operator(i,j,k): one of input indexes is out of
array length or rank!=3 !"); Nd4jLong coords[3] = {i, j, k}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } template<typename T> T NDArray<T>::operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w) const { if (rankOf() != 4 || t >= shapeOf()[0] || u >= shapeOf()[1] || v >= shapeOf()[2] || w >= shapeOf()[3]) throw std::invalid_argument("NDArray::operator(t,u,v,w): one of input indexes is out of array length or rank!=4 !"); Nd4jLong coords[4] = {t, u, v, w}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } template<typename T> T& NDArray<T>::operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w) { if (rankOf() != 4 || t >= shapeOf()[0] || u >= shapeOf()[1] || v >= shapeOf()[2] || w >= shapeOf()[3]) throw std::invalid_argument("NDArray::operator(t,u,v,w): one of input indexes is out of array length or rank!=4 !"); Nd4jLong coords[4] = {t, u, v, w}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } ////////////////////////////////////////////////////////////////////////// template<typename T> T NDArray<T>::operator()(const Nd4jLong* idx) const { for(int i = 0; i < rankOf(); ++i) if (idx[i] >= sizeAt(i)) throw std::invalid_argument("NDArray::operator(const Nd4jLong* idx): input index is out of dimension length !"); return _buffer[shape::getOffset(0, shapeOf(), stridesOf(), idx, rankOf())]; } ////////////////////////////////////////////////////////////////////////// template<typename T> T& NDArray<T>::operator()(const Nd4jLong* idx) { for(int i = 0; i < rankOf(); ++i) if (idx[i] >= sizeAt(i)) throw std::invalid_argument("NDArray::operator(const Nd4jLong* idx): input index is out of dimension length !"); return _buffer[shape::getOffset(0, shapeOf(), stridesOf(), idx, rankOf())]; } ////////////////////////////////////////////////////////////////////////// // Return value from linear buffer template<typename T> T NDArray<T>::getScalar(const Nd4jLong i) const { return (*this)(i); } ////////////////////////////////////////////////////////////////////////// template<typename T> T NDArray<T>::getIndexedScalar(const Nd4jLong i) const { return (*this)(i); } ////////////////////////////////////////////////////////////////////////// // Returns value from 2D matrix by coordinates/indexes template<typename T> T NDArray<T>::getScalar(const Nd4jLong i, const Nd4jLong j) const { return (*this)(i, j); } ////////////////////////////////////////////////////////////////////////// // returns value from 3D tensor by coordinates template<typename T> T NDArray<T>::getScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const { return (*this)(i, j, k); } ////////////////////////////////////////////////////////////////////////// template<typename T> void NDArray<T>::putIndexedScalar(const Nd4jLong i, const T value) { (*this)(i) = value; } ////////////////////////////////////////////////////////////////////////// // This method sets value in linear buffer to position i template<typename T> void NDArray<T>::putScalar(const Nd4jLong i, const T value) { (*this)(i) = value; } ////////////////////////////////////////////////////////////////////////// // This method sets value in 2D matrix to position i, j template<typename T> void NDArray<T>::putScalar(const Nd4jLong i, const Nd4jLong j, const T value) { (*this)(i,j) = value; } 
////////////////////////////////////////////////////////////////////////// // This method sets value in 3D matrix to position i,j,k template<typename T> void NDArray<T>::putScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k, const T value) { (*this)(i,j,k) = value; } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong NDArray<T>::memoryFootprint() { Nd4jLong size = this->lengthOf() * this->sizeOfT(); size += shape::shapeInfoByteLength(this->rankOf()); return size; } ////////////////////////////////////////////////////////////////////////// // still the definition of inline function must be in header file template<typename T> bool NDArray<T>::isSameShape(const std::vector<Nd4jLong>& shape) const{ if (this->isScalar() && shape.size() == 1 && shape[0] == 0) return true; if (this->rankOf() != (int) shape.size()) return false; for (int e = 0; e < this->rankOf(); e++) { if (this->shapeOf()[e] != shape.at(e) && shape.at(e) != -1) return false; } return true; } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isSameShape(const NDArray<T> *other) const { if (this->isEmpty() != other->isEmpty()) return false; return isSameShape(std::vector<Nd4jLong>(other->_shapeInfo+1, other->_shapeInfo+1+other->_shapeInfo[0])); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isSameShape(NDArray<T> &other) const { return isSameShape(&other); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isSameShape(const std::initializer_list<Nd4jLong>& other) const { return isSameShape(std::vector<Nd4jLong>(other)); } ////////////////////////////////////////////////////////////////////////// // returns true if these two NDArrays have same _shapeInfo // still the definition of inline function must be in header file template<typename T> bool NDArray<T>::isSameShapeStrict(const NDArray<T> *other) const { return shape::equalsStrict(_shapeInfo, other->_shapeInfo); } template<typename T> bool NDArray<T>::isEmpty() const { return ArrayOptions::arrayType(this->getShapeInfo()) == ArrayType::EMPTY; } template <typename T> bool NDArray<T>::operator ==(const NDArray<T> &other) const { if (!this->isSameShape(&other)) return false; return this->equalsTo(&other); } } #endif
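//////////////////////////////////////////////////////////////////////////
// Editor's usage sketch (not part of the library): ties together the inline
// accessors implemented above. The construction line is a placeholder --
// NDArray constructors are declared earlier in this header and their exact
// signatures are not assumed here.
//
//     NDArray<float> arr = /* ... construct a 2x3 'c'-ordered array ... */;
//     arr(0, 1) = 42.0f;                    // modifying operator(), 2D overload
//     float v   = arr.getScalar(0, 1);      // same element via the checked 2D path
//     arr.putScalar(5, 1.0f);               // linear-buffer write at index 5
//     auto vec  = arr.asVectorT<double>();  // copy out as std::vector<double>
//     bool mat  = arr.isMatrix();           // true for rank-2 arrays
//////////////////////////////////////////////////////////////////////////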
dashlane_fmt_plug.c
/* * JtR format to crack Dashlane Password Manager files. * * This software is Copyright (c) 2017, Dhiru Kholia <kholia at kth.se> and it * is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * Special thanks goes to Robin Lambertz for making this work possible. */ #include "arch.h" #if !AC_BUILT #define HAVE_LIBZ 1 /* legacy build has -lz in LDFLAGS */ #endif #if HAVE_LIBZ #if FMT_EXTERNS_H extern struct fmt_main fmt_dashlane; #elif FMT_REGISTERS_H john_register_one(&fmt_dashlane); #else #include <string.h> #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 4 #endif #endif #include <openssl/evp.h> #include <zlib.h> #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "aes.h" #include "sha2.h" #include "jumbo.h" #include "pbkdf2_hmac_sha1.h" #include "dashlane_common.h" #include "openssl_code.h" #include "hmac_sha.h" #include "memdbg.h" #define FORMAT_NAME "Dashlane Password Manager" #define FORMAT_LABEL "dashlane" #define FORMAT_TAG "$dashlane$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #ifdef SIMD_COEF_64 #define ALGORITHM_NAME "AES PBKDF2-SHA1 " SHA1_ALGORITHM_NAME #else #define ALGORITHM_NAME "AES PBKDF2-SHA1 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(uint32_t) #define PLAINTEXT_LENGTH 125 #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked, cracked_count; static struct custom_salt *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt); cracked_count = self->params.max_keys_per_crypt; } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_key); } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static void dashlane_set_key(char *key, int index) { strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1); } static char *get_key(int index) { return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; memset(cracked, 0, sizeof(cracked[0]) * cracked_count); #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { unsigned char pkey[MAX_KEYS_PER_CRYPT][32]; int i; #ifdef SIMD_COEF_32 int len[MAX_KEYS_PER_CRYPT]; unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT]; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { len[i] = strlen(saved_key[i+index]); pin[i] = (unsigned char*)saved_key[i+index]; pout[i] = pkey[i]; } pbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->salt, 32, 10204, pout, 32, 0); #else for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) { pbkdf2_sha1((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->salt, 32, 10204, pkey[i], 32, 0); } #endif for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) { if (dashlane_verify(cur_salt, pkey[i])) cracked[index+i] = 1; else 
cracked[index+i] = 0; } } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_dashlane = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT, { NULL }, { FORMAT_TAG }, dashlane_tests }, { init, done, fmt_default_reset, fmt_default_prepare, dashlane_valid, fmt_default_split, fmt_default_binary, dashlane_get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, dashlane_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_LIBZ */
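/*
 * Editor's sketch (not part of this format plugin): the core derivation that
 * crypt_all() performs for each candidate in the non-SIMD path, isolated for
 * clarity. The candidate password is a placeholder; pbkdf2_sha1() and
 * dashlane_verify() are the same helpers called above, with the same
 * parameters (32-byte salt, 10204 iterations, 32-byte derived AES key).
 *
 *     unsigned char pkey[32];
 *     pbkdf2_sha1((unsigned char *)"candidate", 9,
 *                 cur_salt->salt, 32, 10204, pkey, 32, 0);
 *     if (dashlane_verify(cur_salt, pkey))
 *         ;  // decrypted vault data checks out, candidate password matches
 */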
GB_unaryop__abs_uint32_uint64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_uint32_uint64 // op(A') function: GB_tran__abs_uint32_uint64 // C type: uint32_t // A type: uint64_t // cast: uint32_t cij = (uint32_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint64_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ uint32_t z = (uint32_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_uint32_uint64 ( uint32_t *Cx, // Cx and Ax may be aliased uint64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_uint32_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
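//------------------------------------------------------------------------------
// editor's note: illustrative expansion of one GB_CAST_OP(p,p) iteration from
// the parallel loop above, spelled out using the macro definitions in this
// file (ABS of an unsigned value is the identity, so GB_OP is plain assignment):
//
//      uint64_t aij = Ax [p] ;          // GB_GETA: load the A entry
//      uint32_t z = (uint32_t) aij ;    // GB_CASTING: typecast uint64 -> uint32
//      Cx [p] = z ;                     // GB_OP: cij = aij (identity for |x|)
//------------------------------------------------------------------------------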
attribute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % John Cristy % % October 2002 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/effect.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/identify.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/magick.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/paint.h" #include "magick/pixel.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/segment.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/transform.h" #include "magick/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. % % The format of the GetImageBoundingBox method is: % % RectangleInfo GetImageBoundingBox(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o bounds: Method GetImageBoundingBox returns the bounding box of an % image canvas. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport RectangleInfo GetImageBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; MagickPixelPacket target[3], zero; RectangleInfo bounds; register const PixelPacket *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); bounds.width=0; bounds.height=0; bounds.x=(ssize_t) image->columns; bounds.y=(ssize_t) image->rows; GetMagickPixelPacket(image,&target[0]); image_view=AcquireCacheView(image); p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); if (p == (const PixelPacket *) NULL) { image_view=DestroyCacheView(image_view); return(bounds); } SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view), &target[0]); GetMagickPixelPacket(image,&target[1]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view), &target[1]); GetMagickPixelPacket(image,&target[2]); p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view), &target[2]); status=MagickTrue; GetMagickPixelPacket(image,&zero); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; RectangleInfo bounding_box; register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif bounding_box=bounds; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,p,indexes+x,&pixel); if ((x < bounding_box.x) && (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse)) bounding_box.x=x; if ((x > (ssize_t) bounding_box.width) && (IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse)) bounding_box.width=(size_t) x; if ((y < bounding_box.y) && (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse)) bounding_box.y=y; if ((y > (ssize_t) bounding_box.height) && (IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse)) bounding_box.height=(size_t) y; p++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif { if (bounding_box.x < bounds.x) bounds.x=bounding_box.x; if (bounding_box.y < bounds.y) bounds.y=bounding_box.y; if (bounding_box.width > bounds.width) bounds.width=bounding_box.width; if (bounding_box.height > bounds.height) bounds.height=bounding_box.height; } } image_view=DestroyCacheView(image_view); if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); else { bounds.width-=(bounds.x-1); bounds.height-=(bounds.y-1); } return(bounds); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelDepth() returns the depth of a particular image channel. 
% % The format of the GetImageChannelDepth method is: % % size_t GetImageDepth(const Image *image,ExceptionInfo *exception) % size_t GetImageChannelDepth(const Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception) { return(GetImageChannelDepth(image,CompositeChannels,exception)); } MagickExport size_t GetImageChannelDepth(const Image *image, const ChannelType channel,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t id; size_t *current_depth, depth, number_threads; ssize_t y; /* Compute image depth. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); number_threads=GetOpenMPMaximumThreads(); current_depth=(size_t *) AcquireQuantumMemory(number_threads, sizeof(*current_depth)); if (current_depth == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); status=MagickTrue; for (id=0; id < (ssize_t) number_threads; id++) current_depth[id]=1; if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse)) { register const PixelPacket *restrict p; register ssize_t i; p=image->colormap; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (i=0; i < (ssize_t) image->colors; i++) { const int id = GetOpenMPThreadId(); if (status == MagickFalse) continue; while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickStatusType status; QuantumAny range; status=0; range=GetQuantumRange(current_depth[id]); if ((channel & RedChannel) != 0) status|=GetPixelRed(p) != ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelRed(p), range),range); if ((channel & GreenChannel) != 0) status|=GetPixelGreen(p) != ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelGreen(p), range),range); if ((channel & BlueChannel) != 0) status|=GetPixelBlue(p) != ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelBlue(p), range),range); if (status == 0) break; current_depth[id]++; } p++; } depth=current_depth[0]; for (id=1; id < (ssize_t) number_threads; id++) if (depth < current_depth[id]) depth=current_depth[id]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickStatusType status; QuantumAny range; status=0; range=GetQuantumRange(current_depth[id]); if ((channel & RedChannel) != 0) status|=GetPixelRed(p) != ScaleAnyToQuantum( ScaleQuantumToAny(GetPixelRed(p),range),range); if ((channel & GreenChannel) != 0) status|=GetPixelGreen(p) != ScaleAnyToQuantum( ScaleQuantumToAny(GetPixelGreen(p),range),range); if ((channel & BlueChannel) != 0) status|=GetPixelBlue(p) 
!= ScaleAnyToQuantum( ScaleQuantumToAny(GetPixelBlue(p),range),range); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) status|=GetPixelOpacity(p) != ScaleAnyToQuantum( ScaleQuantumToAny(GetPixelOpacity(p),range),range); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) status|=GetPixelIndex(indexes+x) != ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelIndex(indexes+ x),range),range); if (status == 0) break; current_depth[id]++; } p++; } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (id=1; id < (ssize_t) number_threads; id++) if (depth < current_depth[id]) depth=current_depth[id]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t u m D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantumDepth() returns the depth of the image rounded to a legal % quantum depth: 8, 16, or 32. % % The format of the GetImageQuantumDepth method is: % % size_t GetImageQuantumDepth(const Image *image, % const MagickBooleanType constrain) % % A description of each parameter follows: % % o image: the image. % % o constrain: A value other than MagickFalse, constrains the depth to % a maximum of MAGICKCORE_QUANTUM_DEPTH. % */ static inline double MagickMin(const double x,const double y) { if (x < y) return(x); return(y); } MagickExport size_t GetImageQuantumDepth(const Image *image, const MagickBooleanType constrain) { size_t depth; depth=image->depth; if (depth <= 8) depth=8; else if (depth <= 16) depth=16; else if (depth <= 32) depth=32; else if (depth <= 64) depth=64; if (constrain != MagickFalse) depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageType() returns the potential type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % To ensure the image type matches its potential, use SetImageType(): % % (void) SetImageType(image,GetImageType(image)); % % The format of the GetImageType method is: % % ImageType GetImageType(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == CMYKColorspace) { if (image->matte == MagickFalse) return(ColorSeparationType); return(ColorSeparationMatteType); } if (IsMonochromeImage(image,exception) != MagickFalse) return(BilevelType); if (IsGrayImage(image,exception) != MagickFalse) { if (image->matte != MagickFalse) return(GrayscaleMatteType); return(GrayscaleType); } if (IsPaletteImage(image,exception) != MagickFalse) { if (image->matte != MagickFalse) return(PaletteMatteType); return(PaletteType); } if (image->matte != MagickFalse) return(TrueColorMatteType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s G r a y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsGrayImage() returns MagickTrue if all the pixels in the image have the % same red, green, and blue intensities. % % The format of the IsGrayImage method is: % % MagickBooleanType IsGrayImage(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsGrayImage(const Image *image, ExceptionInfo *exception) { CacheView *image_view; ImageType type; register const PixelPacket *p; register ssize_t x; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->type == BilevelType) || (image->type == GrayscaleType) || (image->type == GrayscaleMatteType)) return(MagickTrue); if (IsRGBColorspace(image->colorspace) == MagickFalse) return(MagickFalse); type=BilevelType; image_view=AcquireCacheView(image); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsGrayPixel(p) == MagickFalse) { type=UndefinedType; break; } if ((type == BilevelType) && (IsMonochromePixel(p) == MagickFalse)) type=GrayscaleType; p++; } if (type == UndefinedType) break; } image_view=DestroyCacheView(image_view); if (type == UndefinedType) return(MagickFalse); ((Image *) image)->type=type; if ((type == GrayscaleType) && (image->matte != MagickFalse)) ((Image *) image)->type=GrayscaleMatteType; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s M o n o c h r o m e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsMonochromeImage() returns MagickTrue if all the pixels in the image have % the same red, green, and blue intensities and the intensity is either % 0 or QuantumRange. % % The format of the IsMonochromeImage method is: % % MagickBooleanType IsMonochromeImage(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType IsMonochromeImage(const Image *image, ExceptionInfo *exception) { CacheView *image_view; ImageType type; register ssize_t x; register const PixelPacket *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->type == BilevelType) return(MagickTrue); if (IsRGBColorspace(image->colorspace) == MagickFalse) return(MagickFalse); type=BilevelType; image_view=AcquireCacheView(image); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsMonochromePixel(p) == MagickFalse) { type=UndefinedType; break; } p++; } if (type == UndefinedType) break; } image_view=DestroyCacheView(image_view); if (type == UndefinedType) return(MagickFalse); ((Image *) image)->type=type; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s O p a q u e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsOpaqueImage() returns MagickTrue if none of the pixels in the image have % an opacity value other than opaque (0). % % The format of the IsOpaqueImage method is: % % MagickBooleanType IsOpaqueImage(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsOpaqueImage(const Image *image, ExceptionInfo *exception) { CacheView *image_view; register const PixelPacket *p; register ssize_t x; ssize_t y; /* Determine if image is opaque. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->matte == MagickFalse) return(MagickTrue); image_view=AcquireCacheView(image); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(p) != OpaqueOpacity) break; p++; } if (x < (ssize_t) image->columns) break; } image_view=DestroyCacheView(image_view); return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C h a n n e l D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageChannelDepth() sets the depth of the image. % % The format of the SetImageChannelDepth method is: % % MagickBooleanType SetImageDepth(Image *image,const size_t depth) % MagickBooleanType SetImageChannelDepth(Image *image, % const ChannelType channel,const size_t depth) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o depth: the image depth. 
% */ MagickExport MagickBooleanType SetImageDepth(Image *image, const size_t depth) { return(SetImageChannelDepth(image,CompositeChannels,depth)); } MagickExport MagickBooleanType SetImageChannelDepth(Image *image, const ChannelType channel,const size_t depth) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; QuantumAny range; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); if (GetImageDepth(image,&image->exception) <= (size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH)) { image->depth=depth; return(MagickTrue); } /* Scale pixels to desired depth. */ status=MagickTrue; range=GetQuantumRange(depth); exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ScaleAnyToQuantum(ScaleQuantumToAny( GetPixelRed(q),range),range)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ScaleAnyToQuantum(ScaleQuantumToAny( GetPixelGreen(q),range),range)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ScaleAnyToQuantum(ScaleQuantumToAny( GetPixelBlue(q),range),range)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,ScaleAnyToQuantum(ScaleQuantumToAny( GetPixelOpacity(q),range),range)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ScaleAnyToQuantum(ScaleQuantumToAny( GetPixelIndex(indexes+x),range),range)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; continue; } } image_view=DestroyCacheView(image_view); if (image->storage_class == PseudoClass) { register ssize_t i; register PixelPacket *restrict p; p=image->colormap; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) p->red=ScaleAnyToQuantum(ScaleQuantumToAny(p->red,range),range); if ((channel & GreenChannel) != 0) p->green=ScaleAnyToQuantum(ScaleQuantumToAny(p->green,range),range); if ((channel & BlueChannel) != 0) p->blue=ScaleAnyToQuantum(ScaleQuantumToAny(p->blue,range),range); if ((channel & OpacityChannel) != 0) p->opacity=ScaleAnyToQuantum(ScaleQuantumToAny(p->opacity,range), range); p++; } } image->depth=depth; return(status); }
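/*
  Editor's sketch (not ImageMagick source): both GetImageChannelDepth() and
  SetImageChannelDepth() above hinge on the same round-trip identity -- a
  quantum fits in n bits exactly when scaling it into an n-bit range and back
  is lossless. Isolated here with a hypothetical quantum value:

      QuantumAny range=GetQuantumRange(8);
      Quantum q= ... ;  // some pixel quantum
      MagickBooleanType fits_in_8_bits=
        (q == ScaleAnyToQuantum(ScaleQuantumToAny(q,range),range)) ?
        MagickTrue : MagickFalse;
*/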
tinyexr.h
#ifndef TINYEXR_H_ #define TINYEXR_H_ /* Copyright (c) 2014 - 2021, Syoyo Fujita and many contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Syoyo Fujita nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// /////////////////////////////////////////////////////////////////////////// // End of OpenEXR license ------------------------------------------------- // // // Do this: // #define TINYEXR_IMPLEMENTATION // before you include this file in *one* C or C++ file to create the // implementation. // // // i.e. it should look like this: // #include ... // #include ... // #include ... // #define TINYEXR_IMPLEMENTATION // #include "tinyexr.h" // // #include <stddef.h> // for size_t #include <stdint.h> // guess stdint.h is available(C99) #ifdef __cplusplus extern "C" { #endif #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ defined(__i386) || defined(__i486__) || defined(__i486) || \ defined(i386) || defined(__ia64__) || defined(__x86_64__) #define TINYEXR_X86_OR_X64_CPU 1 #else #define TINYEXR_X86_OR_X64_CPU 0 #endif #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || TINYEXR_X86_OR_X64_CPU #define TINYEXR_LITTLE_ENDIAN 1 #else #define TINYEXR_LITTLE_ENDIAN 0 #endif // Use miniz or not to decode ZIP format pixel. Linking with zlib is // required if this flag is 0. #ifndef TINYEXR_USE_MINIZ #define TINYEXR_USE_MINIZ (1) #endif // Disable PIZ compression when applying cpplint. #ifndef TINYEXR_USE_PIZ #define TINYEXR_USE_PIZ (1) #endif #ifndef TINYEXR_USE_ZFP #define TINYEXR_USE_ZFP (0) // TinyEXR extension. // http://computation.llnl.gov/projects/floating-point-compression #endif #ifndef TINYEXR_USE_THREAD #define TINYEXR_USE_THREAD (0) // No threaded loading. #endif #ifndef TINYEXR_USE_OPENMP #ifdef _OPENMP #define TINYEXR_USE_OPENMP (1) #else #define TINYEXR_USE_OPENMP (0) #endif #endif #define TINYEXR_SUCCESS (0) #define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1) #define TINYEXR_ERROR_INVALID_EXR_VERSION (-2) #define TINYEXR_ERROR_INVALID_ARGUMENT (-3) #define TINYEXR_ERROR_INVALID_DATA (-4) #define TINYEXR_ERROR_INVALID_FILE (-5) #define TINYEXR_ERROR_INVALID_PARAMETER (-6) #define TINYEXR_ERROR_CANT_OPEN_FILE (-7) #define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8) #define TINYEXR_ERROR_INVALID_HEADER (-9) #define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10) #define TINYEXR_ERROR_CANT_WRITE_FILE (-11) #define TINYEXR_ERROR_SERIALZATION_FAILED (-12) #define TINYEXR_ERROR_LAYER_NOT_FOUND (-13) // @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf } // pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2 #define TINYEXR_PIXELTYPE_UINT (0) #define TINYEXR_PIXELTYPE_HALF (1) #define TINYEXR_PIXELTYPE_FLOAT (2) #define TINYEXR_MAX_HEADER_ATTRIBUTES (1024) #define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128) #define TINYEXR_COMPRESSIONTYPE_NONE (0) #define TINYEXR_COMPRESSIONTYPE_RLE (1) #define TINYEXR_COMPRESSIONTYPE_ZIPS (2) #define TINYEXR_COMPRESSIONTYPE_ZIP (3) #define TINYEXR_COMPRESSIONTYPE_PIZ (4) #define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension #define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0) #define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1) #define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2) #define TINYEXR_TILE_ONE_LEVEL (0) #define TINYEXR_TILE_MIPMAP_LEVELS (1) #define TINYEXR_TILE_RIPMAP_LEVELS (2) #define TINYEXR_TILE_ROUND_DOWN (0) #define TINYEXR_TILE_ROUND_UP (1) typedef struct _EXRVersion { int version; // this must be 2 // tile format image; // not zero for only a single-part "normal" tiled file (according to spec.) int tiled; int long_name; // long name attribute // deep image(EXR 2.0); // for a multi-part file, indicates that at least one part is of type deep* (according to spec.)
int non_image; int multipart; // multi-part(EXR 2.0) } EXRVersion; typedef struct _EXRAttribute { char name[256]; // name and type are up to 255 chars long. char type[256]; unsigned char *value; // uint8_t* int size; int pad0; } EXRAttribute; typedef struct _EXRChannelInfo { char name[256]; // less than 255 bytes long int pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } EXRChannelInfo; typedef struct _EXRTile { int offset_x; int offset_y; int level_x; int level_y; int width; // actual width in a tile. int height; // actual height in a tile. unsigned char **images; // image[channels][pixels] } EXRTile; typedef struct _EXRBox2i { int min_x; int min_y; int max_x; int max_y; } EXRBox2i; typedef struct _EXRHeader { float pixel_aspect_ratio; int line_order; EXRBox2i data_window; EXRBox2i display_window; float screen_window_center[2]; float screen_window_width; int chunk_count; // Properties for tiled format(`tiledesc`). int tiled; int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; int long_name; // for a single-part file, agree with the version field bit 11 // for a multi-part file, it is consistent with the type of part int non_image; int multipart; unsigned int header_len; // Custom attributes(excludes required attributes(e.g. `channels`, // `compression`, etc) int num_custom_attributes; EXRAttribute *custom_attributes; // array of EXRAttribute. size = // `num_custom_attributes`. EXRChannelInfo *channels; // [num_channels] int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for // each channel. This is overwritten with `requested_pixel_types` when // loading. int num_channels; int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*) int *requested_pixel_types; // Filled initially by // ParseEXRHeaderFrom(Memory|File), then users // can edit it(only valid for HALF pixel type // channel) // name attribute required for multipart files; // must be unique and non-empty (according to spec.); // use EXRSetNameAttr for setting value; // max 255 characters allowed - excluding terminating zero char name[256]; } EXRHeader; typedef struct _EXRMultiPartHeader { int num_headers; EXRHeader *headers; } EXRMultiPartHeader; typedef struct _EXRImage { EXRTile *tiles; // Tiled pixel data. The application must reconstruct image // from tiles manually. NULL if scanline format. struct _EXRImage* next_level; // NULL if scanline format or image is the last level. int level_x; // x level index int level_y; // y level index unsigned char **images; // image[channels][pixels]. NULL if tiled format. int width; int height; int num_channels; // Properties for tile format. int num_tiles; } EXRImage; typedef struct _EXRMultiPartImage { int num_images; EXRImage *images; } EXRMultiPartImage; typedef struct _DeepImage { const char **channel_names; float ***image; // image[channels][scanlines][samples] int **offset_table; // offset_table[scanline][offsets] int num_channels; int width; int height; int pad0; } DeepImage; // @deprecated { For backward compatibility. Not recommended to use. } // Loads single-frame OpenEXR image. Assume EXR image contains A(single channel // alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba` // Result image format is: float x RGBA x width x height // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err); // Loads single-frame OpenEXR image by specifying layer name. Assume EXR image // contains A(single channel alpha) or RGB(A) channels. Application must free // image data as returned by `out_rgba` Result image format is: float x RGBA x // width x height Returns negative value and may set error string in `err` when // there's an error When the specified layer name is not found in the EXR file, // the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`. extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height, const char *filename, const char *layer_name, const char **err); // // Get layer info from EXR file. // // @param[out] layer_names List of layer names. Application must free memory // after using this. // @param[out] num_layers The number of layers // @param[out] err Error string(will be filled when the function returns error // code). Free it using FreeEXRErrorMessage after using this value. // // @return TINYEXR_SUCCESS upon success. // extern int EXRLayers(const char *filename, const char **layer_names[], int *num_layers, const char **err); // @deprecated { to be removed. } // Simple wrapper API for ParseEXRHeaderFromFile. // Checks whether the given file is an EXR file (by inspecting just the header). // @return TINYEXR_SUCCESS for EXR image, TINYEXR_ERROR_INVALID_HEADER for // others extern int IsEXR(const char *filename); // @deprecated { to be removed. } // Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels. // components must be 1(Grayscale), 3(RGB) or 4(RGBA). // Input image format is: `float x width x height`, or `float x RGB(A) x width x // height` // Save image as fp16(HALF) format when `save_as_fp16` is non-zero. // Save image as fp32(FLOAT) format when `save_as_fp16` is 0. // Use ZIP compression by default. // Returns negative value and may set error string in `err` when there's an // error extern int SaveEXR(const float *data, const int width, const int height, const int components, const int save_as_fp16, const char *filename, const char **err); // Returns the number of resolution levels of the image (including the base) extern int EXRNumLevels(const EXRImage* exr_image); // Initialize EXRHeader struct extern void InitEXRHeader(EXRHeader *exr_header); // Set name attribute of EXRHeader struct (it makes a copy) extern void EXRSetNameAttr(EXRHeader *exr_header, const char* name); // Initialize EXRImage struct extern void InitEXRImage(EXRImage *exr_image); // Frees internal data of EXRHeader struct extern int FreeEXRHeader(EXRHeader *exr_header); // Frees internal data of EXRImage struct extern int FreeEXRImage(EXRImage *exr_image); // Frees error message extern void FreeEXRErrorMessage(const char *msg); // Parse EXR version header of a file. extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename); // Parse EXR version header from memory-mapped EXR data. extern int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size); // Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
                                  const char *filename, const char **err);

// Parse single-part OpenEXR header from memory and initialize `EXRHeader`.
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
                                    const EXRVersion *version,
                                    const unsigned char *memory, size_t size,
                                    const char **err);

// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
                                           int *num_headers,
                                           const EXRVersion *version,
                                           const char *filename,
                                           const char **err);

// Parse multi-part OpenEXR headers from memory and initialize `EXRHeader*`
// array.
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
                                             int *num_headers,
                                             const EXRVersion *version,
                                             const unsigned char *memory,
                                             size_t size, const char **err);

// Loads single-part OpenEXR image from a file.
// The application must set up `EXRHeader` with `ParseEXRHeaderFromFile`
// before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
                                const char *filename, const char **err);

// Loads single-part OpenEXR image from memory.
// The application must set up `EXRHeader` with `ParseEXRHeaderFromMemory`
// before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
                                  const unsigned char *memory,
                                  const size_t size, const char **err);

// Loads multi-part OpenEXR image from a file.
// The application must set up the `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
                                         const EXRHeader **headers,
                                         unsigned int num_parts,
                                         const char *filename,
                                         const char **err);

// Loads multi-part OpenEXR image from memory.
// The application must set up the `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
                                           const EXRHeader **headers,
                                           unsigned int num_parts,
                                           const unsigned char *memory,
                                           const size_t size,
                                           const char **err);
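// Illustrative usage sketch (not compiled): the full two-step loading flow
// (version -> header -> image) using the functions declared above. The file
// name and helper name are hypothetical.
#if 0
static void LoadExampleTwoStep() {
  const char *filename = "input.exr";
  const char *err = NULL;

  EXRVersion version;
  if (ParseEXRVersionFromFile(&version, filename) != TINYEXR_SUCCESS ||
      version.multipart) {
    return;  // not a single-part EXR file
  }

  EXRHeader header;
  InitEXRHeader(&header);
  if (ParseEXRHeaderFromFile(&header, &version, filename, &err) !=
      TINYEXR_SUCCESS) {
    if (err) FreeEXRErrorMessage(err);
    return;
  }

  // Optionally ask the decoder to widen HALF channels to FLOAT.
  for (int i = 0; i < header.num_channels; i++) {
    if (header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }

  EXRImage image;
  InitEXRImage(&image);
  if (LoadEXRImageFromFile(&image, &header, filename, &err) !=
      TINYEXR_SUCCESS) {
    if (err) FreeEXRErrorMessage(err);
    FreeEXRHeader(&header);
    return;
  }

  // ... use image.images, image.num_channels, image.width, image.height ...

  FreeEXRImage(&image);
  FreeEXRHeader(&header);
}
#endif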
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
                              const EXRHeader *exr_header,
                              const char *filename, const char **err);

// Saves multi-channel, single-frame OpenEXR image to a memory.
// Image is compressed using the `EXRHeader.compression_type` value.
// Returns the number of bytes on success.
// Returns zero and sets the error string in `err` when there's an
// error.
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRImageToMemory(const EXRImage *image,
                                   const EXRHeader *exr_header,
                                   unsigned char **memory, const char **err);

// Saves multi-channel, multi-frame OpenEXR image to a file.
// Image is compressed using the `EXRHeader.compression_type` value.
// File global attributes (e.g. display_window) must be set in the first
// header.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRMultipartImageToFile(const EXRImage *images,
                                       const EXRHeader **exr_headers,
                                       unsigned int num_parts,
                                       const char *filename,
                                       const char **err);

// Saves multi-channel, multi-frame OpenEXR image to a memory.
// Image is compressed using the `EXRHeader.compression_type` value.
// File global attributes (e.g. display_window) must be set in the first
// header.
// Returns the number of bytes on success.
// Returns zero and sets the error string in `err` when there's an
// error.
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRMultipartImageToMemory(const EXRImage *images,
                                            const EXRHeader **exr_headers,
                                            unsigned int num_parts,
                                            unsigned char **memory,
                                            const char **err);

// Loads single-frame OpenEXR deep image.
// The application must free the memory of DeepImage members (image,
// offset_table).
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
                       const char **err);

// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
//                        const char **err);

// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
//                                 const char **err);
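// Illustrative usage sketch (not compiled): saving three float planes with
// SaveEXRImageToFile declared above. The plane pointers, helper name and
// output file name are hypothetical; channels are stored in B, G, R order,
// as is conventional for EXR.
#if 0
static void SaveExample(float *b, float *g, float *r, int width, int height) {
  EXRHeader header;
  InitEXRHeader(&header);
  EXRImage image;
  InitEXRImage(&image);

  float *planes[3] = {b, g, r};
  image.images = reinterpret_cast<unsigned char **>(planes);
  image.num_channels = 3;
  image.width = width;
  image.height = height;

  header.num_channels = 3;
  header.channels = static_cast<EXRChannelInfo *>(
      malloc(sizeof(EXRChannelInfo) * header.num_channels));
  strncpy(header.channels[0].name, "B", 255);
  strncpy(header.channels[1].name, "G", 255);
  strncpy(header.channels[2].name, "R", 255);

  header.pixel_types = static_cast<int *>(malloc(sizeof(int) * 3));
  header.requested_pixel_types = static_cast<int *>(malloc(sizeof(int) * 3));
  for (int i = 0; i < header.num_channels; i++) {
    header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;           // in memory
    header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF;  // in file
  }

  const char *err = NULL;
  if (SaveEXRImageToFile(&image, &header, "output.exr", &err) !=
      TINYEXR_SUCCESS) {
    if (err) FreeEXRErrorMessage(err);
  }

  free(header.channels);
  free(header.pixel_types);
  free(header.requested_pixel_types);
}
#endif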
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, the application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                             const unsigned char *memory, size_t size,
                             const char **err);

#ifdef __cplusplus
}
#endif

#endif  // TINYEXR_H_

#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED

#ifdef _WIN32

#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>  // for UTF-8

#endif

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>

// #include <iostream> // debug

#include <limits>
#include <string>
#include <vector>
#include <set>

// https://stackoverflow.com/questions/5047971/how-do-i-check-for-c11-support
#if __cplusplus > 199711L || (defined(_MSC_VER) && _MSC_VER >= 1900)
#define TINYEXR_HAS_CXX11 (1)
// C++11
#include <cstdint>

#if TINYEXR_USE_THREAD
#include <atomic>
#include <thread>
#endif

#endif  // __cplusplus > 199711L

#if TINYEXR_USE_OPENMP
#include <omp.h>
#endif

#if TINYEXR_USE_MINIZ
#include <miniz.h>
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif

#if TINYEXR_USE_ZFP
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Weverything"
#endif
#include "zfp.h"
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif

namespace tinyexr {

#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #endif typedef unsigned long long tinyexr_uint64; typedef long long tinyexr_int64; #ifdef __clang__ #pragma clang diagnostic pop #endif #endif // static bool IsBigEndian(void) { // union { // unsigned int i; // char c[4]; // } bint = {0x01020304}; // // return bint.c[0] == 1; //} static void SetErrorMessage(const std::string &msg, const char **err) { if (err) { #ifdef _WIN32 (*err) = _strdup(msg.c_str()); #else (*err) = strdup(msg.c_str()); #endif } } static void SetWarningMessage(const std::string &msg, const char **warn) { if (warn) { #ifdef _WIN32 (*warn) = _strdup(msg.c_str()); #else (*warn) = strdup(msg.c_str()); #endif } } static const int kEXRVersionSize = 8; static void cpy2(unsigned short *dst_val, const unsigned short *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; } static void swap2(unsigned short *val) { #ifdef TINYEXR_LITTLE_ENDIAN (void)val; #else unsigned short tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[1]; dst[1] = src[0]; #endif } #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-function" #endif #ifdef __GNUC__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-function" #endif static void cpy4(int *dst_val, const int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(unsigned int *dst_val, const unsigned int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(float *dst_val, const float *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } #ifdef __clang__ #pragma clang diagnostic pop #endif #ifdef __GNUC__ #pragma GCC diagnostic pop #endif static void swap4(unsigned int *val) { #ifdef TINYEXR_LITTLE_ENDIAN (void)val; #else unsigned int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } static void swap4(int *val) { #ifdef TINYEXR_LITTLE_ENDIAN (void)val; #else int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } static void swap4(float *val) { #ifdef TINYEXR_LITTLE_ENDIAN (void)val; #else float tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } #if 0 static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const 
unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; dst[4] = src[4]; dst[5] = src[5]; dst[6] = src[6]; dst[7] = src[7]; } #endif static void swap8(tinyexr::tinyexr_uint64 *val) { #ifdef TINYEXR_LITTLE_ENDIAN (void)val; #else tinyexr::tinyexr_uint64 tmp = (*val); unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; #endif } // https://gist.github.com/rygorous/2156668 union FP32 { unsigned int u; float f; struct { #if TINYEXR_LITTLE_ENDIAN unsigned int Mantissa : 23; unsigned int Exponent : 8; unsigned int Sign : 1; #else unsigned int Sign : 1; unsigned int Exponent : 8; unsigned int Mantissa : 23; #endif } s; }; #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpadded" #endif union FP16 { unsigned short u; struct { #if TINYEXR_LITTLE_ENDIAN unsigned int Mantissa : 10; unsigned int Exponent : 5; unsigned int Sign : 1; #else unsigned int Sign : 1; unsigned int Exponent : 5; unsigned int Mantissa : 10; #endif } s; }; #ifdef __clang__ #pragma clang diagnostic pop #endif static FP32 half_to_float(FP16 h) { static const FP32 magic = {113 << 23}; static const unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift FP32 o; o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits unsigned int exp_ = shifted_exp & o.u; // just the exponent o.u += (127 - 15) << 23; // exponent adjust // handle exponent special cases if (exp_ == shifted_exp) // Inf/NaN? o.u += (128 - 16) << 23; // extra exp adjust else if (exp_ == 0) // Zero/Denormal? { o.u += 1 << 23; // extra exp adjust o.f -= magic.f; // renormalize } o.u |= (h.u & 0x8000U) << 16U; // sign bit return o; } static FP16 float_to_half_full(FP32 f) { FP16 o = {0}; // Based on ISPC reference code (with minor modifications) if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow) o.s.Exponent = 0; else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set) { o.s.Exponent = 31; o.s.Mantissa = f.s.Mantissa ? 
0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else    // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;                    // Round, might overflow to inf, this is OK
    }
  }

  o.s.Sign = f.s.Sign;
  return o;
}

// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y  0
// #define IMF_DECREASING_Y  1
// #define IMF_RANDOM_Y      2
//
// #define IMF_NO_COMPRESSION    0
// #define IMF_RLE_COMPRESSION   1
// #define IMF_ZIPS_COMPRESSION  2
// #define IMF_ZIP_COMPRESSION   3
// #define IMF_PIZ_COMPRESSION   4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION   6
// #define IMF_B44A_COMPRESSION  7

#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif

static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until NULL(\0).
  const char *p = ptr;
  const char *q = ptr;
  while ((size_t(q - ptr) < len) && (*q) != 0) {
    q++;
  }

  if (size_t(q - ptr) >= len) {
    (*s).clear();
    return NULL;
  }

  (*s) = std::string(p, q);

  return q + 1;  // skip '\0'
}

static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data, size_t *marker_size,
                          const char *marker, size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
    return false;
  }
  *name = std::string(marker, name_len);

  marker += name_len + 1;
  size -= name_len + 1;

  size_t type_len = strnlen(marker, size);
  if (type_len == size) {
    return false;
  }
  *type = std::string(marker, type_len);

  marker += type_len + 1;
  size -= type_len + 1;

  if (size < sizeof(uint32_t)) {
    return false;
  }

  uint32_t data_len;
  memcpy(&data_len, marker, sizeof(uint32_t));
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

  if (data_len == 0) {
    if ((*type).compare("string") == 0) {
      // Accept empty string attribute.
marker += sizeof(uint32_t); size -= sizeof(uint32_t); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t); data->resize(1); (*data)[0] = '\0'; return true; } else { return false; } } marker += sizeof(uint32_t); size -= sizeof(uint32_t); if (size < data_len) { return false; } data->resize(static_cast<size_t>(data_len)); memcpy(&data->at(0), marker, static_cast<size_t>(data_len)); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len; return true; } static void WriteAttributeToMemory(std::vector<unsigned char> *out, const char *name, const char *type, const unsigned char *data, int len) { out->insert(out->end(), name, name + strlen(name) + 1); out->insert(out->end(), type, type + strlen(type) + 1); int outLen = len; tinyexr::swap4(&outLen); out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen), reinterpret_cast<unsigned char *>(&outLen) + sizeof(int)); out->insert(out->end(), data, data + len); } typedef struct { std::string name; // less than 255 bytes long int pixel_type; int requested_pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } ChannelInfo; typedef struct { int min_x; int min_y; int max_x; int max_y; } Box2iInfo; struct HeaderInfo { std::vector<tinyexr::ChannelInfo> channels; std::vector<EXRAttribute> attributes; Box2iInfo data_window; int line_order; Box2iInfo display_window; float screen_window_center[2]; float screen_window_width; float pixel_aspect_ratio; int chunk_count; // Tiled format int tiled; // Non-zero if the part is tiled. int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; unsigned int header_len; int compression_type; // required for multi-part or non-image files std::string name; // required for multi-part or non-image files std::string type; void clear() { channels.clear(); attributes.clear(); data_window.min_x = 0; data_window.min_y = 0; data_window.max_x = 0; data_window.max_y = 0; line_order = 0; display_window.min_x = 0; display_window.min_y = 0; display_window.max_x = 0; display_window.max_y = 0; screen_window_center[0] = 0.0f; screen_window_center[1] = 0.0f; screen_window_width = 0.0f; pixel_aspect_ratio = 0.0f; chunk_count = 0; // Tiled format tiled = 0; tile_size_x = 0; tile_size_y = 0; tile_level_mode = 0; tile_rounding_mode = 0; header_len = 0; compression_type = 0; name.clear(); type.clear(); } }; static bool ReadChannelInfo(std::vector<ChannelInfo> &channels, const std::vector<unsigned char> &data) { const char *p = reinterpret_cast<const char *>(&data.at(0)); for (;;) { if ((*p) == 0) { break; } ChannelInfo info; tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) - (p - reinterpret_cast<const char *>(data.data())); if (data_len < 0) { return false; } p = ReadString(&info.name, p, size_t(data_len)); if ((p == NULL) && (info.name.empty())) { // Buffer overrun. Issue #51. 
      return false;
    }

    const unsigned char *data_end =
        reinterpret_cast<const unsigned char *>(p) + 16;
    if (data_end >= (data.data() + data.size())) {
      return false;
    }

    memcpy(&info.pixel_type, p, sizeof(int));
    p += 4;
    info.p_linear = static_cast<unsigned char>(p[0]);  // uchar
    p += 1 + 3;                                        // reserved: uchar[3]
    memcpy(&info.x_sampling, p, sizeof(int));          // int
    p += 4;
    memcpy(&info.y_sampling, p, sizeof(int));          // int
    p += 4;

    tinyexr::swap4(&info.pixel_type);
    tinyexr::swap4(&info.x_sampling);
    tinyexr::swap4(&info.y_sampling);

    channels.push_back(info);
  }

  return true;
}

static void WriteChannelInfo(std::vector<unsigned char> &data,
                             const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;

  // Calculate total size.
  for (size_t c = 0; c < channels.size(); c++) {
    sz += channels[c].name.length() + 1;  // +1 for \0
    sz += 16;                             // 4 * int
  }
  data.resize(sz + 1);

  unsigned char *p = &data.at(0);

  for (size_t c = 0; c < channels.size(); c++) {
    memcpy(p, channels[c].name.c_str(), channels[c].name.length());
    p += channels[c].name.length();
    (*p) = '\0';
    p++;

    int pixel_type = channels[c].requested_pixel_type;
    int x_sampling = channels[c].x_sampling;
    int y_sampling = channels[c].y_sampling;
    tinyexr::swap4(&pixel_type);
    tinyexr::swap4(&x_sampling);
    tinyexr::swap4(&y_sampling);

    memcpy(p, &pixel_type, sizeof(int));
    p += sizeof(int);

    (*p) = channels[c].p_linear;
    p += 4;

    memcpy(p, &x_sampling, sizeof(int));
    p += sizeof(int);

    memcpy(p, &y_sampling, sizeof(int));
    p += sizeof(int);
  }

  (*p) = '\0';
}

static void CompressZip(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  //
  // Reorder the pixel data.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

#if TINYEXR_USE_MINIZ
  //
  // Compress the data using miniz
  //

  mz_ulong outSize = mz_compressBound(src_size);
  int ret = mz_compress(
      dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
      src_size);
  assert(ret == MZ_OK);
  (void)ret;

  compressedSize = outSize;
#else
  uLong outSize = compressBound(static_cast<uLong>(src_size));
  int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
                     src_size);
  assert(ret == Z_OK);
  compressedSize = outSize;
#endif

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}
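// Illustration only (not compiled): the de-interleave + delta preprocessing
// performed by CompressZip above, and its inverse as applied by DecompressZip
// below, on a hypothetical 6-byte buffer. The split puts similar bytes next to
// each other, and the biased delta turns slow ramps into runs of values near
// 128, both of which help the deflate stage.
#if 0
static void ZipPreprocessExample() {
  unsigned char buf[6] = {10, 200, 11, 201, 12, 202};
  unsigned char tmp[6];

  // 1. De-interleave: even-indexed bytes to the front half, odd to the back.
  //    tmp becomes {10, 11, 12, 200, 201, 202}.
  unsigned char *t1 = tmp, *t2 = tmp + (6 + 1) / 2;
  for (int i = 0; i < 6; i++) {
    if (i & 1)
      *t2++ = buf[i];
    else
      *t1++ = buf[i];
  }

  // 2. Delta-encode with a +128 bias (modulo 256), back to front so each
  //    difference is taken against the original previous byte.
  for (int i = 5; i >= 1; i--) {
    tmp[i] =
        static_cast<unsigned char>(int(tmp[i]) - int(tmp[i - 1]) + 128 + 256);
  }

  // Inverse (what DecompressZip does after inflating): integrate the deltas,
  // then re-interleave the two halves.
  for (int i = 1; i < 6; i++) {
    tmp[i] = static_cast<unsigned char>(int(tmp[i - 1]) + int(tmp[i]) - 128);
  }
}
#endif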
static bool DecompressZip(unsigned char *dst,
                          unsigned long *uncompressed_size /* inout */,
                          const unsigned char *src, unsigned long src_size) {
  if ((*uncompressed_size) == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  std::vector<unsigned char> tmpBuf(*uncompressed_size);

#if TINYEXR_USE_MINIZ
  int ret = mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (MZ_OK != ret) {
    return false;
  }
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (Z_OK != ret) {
    return false;
  }
#endif

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  // Predictor.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}

// RLE code from OpenEXR --------------------------------------

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204)  // nonstandard extension used : non-constant
                                 // aggregate initializer (also supported by GNU
                                 // C and C99, so no big deal)
#pragma warning(disable : 4244)  // 'initializing': conversion from '__int64'
                                 // to 'int', possible loss of data
#pragma warning(disable : 4267)  // 'argument': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4996)  // 'strdup': The POSIX name for this item is
                                 // deprecated. Instead, use the ISO C and C++
                                 // conformant name: _strdup.
#endif

const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;

//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//

static int rleCompress(int inLength, const char in[], signed char out[]) {
  const char *inEnd = in + inLength;
  const char *runStart = in;
  const char *runEnd = in + 1;
  signed char *outWrite = out;

  while (runStart < inEnd) {
    while (runEnd < inEnd && *runStart == *runEnd &&
           runEnd - runStart - 1 < MAX_RUN_LENGTH) {
      ++runEnd;
    }

    if (runEnd - runStart >= MIN_RUN_LENGTH) {
      //
      // Compressible run
      //

      *outWrite++ = static_cast<char>(runEnd - runStart) - 1;
      *outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
      runStart = runEnd;
    } else {
      //
      // Uncompressible run
      //

      while (runEnd < inEnd &&
             ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
              (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
             runEnd - runStart < MAX_RUN_LENGTH) {
        ++runEnd;
      }

      *outWrite++ = static_cast<char>(runStart - runEnd);

      while (runStart < runEnd) {
        *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
      }
    }

    ++runEnd;
  }

  return static_cast<int>(outWrite - out);
}

//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//

static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *outStart = out;

  while (inLength > 0) {
    if (*in < 0) {
      int count = -(static_cast<int>(*in++));
      inLength -= count + 1;

      // Fixes #116: Add bounds check to in buffer.
      if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;

      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      int count = *in++;
      inLength -= 2;

      if (0 > (maxLength -= count + 1)) return 0;

      memset(out, *reinterpret_cast<const char *>(in), count + 1);
      out += count + 1;

      in++;
    }
  }

  return static_cast<int>(out - outStart);
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// End of RLE code from OpenEXR -----------------------------------

static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  //
  // Reorder the pixel data.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // outSize will be (src_size * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);

  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

static bool DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }

  // Workaround for issue #112.
  // TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
  if (src_size <= 2) {
    return false;
  }

  std::vector<unsigned char> tmpBuf(uncompressed_size);

  int ret = rleUncompress(static_cast<int>(src_size),
                          static_cast<int>(uncompressed_size),
                          reinterpret_cast<const signed char *>(src),
                          reinterpret_cast<char *>(&tmpBuf.at(0)));
  if (ret != static_cast<int>(uncompressed_size)) {
    return false;
  }

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  // Predictor.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + uncompressed_size;

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}

#if TINYEXR_USE_PIZ

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"

#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif

#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif

#endif

//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
// (3 clause BSD license)
//

struct PIZChannelData {
  unsigned short *start;
  unsigned short *end;
  int nx;
  int ny;
  int ys;
  int size;
};

//-----------------------------------------------------------------------------
//
//  16-bit Haar Wavelet encoding and decoding
//
//  The source code in this file is derived from the encoding
//  and decoding routines written by Christian Rouet for his
//  PIZ image file format.
//
//-----------------------------------------------------------------------------

//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//

inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  short as = static_cast<short>(a);
  short bs = static_cast<short>(b);

  short ms = (as + bs) >> 1;
  short ds = as - bs;

  l = static_cast<unsigned short>(ms);
  h = static_cast<unsigned short>(ds);
}

inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  short ls = static_cast<short>(l);
  short hs = static_cast<short>(h);

  int hi = hs;
  int ai = ls + (hi & 1) + (hi >> 1);

  short as = static_cast<short>(ai);
  short bs = static_cast<short>(ai - hi);

  a = static_cast<unsigned short>(as);
  b = static_cast<unsigned short>(bs);
}
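// Illustration only (not compiled): wenc14/wdec14 form an exact integer round
// trip for 14-bit inputs; the sample values and helper name are hypothetical.
#if 0
static void Wav14RoundTripExample() {
  unsigned short a = 3, b = 5;
  unsigned short l, h;
  wenc14(a, b, l, h);  // l = floor((a + b) / 2) = 4, h = a - b = -2 (as ushort)

  unsigned short a2, b2;
  wdec14(l, h, a2, b2);  // recovers a2 == 3, b2 == 5 exactly
}
#endif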
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//

const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;

inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  int ao = (a + A_OFFSET) & MOD_MASK;
  int m = ((ao + b) >> 1);
  int d = ao - b;

  if (d < 0) m = (m + M_OFFSET) & MOD_MASK;

  d &= MOD_MASK;

  l = static_cast<unsigned short>(m);
  h = static_cast<unsigned short>(d);
}

inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  int m = l;
  int d = h;
  int bb = (m - (d >> 1)) & MOD_MASK;
  int aa = (d + bb - A_OFFSET) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>(aa);
}

//
// 2D Wavelet encoding:
//

static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 <<  level
  int p2 = 2;  // == 1 << (level+1)

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet encoding
        //

        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }

      //
      // Encode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Encode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p = p2;
    p2 <<= 1;
  }
}

//
// 2D Wavelet decoding:
//

static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ?
ny : nx; int p = 1; int p2; // // Search max level // while (p <= n) p <<= 1; p >>= 1; p2 = p; p >>= 1; // // Hierarchical loop on smaller dimension n // while (p >= 1) { unsigned short *py = in; unsigned short *ey = in + oy * (ny - p2); int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; unsigned short i00, i01, i10, i11; // // Y loop // for (; py <= ey; py += oy2) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); // // X loop // for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; unsigned short *p10 = px + oy1; unsigned short *p11 = p10 + ox1; // // 2D wavelet decoding // if (w14) { wdec14(*px, *p10, i00, i10); wdec14(*p01, *p11, i01, i11); wdec14(i00, i01, *px, *p01); wdec14(i10, i11, *p10, *p11); } else { wdec16(*px, *p10, i00, i10); wdec16(*p01, *p11, i01, i11); wdec16(i00, i01, *px, *p01); wdec16(i10, i11, *p10, *p11); } } // // Decode (1D) odd column (still in Y loop) // if (nx & p) { unsigned short *p10 = px + oy1; if (w14) wdec14(*px, *p10, i00, *p10); else wdec16(*px, *p10, i00, *p10); *px = i00; } } // // Decode (1D) odd line (must loop in X) // if (ny & p) { unsigned short *px = py; unsigned short *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { unsigned short *p01 = px + ox1; if (w14) wdec14(*px, *p01, i00, *p01); else wdec16(*px, *p01, i00, *p01); *px = i00; } } // // Next level // p2 = p; p >>= 1; } } //----------------------------------------------------------------------------- // // 16-bit Huffman compression and decompression. // // The source code in this file is derived from the 8-bit // Huffman compression and decompression routines written // by Christian Rouet for his PIZ image file format. // //----------------------------------------------------------------------------- // Adds some modification for tinyexr. 
const int HUF_ENCBITS = 16; // literal (value) bit length const int HUF_DECBITS = 14; // decoding bit size (>= 8) const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size const int HUF_DECMASK = HUF_DECSIZE - 1; struct HufDec { // short code long code //------------------------------- unsigned int len : 8; // code length 0 unsigned int lit : 24; // lit p size unsigned int *p; // 0 lits }; inline long long hufLength(long long code) { return code & 63; } inline long long hufCode(long long code) { return code >> 6; } inline void outputBits(int nBits, long long bits, long long &c, int &lc, char *&out) { c <<= nBits; lc += nBits; c |= bits; while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8))); } inline long long getBits(int nBits, long long &c, int &lc, const char *&in) { while (lc < nBits) { c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++)); lc += 8; } lc -= nBits; return (c >> lc) & ((1 << nBits) - 1); } // // ENCODING TABLE BUILDING & (UN)PACKING // // // Build a "canonical" Huffman code table: // - for each (uncompressed) symbol, hcode contains the length // of the corresponding code (in the compressed data) // - canonical codes are computed and stored in hcode // - the rules for constructing canonical codes are as follows: // * shorter codes (if filled with zeroes to the right) // have a numerically higher value than longer codes // * for codes with the same length, numerical values // increase with numerical symbol values // - because the canonical code table can be constructed from // symbol lengths alone, the code table can be transmitted // without sending the actual code values // - see http://www.compressconsult.com/huffman/ // static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) { long long n[59]; // // For each i from 0 through 58, count the // number of different codes of length i, and // store the count in n[i]. // for (int i = 0; i <= 58; ++i) n[i] = 0; for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1; // // For each i from 58 through 1, compute the // numerically lowest code with length i, and // store that code in n[i]. // long long c = 0; for (int i = 58; i > 0; --i) { long long nc = ((c + n[i]) >> 1); n[i] = c; c = nc; } // // hcode[i] contains the length, l, of the // code for symbol i. Assign the next available // code of length l to the symbol and store both // l and the code in hcode[i]. // for (int i = 0; i < HUF_ENCSIZE; ++i) { int l = static_cast<int>(hcode[i]); if (l > 0) hcode[i] = l | (n[l]++ << 6); } } // // Compute Huffman codes (based on frq input) and store them in frq: // - code structure is : [63:lsb - 6:msb] | [5-0: bit length]; // - max code length is 58 bits; // - codes outside the range [im-iM] have a null length (unused values); // - original frequencies are destroyed; // - encoding tables are used by hufEncode() and hufBuildDecTable(); // struct FHeapCompare { bool operator()(long long *a, long long *b) { return *a > *b; } }; static void hufBuildEncTable( long long *frq, // io: input frequencies [HUF_ENCSIZE], output table int *im, // o: min frq index int *iM) // o: max frq index { // // This function assumes that when it is called, array frq // indicates the frequency of all possible symbols in the data // that are to be Huffman-encoded. (frq[i] contains the number // of occurrences of symbol i in the data.) 
// // The loop below does three things: // // 1) Finds the minimum and maximum indices that point // to non-zero entries in frq: // // frq[im] != 0, and frq[i] == 0 for all i < im // frq[iM] != 0, and frq[i] == 0 for all i > iM // // 2) Fills array fHeap with pointers to all non-zero // entries in frq. // // 3) Initializes array hlink such that hlink[i] == i // for all array entries. // std::vector<int> hlink(HUF_ENCSIZE); std::vector<long long *> fHeap(HUF_ENCSIZE); *im = 0; while (!frq[*im]) (*im)++; int nf = 0; for (int i = *im; i < HUF_ENCSIZE; i++) { hlink[i] = i; if (frq[i]) { fHeap[nf] = &frq[i]; nf++; *iM = i; } } // // Add a pseudo-symbol, with a frequency count of 1, to frq; // adjust the fHeap and hlink array accordingly. Function // hufEncode() uses the pseudo-symbol for run-length encoding. // (*iM)++; frq[*iM] = 1; fHeap[nf] = &frq[*iM]; nf++; // // Build an array, scode, such that scode[i] contains the number // of bits assigned to symbol i. Conceptually this is done by // constructing a tree whose leaves are the symbols with non-zero // frequency: // // Make a heap that contains all symbols with a non-zero frequency, // with the least frequent symbol on top. // // Repeat until only one symbol is left on the heap: // // Take the two least frequent symbols off the top of the heap. // Create a new node that has first two nodes as children, and // whose frequency is the sum of the frequencies of the first // two nodes. Put the new node back into the heap. // // The last node left on the heap is the root of the tree. For each // leaf node, the distance between the root and the leaf is the length // of the code for the corresponding symbol. // // The loop below doesn't actually build the tree; instead we compute // the distances of the leaves from the root on the fly. When a new // node is added to the heap, then that node's descendants are linked // into a single linear list that starts at the new node, and the code // lengths of the descendants (that is, their distance from the root // of the tree) are incremented by one. // std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); std::vector<long long> scode(HUF_ENCSIZE); memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE); while (nf > 1) { // // Find the indices, mm and m, of the two smallest non-zero frq // values in fHeap, add the smallest frq to the second-smallest // frq, and remove the smallest frq value from fHeap. // int mm = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); --nf; int m = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); frq[m] += frq[mm]; std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); // // The entries in scode are linked into lists with the // entries in hlink serving as "next" pointers and with // the end of a list marked by hlink[j] == j. // // Traverse the lists that start at scode[m] and scode[mm]. // For each element visited, increment the length of the // corresponding code by one bit. (If we visit scode[j] // during the traversal, then the code for symbol j becomes // one bit longer.) // // Merge the lists that start at scode[m] and scode[mm] // into a single list that starts at scode[m]. // // // Add a bit to all codes in the first list. // for (int j = m;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) { // // Merge the two lists. 
// hlink[j] = mm; break; } } // // Add a bit to all codes in the second list // for (int j = mm;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) break; } } // // Build a canonical Huffman code table, replacing the code // lengths in scode with (code, code length) pairs. Copy the // code table from scode into frq. // hufCanonicalCodeTable(scode.data()); memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE); } // // Pack an encoding table: // - only code lengths, not actual codes, are stored // - runs of zeroes are compressed as follows: // // unpacked packed // -------------------------------- // 1 zero 0 (6 bits) // 2 zeroes 59 // 3 zeroes 60 // 4 zeroes 61 // 5 zeroes 62 // n zeroes (6 or more) 63 n-6 (6 + 8 bits) // const int SHORT_ZEROCODE_RUN = 59; const int LONG_ZEROCODE_RUN = 63; const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN; const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN; static void hufPackEncTable( const long long *hcode, // i : encoding table [HUF_ENCSIZE] int im, // i : min hcode index int iM, // i : max hcode index char **pcode) // o: ptr to packed table (updated) { char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { int l = hufLength(hcode[im]); if (l == 0) { int zerun = 1; while ((im < iM) && (zerun < LONGEST_LONG_RUN)) { if (hufLength(hcode[im + 1]) > 0) break; im++; zerun++; } if (zerun >= 2) { if (zerun >= SHORTEST_LONG_RUN) { outputBits(6, LONG_ZEROCODE_RUN, c, lc, p); outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p); } else { outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p); } continue; } } outputBits(6, l, c, lc, p); } if (lc > 0) *p++ = (unsigned char)(c << (8 - lc)); *pcode = p; } // // Unpack an encoding table packed by hufPackEncTable(): // static bool hufUnpackEncTable( const char **pcode, // io: ptr to packed table (updated) int ni, // i : input size (in bytes) int im, // i : min hcode index int iM, // i : max hcode index long long *hcode) // o: encoding table [HUF_ENCSIZE] { memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE); const char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { if (p - *pcode >= ni) { return false; } long long l = hcode[im] = getBits(6, c, lc, p); // code length if (l == (long long)LONG_ZEROCODE_RUN) { if (p - *pcode > ni) { return false; } int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } else if (l >= (long long)SHORT_ZEROCODE_RUN) { int zerun = l - SHORT_ZEROCODE_RUN + 2; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } } *pcode = const_cast<char *>(p); hufCanonicalCodeTable(hcode); return true; } // // DECODING TABLE BUILDING // // // Clear a newly allocated decoding table so that it contains only zeroes. 
//

static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
                                              //     decoding table
                                              //     [HUF_DECSIZE]
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    hdecod[i].len = 0;
    hdecod[i].lit = 0;
    hdecod[i].p = NULL;
  }
  // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}

//
// Build a decoding hash table based on the encoding table hcode:
//  - short codes (<= HUF_DECBITS) are resolved with a single table access;
//  - long code entry allocations are not optimized, because long codes are
//    infrequent;
//  - decoding tables are used by hufDecode();
//

static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  // o: (allocated by caller)
                                              //    decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //

  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //

      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //

        // invalidTableEntry();
        return false;
      }

      pl->lit++;

      if (pl->p) {
        unsigned int *p = pl->p;
        pl->p = new unsigned int[pl->lit];

        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new unsigned int[1];
      }

      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //

      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //

          // invalidTableEntry();
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }

  return true;
}

//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//

static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    if (hdecod[i].p) {
      delete[] hdecod[i].p;
      hdecod[i].p = 0;
    }
  }
}

//
// ENCODING
//

inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}

inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCode.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
// if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) { outputCode(sCode, c, lc, out); outputCode(runCode, c, lc, out); outputBits(8, runCount, c, lc, out); } else { while (runCount-- >= 0) outputCode(sCode, c, lc, out); } } // // Encode (compress) ni values based on the Huffman encoding table hcode: // static int hufEncode // return: output size (in bits) (const long long *hcode, // i : encoding table const unsigned short *in, // i : uncompressed input buffer const int ni, // i : input buffer size (in bytes) int rlc, // i : rl code char *out) // o: compressed output buffer { char *outStart = out; long long c = 0; // bits not yet written to out int lc = 0; // number of valid bits in c (LSB) int s = in[0]; int cs = 0; // // Loop on input values // for (int i = 1; i < ni; i++) { // // Count same values or send code // if (s == in[i] && cs < 255) { cs++; } else { sendCode(hcode[s], cs, hcode[rlc], c, lc, out); cs = 0; } s = in[i]; } // // Send remaining code // sendCode(hcode[s], cs, hcode[rlc], c, lc, out); if (lc) *out = (c << (8 - lc)) & 0xff; return (out - outStart) * 8 + lc; } // // DECODING // // // In order to force the compiler to inline them, // getChar() and getCode() are implemented as macros // instead of "inline" functions. // #define getChar(c, lc, in) \ { \ c = (c << 8) | *(unsigned char *)(in++); \ lc += 8; \ } #if 0 #define getCode(po, rlc, c, lc, in, out, ob, oe) \ { \ if (po == rlc) { \ if (lc < 8) getChar(c, lc, in); \ \ lc -= 8; \ \ unsigned char cs = (c >> lc); \ \ if (out + cs > oe) return false; \ \ /* TinyEXR issue 78 */ \ unsigned short s = out[-1]; \ \ while (cs-- > 0) *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return false; \ } \ } #else static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in, const char *in_end, unsigned short *&out, const unsigned short *ob, const unsigned short *oe) { (void)ob; if (po == rlc) { if (lc < 8) { /* TinyEXR issue 78 */ /* TinyEXR issue 160. in + 1 -> in */ if (in >= in_end) { return false; } getChar(c, lc, in); } lc -= 8; unsigned char cs = (c >> lc); if (out + cs > oe) return false; // Bounds check for safety // Issue 100. 
if ((out - 1) < ob) return false; unsigned short s = out[-1]; while (cs-- > 0) *out++ = s; } else if (out < oe) { *out++ = po; } else { return false; } return true; } #endif // // Decode (uncompress) ni bits based on encoding & decoding tables: // static bool hufDecode(const long long *hcode, // i : encoding table const HufDec *hdecod, // i : decoding table const char *in, // i : compressed input buffer int ni, // i : input size (in bits) int rlc, // i : run-length code int no, // i : expected output size (in bytes) unsigned short *out) // o: uncompressed output buffer { long long c = 0; int lc = 0; unsigned short *outb = out; // begin unsigned short *oe = out + no; // end const char *ie = in + (ni + 7) / 8; // input byte size // // Loop on input bytes // while (in < ie) { getChar(c, lc, in); // // Access decoding table // while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { // // Get short code // lc -= pl.len; // std::cout << "lit = " << pl.lit << std::endl; // std::cout << "rlc = " << rlc << std::endl; // std::cout << "c = " << c << std::endl; // std::cout << "lc = " << lc << std::endl; // std::cout << "in = " << in << std::endl; // std::cout << "out = " << out << std::endl; // std::cout << "oe = " << oe << std::endl; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { if (!pl.p) { return false; } // invalidCode(); // wrong code // // Search long code // int j; for (j = 0; j < pl.lit; j++) { int l = hufLength(hcode[pl.p[j]]); while (lc < l && in < ie) // get more bits getChar(c, lc, in); if (lc >= l) { if (hufCode(hcode[pl.p[j]]) == ((c >> (lc - l)) & (((long long)(1) << l) - 1))) { // // Found : get long code // lc -= l; if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) { return false; } break; } } } if (j == pl.lit) { return false; // invalidCode(); // Not found } } } } // // Get remaining (short) codes // int i = (8 - ni) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { return false; // invalidCode(); // wrong (long) code } } if (out - outb != no) { return false; } // notEnoughData (); return true; } static void countFrequencies(std::vector<long long> &freq, const unsigned short data[/*n*/], int n) { for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0; for (int i = 0; i < n; ++i) ++freq[data[i]]; } static void writeUInt(char buf[4], unsigned int i) { unsigned char *b = (unsigned char *)buf; b[0] = i; b[1] = i >> 8; b[2] = i >> 16; b[3] = i >> 24; } static unsigned int readUInt(const char buf[4]) { const unsigned char *b = (const unsigned char *)buf; return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) | ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000); } // // EXTERNAL INTERFACE // static int hufCompress(const unsigned short raw[], int nRaw, char compressed[]) { if (nRaw == 0) return 0; std::vector<long long> freq(HUF_ENCSIZE); countFrequencies(freq, raw, nRaw); int im = 0; int iM = 0; hufBuildEncTable(freq.data(), &im, &iM); char *tableStart = compressed + 20; char *tableEnd = tableStart; hufPackEncTable(freq.data(), im, iM, &tableEnd); int tableLength = tableEnd - tableStart; char *dataStart = tableEnd; int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart); int data_length = (nBits + 7) / 8; writeUInt(compressed, im); writeUInt(compressed + 4, iM); writeUInt(compressed + 8, tableLength); 
writeUInt(compressed + 12, nBits); writeUInt(compressed + 16, 0); // room for future extensions return dataStart + data_length - compressed; } static bool hufUncompress(const char compressed[], int nCompressed, std::vector<unsigned short> *raw) { if (nCompressed == 0) { if (raw->size() != 0) return false; return false; } int im = readUInt(compressed); int iM = readUInt(compressed + 4); // int tableLength = readUInt (compressed + 8); int nBits = readUInt(compressed + 12); if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false; const char *ptr = compressed + 20; // // Fast decoder needs at least 2x64-bits of compressed data, and // needs to be run-able on this platform. Otherwise, fall back // to the original decoder // // if (FastHufDecoder::enabled() && nBits > 128) //{ // FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM); // fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw); //} // else { std::vector<long long> freq(HUF_ENCSIZE); std::vector<HufDec> hdec(HUF_DECSIZE); hufClearDecTable(&hdec.at(0)); hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM, &freq.at(0)); { if (nBits > 8 * (nCompressed - (ptr - compressed))) { return false; } hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0)); hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(), raw->data()); } // catch (...) //{ // hufFreeDecTable (hdec); // throw; //} hufFreeDecTable(&hdec.at(0)); } return true; } // // Functions to compress the range of values in the pixel data // const int USHORT_RANGE = (1 << 16); const int BITMAP_SIZE = (USHORT_RANGE >> 3); static void bitmapFromData(const unsigned short data[/*nData*/], int nData, unsigned char bitmap[BITMAP_SIZE], unsigned short &minNonZero, unsigned short &maxNonZero) { for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0; for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7)); bitmap[0] &= ~1; // zero is not explicitly stored in // the bitmap; we assume that the // data always contain zeroes minNonZero = BITMAP_SIZE - 1; maxNonZero = 0; for (int i = 0; i < BITMAP_SIZE; ++i) { if (bitmap[i]) { if (minNonZero > i) minNonZero = i; if (maxNonZero < i) maxNonZero = i; } } } static unsigned short forwardLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[i] = k++; else lut[i] = 0; } return k - 1; // maximum value stored in lut[], } // i.e. number of ones in bitmap minus 1 static unsigned short reverseLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i; } int n = k - 1; while (k < USHORT_RANGE) lut[k++] = 0; return n; // maximum k where lut[k] is non-zero, } // i.e. 
number of ones in bitmap minus 1 static void applyLut(const unsigned short lut[USHORT_RANGE], unsigned short data[/*nData*/], int nData) { for (int i = 0; i < nData; ++i) data[i] = lut[data[i]]; } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ #ifdef _MSC_VER #pragma warning(pop) #endif static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize, const unsigned char *inPtr, size_t inSize, const std::vector<ChannelInfo> &channelInfo, int data_width, int num_lines) { std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !TINYEXR_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif // Assume `inSize` is multiple of 2 or 4. std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short)); std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t c = 0; c < channelData.size(); c++) { PIZChannelData &cd = channelData[c]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = data_width; cd.ny = num_lines; // cd.ys = c.channel().ySampling; size_t pixelSize = sizeof(int); // UINT and FLOAT if (channelInfo[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } cd.size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += cd.nx * cd.ny * cd.size; } const unsigned char *ptr = inPtr; for (int y = 0; y < num_lines; ++y) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(cd.end, ptr, n * sizeof(unsigned short)); ptr += n * sizeof(unsigned short); cd.end += n; } } bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap.data(), minNonZero, maxNonZero); std::vector<unsigned short> lut(USHORT_RANGE); unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data()); applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size())); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero), maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff data. Initialize length header with zero, // then later fill it by `length`. char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf); memcpy(lengthPtr, &length, sizeof(int)); (*outSize) = static_cast<unsigned int>( (reinterpret_cast<unsigned char *>(buf) - outPtr) + static_cast<unsigned int>(length)); // Use uncompressed data when compressed data is larger than uncompressed. 
// (Issue 40) if ((*outSize) >= inSize) { (*outSize) = static_cast<unsigned int>(inSize); memcpy(outPtr, inPtr, inSize); } return true; } static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSizeInBytes, size_t inLen, int num_channels, const EXRChannelInfo *channels, int data_width, int num_lines) { if (inLen == tmpBufSizeInBytes) { // Data is not compressed(Issue 40). memcpy(outPtr, inPtr, inLen); return true; } std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !TINYEXR_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif memset(bitmap.data(), 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr)); // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } std::vector<unsigned short> lut(USHORT_RANGE); memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data()); // // Huffman decoding // int length; // length = *(reinterpret_cast<const int *>(ptr)); tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); if (size_t((ptr - inPtr) + length) > inLen) { return false; } std::vector<unsigned short> tmpBuffer(tmpBufSizeInBytes / sizeof(unsigned short)); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer); // // Wavelet decoding // std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels)); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) { const EXRChannelInfo &chan = channels[i]; size_t pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = data_width; channelData[i].ny = num_lines; // channelData[i].ys = 1; channelData[i].size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Expand the pixel data to their original range // applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSizeInBytes / sizeof(unsigned short))); for (int y = 0; y < num_lines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; unsigned int precision; unsigned int __pad0; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* unsigned int __pad1; ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; 
    tolerance = 0.0;
  }
};

static bool FindZFPCompressionParam(ZFPCompressionParam *param,
                                    const EXRAttribute *attributes,
                                    int num_attributes, std::string *err) {
  bool foundType = false;

  for (int i = 0; i < num_attributes; i++) {
    if ((strcmp(attributes[i].name, "zfpCompressionType") == 0)) {
      if (attributes[i].size == 1) {
        param->type = static_cast<int>(attributes[i].value[0]);
        foundType = true;
        break;
      } else {
        if (err) {
          (*err) +=
              "zfpCompressionType attribute must be uchar(1 byte) type.\n";
        }
        return false;
      }
    }
  }

  if (!foundType) {
    if (err) {
      (*err) += "`zfpCompressionType` attribute not found.\n";
    }
    return false;
  }

  if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
          (attributes[i].size == 8)) {
        param->rate = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }

    if (err) {
      (*err) += "`zfpCompressionRate` attribute not found.\n";
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
          (attributes[i].size == 4)) {
        // Store into `precision`, not `rate`: this attribute carries the bit
        // precision consumed by zfp_stream_set_precision() below.
        param->precision = static_cast<unsigned int>(
            *(reinterpret_cast<int *>(attributes[i].value)));
        return true;
      }
    }

    if (err) {
      (*err) += "`zfpCompressionPrecision` attribute not found.\n";
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
          (attributes[i].size == 8)) {
        param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }

    if (err) {
      (*err) += "`zfpCompressionTolerance` attribute not found.\n";
    }
  } else {
    if (err) {
      (*err) += "Unknown value specified for `zfpCompressionType`.\n";
    }
  }

  return false;
}

// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
                          size_t num_channels, const unsigned char *src,
                          unsigned long src_size,
                          const ZFPCompressionParam &param) {
  // Size in bytes: all channels are FLOAT here, so compare against the
  // byte count of the expected output, matching the PIZ path's check.
  size_t uncompressed_size =
      size_t(dst_width) * size_t(dst_num_lines) * num_channels * sizeof(float);

  if (uncompressed_size == src_size) {
    // Data is not compressed (Issue 40); copy through and skip ZFP decoding.
    memcpy(dst, src, src_size);
    return true;
  }

  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  assert((dst_width % 4) == 0);
  assert((dst_num_lines % 4) == 0);

  if ((size_t(dst_width) & 3U) || (size_t(dst_num_lines) & 3U)) {
    return false;
  }

  field =
      zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
                   zfp_type_float, static_cast<unsigned int>(dst_width),
                   static_cast<unsigned int>(dst_num_lines) *
                       static_cast<unsigned int>(num_channels));
  zfp = zfp_stream_open(NULL);

  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
                        /* write random access */ 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance);
  } else {
    assert(0);
  }

  size_t buf_size = zfp_stream_maximum_size(zfp, field);

  std::vector<unsigned char> buf(buf_size);
  memcpy(&buf.at(0), src, src_size);

  bitstream *stream = stream_open(&buf.at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);
  zfp_stream_rewind(zfp);

  size_t image_size = size_t(dst_width) * size_t(dst_num_lines);

  for (size_t c = 0; c < size_t(num_channels); c++) {
    // decompress 4x4 pixel block.
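    // Each zfp_decode_block_float_2() call below yields one 4x4 block of
    // floats. Blocks are visited in row-major order; e.g. for an 8x8
    // single-channel image the order is (y, x) = (0, 0), (0, 4), (4, 0),
    // (4, 4), and fblock[j * 4 + i] is scattered to
    // dst[c * image_size + (y + j) * dst_width + (x + i)].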
for (size_t y = 0; y < size_t(dst_num_lines); y += 4) { for (size_t x = 0; x < size_t(dst_width); x += 4) { float fblock[16]; zfp_decode_block_float_2(zfp, fblock); for (size_t j = 0; j < 4; j++) { for (size_t i = 0; i < 4; i++) { dst[c * image_size + ((y + j) * size_t(dst_width) + (x + i))] = fblock[j * 4 + i]; } } } } } zfp_field_free(field); zfp_stream_close(zfp); stream_close(stream); return true; } // Assume pixel format is FLOAT for all channels. static bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize, const float *inPtr, int width, int num_lines, int num_channels, const ZFPCompressionParam &param) { zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((width % 4) == 0); assert((num_lines % 4) == 0); if ((size_t(width) & 3U) || (size_t(num_lines) & 3U)) { return false; } // create input array. field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)), zfp_type_float, static_cast<unsigned int>(width), static_cast<unsigned int>(num_lines * num_channels)); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); outBuf->resize(buf_size); bitstream *stream = stream_open(&outBuf->at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_field_free(field); size_t image_size = size_t(width) * size_t(num_lines); for (size_t c = 0; c < size_t(num_channels); c++) { // compress 4x4 pixel block. for (size_t y = 0; y < size_t(num_lines); y += 4) { for (size_t x = 0; x < size_t(width); x += 4) { float fblock[16]; for (size_t j = 0; j < 4; j++) { for (size_t i = 0; i < 4; i++) { fblock[j * 4 + i] = inPtr[c * image_size + ((y + j) * size_t(width) + (x + i))]; } } zfp_encode_block_float_2(zfp, fblock); } } } zfp_stream_flush(zfp); (*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp)); zfp_stream_close(zfp); return true; } #endif // // ----------------------------------------------------------------- // // heuristics #define TINYEXR_DIMENSION_THRESHOLD (1024 * 8192) // TODO(syoyo): Refactor function arguments. static bool DecodePixelData(/* out */ unsigned char **out_images, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int width, int height, int x_stride, int y, int line_no, int num_lines, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ #if TINYEXR_USE_PIZ if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) { // Invalid input #90 return false; } // Allocate original data size. 
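    // `pixel_data_size` is the per-pixel byte size summed over all channels
    // (see ComputeChannelLayout() further below); e.g. two HALF channels plus
    // one FLOAT channel give 2 + 2 + 4 = 8 bytes per pixel, so outBuf holds
    // the entire uncompressed scanline block.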
std::vector<unsigned char> outBuf(static_cast<size_t>( static_cast<size_t>(width * num_lines) * pixel_data_size)); size_t tmpBufLen = outBuf.size(); bool ret = tinyexr::DecompressPiz( reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen, data_len, static_cast<int>(num_channels), channels, width, num_lines); if (!ret) { return false; } // For PIZ_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { FP16 hf; // hf.u = line_ptr[u]; // use `cpy` to avoid unaligned memory access when compiler's // optimization is on. tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; size_t offset = 0; if (line_order == 0) { offset = (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { offset = static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } image += offset; *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>(&outBuf.at( v * pixel_data_size * static_cast<size_t>(x_stride) + channel_offset_list[c] * static_cast<size_t>(x_stride))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); 
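            // cpy4() copies byte-wise so a possibly unaligned line_ptr
            // access stays safe; swap4() then corrects byte order (EXR
            // stores pixel data little-endian, so on little-endian builds
            // this is expected to reduce to a no-op).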
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else assert(0 && "PIZ is enabled in this build"); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS || compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; size_t offset = 0; if (line_order == 0) { offset = (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { offset = (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } image += offset; *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - 
(static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); if (dstLen == 0) { return false; } if (!tinyexr::DecompressRle( reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... 
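    // Given that layout, the scanline for channel c at block-relative row v
    // starts at byte offset
    //   v * pixel_data_size * width + channel_offset_list[c] * width
    // in outBuf. E.g. three HALF channels (B, G, R) with width = 640:
    // pixel_data_size = 6 and channel_offset_list = {0, 2, 4}, so row v = 2
    // of G starts at 2 * 6 * 640 + 2 * 640 = 8960.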
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; std::string e; if (!tinyexr::FindZFPCompressionParam(&zfp_compression_param, attributes, int(num_attributes), &e)) { // This code path should not be reachable. assert(0); return false; } // Allocate original data size. 
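    // Note: the ZFP path assumes every channel is FLOAT (asserted in the
    // loop below), and DecompressZfp() additionally requires width and
    // height to be multiples of 4 to match zfp's 4x4 block coding.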
std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast<unsigned long>(data_len), zfp_compression_param); // For ZFP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } #else (void)attributes; (void)num_attributes; (void)num_channels; assert(0); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { for (size_t c = 0; c < num_channels; c++) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { const unsigned short *line_ptr = reinterpret_cast<const unsigned short *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); outLine[u] = hf.u; } } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } if (reinterpret_cast<const unsigned char *>(line_ptr + width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // address may not be aliged. 
            // use byte-wise copy for safety (#76).
            // hf.u = line_ptr[u];
            tinyexr::cpy2(&(hf.u), line_ptr + u);
            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
            tinyexr::FP32 f32 = half_to_float(hf);
            outLine[u] = f32.f;
          }
        } else {
          assert(0);
          return false;
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        const float *line_ptr = reinterpret_cast<const float *>(
            data_ptr + v * pixel_data_size * size_t(width) +
            channel_offset_list[c] * static_cast<size_t>(width));

        float *outLine = reinterpret_cast<float *>(out_images[c]);
        if (line_order == 0) {
          outLine += (size_t(y) + v) * size_t(x_stride);
        } else {
          outLine +=
              (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
        }

        if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
            (data_ptr + data_len)) {
          // Insufficient data size
          return false;
        }

        for (int u = 0; u < width; u++) {
          float val;
          tinyexr::cpy4(&val, line_ptr + u);
          tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
          outLine[u] = val;
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
            data_ptr + v * pixel_data_size * size_t(width) +
            channel_offset_list[c] * static_cast<size_t>(width));

        unsigned int *outLine =
            reinterpret_cast<unsigned int *>(out_images[c]);
        if (line_order == 0) {
          outLine += (size_t(y) + v) * size_t(x_stride);
        } else {
          outLine +=
              (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
        }

        for (int u = 0; u < width; u++) {
          if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
              (data_ptr + data_len)) {
            // Corrupted data?
            return false;
          }

          unsigned int val;
          tinyexr::cpy4(&val, line_ptr + u);
          tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
          outLine[u] = val;
        }
      }
    }
  }

  return true;
}

static bool DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  // Here, data_width and data_height are the dimensions of the current
  // (sub)level.
  if (tile_size_x * tile_offset_x > data_width ||
      tile_size_y * tile_offset_y > data_height) {
    return false;
  }

  // Compute actual image size in a tile.
  if ((tile_offset_x + 1) * tile_size_x >= data_width) {
    (*width) = data_width - (tile_offset_x * tile_size_x);
  } else {
    (*width) = tile_size_x;
  }

  if ((tile_offset_y + 1) * tile_size_y >= data_height) {
    (*height) = data_height - (tile_offset_y * tile_size_y);
  } else {
    (*height) = tile_size_y;
  }

  // Image size = tile size.
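  // Worked example of the clipping above: data_width = 100, tile_size_x = 32,
  // tile_offset_x = 3 -> the last tile column starts at pixel 96, so
  // (*width) = 100 - 96 = 4; interior tiles keep the full tile_size_x = 32.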
return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len, compression_type, line_order, (*width), tile_size_y, /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0, (*height), pixel_data_size, num_attributes, attributes, num_channels, channels, channel_offset_list); } static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list, int *pixel_data_size, size_t *channel_offset, int num_channels, const EXRChannelInfo *channels) { channel_offset_list->resize(static_cast<size_t>(num_channels)); (*pixel_data_size) = 0; (*channel_offset) = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { (*channel_offset_list)[c] = (*channel_offset); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { (*pixel_data_size) += sizeof(unsigned short); (*channel_offset) += sizeof(unsigned short); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { (*pixel_data_size) += sizeof(float); (*channel_offset) += sizeof(float); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { (*pixel_data_size) += sizeof(unsigned int); (*channel_offset) += sizeof(unsigned int); } else { // ??? return false; } } return true; } static unsigned char **AllocateImage(int num_channels, const EXRChannelInfo *channels, const int *requested_pixel_types, int data_width, int data_height) { unsigned char **images = reinterpret_cast<unsigned char **>(static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(num_channels)))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { size_t data_len = static_cast<size_t>(data_width) * static_cast<size_t>(data_height); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { // pixel_data_size += sizeof(unsigned short); // channel_offset += sizeof(unsigned short); // Alloc internal image for half type. if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { images[c] = reinterpret_cast<unsigned char *>(static_cast<unsigned short *>( malloc(sizeof(unsigned short) * data_len))); } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // pixel_data_size += sizeof(float); // channel_offset += sizeof(float); images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { // pixel_data_size += sizeof(unsigned int); // channel_offset += sizeof(unsigned int); images[c] = reinterpret_cast<unsigned char *>( static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len))); } else { assert(0); } } return images; } #ifdef _WIN32 static inline std::wstring UTF8ToWchar(const std::string &str) { int wstr_size = MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), NULL, 0); std::wstring wstr(wstr_size, 0); MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), &wstr[0], (int)wstr.size()); return wstr; } #endif static int ParseEXRHeader(HeaderInfo *info, bool *empty_header, const EXRVersion *version, std::string *err, const unsigned char *buf, size_t size) { const char *marker = reinterpret_cast<const char *>(&buf[0]); if (empty_header) { (*empty_header) = false; } if (version->multipart) { if (size > 0 && marker[0] == '\0') { // End of header list. 
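      // In a multipart file the per-part headers are simply concatenated,
      // and the list is terminated by an empty header: a single 0x00 byte
      // where the next attribute name would otherwise begin.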
if (empty_header) { (*empty_header) = true; } return TINYEXR_SUCCESS; } } // According to the spec, the header of every OpenEXR file must contain at // least the following attributes: // // channels chlist // compression compression // dataWindow box2i // displayWindow box2i // lineOrder lineOrder // pixelAspectRatio float // screenWindowCenter v2f // screenWindowWidth float bool has_channels = false; bool has_compression = false; bool has_data_window = false; bool has_display_window = false; bool has_line_order = false; bool has_pixel_aspect_ratio = false; bool has_screen_window_center = false; bool has_screen_window_width = false; bool has_name = false; bool has_type = false; info->name.clear(); info->type.clear(); info->data_window.min_x = 0; info->data_window.min_y = 0; info->data_window.max_x = 0; info->data_window.max_y = 0; info->line_order = 0; // @fixme info->display_window.min_x = 0; info->display_window.min_y = 0; info->display_window.max_x = 0; info->display_window.max_y = 0; info->screen_window_center[0] = 0.0f; info->screen_window_center[1] = 0.0f; info->screen_window_width = -1.0f; info->pixel_aspect_ratio = -1.0f; info->tiled = 0; info->tile_size_x = -1; info->tile_size_y = -1; info->tile_level_mode = -1; info->tile_rounding_mode = -1; info->attributes.clear(); // Read attributes size_t orig_size = size; for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) { if (0 == size) { if (err) { (*err) += "Insufficient data size for attributes.\n"; } return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { if (err) { (*err) += "Failed to read attribute.\n"; } return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; // For a multipart file, the version field 9th bit is 0. if ((version->tiled || version->multipart || version->non_image) && attr_name.compare("tiles") == 0) { unsigned int x_size, y_size; unsigned char tile_mode; if (data.size() != 9) { if (err) { (*err) += "(ParseEXRHeader) Invalid attribute data size. 
Attribute data size must be 9.\n"; } return TINYEXR_ERROR_INVALID_DATA; } assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, &data.at(4), sizeof(int)); tile_mode = data[8]; tinyexr::swap4(&x_size); tinyexr::swap4(&y_size); if (x_size > static_cast<unsigned int>(std::numeric_limits<int>::max()) || y_size > static_cast<unsigned int>(std::numeric_limits<int>::max())) { if (err) { (*err) = "Tile sizes were invalid."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->tile_size_x = static_cast<int>(x_size); info->tile_size_y = static_cast<int>(y_size); // mode = levelMode + roundingMode * 16 info->tile_level_mode = tile_mode & 0x3; info->tile_rounding_mode = (tile_mode >> 4) & 0x1; info->tiled = 1; } else if (attr_name.compare("compression") == 0) { bool ok = false; if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) { ok = true; } if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ ok = true; #else if (err) { (*err) = "PIZ compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP ok = true; #else if (err) { (*err) = "ZFP compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (!ok) { if (err) { (*err) = "Unknown compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->compression_type = static_cast<int>(data[0]); has_compression = true; } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!ReadChannelInfo(info->channels, data)) { if (err) { (*err) += "Failed to parse channel info.\n"; } return TINYEXR_ERROR_INVALID_DATA; } if (info->channels.size() < 1) { if (err) { (*err) += "# of channels is zero.\n"; } return TINYEXR_ERROR_INVALID_DATA; } has_channels = true; } else if (attr_name.compare("dataWindow") == 0) { if (data.size() >= 16) { memcpy(&info->data_window.min_x, &data.at(0), sizeof(int)); memcpy(&info->data_window.min_y, &data.at(4), sizeof(int)); memcpy(&info->data_window.max_x, &data.at(8), sizeof(int)); memcpy(&info->data_window.max_y, &data.at(12), sizeof(int)); tinyexr::swap4(&info->data_window.min_x); tinyexr::swap4(&info->data_window.min_y); tinyexr::swap4(&info->data_window.max_x); tinyexr::swap4(&info->data_window.max_y); has_data_window = true; } } else if (attr_name.compare("displayWindow") == 0) { if (data.size() >= 16) { memcpy(&info->display_window.min_x, &data.at(0), sizeof(int)); memcpy(&info->display_window.min_y, &data.at(4), sizeof(int)); memcpy(&info->display_window.max_x, &data.at(8), sizeof(int)); memcpy(&info->display_window.max_y, &data.at(12), sizeof(int)); tinyexr::swap4(&info->display_window.min_x); tinyexr::swap4(&info->display_window.min_y); tinyexr::swap4(&info->display_window.max_x); tinyexr::swap4(&info->display_window.max_y); has_display_window = true; } } else if (attr_name.compare("lineOrder") == 0) { if (data.size() >= 1) { info->line_order = static_cast<int>(data[0]); has_line_order = true; } } else if (attr_name.compare("pixelAspectRatio") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float)); tinyexr::swap4(&info->pixel_aspect_ratio); has_pixel_aspect_ratio = true; } } else if (attr_name.compare("screenWindowCenter") == 0) { if (data.size() >= 8) { 
memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float)); memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float)); tinyexr::swap4(&info->screen_window_center[0]); tinyexr::swap4(&info->screen_window_center[1]); has_screen_window_center = true; } } else if (attr_name.compare("screenWindowWidth") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->screen_window_width, &data.at(0), sizeof(float)); tinyexr::swap4(&info->screen_window_width); has_screen_window_width = true; } } else if (attr_name.compare("chunkCount") == 0) { if (data.size() >= sizeof(int)) { memcpy(&info->chunk_count, &data.at(0), sizeof(int)); tinyexr::swap4(&info->chunk_count); } } else if (attr_name.compare("name") == 0) { if (!data.empty() && data[0]) { data.push_back(0); size_t len = strlen(reinterpret_cast<const char*>(&data[0])); info->name.resize(len); info->name.assign(reinterpret_cast<const char*>(&data[0]), len); has_name = true; } } else if (attr_name.compare("type") == 0) { if (!data.empty() && data[0]) { data.push_back(0); size_t len = strlen(reinterpret_cast<const char*>(&data[0])); info->type.resize(len); info->type.assign(reinterpret_cast<const char*>(&data[0]), len); has_type = true; } } else { // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES) if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) { EXRAttribute attrib; #ifdef _MSC_VER strncpy_s(attrib.name, attr_name.c_str(), 255); strncpy_s(attrib.type, attr_type.c_str(), 255); #else strncpy(attrib.name, attr_name.c_str(), 255); strncpy(attrib.type, attr_type.c_str(), 255); #endif attrib.name[255] = '\0'; attrib.type[255] = '\0'; attrib.size = static_cast<int>(data.size()); attrib.value = static_cast<unsigned char *>(malloc(data.size())); memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0), data.size()); info->attributes.push_back(attrib); } } } // Check if required attributes exist { std::stringstream ss_err; if (!has_compression) { ss_err << "\"compression\" attribute not found in the header." << std::endl; } if (!has_channels) { ss_err << "\"channels\" attribute not found in the header." << std::endl; } if (!has_line_order) { ss_err << "\"lineOrder\" attribute not found in the header." << std::endl; } if (!has_display_window) { ss_err << "\"displayWindow\" attribute not found in the header." << std::endl; } if (!has_data_window) { ss_err << "\"dataWindow\" attribute not found in the header or invalid." << std::endl; } if (!has_pixel_aspect_ratio) { ss_err << "\"pixelAspectRatio\" attribute not found in the header." << std::endl; } if (!has_screen_window_width) { ss_err << "\"screenWindowWidth\" attribute not found in the header." << std::endl; } if (!has_screen_window_center) { ss_err << "\"screenWindowCenter\" attribute not found in the header." << std::endl; } if (version->multipart || version->non_image) { if (!has_name) { ss_err << "\"name\" attribute not found in the header." << std::endl; } if (!has_type) { ss_err << "\"type\" attribute not found in the header." << std::endl; } } if (!(ss_err.str().empty())) { if (err) { (*err) += ss_err.str(); } return TINYEXR_ERROR_INVALID_HEADER; } } info->header_len = static_cast<unsigned int>(orig_size - size); return TINYEXR_SUCCESS; } // C++ HeaderInfo to C EXRHeader conversion. 
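// ConvertHeader() mallocs `channels`, `pixel_types` and
// `requested_pixel_types`, and copies custom-attribute value pointers as-is
// ("Just copy pointer" below), so the resulting EXRHeader owns heap memory
// and must eventually be released with FreeEXRHeader(). A minimal
// caller-side sketch using the public API (error handling elided):
//
//   EXRVersion version;
//   EXRHeader header;
//   InitEXRHeader(&header);
//   const char *err = NULL;
//   ParseEXRVersionFromFile(&version, "input.exr");
//   ParseEXRHeaderFromFile(&header, &version, "input.exr", &err);
//   ...
//   FreeEXRHeader(&header);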
static bool ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info, std::string *warn, std::string *err) { exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio; exr_header->screen_window_center[0] = info.screen_window_center[0]; exr_header->screen_window_center[1] = info.screen_window_center[1]; exr_header->screen_window_width = info.screen_window_width; exr_header->chunk_count = info.chunk_count; exr_header->display_window.min_x = info.display_window.min_x; exr_header->display_window.min_y = info.display_window.min_y; exr_header->display_window.max_x = info.display_window.max_x; exr_header->display_window.max_y = info.display_window.max_y; exr_header->data_window.min_x = info.data_window.min_x; exr_header->data_window.min_y = info.data_window.min_y; exr_header->data_window.max_x = info.data_window.max_x; exr_header->data_window.max_y = info.data_window.max_y; exr_header->line_order = info.line_order; exr_header->compression_type = info.compression_type; exr_header->tiled = info.tiled; exr_header->tile_size_x = info.tile_size_x; exr_header->tile_size_y = info.tile_size_y; exr_header->tile_level_mode = info.tile_level_mode; exr_header->tile_rounding_mode = info.tile_rounding_mode; EXRSetNameAttr(exr_header, info.name.c_str()); bool valid = true; if (!info.type.empty()) { if (info.type == "scanlineimage") { if (exr_header->tiled) { if (err) { (*err) += "(ConvertHeader) tiled bit must be off for `scanlineimage` type.\n"; } valid = false; } } else if (info.type == "tiledimage") { if (!exr_header->tiled) { if (err) { (*err) += "(ConvertHeader) tiled bit must be on for `tiledimage` type.\n"; } valid = false; } } else if (info.type == "deeptile") { exr_header->non_image = 1; if (!exr_header->tiled) { if (err) { (*err) += "(ConvertHeader) tiled bit must be on for `deeptile` type.\n"; } valid = false; } } else if (info.type == "deepscanline") { exr_header->non_image = 1; if (exr_header->tiled) { if (err) { (*err) += "(ConvertHeader) tiled bit must be off for `deepscanline` type.\n"; } valid = false; } } else { if (warn) { std::stringstream ss; ss << "(ConvertHeader) Unsupported or unknown info.type: " << info.type << "\n"; (*warn) += ss.str(); } } } exr_header->num_channels = static_cast<int>(info.channels.size()); exr_header->channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { #ifdef _MSC_VER strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #else strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #endif // manually add '\0' for safety. 
exr_header->channels[c].name[255] = '\0'; exr_header->channels[c].pixel_type = info.channels[c].pixel_type; exr_header->channels[c].p_linear = info.channels[c].p_linear; exr_header->channels[c].x_sampling = info.channels[c].x_sampling; exr_header->channels[c].y_sampling = info.channels[c].y_sampling; } exr_header->pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->pixel_types[c] = info.channels[c].pixel_type; } // Initially fill with values of `pixel_types` exr_header->requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->requested_pixel_types[c] = info.channels[c].pixel_type; } exr_header->num_custom_attributes = static_cast<int>(info.attributes.size()); if (exr_header->num_custom_attributes > 0) { // TODO(syoyo): Report warning when # of attributes exceeds // `TINYEXR_MAX_CUSTOM_ATTRIBUTES` if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) { exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES; } exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc( sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes))); for (size_t i = 0; i < info.attributes.size(); i++) { memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256); memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256); exr_header->custom_attributes[i].size = info.attributes[i].size; // Just copy pointer exr_header->custom_attributes[i].value = info.attributes[i].value; } } else { exr_header->custom_attributes = NULL; } exr_header->header_len = info.header_len; return true; } struct OffsetData { OffsetData() : num_x_levels(0), num_y_levels(0) {} std::vector<std::vector<std::vector <tinyexr::tinyexr_uint64> > > offsets; int num_x_levels; int num_y_levels; }; int LevelIndex(int lx, int ly, int tile_level_mode, int num_x_levels) { switch (tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: return 0; case TINYEXR_TILE_MIPMAP_LEVELS: return lx; case TINYEXR_TILE_RIPMAP_LEVELS: return lx + ly * num_x_levels; default: assert(false); } return 0; } static int LevelSize(int toplevel_size, int level, int tile_rounding_mode) { assert(level >= 0); int b = (int)(1u << (unsigned)level); int level_size = toplevel_size / b; if (tile_rounding_mode == TINYEXR_TILE_ROUND_UP && level_size * b < toplevel_size) level_size += 1; return std::max(level_size, 1); } static int DecodeTiledLevel(EXRImage* exr_image, const EXRHeader* exr_header, const OffsetData& offset_data, const std::vector<size_t>& channel_offset_list, int pixel_data_size, const unsigned char* head, const size_t size, std::string* err) { int num_channels = exr_header->num_channels; int level_index = LevelIndex(exr_image->level_x, exr_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels); int num_y_tiles = (int)offset_data.offsets[level_index].size(); assert(num_y_tiles); int num_x_tiles = (int)offset_data.offsets[level_index][0].size(); assert(num_x_tiles); int num_tiles = num_x_tiles * num_y_tiles; int err_code = TINYEXR_SUCCESS; enum { EF_SUCCESS = 0, EF_INVALID_DATA = 1, EF_INSUFFICIENT_DATA = 2, EF_FAILED_TO_DECODE = 4 }; #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::atomic<unsigned> error_flag(EF_SUCCESS); #else unsigned error_flag(EF_SUCCESS); #endif // Although the spec says : "...the 
data window is subdivided into an array of smaller rectangles...", // the IlmImf library allows the dimensions of the tile to be larger (or equal) than the dimensions of the data window. #if 0 if ((exr_header->tile_size_x > exr_image->width || exr_header->tile_size_y > exr_image->height) && exr_image->level_x == 0 && exr_image->level_y == 0) { if (err) { (*err) += "Failed to decode tile data.\n"; } err_code = TINYEXR_ERROR_INVALID_DATA; } #endif exr_image->tiles = static_cast<EXRTile*>( calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles))); #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::vector<std::thread> workers; std::atomic<int> tile_count(0); int num_threads = std::max(1, int(std::thread::hardware_concurrency())); if (num_threads > int(num_tiles)) { num_threads = int(num_tiles); } for (int t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&]() { int tile_idx = 0; while ((tile_idx = tile_count++) < num_tiles) { #else #if TINYEXR_USE_OPENMP #pragma omp parallel for #endif for (int tile_idx = 0; tile_idx < num_tiles; tile_idx++) { #endif // Allocate memory for each tile. exr_image->tiles[tile_idx].images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, exr_header->tile_size_x, exr_header->tile_size_y); int x_tile = tile_idx % num_x_tiles; int y_tile = tile_idx / num_x_tiles; // 16 byte: tile coordinates // 4 byte : data size // ~ : data(uncompressed or compressed) tinyexr::tinyexr_uint64 offset = offset_data.offsets[level_index][y_tile][x_tile]; if (offset + sizeof(int) * 5 > size) { // Insufficient data size. error_flag |= EF_INSUFFICIENT_DATA; continue; } size_t data_size = size_t(size - (offset + sizeof(int) * 5)); const unsigned char* data_ptr = reinterpret_cast<const unsigned char*>(head + offset); int tile_coordinates[4]; memcpy(tile_coordinates, data_ptr, sizeof(int) * 4); tinyexr::swap4(&tile_coordinates[0]); tinyexr::swap4(&tile_coordinates[1]); tinyexr::swap4(&tile_coordinates[2]); tinyexr::swap4(&tile_coordinates[3]); if (tile_coordinates[2] != exr_image->level_x) { // Invalid data. error_flag |= EF_INVALID_DATA; continue; } if (tile_coordinates[3] != exr_image->level_y) { // Invalid data. error_flag |= EF_INVALID_DATA; continue; } int data_len; memcpy(&data_len, data_ptr + 16, sizeof(int)); // 16 = sizeof(tile_coordinates) tinyexr::swap4(&data_len); if (data_len < 2 || size_t(data_len) > data_size) { // Insufficient data size. error_flag |= EF_INSUFFICIENT_DATA; continue; } // Move to data addr: 20 = 16 + 4; data_ptr += 20; bool ret = tinyexr::DecodeTiledPixelData( exr_image->tiles[tile_idx].images, &(exr_image->tiles[tile_idx].width), &(exr_image->tiles[tile_idx].height), exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, exr_image->width, exr_image->height, tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list); if (!ret) { // Failed to decode tile data. 
          error_flag |= EF_FAILED_TO_DECODE;
        }

        exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
        exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
        exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
        exr_image->tiles[tile_idx].level_y = tile_coordinates[3];

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
      }
    }));
  }  // num_thread loop

  for (auto& t : workers) {
    t.join();
  }
#else
  }  // parallel for
#endif

  // Even in the event of an error, fill in these fields so that the tiles
  // allocated so far can be freed by the caller.
  exr_image->num_channels = num_channels;
  exr_image->num_tiles = static_cast<int>(num_tiles);

  if (error_flag) err_code = TINYEXR_ERROR_INVALID_DATA;

  if (err) {
    if (error_flag & EF_INSUFFICIENT_DATA) {
      (*err) += "Insufficient data length.\n";
    }
    if (error_flag & EF_FAILED_TO_DECODE) {
      (*err) += "Failed to decode tile data.\n";
    }
  }

  return err_code;
}

static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
                       const OffsetData& offset_data,
                       const unsigned char *head, const size_t size,
                       std::string *err) {
  int num_channels = exr_header->num_channels;

  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;

#if TINYEXR_USE_ZFP
    tinyexr::ZFPCompressionParam zfp_compression_param;
    if (!FindZFPCompressionParam(&zfp_compression_param,
                                 exr_header->custom_attributes,
                                 int(exr_header->num_custom_attributes),
                                 err)) {
      return TINYEXR_ERROR_INVALID_HEADER;
    }
#endif
  }

  if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
      exr_header->data_window.max_y < exr_header->data_window.min_y) {
    if (err) {
      (*err) += "Invalid data window.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  int data_width =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  int data_height =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;

  // Do not allow too large data_width and data_height (the header may be
  // invalid).
  {
    if ((data_width > TINYEXR_DIMENSION_THRESHOLD) ||
        (data_height > TINYEXR_DIMENSION_THRESHOLD)) {
      if (err) {
        std::stringstream ss;
        ss << "data_width or data_height too large. data_width: "
           << data_width << ", " << "data_height = " << data_height
           << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (exr_header->tiled) {
      if ((exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) ||
          (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD)) {
        if (err) {
          std::stringstream ss;
          ss << "tile width or tile height too large. 
tile width: " << exr_header->tile_size_x << ", " << "tile height = " << exr_header->tile_size_y << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } } } const std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0]; size_t num_blocks = offsets.size(); std::vector<size_t> channel_offset_list; int pixel_data_size = 0; size_t channel_offset = 0; if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size, &channel_offset, num_channels, exr_header->channels)) { if (err) { (*err) += "Failed to compute channel layout.\n"; } return TINYEXR_ERROR_INVALID_DATA; } #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::atomic<bool> invalid_data(false); #else bool invalid_data(false); #endif if (exr_header->tiled) { // value check if (exr_header->tile_size_x < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } if (exr_header->tile_size_y < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } if (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) { EXRImage* level_image = NULL; for (int level = 0; level < offset_data.num_x_levels; ++level) { if (!level_image) { level_image = exr_image; } else { level_image->next_level = new EXRImage; InitEXRImage(level_image->next_level); level_image = level_image->next_level; } level_image->width = LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level, exr_header->tile_rounding_mode); level_image->height = LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level, exr_header->tile_rounding_mode); level_image->level_x = level; level_image->level_y = level; int ret = DecodeTiledLevel(level_image, exr_header, offset_data, channel_offset_list, pixel_data_size, head, size, err); if (ret != TINYEXR_SUCCESS) return ret; } } else { EXRImage* level_image = NULL; for (int level_y = 0; level_y < offset_data.num_y_levels; ++level_y) for (int level_x = 0; level_x < offset_data.num_x_levels; ++level_x) { if (!level_image) { level_image = exr_image; } else { level_image->next_level = new EXRImage; InitEXRImage(level_image->next_level); level_image = level_image->next_level; } level_image->width = LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level_x, exr_header->tile_rounding_mode); level_image->height = LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level_y, exr_header->tile_rounding_mode); level_image->level_x = level_x; level_image->level_y = level_y; int ret = DecodeTiledLevel(level_image, exr_header, offset_data, channel_offset_list, pixel_data_size, head, size, err); if (ret != TINYEXR_SUCCESS) return ret; } } } else { // scanline format // Don't allow too large image(256GB * pixel_data_size or more). Workaround // for #104. size_t total_data_len = size_t(data_width) * size_t(data_height) * size_t(num_channels); const bool total_data_len_overflown = sizeof(void *) == 8 ? 
(total_data_len >= 0x4000000000) : false; if ((total_data_len == 0) || total_data_len_overflown) { if (err) { std::stringstream ss; ss << "Image data size is zero or too large: width = " << data_width << ", height = " << data_height << ", channels = " << num_channels << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } exr_image->images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, data_width, data_height); #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::vector<std::thread> workers; std::atomic<int> y_count(0); int num_threads = std::max(1, int(std::thread::hardware_concurrency())); if (num_threads > int(num_blocks)) { num_threads = int(num_blocks); } for (int t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&]() { int y = 0; while ((y = y_count++) < int(num_blocks)) { #else #if TINYEXR_USE_OPENMP #pragma omp parallel for #endif for (int y = 0; y < static_cast<int>(num_blocks); y++) { #endif size_t y_idx = static_cast<size_t>(y); if (offsets[y_idx] + sizeof(int) * 2 > size) { invalid_data = true; } else { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed or compressed) size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y_idx]); int line_no; memcpy(&line_no, data_ptr, sizeof(int)); int data_len; memcpy(&data_len, data_ptr + 4, sizeof(int)); tinyexr::swap4(&line_no); tinyexr::swap4(&data_len); if (size_t(data_len) > data_size) { invalid_data = true; } else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) { // Too large value. Assume this is invalid // 2**20 = 1048576 = heuristic value. invalid_data = true; } else if (data_len == 0) { // TODO(syoyo): May be ok to raise the threshold for example // `data_len < 4` invalid_data = true; } else { // line_no may be negative. int end_line_no = (std::min)(line_no + num_scanline_blocks, (exr_header->data_window.max_y + 1)); int num_lines = end_line_no - line_no; if (num_lines <= 0) { invalid_data = true; } else { // Move to data addr: 8 = 4 + 4; data_ptr += 8; // Adjust line_no with data_window.bmin.y // overflow check tinyexr_int64 lno = static_cast<tinyexr_int64>(line_no) - static_cast<tinyexr_int64>(exr_header->data_window.min_y); if (lno > std::numeric_limits<int>::max()) { line_no = -1; // invalid } else if (lno < -std::numeric_limits<int>::max()) { line_no = -1; // invalid } else { line_no -= exr_header->data_window.min_y; } if (line_no < 0) { invalid_data = true; } else { if (!tinyexr::DecodePixelData( exr_image->images, exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, data_width, y, line_no, num_lines, static_cast<size_t>(pixel_data_size), static_cast<size_t>( exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list)) { invalid_data = true; } } } } } #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) } })); } for (auto &t : workers) { t.join(); } #else } // omp parallel #endif } if (invalid_data) { if (err) { (*err) += "Invalid data found when decoding pixels.\n"; } return TINYEXR_ERROR_INVALID_DATA; } // Overwrite `pixel_type` with `requested_pixel_type`. 
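// The decoded buffers above were produced in `requested_pixel_types` (e.g.
// HALF data promoted to FLOAT on request), so the header is synced here to
// describe what the image buffers actually hold now.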
  {
    for (int c = 0; c < exr_header->num_channels; c++) {
      exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
    }
  }

  {
    exr_image->num_channels = num_channels;
    exr_image->width = data_width;
    exr_image->height = data_height;
  }

  return TINYEXR_SUCCESS;
}

static bool ReconstructLineOffsets(
    std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
    const unsigned char *head, const unsigned char *marker,
    const size_t size) {
  assert(head < marker);
  assert(offsets->size() == n);

  for (size_t i = 0; i < n; i++) {
    size_t offset = static_cast<size_t>(marker - head);
    // Offset should not exceed whole EXR file/data size.
    if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
      return false;
    }

    int y;
    unsigned int data_len;

    memcpy(&y, marker, sizeof(int));
    memcpy(&data_len, marker + 4, sizeof(unsigned int));

    if (data_len >= size) {
      return false;
    }

    tinyexr::swap4(&y);
    tinyexr::swap4(&data_len);

    (*offsets)[i] = offset;

    marker += data_len + 8;  // 8 = 4 bytes(y) + 4 bytes(data_len)
  }

  return true;
}

static int FloorLog2(unsigned x) {
  //
  // For x > 0, floorLog2(x) returns floor(log(x)/log(2)).
  //
  int y = 0;
  while (x > 1) {
    y += 1;
    x >>= 1u;
  }
  return y;
}

static int CeilLog2(unsigned x) {
  //
  // For x > 0, ceilLog2(x) returns ceil(log(x)/log(2)).
  //
  int y = 0;
  int r = 0;
  while (x > 1) {
    if (x & 1) r = 1;
    y += 1;
    x >>= 1u;
  }
  return y + r;
}

static int RoundLog2(int x, int tile_rounding_mode) {
  return (tile_rounding_mode == TINYEXR_TILE_ROUND_DOWN)
             ? FloorLog2(static_cast<unsigned>(x))
             : CeilLog2(static_cast<unsigned>(x));
}

static int CalculateNumXLevels(const EXRHeader* exr_header) {
  int min_x = exr_header->data_window.min_x;
  int max_x = exr_header->data_window.max_x;
  int min_y = exr_header->data_window.min_y;
  int max_y = exr_header->data_window.max_y;

  int num = 0;
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      num = 1;
      break;

    case TINYEXR_TILE_MIPMAP_LEVELS: {
      int w = max_x - min_x + 1;
      int h = max_y - min_y + 1;
      num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
    } break;

    case TINYEXR_TILE_RIPMAP_LEVELS: {
      int w = max_x - min_x + 1;
      num = RoundLog2(w, exr_header->tile_rounding_mode) + 1;
    } break;

    default:
      assert(false);
  }

  return num;
}

static int CalculateNumYLevels(const EXRHeader* exr_header) {
  int min_x = exr_header->data_window.min_x;
  int max_x = exr_header->data_window.max_x;
  int min_y = exr_header->data_window.min_y;
  int max_y = exr_header->data_window.max_y;

  int num = 0;
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      num = 1;
      break;

    case TINYEXR_TILE_MIPMAP_LEVELS: {
      int w = max_x - min_x + 1;
      int h = max_y - min_y + 1;
      num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
    } break;

    case TINYEXR_TILE_RIPMAP_LEVELS: {
      int h = max_y - min_y + 1;
      num = RoundLog2(h, exr_header->tile_rounding_mode) + 1;
    } break;

    default:
      assert(false);
  }

  return num;
}

static void CalculateNumTiles(std::vector<int>& numTiles, int toplevel_size,
                              int size, int tile_rounding_mode) {
  for (unsigned i = 0; i < numTiles.size(); i++) {
    int l = LevelSize(toplevel_size, i, tile_rounding_mode);
    assert(l <= std::numeric_limits<int>::max() - size + 1);
    numTiles[i] = (l + size - 1) / size;
  }
}

static void PrecalculateTileInfo(std::vector<int>& num_x_tiles,
                                 std::vector<int>& num_y_tiles,
                                 const EXRHeader* exr_header) {
  int min_x = exr_header->data_window.min_x;
  int max_x = exr_header->data_window.max_x;
  int min_y = exr_header->data_window.min_y;
  int max_y = exr_header->data_window.max_y;

  int num_x_levels = CalculateNumXLevels(exr_header);
  int num_y_levels =
CalculateNumYLevels(exr_header); num_x_tiles.resize(num_x_levels); num_y_tiles.resize(num_y_levels); CalculateNumTiles(num_x_tiles, max_x - min_x + 1, exr_header->tile_size_x, exr_header->tile_rounding_mode); CalculateNumTiles(num_y_tiles, max_y - min_y + 1, exr_header->tile_size_y, exr_header->tile_rounding_mode); } static void InitSingleResolutionOffsets(OffsetData& offset_data, size_t num_blocks) { offset_data.offsets.resize(1); offset_data.offsets[0].resize(1); offset_data.offsets[0][0].resize(num_blocks); offset_data.num_x_levels = 1; offset_data.num_y_levels = 1; } // Return sum of tile blocks. static int InitTileOffsets(OffsetData& offset_data, const EXRHeader* exr_header, const std::vector<int>& num_x_tiles, const std::vector<int>& num_y_tiles) { int num_tile_blocks = 0; offset_data.num_x_levels = static_cast<int>(num_x_tiles.size()); offset_data.num_y_levels = static_cast<int>(num_y_tiles.size()); switch (exr_header->tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: case TINYEXR_TILE_MIPMAP_LEVELS: assert(offset_data.num_x_levels == offset_data.num_y_levels); offset_data.offsets.resize(offset_data.num_x_levels); for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) { offset_data.offsets[l].resize(num_y_tiles[l]); for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) { offset_data.offsets[l][dy].resize(num_x_tiles[l]); num_tile_blocks += num_x_tiles[l]; } } break; case TINYEXR_TILE_RIPMAP_LEVELS: offset_data.offsets.resize(static_cast<size_t>(offset_data.num_x_levels) * static_cast<size_t>(offset_data.num_y_levels)); for (int ly = 0; ly < offset_data.num_y_levels; ++ly) { for (int lx = 0; lx < offset_data.num_x_levels; ++lx) { int l = ly * offset_data.num_x_levels + lx; offset_data.offsets[l].resize(num_y_tiles[ly]); for (size_t dy = 0; dy < offset_data.offsets[l].size(); ++dy) { offset_data.offsets[l][dy].resize(num_x_tiles[lx]); num_tile_blocks += num_x_tiles[lx]; } } } break; default: assert(false); } return num_tile_blocks; } static bool IsAnyOffsetsAreInvalid(const OffsetData& offset_data) { for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) if (reinterpret_cast<const tinyexr::tinyexr_int64&>(offset_data.offsets[l][dy][dx]) <= 0) return true; return false; } static bool isValidTile(const EXRHeader* exr_header, const OffsetData& offset_data, int dx, int dy, int lx, int ly) { if (lx < 0 || ly < 0 || dx < 0 || dy < 0) return false; int num_x_levels = offset_data.num_x_levels; int num_y_levels = offset_data.num_y_levels; switch (exr_header->tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: if (lx == 0 && ly == 0 && offset_data.offsets.size() > 0 && offset_data.offsets[0].size() > static_cast<size_t>(dy) && offset_data.offsets[0][dy].size() > static_cast<size_t>(dx)) { return true; } break; case TINYEXR_TILE_MIPMAP_LEVELS: if (lx < num_x_levels && ly < num_y_levels && offset_data.offsets.size() > static_cast<size_t>(lx) && offset_data.offsets[lx].size() > static_cast<size_t>(dy) && offset_data.offsets[lx][dy].size() > static_cast<size_t>(dx)) { return true; } break; case TINYEXR_TILE_RIPMAP_LEVELS: { size_t idx = static_cast<size_t>(lx) + static_cast<size_t>(ly)* static_cast<size_t>(num_x_levels); if (lx < num_x_levels && ly < num_y_levels && (offset_data.offsets.size() > idx) && offset_data.offsets[idx].size() > static_cast<size_t>(dy) && offset_data.offsets[idx][dy].size() > static_cast<size_t>(dx)) { return true; 
      }
    }
    break;

    default:
      return false;
  }

  return false;
}

static void ReconstructTileOffsets(OffsetData& offset_data,
                                   const EXRHeader* exr_header,
                                   const unsigned char* head,
                                   const unsigned char* marker,
                                   const size_t /*size*/,
                                   bool isMultiPartFile, bool isDeep) {
  int numXLevels = offset_data.num_x_levels;
  for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
    for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
      for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
        tinyexr::tinyexr_uint64 tileOffset = marker - head;
        if (isMultiPartFile) {
          //int partNumber;
          marker += sizeof(int);
        }

        int tileX;
        memcpy(&tileX, marker, sizeof(int));
        tinyexr::swap4(&tileX);
        marker += sizeof(int);

        int tileY;
        memcpy(&tileY, marker, sizeof(int));
        tinyexr::swap4(&tileY);
        marker += sizeof(int);

        int levelX;
        memcpy(&levelX, marker, sizeof(int));
        tinyexr::swap4(&levelX);
        marker += sizeof(int);

        int levelY;
        memcpy(&levelY, marker, sizeof(int));
        tinyexr::swap4(&levelY);
        marker += sizeof(int);

        if (isDeep) {
          tinyexr::tinyexr_int64 packed_offset_table_size;
          memcpy(&packed_offset_table_size, marker,
                 sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_offset_table_size));
          marker += sizeof(tinyexr::tinyexr_int64);

          tinyexr::tinyexr_int64 packed_sample_size;
          memcpy(&packed_sample_size, marker,
                 sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_sample_size));
          marker += sizeof(tinyexr::tinyexr_int64);

          // next Int64 is unpacked sample size - skip that too
          marker += packed_offset_table_size + packed_sample_size + 8;
        } else {
          int dataSize;
          memcpy(&dataSize, marker, sizeof(int));
          tinyexr::swap4(&dataSize);
          marker += sizeof(int);
          marker += dataSize;
        }

        if (!isValidTile(exr_header, offset_data, tileX, tileY, levelX,
                         levelY))
          return;

        int level_idx =
            LevelIndex(levelX, levelY, exr_header->tile_level_mode, numXLevels);
        offset_data.offsets[level_idx][tileY][tileX] = tileOffset;
      }
    }
  }
}

// marker output is also updated.
static int ReadOffsets(OffsetData& offset_data, const unsigned char* head,
                       const unsigned char*& marker, const size_t size,
                       const char** err) {
  for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
    for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
      for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
        tinyexr::tinyexr_uint64 offset;
        if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
          tinyexr::SetErrorMessage("Insufficient data size in offset table.",
                                   err);
          return TINYEXR_ERROR_INVALID_DATA;
        }

        memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
        tinyexr::swap8(&offset);
        if (offset >= size) {
          tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.",
                                   err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
        marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
        offset_data.offsets[l][dy][dx] = offset;
      }
    }
  }
  return TINYEXR_SUCCESS;
}

static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
                          const unsigned char *head,
                          const unsigned char *marker, const size_t size,
                          const char **err) {
  if (exr_image == NULL || exr_header == NULL || head == NULL ||
      marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if
(exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } if (exr_header->data_window.max_x < exr_header->data_window.min_x || exr_header->data_window.max_x - exr_header->data_window.min_x == std::numeric_limits<int>::max()) { // Issue 63 tinyexr::SetErrorMessage("Invalid data width value", err); return TINYEXR_ERROR_INVALID_DATA; } int data_width = exr_header->data_window.max_x - exr_header->data_window.min_x + 1; if (exr_header->data_window.max_y < exr_header->data_window.min_y || exr_header->data_window.max_y - exr_header->data_window.min_y == std::numeric_limits<int>::max()) { tinyexr::SetErrorMessage("Invalid data height value", err); return TINYEXR_ERROR_INVALID_DATA; } int data_height = exr_header->data_window.max_y - exr_header->data_window.min_y + 1; // Do not allow too large data_width and data_height. header invalid? { if (data_width > TINYEXR_DIMENSION_THRESHOLD) { tinyexr::SetErrorMessage("data width too large.", err); return TINYEXR_ERROR_INVALID_DATA; } if (data_height > TINYEXR_DIMENSION_THRESHOLD) { tinyexr::SetErrorMessage("data height too large.", err); return TINYEXR_ERROR_INVALID_DATA; } } if (exr_header->tiled) { if (exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) { tinyexr::SetErrorMessage("tile width too large.", err); return TINYEXR_ERROR_INVALID_DATA; } if (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD) { tinyexr::SetErrorMessage("tile height too large.", err); return TINYEXR_ERROR_INVALID_DATA; } } // Read offset tables. OffsetData offset_data; size_t num_blocks = 0; // For a multi-resolution image, the size of the offset table will be calculated from the other attributes of the header. // If chunk_count > 0 then chunk_count must be equal to the calculated tile count. if (exr_header->tiled) { { std::vector<int> num_x_tiles, num_y_tiles; PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_header); num_blocks = InitTileOffsets(offset_data, exr_header, num_x_tiles, num_y_tiles); if (exr_header->chunk_count > 0) { if (exr_header->chunk_count != static_cast<int>(num_blocks)) { tinyexr::SetErrorMessage("Invalid offset table size.", err); return TINYEXR_ERROR_INVALID_DATA; } } } int ret = ReadOffsets(offset_data, head, marker, size, err); if (ret != TINYEXR_SUCCESS) return ret; if (IsAnyOffsetsAreInvalid(offset_data)) { ReconstructTileOffsets(offset_data, exr_header, head, marker, size, exr_header->multipart, exr_header->non_image); } } else if (exr_header->chunk_count > 0) { // Use `chunkCount` attribute. 
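// For scanline parts, an explicit `chunkCount` attribute (when present)
// gives the offset-table size directly; otherwise the block count is
// derived below from data_height and the per-compression scanline-block
// count.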
num_blocks = static_cast<size_t>(exr_header->chunk_count); InitSingleResolutionOffsets(offset_data, num_blocks); } else { num_blocks = static_cast<size_t>(data_height) / static_cast<size_t>(num_scanline_blocks); if (num_blocks * static_cast<size_t>(num_scanline_blocks) < static_cast<size_t>(data_height)) { num_blocks++; } InitSingleResolutionOffsets(offset_data, num_blocks); } if (!exr_header->tiled) { std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0]; for (size_t y = 0; y < num_blocks; y++) { tinyexr::tinyexr_uint64 offset; // Issue #81 if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) { tinyexr::SetErrorMessage("Insufficient data size in offset table.", err); return TINYEXR_ERROR_INVALID_DATA; } memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } marker += sizeof(tinyexr::tinyexr_uint64); // = 8 offsets[y] = offset; } // If line offsets are invalid, we try to reconstruct it. // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details. for (size_t y = 0; y < num_blocks; y++) { if (offsets[y] <= 0) { // TODO(syoyo) Report as warning? // if (err) { // stringstream ss; // ss << "Incomplete lineOffsets." << std::endl; // (*err) += ss.str(); //} bool ret = ReconstructLineOffsets(&offsets, num_blocks, head, marker, size); if (ret) { // OK break; } else { tinyexr::SetErrorMessage( "Cannot reconstruct lineOffset table in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } } } } { std::string e; int ret = DecodeChunk(exr_image, exr_header, offset_data, head, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } #if 1 FreeEXRImage(exr_image); #else // release memory(if exists) if ((exr_header->num_channels > 0) && exr_image && exr_image->images) { for (size_t c = 0; c < size_t(exr_header->num_channels); c++) { if (exr_image->images[c]) { free(exr_image->images[c]); exr_image->images[c] = NULL; } } free(exr_image->images); exr_image->images = NULL; } #endif } return ret; } } static void GetLayers(const EXRHeader &exr_header, std::vector<std::string> &layer_names) { // Naive implementation // Group channels by layers // go over all channel names, split by periods // collect unique names layer_names.clear(); for (int c = 0; c < exr_header.num_channels; c++) { std::string full_name(exr_header.channels[c].name); const size_t pos = full_name.find_last_of('.'); if (pos != std::string::npos && pos != 0 && pos + 1 < full_name.size()) { full_name.erase(pos); if (std::find(layer_names.begin(), layer_names.end(), full_name) == layer_names.end()) layer_names.push_back(full_name); } } } struct LayerChannel { explicit LayerChannel(size_t i, std::string n) : index(i), name(n) {} size_t index; std::string name; }; static void ChannelsInLayer(const EXRHeader &exr_header, const std::string &layer_name, std::vector<LayerChannel> &channels) { channels.clear(); for (int c = 0; c < exr_header.num_channels; c++) { std::string ch_name(exr_header.channels[c].name); if (layer_name.empty()) { const size_t pos = ch_name.find_last_of('.'); if (pos != std::string::npos && pos < ch_name.size()) { ch_name = ch_name.substr(pos + 1); } } else { const size_t pos = ch_name.find(layer_name + '.'); if (pos == std::string::npos) continue; if (pos == 0) { ch_name = ch_name.substr(layer_name.size() + 1); } } LayerChannel ch(size_t(c), ch_name); channels.push_back(ch); } } } // 
namespace tinyexr int EXRLayers(const char *filename, const char **layer_names[], int *num_layers, const char **err) { EXRVersion exr_version; EXRHeader exr_header; InitEXRHeader(&exr_header); { int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage("Invalid EXR header.", err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } std::vector<std::string> layer_vec; tinyexr::GetLayers(exr_header, layer_vec); (*num_layers) = int(layer_vec.size()); (*layer_names) = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size()))); for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) { #ifdef _MSC_VER (*layer_names)[c] = _strdup(layer_vec[c].c_str()); #else (*layer_names)[c] = strdup(layer_vec[c].c_str()); #endif } FreeEXRHeader(&exr_header); return TINYEXR_SUCCESS; } int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err) { return LoadEXRWithLayer(out_rgba, width, height, filename, /* layername */ NULL, err); } int LoadEXRWithLayer(float **out_rgba, int *width, int *height, const char *filename, const char *layername, const char **err) { if (out_rgba == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); InitEXRImage(&exr_image); { int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { std::stringstream ss; ss << "Failed to open EXR file or read version info from EXR file. code(" << ret << ")"; tinyexr::SetErrorMessage(ss.str(), err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } { int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // Read HALF channel as FLOAT. for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } // TODO: Probably limit loading to layers (channels) selected by layer index { int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; std::vector<std::string> layer_names; tinyexr::GetLayers(exr_header, layer_names); std::vector<tinyexr::LayerChannel> channels; tinyexr::ChannelsInLayer( exr_header, layername == NULL ? "" : std::string(layername), channels); if (channels.size() < 1) { tinyexr::SetErrorMessage("Layer Not Found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_LAYER_NOT_FOUND; } size_t ch_count = channels.size() < 4 ? 
channels.size() : 4; for (size_t c = 0; c < ch_count; c++) { const tinyexr::LayerChannel &ch = channels[c]; if (ch.name == "R") { idxR = int(ch.index); } else if (ch.name == "G") { idxG = int(ch.index); } else if (ch.name == "B") { idxB = int(ch.index); } else if (ch.name == "A") { idxA = int(ch.index); } } if (channels.size() == 1) { int chIdx = int(channels.front().index); // Grayscale channel only. (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * static_cast<int>(exr_header.tile_size_x) + i; const int jj = exr_image.tiles[it].offset_y * static_cast<int>(exr_header.tile_size_y) + j; const int idx = ii + jj * static_cast<int>(exr_image.width); // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[chIdx][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // Assume RGB(A) if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
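// (Tiles along the right/bottom borders may extend past the data window,
// so texels mapping outside [0, width) x [0, height) are skipped.)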
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int IsEXR(const char *filename) { EXRVersion exr_version; int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { return ret; } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_header == NULL) { tinyexr::SetErrorMessage( "Invalid argument. `memory` or `exr_header` argument is null in " "ParseEXRHeaderFromMemory()", err); // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Insufficient header/data size.\n", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; tinyexr::HeaderInfo info; info.clear(); int ret; { std::string err_str; ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err && !err_str.empty()) { tinyexr::SetErrorMessage(err_str, err); } } } { std::string warn; std::string err_str; if (!ConvertHeader(exr_header, info, &warn, &err_str)) { if (err && !err_str.empty()) { tinyexr::SetErrorMessage(err_str, err); } ret = TINYEXR_ERROR_INVALID_HEADER; } } exr_header->multipart = version->multipart ? 1 : 0; exr_header->non_image = version->non_image ? 1 : 0; return ret; } int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err) { if (out_rgba == NULL || memory == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); int ret = ParseEXRVersionFromMemory(&exr_version, memory, size); if (ret != TINYEXR_SUCCESS) { std::stringstream ss; ss << "Failed to parse EXR version. code(" << ret << ")"; tinyexr::SetErrorMessage(ss.str(), err); return ret; } ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // Read HALF channel as FLOAT. 
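// Requesting FLOAT for HALF channels makes every decoded channel a float
// buffer, which is what the flat float RGBA output of this API expects.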
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } InitEXRImage(&exr_image); ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } // TODO(syoyo): Refactor removing same code as used in LoadEXR(). if (exr_header.num_channels == 1) { // Grayscale channel only. (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[0][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[0][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // TODO(syoyo): Support non RGBA image. if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }

            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            } else {
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }

  (*width) = exr_image.width;
  (*height) = exr_image.height;

  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);

  return TINYEXR_SUCCESS;
}

int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
                         const char *filename, const char **err) {
  if (exr_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API)
  // MSVC, MinGW GCC, or Clang.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    // TODO(syoyo): return wfopen_s error code
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler or MinGW without MINGW_HAS_SECURE_API.
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (filesize < 16) {
    fclose(fp);
    tinyexr::SetErrorMessage("File size too short " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    (void)ret;
  }

  return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
                                err);
}

int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
                           const unsigned char *memory, const size_t size,
                           const char **err) {
  if (exr_image == NULL || memory == NULL ||
      (size < tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (exr_header->header_len == 0) {
    tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  const unsigned char *head = memory;
  const unsigned char *marker = reinterpret_cast<const unsigned char *>(
      memory + exr_header->header_len +
      8);  // +8 for magic number + version header.
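// File layout recap:
//   [magic(4)] [version(4)] [header attributes ... 0x00] [offset table]
//   [chunks]
// `header_len` was filled in by the ParseEXRHeader* call, so `marker` now
// points at the offset table that immediately follows the header.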
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size, err); } namespace tinyexr { // out_data must be allocated initially with the block-header size // of the current image(-part) type static bool EncodePixelData(/* out */ std::vector<unsigned char>& out_data, const unsigned char* const* images, int compression_type, int /*line_order*/, int width, // for tiled : tile.width int /*height*/, // for tiled : header.tile_size_y int x_stride, // for tiled : header.tile_size_x int line_no, // for tiled : 0 int num_lines, // for tiled : tile.height size_t pixel_data_size, const std::vector<ChannelInfo>& channels, const std::vector<size_t>& channel_offset_list, const void* compression_param = 0) // zfp compression param { size_t buf_size = static_cast<size_t>(width) * static_cast<size_t>(num_lines) * static_cast<size_t>(pixel_data_size); //int last2bit = (buf_size & 3); // buf_size must be multiple of four //if(last2bit) buf_size += 4 - last2bit; std::vector<unsigned char> buf(buf_size); size_t start_y = static_cast<size_t>(line_no); for (size_t c = 0; c < channels.size(); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < num_lines; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * width) + channel_offset_list[c] * static_cast<size_t>(width))); for (int x = 0; x < width; x++) { tinyexr::FP16 h16; h16.u = reinterpret_cast<const unsigned short * const *>( images)[c][(y + start_y) * x_stride + x]; tinyexr::FP32 f32 = half_to_float(h16); tinyexr::swap4(&f32.f); // line_ptr[x] = f32.f; tinyexr::cpy4(line_ptr + x, &(f32.f)); } } } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < num_lines; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * width) + channel_offset_list[c] * static_cast<size_t>(width))); for (int x = 0; x < width; x++) { unsigned short val = reinterpret_cast<const unsigned short * const *>( images)[c][(y + start_y) * x_stride + x]; tinyexr::swap2(&val); // line_ptr[x] = val; tinyexr::cpy2(line_ptr + x, &val); } } } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < num_lines; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * width) + channel_offset_list[c] * static_cast<size_t>(width))); for (int x = 0; x < width; x++) { tinyexr::FP32 f32; f32.f = reinterpret_cast<const float * const *>( images)[c][(y + start_y) * x_stride + x]; tinyexr::FP16 h16; h16 = float_to_half_full(f32); tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u)); // line_ptr[x] = h16.u; tinyexr::cpy2(line_ptr + x, &(h16.u)); } } } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < num_lines; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * width) + channel_offset_list[c] * static_cast<size_t>(width))); for (int x = 0; x < width; x++) { float val = reinterpret_cast<const float * const *>( images)[c][(y + start_y) * x_stride + x]; tinyexr::swap4(&val); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } else { assert(0); } } else if (channels[c].pixel_type == 
TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < num_lines; y++) { // Assume increasing Y unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at( static_cast<size_t>(pixel_data_size * y * width) + channel_offset_list[c] * static_cast<size_t>(width))); for (int x = 0; x < width; x++) { unsigned int val = reinterpret_cast<const unsigned int * const *>( images)[c][(y + start_y) * x_stride + x]; tinyexr::swap4(&val); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } } if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed) out_data.insert(out_data.end(), buf.begin(), buf.end()); } else if ((compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #if TINYEXR_USE_MINIZ std::vector<unsigned char> block(mz_compressBound( static_cast<unsigned long>(buf.size()))); #else std::vector<unsigned char> block( compressBound(static_cast<uLong>(buf.size()))); #endif tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) unsigned int data_len = static_cast<unsigned int>(outSize); // truncate out_data.insert(out_data.end(), block.begin(), block.begin() + data_len); } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // (buf.size() * 3) / 2 would be enough. std::vector<unsigned char> block((buf.size() * 3) / 2); tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) unsigned int data_len = static_cast<unsigned int>(outSize); // truncate out_data.insert(out_data.end(), block.begin(), block.begin() + data_len); } else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ unsigned int bufLen = 8192 + static_cast<unsigned int>( 2 * static_cast<unsigned int>( buf.size())); // @fixme { compute good bound. 
} std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), &outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, width, num_lines); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) unsigned int data_len = outSize; out_data.insert(out_data.end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP const ZFPCompressionParam* zfp_compression_param = reinterpret_cast<const ZFPCompressionParam*>(compression_param); std::vector<unsigned char> block; unsigned int outSize; tinyexr::CompressZfp( &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), width, num_lines, static_cast<int>(channels.size()), *zfp_compression_param); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) unsigned int data_len = outSize; out_data.insert(out_data.end(), block.begin(), block.begin() + data_len); #else (void)compression_param; assert(0); #endif } else { assert(0); return false; } return true; } static int EncodeTiledLevel(const EXRImage* level_image, const EXRHeader* exr_header, const std::vector<tinyexr::ChannelInfo>& channels, std::vector<std::vector<unsigned char> >& data_list, size_t start_index, // for data_list int num_x_tiles, int num_y_tiles, const std::vector<size_t>& channel_offset_list, int pixel_data_size, const void* compression_param, // must be set if zfp compression is enabled std::string* err) { int num_tiles = num_x_tiles * num_y_tiles; assert(num_tiles == level_image->num_tiles); if ((exr_header->tile_size_x > level_image->width || exr_header->tile_size_y > level_image->height) && level_image->level_x == 0 && level_image->level_y == 0) { if (err) { (*err) += "Failed to encode tile data.\n"; } return TINYEXR_ERROR_INVALID_DATA; } #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::atomic<bool> invalid_data(false); #else bool invalid_data(false); #endif #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::vector<std::thread> workers; std::atomic<int> tile_count(0); int num_threads = std::max(1, int(std::thread::hardware_concurrency())); if (num_threads > int(num_tiles)) { num_threads = int(num_tiles); } for (int t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&]() { int i = 0; while ((i = tile_count++) < num_tiles) { #else // Use signed int since some OpenMP compiler doesn't allow unsigned type for // `parallel for` #if TINYEXR_USE_OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_tiles; i++) { #endif size_t tile_idx = static_cast<size_t>(i); size_t data_idx = tile_idx + start_index; int x_tile = i % num_x_tiles; int y_tile = i / num_x_tiles; EXRTile& tile = level_image->tiles[tile_idx]; const unsigned char* const* images = static_cast<const unsigned char* const*>(tile.images); data_list[data_idx].resize(5*sizeof(int)); size_t data_header_size = data_list[data_idx].size(); bool ret = EncodePixelData(data_list[data_idx], images, exr_header->compression_type, 0, // increasing y tile.width, exr_header->tile_size_y, exr_header->tile_size_x, 0, tile.height, pixel_data_size, channels, channel_offset_list, compression_param); if (!ret) { invalid_data = true; continue; } assert(data_list[data_idx].size() > data_header_size); int data_len = static_cast<int>(data_list[data_idx].size() - data_header_size); //tileX, tileY, levelX, levelY // pixel_data_size(int) memcpy(&data_list[data_idx][0], &x_tile, sizeof(int)); 
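// Tile chunk header layout (20 bytes, little-endian on disk):
//   tileX(4) tileY(4) levelX(4) levelY(4) dataSize(4), followed by the
//   (possibly compressed) tile pixel data.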
memcpy(&data_list[data_idx][4], &y_tile, sizeof(int)); memcpy(&data_list[data_idx][8], &level_image->level_x, sizeof(int)); memcpy(&data_list[data_idx][12], &level_image->level_y, sizeof(int)); memcpy(&data_list[data_idx][16], &data_len, sizeof(int)); swap4(reinterpret_cast<int*>(&data_list[data_idx][0])); swap4(reinterpret_cast<int*>(&data_list[data_idx][4])); swap4(reinterpret_cast<int*>(&data_list[data_idx][8])); swap4(reinterpret_cast<int*>(&data_list[data_idx][12])); swap4(reinterpret_cast<int*>(&data_list[data_idx][16])); #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) } })); } for (auto &t : workers) { t.join(); } #else } // omp parallel #endif if (invalid_data) { if (err) { (*err) += "Failed to encode tile data.\n"; } return TINYEXR_ERROR_INVALID_DATA; } return TINYEXR_SUCCESS; } static int NumScanlines(int compression_type) { int num_scanlines = 1; if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanlines = 16; } else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanlines = 32; } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanlines = 16; } return num_scanlines; } static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header, const std::vector<ChannelInfo>& channels, int num_blocks, tinyexr_uint64 chunk_offset, // starting offset of current chunk bool is_multipart, OffsetData& offset_data, // output block offsets, must be initialized std::vector<std::vector<unsigned char> >& data_list, // output tinyexr_uint64& total_size, // output: ending offset of current chunk std::string* err) { int num_scanlines = NumScanlines(exr_header->compression_type); data_list.resize(num_blocks); std::vector<size_t> channel_offset_list( static_cast<size_t>(exr_header->num_channels)); int pixel_data_size = 0; { size_t channel_offset = 0; for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { channel_offset_list[c] = channel_offset; if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) { pixel_data_size += sizeof(unsigned short); channel_offset += sizeof(unsigned short); } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) { pixel_data_size += sizeof(float); channel_offset += sizeof(float); } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_UINT) { pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { assert(0); } } } const void* compression_param = 0; #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; // Use ZFP compression parameter from custom attributes(if such a parameter // exists) { std::string e; bool ret = tinyexr::FindZFPCompressionParam( &zfp_compression_param, exr_header->custom_attributes, exr_header->num_custom_attributes, &e); if (!ret) { // Use predefined compression parameter. zfp_compression_param.type = 0; zfp_compression_param.rate = 2; } compression_param = &zfp_compression_param; } #endif tinyexr_uint64 offset = chunk_offset; tinyexr_uint64 doffset = is_multipart ? 4u : 0u; if (exr_image->tiles) { const EXRImage* level_image = exr_image; size_t block_idx = 0; tinyexr::tinyexr_uint64 block_data_size = 0; int num_levels = (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ? 
offset_data.num_x_levels : (offset_data.num_x_levels * offset_data.num_y_levels); for (int level_index = 0; level_index < num_levels; ++level_index) { if (!level_image) { if (err) { (*err) += "Invalid number of tiled levels for EncodeChunk\n"; } return TINYEXR_ERROR_INVALID_DATA; } int level_index_from_image = LevelIndex(level_image->level_x, level_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels); if (level_index_from_image != level_index) { if (err) { (*err) += "Incorrect level ordering in tiled image\n"; } return TINYEXR_ERROR_INVALID_DATA; } int num_y_tiles = (int)offset_data.offsets[level_index].size(); assert(num_y_tiles); int num_x_tiles = (int)offset_data.offsets[level_index][0].size(); assert(num_x_tiles); std::string e; int ret = EncodeTiledLevel(level_image, exr_header, channels, data_list, block_idx, num_x_tiles, num_y_tiles, channel_offset_list, pixel_data_size, compression_param, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty() && err) { (*err) += e; } return ret; } for (size_t j = 0; j < static_cast<size_t>(num_y_tiles); ++j) for (size_t i = 0; i < static_cast<size_t>(num_x_tiles); ++i) { offset_data.offsets[level_index][j][i] = offset; swap8(reinterpret_cast<tinyexr_uint64*>(&offset_data.offsets[level_index][j][i])); offset += data_list[block_idx].size() + doffset; block_data_size += data_list[block_idx].size(); ++block_idx; } level_image = level_image->next_level; } assert(static_cast<int>(block_idx) == num_blocks); total_size = offset; } else { // scanlines std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0]; #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::atomic<bool> invalid_data(false); std::vector<std::thread> workers; std::atomic<int> block_count(0); int num_threads = std::min(std::max(1, int(std::thread::hardware_concurrency())), num_blocks); for (int t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&]() { int i = 0; while ((i = block_count++) < num_blocks) { #else bool invalid_data(false); #if TINYEXR_USE_OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_blocks; i++) { #endif int start_y = num_scanlines * i; int end_Y = (std::min)(num_scanlines * (i + 1), exr_image->height); int num_lines = end_Y - start_y; const unsigned char* const* images = static_cast<const unsigned char* const*>(exr_image->images); data_list[i].resize(2*sizeof(int)); size_t data_header_size = data_list[i].size(); bool ret = EncodePixelData(data_list[i], images, exr_header->compression_type, 0, // increasing y exr_image->width, exr_image->height, exr_image->width, start_y, num_lines, pixel_data_size, channels, channel_offset_list, compression_param); if (!ret) { invalid_data = true; continue; // "break" cannot be used with OpenMP } assert(data_list[i].size() > data_header_size); int data_len = static_cast<int>(data_list[i].size() - data_header_size); memcpy(&data_list[i][0], &start_y, sizeof(int)); memcpy(&data_list[i][4], &data_len, sizeof(int)); swap4(reinterpret_cast<int*>(&data_list[i][0])); swap4(reinterpret_cast<int*>(&data_list[i][4])); #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) } })); } for (auto &t : workers) { t.join(); } #else } // omp parallel #endif if (invalid_data) { if (err) { (*err) += "Failed to encode scanline data.\n"; } return TINYEXR_ERROR_INVALID_DATA; } for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size() + doffset; } total_size = 
static_cast<size_t>(offset);
  }
  return TINYEXR_SUCCESS;
}

// can save a single or multi-part image (no deep* formats)
static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images,
                                        const EXRHeader** exr_headers,
                                        unsigned int num_parts,
                                        unsigned char** memory_out,
                                        const char** err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory_out == NULL) {
    SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory", err);
    return 0;
  }
  {
    for (unsigned int i = 0; i < num_parts; ++i) {
      if (exr_headers[i]->compression_type < 0) {
        SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory", err);
        return 0;
      }
#if !TINYEXR_USE_PIZ
      if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
        SetErrorMessage("PIZ compression is not supported in this build", err);
        return 0;
      }
#endif
#if !TINYEXR_USE_ZFP
      if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
        SetErrorMessage("ZFP compression is not supported in this build", err);
        return 0;
      }
#else
      for (int c = 0; c < exr_headers[i]->num_channels; ++c) {
        if (exr_headers[i]->requested_pixel_types[c] !=
            TINYEXR_PIXELTYPE_FLOAT) {
          SetErrorMessage("Pixel type must be FLOAT for ZFP compression", err);
          return 0;
        }
      }
#endif
    }
  }

  std::vector<unsigned char> memory;

  // Header
  {
    const char header[] = { 0x76, 0x2f, 0x31, 0x01 };
    memory.insert(memory.end(), header, header + 4);
  }

  // Version
  // using value from the first header
  int long_name = exr_headers[0]->long_name;
  {
    char marker[] = { 2, 0, 0, 0 };
    /* @todo
    if (exr_header->non_image) {
      marker[1] |= 0x8;
    }
    */
    // tiled
    if (num_parts == 1 && exr_images[0].tiles) {
      marker[1] |= 0x2;
    }
    // long_name
    if (long_name) {
      marker[1] |= 0x4;
    }
    // multipart
    if (num_parts > 1) {
      marker[1] |= 0x10;
    }
    memory.insert(memory.end(), marker, marker + 4);
  }

  int total_chunk_count = 0;
  std::vector<int> chunk_count(num_parts);
  std::vector<OffsetData> offset_data(num_parts);
  for (unsigned int i = 0; i < num_parts; ++i) {
    if (!exr_images[i].tiles) {
      int num_scanlines = NumScanlines(exr_headers[i]->compression_type);
      chunk_count[i] =
          (exr_images[i].height + num_scanlines - 1) / num_scanlines;
      InitSingleResolutionOffsets(offset_data[i], chunk_count[i]);
      total_chunk_count += chunk_count[i];
    } else {
      {
        std::vector<int> num_x_tiles, num_y_tiles;
        PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
        chunk_count[i] = InitTileOffsets(offset_data[i], exr_headers[i],
                                         num_x_tiles, num_y_tiles);
        total_chunk_count += chunk_count[i];
      }
    }
  }

  // Write attributes to memory buffer.
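// Each part header is serialized as a sequence of (name, type, size, value)
// attribute records terminated by a single 0x00 byte; the required
// attributes (channels, compression, dataWindow, displayWindow, lineOrder,
// pixelAspectRatio, screenWindowCenter, screenWindowWidth) are emitted in
// order below.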
std::vector< std::vector<tinyexr::ChannelInfo> > channels(num_parts); { std::set<std::string> partnames; for (unsigned int i = 0; i < num_parts; ++i) { //channels { std::vector<unsigned char> data; for (int c = 0; c < exr_headers[i]->num_channels; c++) { tinyexr::ChannelInfo info; info.p_linear = 0; info.pixel_type = exr_headers[i]->pixel_types[c]; info.requested_pixel_type = exr_headers[i]->requested_pixel_types[c]; info.x_sampling = 1; info.y_sampling = 1; info.name = std::string(exr_headers[i]->channels[c].name); channels[i].push_back(info); } tinyexr::WriteChannelInfo(data, channels[i]); tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0), static_cast<int>(data.size())); } { int comp = exr_headers[i]->compression_type; swap4(&comp); WriteAttributeToMemory( &memory, "compression", "compression", reinterpret_cast<const unsigned char*>(&comp), 1); } { int data[4] = { 0, 0, exr_images[i].width - 1, exr_images[i].height - 1 }; swap4(&data[0]); swap4(&data[1]); swap4(&data[2]); swap4(&data[3]); WriteAttributeToMemory( &memory, "dataWindow", "box2i", reinterpret_cast<const unsigned char*>(data), sizeof(int) * 4); int data0[4] = { 0, 0, exr_images[0].width - 1, exr_images[0].height - 1 }; swap4(&data0[0]); swap4(&data0[1]); swap4(&data0[2]); swap4(&data0[3]); // Note: must be the same across parts (currently, using value from the first header) WriteAttributeToMemory( &memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char*>(data0), sizeof(int) * 4); } { unsigned char line_order = 0; // @fixme { read line_order from EXRHeader } WriteAttributeToMemory(&memory, "lineOrder", "lineOrder", &line_order, 1); } { // Note: must be the same across parts float aspectRatio = 1.0f; swap4(&aspectRatio); WriteAttributeToMemory( &memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char*>(&aspectRatio), sizeof(float)); } { float center[2] = { 0.0f, 0.0f }; swap4(&center[0]); swap4(&center[1]); WriteAttributeToMemory( &memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char*>(center), 2 * sizeof(float)); } { float w = 1.0f; swap4(&w); WriteAttributeToMemory(&memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char*>(&w), sizeof(float)); } if (exr_images[i].tiles) { unsigned char tile_mode = static_cast<unsigned char>(exr_headers[i]->tile_level_mode & 0x3); if (exr_headers[i]->tile_rounding_mode) tile_mode |= (1u << 4u); //unsigned char data[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; unsigned int datai[3] = { 0, 0, 0 }; unsigned char* data = reinterpret_cast<unsigned char*>(&datai[0]); datai[0] = static_cast<unsigned int>(exr_headers[i]->tile_size_x); datai[1] = static_cast<unsigned int>(exr_headers[i]->tile_size_y); data[8] = tile_mode; swap4(reinterpret_cast<unsigned int*>(&data[0])); swap4(reinterpret_cast<unsigned int*>(&data[4])); WriteAttributeToMemory( &memory, "tiles", "tiledesc", reinterpret_cast<const unsigned char*>(data), 9); } // must be present for multi-part files - according to spec. 
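// Namely `name` (unique per part), `type` ("scanlineimage" or "tiledimage")
// and `chunkCount`, all written below when num_parts > 1.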
if (num_parts > 1) { // name { size_t len = 0; if ((len = strlen(exr_headers[i]->name)) > 0) { partnames.emplace(exr_headers[i]->name); if (partnames.size() != i + 1) { SetErrorMessage("'name' attributes must be unique for a multi-part file", err); return 0; } WriteAttributeToMemory( &memory, "name", "string", reinterpret_cast<const unsigned char*>(exr_headers[i]->name), static_cast<int>(len)); } else { SetErrorMessage("Invalid 'name' attribute for a multi-part file", err); return 0; } } // type { const char* type = "scanlineimage"; if (exr_images[i].tiles) type = "tiledimage"; WriteAttributeToMemory( &memory, "type", "string", reinterpret_cast<const unsigned char*>(type), static_cast<int>(strlen(type))); } // chunkCount { WriteAttributeToMemory( &memory, "chunkCount", "int", reinterpret_cast<const unsigned char*>(&chunk_count[i]), 4); } } // Custom attributes if (exr_headers[i]->num_custom_attributes > 0) { for (int j = 0; j < exr_headers[i]->num_custom_attributes; j++) { tinyexr::WriteAttributeToMemory( &memory, exr_headers[i]->custom_attributes[j].name, exr_headers[i]->custom_attributes[j].type, reinterpret_cast<const unsigned char*>( exr_headers[i]->custom_attributes[j].value), exr_headers[i]->custom_attributes[j].size); } } { // end of header memory.push_back(0); } } } if (num_parts > 1) { // end of header list memory.push_back(0); } tinyexr_uint64 chunk_offset = memory.size() + size_t(total_chunk_count) * sizeof(tinyexr_uint64); tinyexr_uint64 total_size = 0; std::vector< std::vector< std::vector<unsigned char> > > data_lists(num_parts); for (unsigned int i = 0; i < num_parts; ++i) { std::string e; int ret = EncodeChunk(&exr_images[i], exr_headers[i], channels[i], chunk_count[i], // starting offset of current chunk after part-number chunk_offset, num_parts > 1, offset_data[i], // output: block offsets, must be initialized data_lists[i], // output total_size, // output &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } return 0; } chunk_offset = total_size; } // Allocating required memory if (total_size == 0) { // something went wrong tinyexr::SetErrorMessage("Output memory size is zero", err); return 0; } (*memory_out) = static_cast<unsigned char*>(malloc(total_size)); // Writing header memcpy((*memory_out), &memory[0], memory.size()); unsigned char* memory_ptr = *memory_out + memory.size(); size_t sum = memory.size(); // Writing offset data for chunks for (unsigned int i = 0; i < num_parts; ++i) { if (exr_images[i].tiles) { const EXRImage* level_image = &exr_images[i]; int num_levels = (exr_headers[i]->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ? 
offset_data[i].num_x_levels : (offset_data[i].num_x_levels * offset_data[i].num_y_levels); for (int level_index = 0; level_index < num_levels; ++level_index) { for (size_t j = 0; j < offset_data[i].offsets[level_index].size(); ++j) { size_t num_bytes = sizeof(tinyexr_uint64) * offset_data[i].offsets[level_index][j].size(); sum += num_bytes; assert(sum <= total_size); memcpy(memory_ptr, reinterpret_cast<unsigned char*>(&offset_data[i].offsets[level_index][j][0]), num_bytes); memory_ptr += num_bytes; } level_image = level_image->next_level; } } else { size_t num_bytes = sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(chunk_count[i]); sum += num_bytes; assert(sum <= total_size); std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data[i].offsets[0][0]; memcpy(memory_ptr, reinterpret_cast<unsigned char*>(&offsets[0]), num_bytes); memory_ptr += num_bytes; } } // Writing chunk data for (unsigned int i = 0; i < num_parts; ++i) { for (size_t j = 0; j < static_cast<size_t>(chunk_count[i]); ++j) { if (num_parts > 1) { sum += 4; assert(sum <= total_size); unsigned int part_number = i; swap4(&part_number); memcpy(memory_ptr, &part_number, 4); memory_ptr += 4; } sum += data_lists[i][j].size(); assert(sum <= total_size); memcpy(memory_ptr, &data_lists[i][j][0], data_lists[i][j].size()); memory_ptr += data_lists[i][j].size(); } } assert(sum == total_size); return total_size; // OK } } // tinyexr size_t SaveEXRImageToMemory(const EXRImage* exr_image, const EXRHeader* exr_header, unsigned char** memory_out, const char** err) { return tinyexr::SaveEXRNPartImageToMemory(exr_image, &exr_header, 1, memory_out, err); } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL || filename == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename), err); return TINYEXR_ERROR_CANT_WRITE_FILE; } #else // Unknown compiler or MinGW without MINGW_HAS_SECURE_API. 
fp = fopen(filename, "wb"); #endif #else fp = fopen(filename, "wb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename), err); return TINYEXR_ERROR_CANT_WRITE_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err); if (mem_size == 0) { return TINYEXR_ERROR_SERIALZATION_FAILED; } size_t written_size = 0; if ((mem_size > 0) && mem) { written_size = fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); if (written_size != mem_size) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_WRITE_FILE; } return TINYEXR_SUCCESS; } size_t SaveEXRMultipartImageToMemory(const EXRImage* exr_images, const EXRHeader** exr_headers, unsigned int num_parts, unsigned char** memory_out, const char** err) { if (exr_images == NULL || exr_headers == NULL || num_parts < 2 || memory_out == NULL) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory", err); return 0; } return tinyexr::SaveEXRNPartImageToMemory(exr_images, exr_headers, num_parts, memory_out, err); } int SaveEXRMultipartImageToFile(const EXRImage* exr_images, const EXRHeader** exr_headers, unsigned int num_parts, const char* filename, const char** err) { if (exr_images == NULL || exr_headers == NULL || num_parts < 2) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRMultipartImageToFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang. errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename), err); return TINYEXR_ERROR_CANT_WRITE_FILE; } #else // Unknown compiler or MinGW without MINGW_HAS_SECURE_API. fp = fopen(filename, "wb"); #endif #else fp = fopen(filename, "wb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename), err); return TINYEXR_ERROR_CANT_WRITE_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRMultipartImageToMemory(exr_images, exr_headers, num_parts, &mem, err); if (mem_size == 0) { return TINYEXR_ERROR_SERIALZATION_FAILED; } size_t written_size = 0; if ((mem_size > 0) && mem) { written_size = fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); if (written_size != mem_size) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_WRITE_FILE; } return TINYEXR_SUCCESS; } int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { if (deep_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; #if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang. errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else // Unknown compiler or MinGW without MINGW_HAS_SECURE_API. 
fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else FILE *fp = fopen(filename, "rb"); if (!fp) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #endif size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize == 0) { fclose(fp); tinyexr::SetErrorMessage("File size is zero : " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); (void)ret; } fclose(fp); const char *head = &buf[0]; const char *marker = &buf[0]; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { tinyexr::SetErrorMessage("Invalid magic number", err); return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } // Version, scanline. { // ver 2.0, scanline, deep bit on(0x800) // must be [2, 0, 0, 0] if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) { tinyexr::SetErrorMessage("Unsupported version or scanline", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int num_scanline_blocks = 1; // 16 for ZIP compression. int compression_type = -1; int num_channels = -1; std::vector<tinyexr::ChannelInfo> channels; // Read attributes size_t size = filesize - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { marker++; size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { std::stringstream ss; ss << "Failed to parse attribute\n"; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (attr_name.compare("compression") == 0) { compression_type = data[0]; if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) { std::stringstream ss; ss << "Unsupported compression type : " << compression_type; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!tinyexr::ReadChannelInfo(channels, data)) { tinyexr::SetErrorMessage("Failed to parse channel info", err); return TINYEXR_ERROR_INVALID_DATA; } num_channels = static_cast<int>(channels.size()); if (num_channels < 1) { tinyexr::SetErrorMessage("Invalid channels format", err); return TINYEXR_ERROR_INVALID_DATA; } } else if (attr_name.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); tinyexr::swap4(&dx); tinyexr::swap4(&dy); tinyexr::swap4(&dw); tinyexr::swap4(&dh); } else if (attr_name.compare("displayWindow") == 0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); 
memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); tinyexr::swap4(&x); tinyexr::swap4(&y); tinyexr::swap4(&w); tinyexr::swap4(&h); } } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(num_channels >= 1); int data_width = dw - dx + 1; int data_height = dh - dy + 1; // Read offset tables. int num_blocks = data_height / num_scanline_blocks; if (num_blocks * num_scanline_blocks < data_height) { num_blocks++; } std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks)); for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { tinyexr::tinyexr_int64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset)); marker += sizeof(tinyexr::tinyexr_int64); // = 8 offsets[y] = offset; } #if TINYEXR_USE_PIZ if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) || (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) { #else if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #endif // OK } else { tinyexr::SetErrorMessage("Unsupported compression format", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } deep_image->image = static_cast<float ***>( malloc(sizeof(float **) * static_cast<size_t>(num_channels))); for (int c = 0; c < num_channels; c++) { deep_image->image[c] = static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { } } deep_image->offset_table = static_cast<int **>( malloc(sizeof(int *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { deep_image->offset_table[y] = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(data_width))); } for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int line_no; tinyexr::tinyexr_int64 packedOffsetTableSize; tinyexr::tinyexr_int64 packedSampleDataSize; tinyexr::tinyexr_int64 unpackedSampleDataSize; memcpy(&line_no, data_ptr, sizeof(int)); memcpy(&packedOffsetTableSize, data_ptr + 4, sizeof(tinyexr::tinyexr_int64)); memcpy(&packedSampleDataSize, data_ptr + 12, sizeof(tinyexr::tinyexr_int64)); memcpy(&unpackedSampleDataSize, data_ptr + 20, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap4(&line_no); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize)); std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width)); // decode pixel offset table. 
    {
      unsigned long dstLen =
          static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
      // Returning `false` here would convert to 0 == TINYEXR_SUCCESS and mask
      // the failure, so report a proper error code instead.
      if (!tinyexr::DecompressZip(
              reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
              &dstLen, data_ptr + 28,
              static_cast<unsigned long>(packedOffsetTableSize))) {
        return TINYEXR_ERROR_INVALID_DATA;
      }

      assert(dstLen == pixelOffsetTable.size() * sizeof(int));
      for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
        deep_image->offset_table[y][i] = pixelOffsetTable[i];
      }
    }

    std::vector<unsigned char> sample_data(
        static_cast<size_t>(unpackedSampleDataSize));

    // decode sample data.
    {
      unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
      if (dstLen) {
        if (!tinyexr::DecompressZip(
                reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
                data_ptr + 28 + packedOffsetTableSize,
                static_cast<unsigned long>(packedSampleDataSize))) {
          return TINYEXR_ERROR_INVALID_DATA;
        }
        assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
      }
    }

    // decode sample
    int sampleSize = -1;
    std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
    {
      int channel_offset = 0;
      for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
        channel_offset_list[i] = channel_offset;
        if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) {  // UINT
          channel_offset += 4;
        } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) {  // half
          channel_offset += 2;
        } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {  // float
          channel_offset += 4;
        } else {
          assert(0);
        }
      }
      sampleSize = channel_offset;
    }
    assert(sampleSize >= 2);

    assert(static_cast<size_t>(
               pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
               sampleSize) == sample_data.size());
    int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;

    //
    // Alloc memory
    //

    //
    // pixel data is stored as image[channels][pixel_samples]
    //
    {
      tinyexr::tinyexr_uint64 data_offset = 0;
      for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
        deep_image->image[c][y] = static_cast<float *>(
            malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));

        if (channels[c].pixel_type == 0) {  // UINT
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            unsigned int ui;
            unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(int)));
            tinyexr::cpy4(&ui, src_ptr);
            deep_image->image[c][y][x] = static_cast<float>(ui);  // @fixme
          }
          data_offset +=
              sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
        } else if (channels[c].pixel_type == 1) {  // half
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            tinyexr::FP16 f16;
            const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(short)));
            tinyexr::cpy2(&(f16.u), src_ptr);
            tinyexr::FP32 f32 = half_to_float(f16);
            deep_image->image[c][y][x] = f32.f;
          }
          data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
        } else {  // float
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            float f;
            const float *src_ptr = reinterpret_cast<float *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(float)));
            tinyexr::cpy4(&f, src_ptr);
            deep_image->image[c][y][x] = f;
          }
          data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
        }
      }
    }
  }  // y

  deep_image->width = data_width;
  deep_image->height = data_height;

  deep_image->channel_names = static_cast<const char **>(
      malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
    deep_image->channel_names[c] =
_strdup(channels[c].name.c_str()); #else deep_image->channel_names[c] = strdup(channels[c].name.c_str()); #endif } deep_image->num_channels = num_channels; return TINYEXR_SUCCESS; } void InitEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return; } exr_image->width = 0; exr_image->height = 0; exr_image->num_channels = 0; exr_image->images = NULL; exr_image->tiles = NULL; exr_image->next_level = NULL; exr_image->level_x = 0; exr_image->level_y = 0; exr_image->num_tiles = 0; } void FreeEXRErrorMessage(const char *msg) { if (msg) { free(reinterpret_cast<void *>(const_cast<char *>(msg))); } return; } void InitEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return; } memset(exr_header, 0, sizeof(EXRHeader)); } int FreeEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->channels) { free(exr_header->channels); } if (exr_header->pixel_types) { free(exr_header->pixel_types); } if (exr_header->requested_pixel_types) { free(exr_header->requested_pixel_types); } for (int i = 0; i < exr_header->num_custom_attributes; i++) { if (exr_header->custom_attributes[i].value) { free(exr_header->custom_attributes[i].value); } } if (exr_header->custom_attributes) { free(exr_header->custom_attributes); } EXRSetNameAttr(exr_header, NULL); return TINYEXR_SUCCESS; } void EXRSetNameAttr(EXRHeader* exr_header, const char* name) { if (exr_header == NULL) { return; } memset(exr_header->name, 0, 256); if (name != NULL) { size_t len = std::min(strlen(name), (size_t)255); if (len) { memcpy(exr_header->name, name, len); } } } int EXRNumLevels(const EXRImage* exr_image) { if (exr_image == NULL) return 0; if(exr_image->images) return 1; // scanlines int levels = 1; const EXRImage* level_image = exr_image; while((level_image = level_image->next_level)) ++levels; return levels; } int FreeEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_image->next_level) { FreeEXRImage(exr_image->next_level); delete exr_image->next_level; } for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->images && exr_image->images[i]) { free(exr_image->images[i]); } } if (exr_image->images) { free(exr_image->images); } if (exr_image->tiles) { for (int tid = 0; tid < exr_image->num_tiles; tid++) { for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) { free(exr_image->tiles[tid].images[i]); } } if (exr_image->tiles[tid].images) { free(exr_image->tiles[tid].images); } } free(exr_image->tiles); } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_header == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang. errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } #else // Unknown compiler or MinGW without MINGW_HAS_SECURE_API. 
fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("fread() error on " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Data size too short", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage(err_str, err); return ret; } if (empty_header) { marker += 1; // skip '\0' break; } // `chunkCount` must exist in the header. if (info.chunk_count == 0) { tinyexr::SetErrorMessage( "`chunkCount' attribute is not found in the header.", err); return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // move to next header. marker += info.header_len; size -= info.header_len; } // allocate memory for EXRHeader and create array of EXRHeader pointers. (*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); int retcode = TINYEXR_SUCCESS; for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); memset(exr_header, 0, sizeof(EXRHeader)); std::string warn; std::string _err; if (!ConvertHeader(exr_header, infos[i], &warn, &_err)) { if (!_err.empty()) { tinyexr::SetErrorMessage( _err, err); } // continue to converting headers retcode = TINYEXR_ERROR_INVALID_HEADER; } exr_header->multipart = exr_version->multipart ? 1 : 0; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return retcode; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromFile()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang. 
errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } #else // Unknown compiler or MinGW without MINGW_HAS_SECURE_API. fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRMultipartHeaderFromMemory( exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size) { if (version == NULL || memory == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } version->tiled = false; version->long_name = false; version->non_image = false; version->multipart = false; // Parse version header. { // must be 2 if (marker[0] != 2) { return TINYEXR_ERROR_INVALID_EXR_VERSION; } if (version == NULL) { return TINYEXR_SUCCESS; // May OK } version->version = 2; if (marker[1] & 0x2) { // 9th bit version->tiled = true; } if (marker[1] & 0x4) { // 10th bit version->long_name = true; } if (marker[1] & 0x8) { // 11th bit version->non_image = true; // (deep image) } if (marker[1] & 0x10) { // 12th bit version->multipart = true; } } return TINYEXR_SUCCESS; } int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) { if (filename == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang. errno_t err = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (err != 0) { // TODO(syoyo): return wfopen_s erro code return TINYEXR_ERROR_CANT_OPEN_FILE; } #else // Unknown compiler or MinGW without MINGW_HAS_SECURE_API. 
fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t file_size; // Compute size fseek(fp, 0, SEEK_END); file_size = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (file_size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } unsigned char buf[tinyexr::kEXRVersionSize]; size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp); fclose(fp); if (ret != tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize); } int LoadEXRMultipartImageFromMemory(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromMemory()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } // compute total header size. size_t total_header_size = 0; for (unsigned int i = 0; i < num_parts; i++) { if (exr_headers[i]->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } total_header_size += exr_headers[i]->header_len; } const char *marker = reinterpret_cast<const char *>( memory + total_header_size + 4 + 4); // +8 for magic number and version header. marker += 1; // Skip empty header. // NOTE 1: // In multipart image, There is 'part number' before chunk data. // 4 byte : part number // 4+ : chunk // // NOTE 2: // EXR spec says 'part number' is 'unsigned long' but actually this is // 'unsigned int(4 bytes)' in OpenEXR implementation... // http://www.openexr.com/openexrfilelayout.pdf // Load chunk offset table. 
std::vector<tinyexr::OffsetData> chunk_offset_table_list; chunk_offset_table_list.reserve(num_parts); for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { chunk_offset_table_list.resize(chunk_offset_table_list.size() + 1); tinyexr::OffsetData& offset_data = chunk_offset_table_list.back(); if (!exr_headers[i]->tiled || exr_headers[i]->tile_level_mode == TINYEXR_TILE_ONE_LEVEL) { tinyexr::InitSingleResolutionOffsets(offset_data, exr_headers[i]->chunk_count); std::vector<tinyexr::tinyexr_uint64>& offset_table = offset_data.offsets[0][0]; for (size_t c = 0; c < offset_table.size(); c++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, 8); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_table[c] = offset + 4; // +4 to skip 'part number' marker += 8; } } else { { std::vector<int> num_x_tiles, num_y_tiles; tinyexr::PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]); int num_blocks = InitTileOffsets(offset_data, exr_headers[i], num_x_tiles, num_y_tiles); if (num_blocks != exr_headers[i]->chunk_count) { tinyexr::SetErrorMessage("Invalid offset table size.", err); return TINYEXR_ERROR_INVALID_DATA; } } for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) { for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) { for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_data.offsets[l][dy][dx] = offset + 4; // +4 to skip 'part number' marker += sizeof(tinyexr::tinyexr_uint64); // = 8 } } } } } // Decode image. for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { tinyexr::OffsetData &offset_data = chunk_offset_table_list[i]; // First check 'part number' is identitical to 'i' for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) { const unsigned char *part_number_addr = memory + offset_data.offsets[l][dy][dx] - 4; // -4 to move to 'part number' field. unsigned int part_no; memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4 tinyexr::swap4(&part_no); if (part_no != i) { tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } } std::string e; int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_data, memory, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } return ret; } } return TINYEXR_SUCCESS; } int LoadEXRMultipartImageFromFile(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang. 
errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else // Unknown compiler or MinGW without MINGW_HAS_SECURE_API. fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, &buf.at(0), filesize, err); } int SaveEXR(const float *data, int width, int height, int components, const int save_as_fp16, const char *outfilename, const char **err) { if ((components == 1) || components == 3 || components == 4) { // OK } else { std::stringstream ss; ss << "Unsupported component value : " << components << std::endl; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRHeader header; InitEXRHeader(&header); if ((width < 16) && (height < 16)) { // No compression for small image. header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE; } else { header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP; } EXRImage image; InitEXRImage(&image); image.num_channels = components; std::vector<float> images[4]; if (components == 1) { images[0].resize(static_cast<size_t>(width * height)); memcpy(images[0].data(), data, sizeof(float) * size_t(width * height)); } else { images[0].resize(static_cast<size_t>(width * height)); images[1].resize(static_cast<size_t>(width * height)); images[2].resize(static_cast<size_t>(width * height)); images[3].resize(static_cast<size_t>(width * height)); // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers for (size_t i = 0; i < static_cast<size_t>(width * height); i++) { images[0][i] = data[static_cast<size_t>(components) * i + 0]; images[1][i] = data[static_cast<size_t>(components) * i + 1]; images[2][i] = data[static_cast<size_t>(components) * i + 2]; if (components == 4) { images[3][i] = data[static_cast<size_t>(components) * i + 3]; } } } float *image_ptr[4] = {0, 0, 0, 0}; if (components == 4) { image_ptr[0] = &(images[3].at(0)); // A image_ptr[1] = &(images[2].at(0)); // B image_ptr[2] = &(images[1].at(0)); // G image_ptr[3] = &(images[0].at(0)); // R } else if (components == 3) { image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R } else if (components == 1) { image_ptr[0] = &(images[0].at(0)); // A } image.images = reinterpret_cast<unsigned char **>(image_ptr); image.width = width; image.height = height; header.num_channels = components; header.channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels))); // Must be (A)BGR order, since most of EXR viewers expect this channel order. 
if (components == 4) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); strncpy_s(header.channels[1].name, "B", 255); strncpy_s(header.channels[2].name, "G", 255); strncpy_s(header.channels[3].name, "R", 255); #else strncpy(header.channels[0].name, "A", 255); strncpy(header.channels[1].name, "B", 255); strncpy(header.channels[2].name, "G", 255); strncpy(header.channels[3].name, "R", 255); #endif header.channels[0].name[strlen("A")] = '\0'; header.channels[1].name[strlen("B")] = '\0'; header.channels[2].name[strlen("G")] = '\0'; header.channels[3].name[strlen("R")] = '\0'; } else if (components == 3) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "B", 255); strncpy_s(header.channels[1].name, "G", 255); strncpy_s(header.channels[2].name, "R", 255); #else strncpy(header.channels[0].name, "B", 255); strncpy(header.channels[1].name, "G", 255); strncpy(header.channels[2].name, "R", 255); #endif header.channels[0].name[strlen("B")] = '\0'; header.channels[1].name[strlen("G")] = '\0'; header.channels[2].name[strlen("R")] = '\0'; } else { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); #else strncpy(header.channels[0].name, "A", 255); #endif header.channels[0].name[strlen("A")] = '\0'; } header.pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); header.requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image if (save_as_fp16 > 0) { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format } else { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e. // no precision reduction) } } int ret = SaveEXRImageToFile(&image, &header, outfilename, err); if (ret != TINYEXR_SUCCESS) { return ret; } free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); return ret; } #ifdef __clang__ // zero-as-null-ppinter-constant #pragma clang diagnostic pop #endif #endif // TINYEXR_IMPLEMENTATION_DEFINED #endif // TINYEXR_IMPLEMENTATION
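// ---------------------------------------------------------------------------
// Usage sketch (an editor's illustration, not part of the tinyexr API above):
// a minimal example of the SaveEXR() convenience wrapper defined earlier,
// assuming one translation unit defines TINYEXR_IMPLEMENTATION before
// including tinyexr.h. It is wrapped in `#if 0` so it never affects builds;
// the output file name "gradient.exr" and the image contents are illustrative.
#if 0
#include <cstdio>
#include <vector>

int main() {
  const int w = 256, h = 256, comps = 3;  // interleaved RGBRGB... layout
  std::vector<float> rgb(static_cast<size_t>(w) * h * comps);
  for (int y = 0; y < h; y++) {
    for (int x = 0; x < w; x++) {
      const size_t i = static_cast<size_t>(y) * w + x;
      rgb[comps * i + 0] = x / float(w);  // R ramps left to right
      rgb[comps * i + 1] = y / float(h);  // G ramps top to bottom
      rgb[comps * i + 2] = 0.5f;          // constant B
    }
  }
  const char *err = NULL;
  // save_as_fp16 = 1 requests half-float channel storage (see SaveEXR above).
  int ret = SaveEXR(rgb.data(), w, h, comps, /*save_as_fp16=*/1,
                    "gradient.exr", &err);
  if (ret != TINYEXR_SUCCESS) {
    if (err) {
      fprintf(stderr, "SaveEXR failed: %s\n", err);
      FreeEXRErrorMessage(err);
    }
    return 1;
  }
  return 0;
}
#endif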
ParFriends.h
/****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.6 -------------------------------------------------*/
/* date: 6/15/2017 ---------------------------------------------*/
/* authors: Ariful Azad, Aydin Buluc --------------------------*/
/****************************************************************/
/*
 Copyright (c) 2010-2017, The Regents of the University of California

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 copies of the Software, and to permit persons to whom the Software is
 furnished to do so, subject to the following conditions:

 The above copyright notice and this permission notice shall be included in
 all copies or substantial portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */

#ifndef _PAR_FRIENDS_H_
#define _PAR_FRIENDS_H_

#include "mpi.h"
#include <iostream>
#include <cstdarg>
#include "SpParMat.h"
#include "SpParMat3D.h"
#include "SpParHelper.h"
#include "MPIType.h"
#include "Friends.h"
#include "OptBuf.h"
#include "mtSpGEMM.h"
#include "MultiwayMerge.h"
#include <unistd.h>
#include <type_traits>

namespace combblas {

template <class IT, class NT, class DER>
class SpParMat;

/*************************************************************************************************/
/**************************** FRIEND FUNCTIONS FOR PARALLEL CLASSES ******************************/
/*************************************************************************************************/

/**
 ** Concatenate all the FullyDistVec<IT,NT> objects into a single one
 **/
template <typename IT, typename NT>
FullyDistVec<IT,NT> Concatenate ( std::vector< FullyDistVec<IT,NT> > & vecs)
{
    if(vecs.size() < 1)
    {
        SpParHelper::Print("Warning: Nothing to concatenate, returning empty\n");
        return FullyDistVec<IT,NT>();
    }
    else if (vecs.size() < 2)
    {
        return vecs[0];   // a single vector: return it as-is
    }
    else
    {
        typename std::vector< FullyDistVec<IT,NT> >::iterator it = vecs.begin();
        std::shared_ptr<CommGrid> commGridPtr = it->getcommgrid();
        MPI_Comm World = commGridPtr->GetWorld();

        IT nglen = it->TotalLength();     // new global length
        IT cumloclen = it->MyLocLength(); // existing cumulative local lengths
        ++it;
        for(; it != vecs.end(); ++it)
        {
            if(*(commGridPtr) != *(it->getcommgrid()))
            {
                SpParHelper::Print("Grids are not comparable for Concatenate\n");
                MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
            }
            nglen += it->TotalLength();
            cumloclen += it->MyLocLength();
        }
        FullyDistVec<IT,NT> ConCat (commGridPtr, nglen, NT());
        int nprocs = commGridPtr->GetSize();

        std::vector< std::vector< NT > > data(nprocs);
        std::vector< std::vector< IT > > inds(nprocs);
        IT gloffset = 0;
        for(it = vecs.begin(); it != vecs.end(); ++it)
        {
            IT loclen = it->LocArrSize();
            for(IT i=0; i < loclen; ++i)
            {
                IT locind;
                IT loffset = it->LengthUntil();
                int owner = ConCat.Owner(gloffset+loffset+i,
locind); data[owner].push_back(it->arr[i]); inds[owner].push_back(locind); } gloffset += it->TotalLength(); } int * sendcnt = new int[nprocs]; int * sdispls = new int[nprocs]; for(int i=0; i<nprocs; ++i) sendcnt[i] = (int) data[i].size(); int * rdispls = new int[nprocs]; int * recvcnt = new int[nprocs]; MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World); // share the request counts sdispls[0] = 0; rdispls[0] = 0; for(int i=0; i<nprocs-1; ++i) { sdispls[i+1] = sdispls[i] + sendcnt[i]; rdispls[i+1] = rdispls[i] + recvcnt[i]; } IT totrecv = std::accumulate(recvcnt,recvcnt+nprocs,static_cast<IT>(0)); NT * senddatabuf = new NT[cumloclen]; for(int i=0; i<nprocs; ++i) { std::copy(data[i].begin(), data[i].end(), senddatabuf+sdispls[i]); std::vector<NT>().swap(data[i]); // delete data vectors } NT * recvdatabuf = new NT[totrecv]; MPI_Alltoallv(senddatabuf, sendcnt, sdispls, MPIType<NT>(), recvdatabuf, recvcnt, rdispls, MPIType<NT>(), World); // send data delete [] senddatabuf; IT * sendindsbuf = new IT[cumloclen]; for(int i=0; i<nprocs; ++i) { std::copy(inds[i].begin(), inds[i].end(), sendindsbuf+sdispls[i]); std::vector<IT>().swap(inds[i]); // delete inds vectors } IT * recvindsbuf = new IT[totrecv]; MPI_Alltoallv(sendindsbuf, sendcnt, sdispls, MPIType<IT>(), recvindsbuf, recvcnt, rdispls, MPIType<IT>(), World); // send new inds DeleteAll(sendindsbuf, sendcnt, sdispls); for(int i=0; i<nprocs; ++i) { for(int j = rdispls[i]; j < rdispls[i] + recvcnt[i]; ++j) { ConCat.arr[recvindsbuf[j]] = recvdatabuf[j]; } } DeleteAll(recvindsbuf, recvcnt, rdispls); return ConCat; } } template <typename MATRIXA, typename MATRIXB> bool CheckSpGEMMCompliance(const MATRIXA & A, const MATRIXB & B) { if(A.getncol() != B.getnrow()) { std::ostringstream outs; outs << "Can not multiply, dimensions does not match"<< std::endl; outs << A.getncol() << " != " << B.getnrow() << std::endl; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); return false; } if((void*) &A == (void*) &B) { std::ostringstream outs; outs << "Can not multiply, inputs alias (make a temporary copy of one of them first)"<< std::endl; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, MATRIXALIAS); return false; } return true; } // Combined logic for prune, recovery, and select template <typename IT, typename NT, typename DER> void MCLPruneRecoverySelect(SpParMat<IT,NT,DER> & A, NT hardThreshold, IT selectNum, IT recoverNum, NT recoverPct, int kselectVersion) { int myrank; MPI_Comm_rank(MPI_COMM_WORLD,&myrank); #ifdef TIMING double t0, t1; #endif // Prune and create a new pruned matrix SpParMat<IT,NT,DER> PrunedA = A.Prune(std::bind2nd(std::less_equal<NT>(), hardThreshold), false); // column-wise statistics of the pruned matrix FullyDistVec<IT,NT> colSums = PrunedA.Reduce(Column, std::plus<NT>(), 0.0); FullyDistVec<IT,NT> nnzPerColumnUnpruned = A.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;}); FullyDistVec<IT,NT> nnzPerColumn = PrunedA.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;}); //FullyDistVec<IT,NT> pruneCols(A.getcommgrid(), A.getncol(), hardThreshold); FullyDistVec<IT,NT> pruneCols(nnzPerColumn); pruneCols = hardThreshold; PrunedA.FreeMemory(); FullyDistSpVec<IT,NT> recoverCols(nnzPerColumn, std::bind2nd(std::less<NT>(), recoverNum)); // recover only when nnzs in unprunned columns are greater than nnzs in pruned column recoverCols = EWiseApply<NT>(recoverCols, nnzPerColumnUnpruned, [](NT spval, NT dval){return spval;}, [](NT spval, NT dval){return dval > spval;}, false, NT()); 
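        // At this point recoverCols holds the columns whose pruned nnz fell
        // below recoverNum and whose unpruned nnz exceeded the pruned nnz
        // (i.e., pruning actually removed entries there). The next two
        // statements overwrite the stored values with recoverPct and keep only
        // the columns whose post-prune sum is also below that threshold;
        // those survivors are the recovery candidates.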
recoverCols = recoverPct; // columns with nnz < r AND sum < recoverPct (pct) recoverCols = EWiseApply<NT>(recoverCols, colSums, [](NT spval, NT dval){return spval;}, [](NT spval, NT dval){return dval < spval;}, false, NT()); IT nrecover = recoverCols.getnnz(); if(nrecover > 0) { #ifdef TIMING t0=MPI_Wtime(); #endif A.Kselect(recoverCols, recoverNum, kselectVersion); #ifdef TIMING t1=MPI_Wtime(); mcl_kselecttime += (t1-t0); #endif pruneCols.Set(recoverCols); #ifdef COMBBLAS_DEBUG std::ostringstream outs; outs << "Number of columns needing recovery: " << nrecover << std::endl; SpParHelper::Print(outs.str()); #endif } if(selectNum>0) { // remaining columns will be up for selection FullyDistSpVec<IT,NT> selectCols = EWiseApply<NT>(recoverCols, colSums, [](NT spval, NT dval){return spval;}, [](NT spval, NT dval){return spval==-1;}, true, static_cast<NT>(-1)); selectCols = selectNum; selectCols = EWiseApply<NT>(selectCols, nnzPerColumn, [](NT spval, NT dval){return spval;}, [](NT spval, NT dval){return dval > spval;}, false, NT()); IT nselect = selectCols.getnnz(); if(nselect > 0 ) { #ifdef TIMING t0=MPI_Wtime(); #endif A.Kselect(selectCols, selectNum, kselectVersion); // PrunedA would also work #ifdef TIMING t1=MPI_Wtime(); mcl_kselecttime += (t1-t0); #endif pruneCols.Set(selectCols); #ifdef COMBBLAS_DEBUG std::ostringstream outs; outs << "Number of columns needing selection: " << nselect << std::endl; SpParHelper::Print(outs.str()); #endif #ifdef TIMING t0=MPI_Wtime(); #endif SpParMat<IT,NT,DER> selectedA = A.PruneColumn(pruneCols, std::less<NT>(), false); #ifdef TIMING t1=MPI_Wtime(); mcl_prunecolumntime += (t1-t0); #endif if(recoverNum>0 ) // recovery can be attempted after selection { FullyDistVec<IT,NT> nnzPerColumn1 = selectedA.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;}); FullyDistVec<IT,NT> colSums1 = selectedA.Reduce(Column, std::plus<NT>(), 0.0); selectedA.FreeMemory(); // slected columns with nnz < recoverNum (r) selectCols = recoverNum; selectCols = EWiseApply<NT>(selectCols, nnzPerColumn1, [](NT spval, NT dval){return spval;}, [](NT spval, NT dval){return dval < spval;}, false, NT()); // selected columns with sum < recoverPct (pct) selectCols = recoverPct; selectCols = EWiseApply<NT>(selectCols, colSums1, [](NT spval, NT dval){return spval;}, [](NT spval, NT dval){return dval < spval;}, false, NT()); IT n_recovery_after_select = selectCols.getnnz(); if(n_recovery_after_select>0) { // mclExpandVector2 does it on the original vector // mclExpandVector1 does it one pruned vector #ifdef TIMING t0=MPI_Wtime(); #endif A.Kselect(selectCols, recoverNum, kselectVersion); // Kselect on PrunedA might give different result #ifdef TIMING t1=MPI_Wtime(); mcl_kselecttime += (t1-t0); #endif pruneCols.Set(selectCols); #ifdef COMBBLAS_DEBUG std::ostringstream outs1; outs1 << "Number of columns needing recovery after selection: " << nselect << std::endl; SpParHelper::Print(outs1.str()); #endif } } } } // final prune #ifdef TIMING t0=MPI_Wtime(); #endif A.PruneColumn(pruneCols, std::less<NT>(), true); #ifdef TIMING t1=MPI_Wtime(); mcl_prunecolumntime += (t1-t0); #endif // Add loops for empty columns if(recoverNum<=0 ) // if recoverNum>0, recovery would have added nonzeros in empty columns { FullyDistVec<IT,NT> nnzPerColumnA = A.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;}); FullyDistSpVec<IT,NT> emptyColumns(nnzPerColumnA, std::bind2nd(std::equal_to<NT>(), 0.0)); emptyColumns = 1.00; //Ariful: We need a selective AddLoops function with a sparse vector 
//A.AddLoops(emptyColumns); } } template <typename SR, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> IU EstimateFLOP (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false) { int myrank; MPI_Comm_rank(MPI_COMM_WORLD,&myrank); int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy); IU C_m = A.spSeq->getnrow(); IU C_n = B.spSeq->getncol(); //const_cast< UDERB* >(B.spSeq)->Transpose(); // do not transpose for colum-by-column multiplication IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages); IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages); SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld()); // Remotely fetched matrices are stored as pointers UDERA * ARecv; UDERB * BRecv; IU local_flops = 0; int Aself = (A.commGrid)->GetRankInProcRow(); int Bself = (B.commGrid)->GetRankInProcCol(); for(int i = 0; i < stages; ++i) { std::vector<IU> ess; if(i == Aself) { ARecv = A.spSeq; // shallow-copy } else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDERA(); // first, create the object } SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements ess.clear(); if(i == Bself) { BRecv = B.spSeq; // shallow-copy } else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) { ess[j] = BRecvSizes[j][i]; } BRecv = new UDERB(); } SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements local_flops += EstimateLocalFLOP<SR> (*ARecv, *BRecv, // parameters themselves i != Aself, // 'delete A' condition i != Bself); // 'delete B' condition } if(clearA && A.spSeq != NULL) { delete A.spSeq; A.spSeq = NULL; } if(clearB && B.spSeq != NULL) { delete B.spSeq; B.spSeq = NULL; } SpHelper::deallocate2D(ARecvSizes, UDERA::esscount); SpHelper::deallocate2D(BRecvSizes, UDERB::esscount); //if(!clearB) // const_cast< UDERB* >(B.spSeq)->Transpose(); // transpose back to original IU global_flops = 0; MPI_Allreduce(&local_flops, &global_flops, 1, MPI_LONG_LONG_INT, MPI_SUM, A.getcommgrid()->GetWorld()); return global_flops; } /** * Broadcasts A multiple times (#phases) in order to save storage in the output * Only uses 1/phases of C memory if the threshold/max limits are proper * Parameters: * - computationKernel: 1 means hash-based, 2 means heap-based */ template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU,NUO,UDERO> MemEfficientSpGEMM (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, int phases, NUO hardThreshold, IU selectNum, IU recoverNum, NUO recoverPct, int kselectVersion, int computationKernel, int64_t perProcessMemory) { typedef typename UDERA::LocalIT LIA; typedef typename UDERB::LocalIT LIB; typedef typename UDERO::LocalIT LIC; int myrank; MPI_Comm_rank(MPI_COMM_WORLD,&myrank); if(A.getncol() != B.getnrow()) { std::ostringstream outs; outs << "Can not multiply, dimensions does not match"<< std::endl; outs << A.getncol() << " != " << B.getnrow() << std::endl; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); return SpParMat< IU,NUO,UDERO >(); } if(phases <1 || phases 
>= A.getncol())
    {
        SpParHelper::Print("MemEfficientSpGEMM: The value of phases is too small or large. Resetting to 1.\n");
        phases = 1;
    }

    int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication
    std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);

    double t0, t1, t2, t3, t4, t5;
#ifdef TIMING
    MPI_Barrier(A.getcommgrid()->GetWorld());
    t0 = MPI_Wtime();
#endif
    if(perProcessMemory>0) // estimate the number of phases permitted by memory
    {
        int p;
        MPI_Comm World = GridC->GetWorld();
        MPI_Comm_size(World,&p);

        int64_t perNNZMem_in = sizeof(IU)*2 + sizeof(NU1);
        int64_t perNNZMem_out = sizeof(IU)*2 + sizeof(NUO);

        // max nnz(A) in a process
        int64_t lannz = A.getlocalnnz();
        int64_t gannz;
        MPI_Allreduce(&lannz, &gannz, 1, MPIType<int64_t>(), MPI_MAX, World);
        int64_t inputMem = gannz * perNNZMem_in * 4; // for four copies (two for SUMMA)

        // max nnz(A^2) stored by SUMMA in a process
        int64_t asquareNNZ = EstPerProcessNnzSUMMA(A,B, false);
        int64_t asquareMem = asquareNNZ * perNNZMem_out * 2; // an extra copy in multiway merge and in selection/recovery step

        // estimate kselect memory
        int64_t d = ceil( (asquareNNZ * sqrt(p))/ B.getlocalcols() ); // average nnz per column in A^2 (it is an overestimate because asquareNNZ is estimated based on unmerged matrices)
        // this is equivalent to (asquareNNZ * p) / B.getcol()
        int64_t k = std::min(int64_t(std::max(selectNum, recoverNum)), d );
        int64_t kselectmem = B.getlocalcols() * k * 8 * 3;

        // estimate output memory
        int64_t outputNNZ = (B.getlocalcols() * k)/sqrt(p);
        int64_t outputMem = outputNNZ * perNNZMem_in * 2;

        // inputMem + outputMem + asquareMem/phases + kselectmem/phases < memory
        int64_t remainingMem = perProcessMemory*1000000000 - inputMem - outputMem;
        if(remainingMem > 0)
        {
            phases = 1 + (asquareMem+kselectmem) / remainingMem;
        }

        if(myrank==0)
        {
            if(remainingMem < 0)
            {
                std::cout << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n Warning: the input and output memory requirement is greater than the per-process available memory. Keeping phases at the value supplied at the command line. The program may run out of memory and crash! \n !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
<< std::endl; } #ifdef SHOW_MEMORY_USAGE int64_t maxMemory = kselectmem/phases + inputMem + outputMem + asquareMem / phases; if(maxMemory>1000000000) std::cout << "phases: " << phases << ": per process memory: " << perProcessMemory << " GB asquareMem: " << asquareMem/1000000000.00 << " GB" << " inputMem: " << inputMem/1000000000.00 << " GB" << " outputMem: " << outputMem/1000000000.00 << " GB" << " kselectmem: " << kselectmem/1000000000.00 << " GB" << std::endl; else std::cout << "phases: " << phases << ": per process memory: " << perProcessMemory << " GB asquareMem: " << asquareMem/1000000.00 << " MB" << " inputMem: " << inputMem/1000000.00 << " MB" << " outputMem: " << outputMem/1000000.00 << " MB" << " kselectmem: " << kselectmem/1000000.00 << " MB" << std::endl; #endif } } //if(myrank == 0){ //fprintf(stderr, "[MemEfficientSpGEMM] Running with phase: %d\n", phases); //} #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); t1 = MPI_Wtime(); mcl_symbolictime += (t1-t0); #endif LIA C_m = A.spSeq->getnrow(); LIB C_n = B.spSeq->getncol(); std::vector< UDERB > PiecesOfB; UDERB CopyB = *(B.spSeq); // we allow alias matrices as input because of this local copy CopyB.ColSplit(phases, PiecesOfB); // CopyB's memory is destroyed at this point MPI_Barrier(GridC->GetWorld()); LIA ** ARecvSizes = SpHelper::allocate2D<LIA>(UDERA::esscount, stages); LIB ** BRecvSizes = SpHelper::allocate2D<LIB>(UDERB::esscount, stages); static_assert(std::is_same<LIA, LIB>::value, "local index types for both input matrices should be the same"); static_assert(std::is_same<LIA, LIC>::value, "local index types for input and output matrices should be the same"); SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld()); // Remotely fetched matrices are stored as pointers UDERA * ARecv; UDERB * BRecv; std::vector< UDERO > toconcatenate; int Aself = (A.commGrid)->GetRankInProcRow(); int Bself = (B.commGrid)->GetRankInProcCol(); for(int p = 0; p< phases; ++p) { SpParHelper::GetSetSizes( PiecesOfB[p], BRecvSizes, (B.commGrid)->GetColWorld()); std::vector< SpTuples<LIC,NUO> *> tomerge; for(int i = 0; i < stages; ++i) { std::vector<LIA> ess; if(i == Aself) ARecv = A.spSeq; // shallow-copy else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row ARecv = new UDERA(); // first, create the object } #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); t0 = MPI_Wtime(); #endif SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); t1 = MPI_Wtime(); mcl_Abcasttime += (t1-t0); #endif ess.clear(); if(i == Bself) BRecv = &(PiecesOfB[p]); // shallow-copy else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) ess[j] = BRecvSizes[j][i]; BRecv = new UDERB(); } #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); double t2=MPI_Wtime(); #endif SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); double t3=MPI_Wtime(); mcl_Bbcasttime += (t3-t2); #endif #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); double t4=MPI_Wtime(); #endif SpTuples<LIC,NUO> * C_cont; if(computationKernel == 1) C_cont = LocalSpGEMMHash<SR, NUO>(*ARecv, *BRecv,i != Aself, i != Bself, false); // Hash SpGEMM without per-column sorting else if(computationKernel == 2) C_cont=LocalSpGEMM<SR, NUO>(*ARecv, *BRecv,i != Aself, i != Bself); #ifdef TIMING 
MPI_Barrier(A.getcommgrid()->GetWorld()); double t5=MPI_Wtime(); mcl_localspgemmtime += (t5-t4); #endif if(!C_cont->isZero()) tomerge.push_back(C_cont); else delete C_cont; } // all stages executed #ifdef SHOW_MEMORY_USAGE int64_t gcnnz_unmerged, lcnnz_unmerged = 0; for(size_t i = 0; i < tomerge.size(); ++i) { lcnnz_unmerged += tomerge[i]->getnnz(); } MPI_Allreduce(&lcnnz_unmerged, &gcnnz_unmerged, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD); int64_t summa_memory = gcnnz_unmerged*20;//(gannz*2 + phase_nnz + gcnnz_unmerged + gannz + gannz/phases) * 20; // last two for broadcasts if(myrank==0) { if(summa_memory>1000000000) std::cout << p+1 << ". unmerged: " << summa_memory/1000000000.00 << "GB " ; else std::cout << p+1 << ". unmerged: " << summa_memory/1000000.00 << " MB " ; } #endif #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); double t6=MPI_Wtime(); #endif // TODO: MultiwayMerge can directly return UDERO inorder to avoid the extra copy SpTuples<LIC,NUO> * OnePieceOfC_tuples; if(computationKernel == 1) OnePieceOfC_tuples = MultiwayMergeHash<SR>(tomerge, C_m, PiecesOfB[p].getncol(), true, false); else if(computationKernel == 2) OnePieceOfC_tuples = MultiwayMerge<SR>(tomerge, C_m, PiecesOfB[p].getncol(), true); #ifdef SHOW_MEMORY_USAGE int64_t gcnnz_merged, lcnnz_merged ; lcnnz_merged = OnePieceOfC_tuples->getnnz(); MPI_Allreduce(&lcnnz_merged, &gcnnz_merged, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD); // TODO: we can remove gcnnz_merged memory here because we don't need to concatenate anymore int64_t merge_memory = gcnnz_merged*2*20;//(gannz*2 + phase_nnz + gcnnz_unmerged + gcnnz_merged*2) * 20; if(myrank==0) { if(merge_memory>1000000000) std::cout << " merged: " << merge_memory/1000000000.00 << "GB " ; else std::cout << " merged: " << merge_memory/1000000.00 << " MB " ; } #endif #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); double t7=MPI_Wtime(); mcl_multiwaymergetime += (t7-t6); #endif UDERO * OnePieceOfC = new UDERO(* OnePieceOfC_tuples, false); delete OnePieceOfC_tuples; SpParMat<IU,NUO,UDERO> OnePieceOfC_mat(OnePieceOfC, GridC); MCLPruneRecoverySelect(OnePieceOfC_mat, hardThreshold, selectNum, recoverNum, recoverPct, kselectVersion); #ifdef SHOW_MEMORY_USAGE int64_t gcnnz_pruned, lcnnz_pruned ; lcnnz_pruned = OnePieceOfC_mat.getlocalnnz(); MPI_Allreduce(&lcnnz_pruned, &gcnnz_pruned, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD); // TODO: we can remove gcnnz_merged memory here because we don't need to concatenate anymore int64_t prune_memory = gcnnz_pruned*2*20;//(gannz*2 + phase_nnz + gcnnz_pruned*2) * 20 + kselectmem; // 3 extra copies of OnePieceOfC_mat, we can make it one extra copy! 
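        // Note on the "* 20" factor used by these SHOW_MEMORY_USAGE estimates:
        // each nonzero of a tuple-based intermediate is assumed to take roughly
        // two index fields plus one value field. A minimal sketch of that
        // arithmetic, assuming 64-bit indices and a 4-byte value type (an
        // assumption, not a guarantee for every instantiation):
        //   int64_t bytesPerNnz = 2*sizeof(int64_t) + sizeof(float); // = 20
        //   int64_t prune_memory = gcnnz_pruned * 2 * bytesPerNnz;   // two live copies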
        //phase_nnz += gcnnz_pruned;
        if(myrank==0)
        {
            if(prune_memory>1000000000)
                std::cout << "Prune: " << prune_memory/1000000000.00 << "GB " << std::endl;
            else
                std::cout << "Prune: " << prune_memory/1000000.00 << " MB " << std::endl;
        }
#endif
        // ABAB: Change this to accept pointers to objects
        toconcatenate.push_back(OnePieceOfC_mat.seq());
    }

    UDERO * C = new UDERO(0,C_m, C_n,0);
    C->ColConcatenate(toconcatenate); // ABAB: Change this to accept a vector of pointers to pointers to DER objects

    SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
    SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);
    return SpParMat<IU,NUO,UDERO> (C, GridC);
}

template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
int CalculateNumberOfPhases (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B,
                             NUO hardThreshold, IU selectNum, IU recoverNum, NUO recoverPct, int kselectVersion, int64_t perProcessMemory)
{
    int phases;
    typedef typename UDERA::LocalIT LIA;
    typedef typename UDERB::LocalIT LIB;
    typedef typename UDERO::LocalIT LIC;
    int myrank;
    MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
    int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication
    std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
    double t0, t1, t2, t3, t4, t5;
    int p;
    MPI_Comm World = GridC->GetWorld();
    MPI_Comm_size(World,&p);

    int64_t perNNZMem_in = sizeof(IU)*2 + sizeof(NU1);
    int64_t perNNZMem_out = sizeof(IU)*2 + sizeof(NUO);

    // max nnz(A) in a process
    int64_t lannz = A.getlocalnnz();
    int64_t gannz;
    MPI_Allreduce(&lannz, &gannz, 1, MPIType<int64_t>(), MPI_MAX, World);
    int64_t inputMem = gannz * perNNZMem_in * 4; // for four copies (two for SUMMA)

    // max nnz(A^2) stored by SUMMA in a process
    int64_t asquareNNZ = EstPerProcessNnzSUMMA(A,B, false);
    int64_t asquareMem = asquareNNZ * perNNZMem_out * 2; // an extra copy in multiway merge and in selection/recovery step

    // estimate kselect memory
    int64_t d = ceil( (asquareNNZ * sqrt(p))/ B.getlocalcols() ); // average nnz per column in A^2 (it is an overestimate because asquareNNZ is estimated based on unmerged matrices)
    // this is equivalent to (asquareNNZ * p) / B.getncol()
    int64_t k = std::min(int64_t(std::max(selectNum, recoverNum)), d );
    int64_t kselectmem = B.getlocalcols() * k * 8 * 3;

    // estimate output memory
    int64_t outputNNZ = (B.getlocalcols() * d)/sqrt(p);
    //int64_t outputNNZ = (B.getlocalcols() * k)/sqrt(p); // if kselect is used
    int64_t outputMem = outputNNZ * perNNZMem_in * 2;

    //inputMem + outputMem + asquareMem/phases + kselectmem/phases < memory
    //int64_t remainingMem = perProcessMemory*1000000000 - inputMem - outputMem;
    int64_t remainingMem = perProcessMemory*1000000000 - inputMem; // if each phase result is discarded
    //if(remainingMem > 0)
    //{
    //    phases = 1 + (asquareMem+kselectmem) / remainingMem;
    //}
    phases = 1 + asquareMem / remainingMem;
    return phases;
}

/**
 * Parallel C = A*B routine that uses a double buffered broadcasting scheme
 * @pre { Input matrices, A and B, should not alias }
 * Most memory efficient version available.
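 *
 * A usage sketch (the semiring and storage types below are illustrative
 * choices, not requirements):
 *   typedef SpDCCols<int64_t,double> DER;
 *   typedef PlusTimesSRing<double,double> PTSR;
 *   SpParMat<int64_t,double,DER> C =
 *       Mult_AnXBn_DoubleBuff<PTSR, double, DER>(A, B, false, false);
 *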
Total stages: 2*sqrt(p) * Memory requirement during first sqrt(p) stages: <= (3/2)*(nnz(A)+nnz(B))+(1/2)*nnz(C) * Memory requirement during second sqrt(p) stages: <= nnz(A)+nnz(B)+nnz(C) * Final memory requirement: nnz(C) if clearA and clearB are true **/ template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU,NUO,UDERO> Mult_AnXBn_DoubleBuff (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false ) { if(!CheckSpGEMMCompliance(A,B) ) { return SpParMat< IU,NUO,UDERO >(); } typedef typename UDERA::LocalIT LIA; typedef typename UDERB::LocalIT LIB; typedef typename UDERO::LocalIT LIC; static_assert(std::is_same<LIA, LIB>::value, "local index types for both input matrices should be the same"); static_assert(std::is_same<LIA, LIC>::value, "local index types for input and output matrices should be the same"); int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy); LIA C_m = A.spSeq->getnrow(); LIB C_n = B.spSeq->getncol(); UDERA * A1seq = new UDERA(); UDERA * A2seq = new UDERA(); UDERB * B1seq = new UDERB(); UDERB * B2seq = new UDERB(); (A.spSeq)->Split( *A1seq, *A2seq); const_cast< UDERB* >(B.spSeq)->Transpose(); (B.spSeq)->Split( *B1seq, *B2seq); // Transpose back for the column-by-column algorithm const_cast< UDERB* >(B1seq)->Transpose(); const_cast< UDERB* >(B2seq)->Transpose(); LIA ** ARecvSizes = SpHelper::allocate2D<LIA>(UDERA::esscount, stages); LIB ** BRecvSizes = SpHelper::allocate2D<LIB>(UDERB::esscount, stages); SpParHelper::GetSetSizes( *A1seq, ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( *B1seq, BRecvSizes, (B.commGrid)->GetColWorld()); // Remotely fetched matrices are stored as pointers UDERA * ARecv; UDERB * BRecv; std::vector< SpTuples<LIC,NUO> *> tomerge; int Aself = (A.commGrid)->GetRankInProcRow(); int Bself = (B.commGrid)->GetRankInProcCol(); for(int i = 0; i < stages; ++i) { std::vector<LIA> ess; if(i == Aself) { ARecv = A1seq; // shallow-copy } else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDERA(); // first, create the object } SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements ess.clear(); if(i == Bself) { BRecv = B1seq; // shallow-copy } else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) { ess[j] = BRecvSizes[j][i]; } BRecv = new UDERB(); } SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements // before activating this remove transposing B1seq /* SpTuples<LIC,NUO> * C_cont = MultiplyReturnTuples<SR, NUO> (*ARecv, *BRecv, // parameters themselves false, true, // transpose information (B is transposed) i != Aself, // 'delete A' condition i != Bself); // 'delete B' condition */ SpTuples<LIC,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO> (*ARecv, *BRecv, // parameters themselves i != Aself, // 'delete A' condition i != Bself); // 'delete B' condition if(!C_cont->isZero()) tomerge.push_back(C_cont); else delete C_cont; } if(clearA) delete A1seq; if(clearB) delete B1seq; // Set the new dimensions SpParHelper::GetSetSizes( *A2seq, ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( *B2seq, BRecvSizes, (B.commGrid)->GetColWorld()); // Start the second round 
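    // Second round, restated for readability: per stage i, broadcast the i-th
    // piece of A2seq along the process row and the i-th piece of B2seq along
    // the process column, then accumulate LocalHybridSpGEMM(A2_i, B2_i) into
    // tomerge. Only one half of A and one half of B is resident at a time,
    // which is what halves the peak broadcast memory relative to plain SUMMA.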
for(int i = 0; i < stages; ++i) { std::vector<LIA> ess; if(i == Aself) { ARecv = A2seq; // shallow-copy } else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDERA(); // first, create the object } SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements ess.clear(); if(i == Bself) { BRecv = B2seq; // shallow-copy } else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) { ess[j] = BRecvSizes[j][i]; } BRecv = new UDERB(); } SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements // before activating this remove transposing B2seq /* SpTuples<LIC,NUO> * C_cont = MultiplyReturnTuples<SR, NUO> (*ARecv, *BRecv, // parameters themselves false, true, // transpose information (B is transposed) i != Aself, // 'delete A' condition i != Bself); // 'delete B' condition */ SpTuples<LIC,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO> (*ARecv, *BRecv, // parameters themselves i != Aself, // 'delete A' condition i != Bself); // 'delete B' condition if(!C_cont->isZero()) tomerge.push_back(C_cont); else delete C_cont; } SpHelper::deallocate2D(ARecvSizes, UDERA::esscount); SpHelper::deallocate2D(BRecvSizes, UDERB::esscount); if(clearA) { delete A2seq; delete A.spSeq; A.spSeq = NULL; } else { (A.spSeq)->Merge(*A1seq, *A2seq); delete A1seq; delete A2seq; } if(clearB) { delete B2seq; delete B.spSeq; B.spSeq = NULL; } else { B1seq->Transpose(); B2seq->Transpose(); (B.spSeq)->Merge(*B1seq, *B2seq); delete B1seq; delete B2seq; const_cast< UDERB* >(B.spSeq)->Transpose(); // transpose back to original } UDERO * C = new UDERO(MergeAll<SR>(tomerge, C_m, C_n,true), false); return SpParMat<IU,NUO,UDERO> (C, GridC); // return the result object } /** * Parallel A = B*C routine that uses only MPI-1 features * Relies on simple blocking broadcast * @pre { Input matrices, A and B, should not alias } **/ template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU, NUO, UDERO> Mult_AnXBn_Synch (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false ) { int myrank; MPI_Comm_rank(MPI_COMM_WORLD,&myrank); if(!CheckSpGEMMCompliance(A,B) ) { return SpParMat< IU,NUO,UDERO >(); } int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy); IU C_m = A.spSeq->getnrow(); IU C_n = B.spSeq->getncol(); //const_cast< UDERB* >(B.spSeq)->Transpose(); // do not transpose for colum-by-column multiplication IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages); IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages); SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld()); // Remotely fetched matrices are stored as pointers UDERA * ARecv; UDERB * BRecv; std::vector< SpTuples<IU,NUO> *> tomerge; int Aself = (A.commGrid)->GetRankInProcRow(); int Bself = (B.commGrid)->GetRankInProcCol(); for(int i = 0; i < stages; ++i) { std::vector<IU> ess; if(i == Aself) { ARecv = A.spSeq; // shallow-copy } else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDERA(); // first, 
            // ... create the object
        }
        SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements

        ess.clear();
        if(i == Bself)
        {
            BRecv = B.spSeq; // shallow-copy
        }
        else
        {
            ess.resize(UDERB::esscount);
            for(int j=0; j< UDERB::esscount; ++j)
            {
                ess[j] = BRecvSizes[j][i];
            }
            BRecv = new UDERB();
        }
        SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements

        SpTuples<IU,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>
                        (*ARecv, *BRecv, // parameters themselves
                        i != Aself,      // 'delete A' condition
                        i != Bself);     // 'delete B' condition
        if(!C_cont->isZero())
            tomerge.push_back(C_cont);
        else
            delete C_cont; // discard empty pieces instead of leaking them (matches the other SUMMA variants)
#ifdef COMBBLAS_DEBUG
        std::ostringstream outs;
        outs << i << "th SUMMA iteration"<< std::endl;
        SpParHelper::Print(outs.str());
#endif
    }
    if(clearA && A.spSeq != NULL)
    {
        delete A.spSeq;
        A.spSeq = NULL;
    }
    if(clearB && B.spSeq != NULL)
    {
        delete B.spSeq;
        B.spSeq = NULL;
    }

    SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
    SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);

    SpTuples<IU,NUO> * C_tuples = MultiwayMerge<SR>(tomerge, C_m, C_n,false);
    UDERO * C = new UDERO(*C_tuples, false);
    delete C_tuples;

    //if(!clearB)
    //    const_cast< UDERB* >(B.spSeq)->Transpose(); // transpose back to original

    return SpParMat<IU,NUO,UDERO> (C, GridC); // return the result object
}

template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU, NUO, UDERO> Mult_AnXBn_Overlap
        (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false )
{
    int myrank;
    MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
    if(!CheckSpGEMMCompliance(A,B) )
    {
        return SpParMat< IU,NUO,UDERO >();
    }
    int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication
    std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
    IU C_m = A.spSeq->getnrow();
    IU C_n = B.spSeq->getncol();

    //const_cast< UDERB* >(B.spSeq)->Transpose(); // do not transpose for column-by-column multiplication

    IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
    IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages);
    SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());
    SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld());

    // Remotely fetched matrices are stored as pointers
    UDERA ** ARecv = new UDERA* [stages];
    UDERB ** BRecv = new UDERB* [stages];

    Arr<IU,NU1> Aarrinfo = A.seqptr()->GetArrays();
    Arr<IU,NU2> Barrinfo = B.seqptr()->GetArrays();
    std::vector< std::vector<MPI_Request> > ABCastIndarrayReq;
    std::vector< std::vector<MPI_Request> > ABCastNumarrayReq;
    std::vector< std::vector<MPI_Request> > BBCastIndarrayReq;
    std::vector< std::vector<MPI_Request> > BBCastNumarrayReq;
    for(int i = 0; i < stages; i++){
        ABCastIndarrayReq.push_back( std::vector<MPI_Request>(Aarrinfo.indarrs.size(), MPI_REQUEST_NULL) );
        ABCastNumarrayReq.push_back( std::vector<MPI_Request>(Aarrinfo.numarrs.size(), MPI_REQUEST_NULL) );
        BBCastIndarrayReq.push_back( std::vector<MPI_Request>(Barrinfo.indarrs.size(), MPI_REQUEST_NULL) );
        BBCastNumarrayReq.push_back( std::vector<MPI_Request>(Barrinfo.numarrs.size(), MPI_REQUEST_NULL) );
    }

    int Aself = (A.commGrid)->GetRankInProcRow();
    int Bself = (B.commGrid)->GetRankInProcCol();

    std::vector< SpTuples<IU,NUO> *> tomerge;
    for(int i = 0; i < stages; ++i){
        std::vector<IU> ess;
        if(i == Aself)
            ARecv[i] = A.spSeq; // shallow-copy
        else{
            ess.resize(UDERA::esscount);
            for(int j=0; j< UDERA::esscount; ++j)
                ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row
            ARecv[i] = new UDERA(); // first, create the object
        }
        SpParHelper::IBCastMatrix(GridC->GetRowWorld(), *(ARecv[i]), ess, i, ABCastIndarrayReq[i], ABCastNumarrayReq[i]); // then, receive its elements

        ess.clear();
        if(i == Bself)
            BRecv[i] = B.spSeq; // shallow-copy
        else{
            ess.resize(UDERB::esscount);
            for(int j=0; j< UDERB::esscount; ++j)
                ess[j] = BRecvSizes[j][i];
            BRecv[i] = new UDERB();
        }
        SpParHelper::IBCastMatrix(GridC->GetColWorld(), *(BRecv[i]), ess, i, BBCastIndarrayReq[i], BBCastNumarrayReq[i]); // then, receive its elements

        if(i > 0){
            MPI_Waitall(ABCastIndarrayReq[i-1].size(), ABCastIndarrayReq[i-1].data(), MPI_STATUSES_IGNORE);
            MPI_Waitall(ABCastNumarrayReq[i-1].size(), ABCastNumarrayReq[i-1].data(), MPI_STATUSES_IGNORE);
            MPI_Waitall(BBCastIndarrayReq[i-1].size(), BBCastIndarrayReq[i-1].data(), MPI_STATUSES_IGNORE);
            MPI_Waitall(BBCastNumarrayReq[i-1].size(), BBCastNumarrayReq[i-1].data(), MPI_STATUSES_IGNORE);
            SpTuples<IU,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>
                            (*(ARecv[i-1]), *(BRecv[i-1]), // parameters themselves
                            i-1 != Aself,  // 'delete A' condition
                            i-1 != Bself); // 'delete B' condition
            if(!C_cont->isZero())
                tomerge.push_back(C_cont);
            else
                delete C_cont; // discard empty pieces instead of leaking them
            SpTuples<IU,NUO> * C_tuples = MultiwayMerge<SR>(tomerge, C_m, C_n,true);
            std::vector< SpTuples<IU,NUO> *>().swap(tomerge);
            tomerge.push_back(C_tuples);
        }
#ifdef COMBBLAS_DEBUG
        std::ostringstream outs;
        outs << i << "th SUMMA iteration"<< std::endl;
        SpParHelper::Print(outs.str());
#endif
    }
    MPI_Waitall(ABCastIndarrayReq[stages-1].size(), ABCastIndarrayReq[stages-1].data(), MPI_STATUSES_IGNORE);
    MPI_Waitall(ABCastNumarrayReq[stages-1].size(), ABCastNumarrayReq[stages-1].data(), MPI_STATUSES_IGNORE);
    MPI_Waitall(BBCastIndarrayReq[stages-1].size(), BBCastIndarrayReq[stages-1].data(), MPI_STATUSES_IGNORE);
    MPI_Waitall(BBCastNumarrayReq[stages-1].size(), BBCastNumarrayReq[stages-1].data(), MPI_STATUSES_IGNORE);

    SpTuples<IU,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>
                    (*(ARecv[stages-1]), *(BRecv[stages-1]), // parameters themselves
                    stages-1 != Aself,  // 'delete A' condition
                    stages-1 != Bself); // 'delete B' condition
    if(!C_cont->isZero())
        tomerge.push_back(C_cont);
    else
        delete C_cont; // discard empty pieces instead of leaking them

    if(clearA && A.spSeq != NULL)
    {
        delete A.spSeq;
        A.spSeq = NULL;
    }
    if(clearB && B.spSeq != NULL)
    {
        delete B.spSeq;
        B.spSeq = NULL;
    }
    delete [] ARecv; // arrays of pointers were allocated with new[], so pair them with delete[]
    delete [] BRecv;
    SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
    SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);

    // the last parameter to MultiwayMerge deletes the tomerge arrays
    SpTuples<IU,NUO> * C_tuples = MultiwayMerge<SR>(tomerge, C_m, C_n,true);
    std::vector< SpTuples<IU,NUO> *>().swap(tomerge);
    UDERO * C = new UDERO(*C_tuples, false);
    delete C_tuples;

    //if(!clearB)
    //    const_cast< UDERB* >(B.spSeq)->Transpose(); // transpose back to original

    return SpParMat<IU,NUO,UDERO> (C, GridC); // return the result object
}

/**
 * Estimate the maximum nnz needed to store in a process from all stages of SUMMA before reduction
 * @pre { Input matrices, A and B, should not alias }
 **/
template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
int64_t EstPerProcessNnzSUMMA(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool hashEstimate)
{
    typedef typename UDERA::LocalIT LIA;
    typedef typename UDERB::LocalIT LIB;
    static_assert(std::is_same<LIA, LIB>::value, "local index types for both input matrices should be the same");

    double t0, t1;
    int64_t nnzC_SUMMA = 0;

    if(A.getncol() != B.getnrow())
    {
        std::ostringstream outs;
match"<< std::endl; outs << A.getncol() << " != " << B.getnrow() << std::endl; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); return nnzC_SUMMA; } int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy); MPI_Barrier(GridC->GetWorld()); LIA ** ARecvSizes = SpHelper::allocate2D<LIA>(UDERA::esscount, stages); LIB ** BRecvSizes = SpHelper::allocate2D<LIB>(UDERB::esscount, stages); SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld()); // Remotely fetched matrices are stored as pointers UDERA * ARecv; UDERB * BRecv; int Aself = (A.commGrid)->GetRankInProcRow(); int Bself = (B.commGrid)->GetRankInProcCol(); for(int i = 0; i < stages; ++i) { std::vector<LIA> ess; if(i == Aself) { ARecv = A.spSeq; // shallow-copy } else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDERA(); // first, create the object } SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements ess.clear(); if(i == Bself) { BRecv = B.spSeq; // shallow-copy } else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) { ess[j] = BRecvSizes[j][i]; } BRecv = new UDERB(); } SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements // no need to keep entries of colnnzC in larger precision // because colnnzC is of length nzc and estimates nnzs per column // @OGUZ-EDIT Using hash spgemm for estimation //LIB * colnnzC = estimateNNZ(*ARecv, *BRecv); LIB* flopC = estimateFLOP(*ARecv, *BRecv); LIB* colnnzC = estimateNNZ_Hash(*ARecv, *BRecv, flopC); LIB nzc = BRecv->GetDCSC()->nzc; if (flopC) delete [] flopC; if(colnnzC) delete [] colnnzC; // sampling-based estimation (comment the estimation above, and // comment out below to use) // int64_t nnzC_stage = estimateNNZ_sampling(*ARecv, *BRecv); // nnzC_SUMMA += nnzC_stage; // delete received data if(i != Aself) delete ARecv; if(i != Bself) delete BRecv; } SpHelper::deallocate2D(ARecvSizes, UDERA::esscount); SpHelper::deallocate2D(BRecvSizes, UDERB::esscount); int64_t nnzC_SUMMA_max = 0; MPI_Allreduce(&nnzC_SUMMA, &nnzC_SUMMA_max, 1, MPIType<int64_t>(), MPI_MAX, GridC->GetWorld()); return nnzC_SUMMA_max; } template <typename MATRIX, typename VECTOR> void CheckSpMVCompliance(const MATRIX & A, const VECTOR & x) { if(A.getncol() != x.TotalLength()) { std::ostringstream outs; outs << "Can not multiply, dimensions does not match"<< std::endl; outs << A.getncol() << " != " << x.TotalLength() << std::endl; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); } if(! 
( *(A.getcommgrid()) == *(x.getcommgrid())) ) { std::cout << "Grids are not comparable for SpMV" << std::endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); } } template <typename SR, typename IU, typename NUM, typename UDER> FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<int32_t, typename promote_trait<NUM,IU>::T_promote > & optbuf); template <typename SR, typename IU, typename NUM, typename UDER> FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue) { typedef typename promote_trait<NUM,IU>::T_promote T_promote; OptBuf<int32_t, T_promote > optbuf = OptBuf<int32_t, T_promote >(); return SpMV<SR>(A, x, indexisvalue, optbuf); } /** * Step 1 of the sparse SpMV algorithm * @param[in,out] trxlocnz, lenuntil,trxinds,trxnums { set or allocated } * @param[in] indexisvalue **/ template<typename IU, typename NV> void TransposeVector(MPI_Comm & World, const FullyDistSpVec<IU,NV> & x, int32_t & trxlocnz, IU & lenuntil, int32_t * & trxinds, NV * & trxnums, bool indexisvalue) { int32_t xlocnz = (int32_t) x.getlocnnz(); int32_t roffst = (int32_t) x.RowLenUntil(); // since trxinds is int32_t int32_t roffset; IU luntil = x.LengthUntil(); int diagneigh = x.commGrid->GetComplementRank(); MPI_Status status; MPI_Sendrecv(&roffst, 1, MPIType<int32_t>(), diagneigh, TROST, &roffset, 1, MPIType<int32_t>(), diagneigh, TROST, World, &status); MPI_Sendrecv(&xlocnz, 1, MPIType<int32_t>(), diagneigh, TRNNZ, &trxlocnz, 1, MPIType<int32_t>(), diagneigh, TRNNZ, World, &status); MPI_Sendrecv(&luntil, 1, MPIType<IU>(), diagneigh, TRLUT, &lenuntil, 1, MPIType<IU>(), diagneigh, TRLUT, World, &status); // ABAB: Important observation is that local indices (given by x.ind) is 32-bit addressible // Copy them to 32 bit integers and transfer that to save 50% of off-node bandwidth trxinds = new int32_t[trxlocnz]; int32_t * temp_xind = new int32_t[xlocnz]; #ifdef THREADED #pragma omp parallel for #endif for(int i=0; i< xlocnz; ++i) temp_xind[i] = (int32_t) x.ind[i]; MPI_Sendrecv(temp_xind, xlocnz, MPIType<int32_t>(), diagneigh, TRI, trxinds, trxlocnz, MPIType<int32_t>(), diagneigh, TRI, World, &status); delete [] temp_xind; if(!indexisvalue) { trxnums = new NV[trxlocnz]; MPI_Sendrecv(const_cast<NV*>(SpHelper::p2a(x.num)), xlocnz, MPIType<NV>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<NV>(), diagneigh, TRX, World, &status); } std::transform(trxinds, trxinds+trxlocnz, trxinds, std::bind2nd(std::plus<int32_t>(), roffset)); // fullydist indexing (p pieces) -> matrix indexing (sqrt(p) pieces) } /** * Step 2 of the sparse SpMV algorithm * @param[in,out] trxinds, trxnums { deallocated } * @param[in,out] indacc, numacc { allocated } * @param[in,out] accnz { set } * @param[in] trxlocnz, lenuntil, indexisvalue **/ template<typename IU, typename NV> void AllGatherVector(MPI_Comm & ColWorld, int trxlocnz, IU lenuntil, int32_t * & trxinds, NV * & trxnums, int32_t * & indacc, NV * & numacc, int & accnz, bool indexisvalue) { int colneighs, colrank; MPI_Comm_size(ColWorld, &colneighs); MPI_Comm_rank(ColWorld, &colrank); int * colnz = new int[colneighs]; colnz[colrank] = trxlocnz; MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colnz, 1, MPI_INT, ColWorld); int * dpls = new int[colneighs](); // displacements (zero initialized pid) std::partial_sum(colnz, colnz+colneighs-1, dpls+1); accnz = std::accumulate(colnz, colnz+colneighs, 0); indacc = new 
int32_t[accnz]; numacc = new NV[accnz]; // ABAB: Future issues here, colnz is of type int (MPI limitation) // What if the aggregate vector size along the processor row/column is not 32-bit addressible? // This will happen when n/sqrt(p) > 2^31 // Currently we can solve a small problem (scale 32) with 4096 processor // For a medium problem (scale 35), we'll need 32K processors which gives sqrt(p) ~ 180 // 2^35 / 180 ~ 2^29 / 3 which is not an issue ! #ifdef TIMING double t0=MPI_Wtime(); #endif MPI_Allgatherv(trxinds, trxlocnz, MPIType<int32_t>(), indacc, colnz, dpls, MPIType<int32_t>(), ColWorld); delete [] trxinds; if(indexisvalue) { IU lenuntilcol; if(colrank == 0) lenuntilcol = lenuntil; MPI_Bcast(&lenuntilcol, 1, MPIType<IU>(), 0, ColWorld); for(int i=0; i< accnz; ++i) // fill numerical values from indices { numacc[i] = indacc[i] + lenuntilcol; } } else { MPI_Allgatherv(trxnums, trxlocnz, MPIType<NV>(), numacc, colnz, dpls, MPIType<NV>(), ColWorld); delete [] trxnums; } #ifdef TIMING double t1=MPI_Wtime(); cblas_allgathertime += (t1-t0); #endif DeleteAll(colnz,dpls); } /** * Step 3 of the sparse SpMV algorithm, with the semiring * @param[in,out] optbuf {scratch space for all-to-all (fold) communication} * @param[in,out] indacc, numacc {index and values of the input vector, deleted upon exit} * @param[in,out] sendindbuf, sendnumbuf {index and values of the output vector, created} **/ template<typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER> void LocalSpMV(const SpParMat<IU,NUM,UDER> & A, int rowneighs, OptBuf<int32_t, OVT > & optbuf, int32_t * & indacc, IVT * & numacc, int32_t * & sendindbuf, OVT * & sendnumbuf, int * & sdispls, int * sendcnt, int accnz, bool indexisvalue, PreAllocatedSPA<OVT> & SPA) { if(optbuf.totmax > 0) // graph500 optimization enabled { if(A.spSeq->getnsplit() > 0) { // optbuf.{inds/nums/dspls} and sendcnt are all pre-allocated and only filled by dcsc_gespmv_threaded generic_gespmv_threaded_setbuffers<SR> (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs); } else { generic_gespmv<SR> (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs, indexisvalue); } DeleteAll(indacc,numacc); } else { if(A.spSeq->getnsplit() > 0) { // sendindbuf/sendnumbuf/sdispls are all allocated and filled by dcsc_gespmv_threaded int totalsent = generic_gespmv_threaded<SR> (*(A.spSeq), indacc, numacc, accnz, sendindbuf, sendnumbuf, sdispls, rowneighs, SPA); DeleteAll(indacc, numacc); for(int i=0; i<rowneighs-1; ++i) sendcnt[i] = sdispls[i+1] - sdispls[i]; sendcnt[rowneighs-1] = totalsent - sdispls[rowneighs-1]; } else { // default SpMSpV std::vector< int32_t > indy; std::vector< OVT > numy; generic_gespmv<SR>(*(A.spSeq), indacc, numacc, accnz, indy, numy, SPA); DeleteAll(indacc, numacc); int32_t bufsize = indy.size(); // as compact as possible sendindbuf = new int32_t[bufsize]; sendnumbuf = new OVT[bufsize]; int32_t perproc = A.getlocalrows() / rowneighs; int k = 0; // index to buffer for(int i=0; i<rowneighs; ++i) { int32_t end_this = (i==rowneighs-1) ? 
A.getlocalrows(): (i+1)*perproc; while(k < bufsize && indy[k] < end_this) { sendindbuf[k] = indy[k] - i*perproc; sendnumbuf[k] = numy[k]; ++sendcnt[i]; ++k; } } sdispls = new int[rowneighs](); std::partial_sum(sendcnt, sendcnt+rowneighs-1, sdispls+1); //#endif } } } // non threaded template <typename SR, typename IU, typename OVT> void MergeContributions(int* listSizes, std::vector<int32_t *> & indsvec, std::vector<OVT *> & numsvec, std::vector<IU>& mergedind, std::vector<OVT>& mergednum) { int nlists = indsvec.size(); // this condition is checked in the caller SpMV function. // I am still putting it here for completeness if(nlists == 1) { // simply copy data int veclen = listSizes[0]; mergedind.resize(veclen); mergednum.resize(veclen); for(int i=0; i<veclen; i++) { mergedind[i] = indsvec[0][i]; mergednum[i] = numsvec[0][i]; } return; } int32_t hsize = 0; int32_t inf = std::numeric_limits<int32_t>::min(); int32_t sup = std::numeric_limits<int32_t>::max(); KNHeap< int32_t, int32_t > sHeap(sup, inf); int * processed = new int[nlists](); for(int i=0; i<nlists; ++i) { if(listSizes[i] > 0) { // key, list_id sHeap.insert(indsvec[i][0], i); ++hsize; } } int32_t key, locv; if(hsize > 0) { sHeap.deleteMin(&key, &locv); mergedind.push_back( static_cast<IU>(key)); mergednum.push_back(numsvec[locv][0]); // nothing is processed yet if( (++(processed[locv])) < listSizes[locv] ) sHeap.insert(indsvec[locv][processed[locv]], locv); else --hsize; } while(hsize > 0) { sHeap.deleteMin(&key, &locv); if(mergedind.back() == static_cast<IU>(key)) { mergednum.back() = SR::add(mergednum.back(), numsvec[locv][processed[locv]]); // ABAB: Benchmark actually allows us to be non-deterministic in terms of parent selection // We can just skip this addition operator (if it's a max/min select) } else { mergedind.push_back(static_cast<IU>(key)); mergednum.push_back(numsvec[locv][processed[locv]]); } if( (++(processed[locv])) < listSizes[locv] ) sHeap.insert(indsvec[locv][processed[locv]], locv); else --hsize; } DeleteAll(processed); } template <typename SR, typename IU, typename OVT> void MergeContributions_threaded(int * & listSizes, std::vector<int32_t *> & indsvec, std::vector<OVT *> & numsvec, std::vector<IU> & mergedind, std::vector<OVT> & mergednum, IU maxindex) { int nlists = indsvec.size(); // this condition is checked in the caller SpMV function. 
// I am still putting it here for completeness if(nlists == 1) { // simply copy data int veclen = listSizes[0]; mergedind.resize(veclen); mergednum.resize(veclen); #ifdef THREADED #pragma omp parallel for #endif for(int i=0; i<veclen; i++) { mergedind[i] = indsvec[0][i]; mergednum[i] = numsvec[0][i]; } return; } int nthreads=1; #ifdef THREADED #pragma omp parallel { nthreads = omp_get_num_threads(); } #endif int nsplits = 4*nthreads; // oversplit for load balance nsplits = std::min(nsplits, (int)maxindex); std::vector< std::vector<int32_t> > splitters(nlists); for(int k=0; k< nlists; k++) { splitters[k].resize(nsplits+1); splitters[k][0] = static_cast<int32_t>(0); #pragma omp parallel for for(int i=1; i< nsplits; i++) { IU cur_idx = i * (maxindex/nsplits); auto it = std::lower_bound (indsvec[k], indsvec[k] + listSizes[k], cur_idx); splitters[k][i] = (int32_t) (it - indsvec[k]); } splitters[k][nsplits] = listSizes[k]; } // ------ perform merge in parallel ------ std::vector<std::vector<IU>> indsBuf(nsplits); std::vector<std::vector<OVT>> numsBuf(nsplits); //TODO: allocate these vectors here before calling MergeContributions #pragma omp parallel for schedule(dynamic) for(int i=0; i< nsplits; i++) { std::vector<int32_t *> tIndsVec(nlists); std::vector<OVT *> tNumsVec(nlists); std::vector<int> tLengths(nlists); for(int j=0; j< nlists; ++j) { tIndsVec[j] = indsvec[j] + splitters[j][i]; tNumsVec[j] = numsvec[j] + splitters[j][i]; tLengths[j]= splitters[j][i+1] - splitters[j][i]; } MergeContributions<SR>(tLengths.data(), tIndsVec, tNumsVec, indsBuf[i], numsBuf[i]); } // ------ concatenate merged tuples processed by threads ------ std::vector<IU> tdisp(nsplits+1); tdisp[0] = 0; for(int i=0; i<nsplits; ++i) { tdisp[i+1] = tdisp[i] + indsBuf[i].size(); } mergedind.resize(tdisp[nsplits]); mergednum.resize(tdisp[nsplits]); #pragma omp parallel for schedule(dynamic) for(int i=0; i< nsplits; i++) { std::copy(indsBuf[i].data() , indsBuf[i].data() + indsBuf[i].size(), mergedind.data() + tdisp[i]); std::copy(numsBuf[i].data() , numsBuf[i].data() + numsBuf[i].size(), mergednum.data() + tdisp[i]); } } /** * This version is the most flexible sparse matrix X sparse vector [Used in KDT] * It accepts different types for the matrix (NUM), the input vector (IVT) and the output vector (OVT) * without relying on automatic type promotion * Input (x) and output (y) vectors can be ALIASED because y is not written until the algorithm is done with x. 
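 *
 * A usage sketch (the semiring and scalar types below are illustrative
 * assumptions, not requirements):
 *   OptBuf<int32_t,double> optbuf;
 *   PreAllocatedSPA<double> SPA;
 *   FullyDistSpVec<int64_t,double> y(x.getcommgrid(), A.getnrow());
 *   SpMV< PlusTimesSRing<double,double> >(A, x, y, false, optbuf, SPA);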
*/ template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER> void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, OptBuf<int32_t, OVT > & optbuf, PreAllocatedSPA<OVT> & SPA) { CheckSpMVCompliance(A,x); optbuf.MarkEmpty(); y.glen = A.getnrow(); // in case it is not set already MPI_Comm World = x.commGrid->GetWorld(); MPI_Comm ColWorld = x.commGrid->GetColWorld(); MPI_Comm RowWorld = x.commGrid->GetRowWorld(); int accnz; int32_t trxlocnz; IU lenuntil; int32_t *trxinds, *indacc; IVT *trxnums, *numacc; #ifdef TIMING double t0=MPI_Wtime(); #endif TransposeVector(World, x, trxlocnz, lenuntil, trxinds, trxnums, indexisvalue); #ifdef TIMING double t1=MPI_Wtime(); cblas_transvectime += (t1-t0); #endif if(x.commGrid->GetGridRows() > 1) { AllGatherVector(ColWorld, trxlocnz, lenuntil, trxinds, trxnums, indacc, numacc, accnz, indexisvalue); // trxindS/trxnums deallocated, indacc/numacc allocated, accnz set } else { accnz = trxlocnz; indacc = trxinds; // aliasing ptr numacc = trxnums; // aliasing ptr } int rowneighs; MPI_Comm_size(RowWorld, &rowneighs); int * sendcnt = new int[rowneighs](); int32_t * sendindbuf; OVT * sendnumbuf; int * sdispls; #ifdef TIMING double t2=MPI_Wtime(); #endif LocalSpMV<SR>(A, rowneighs, optbuf, indacc, numacc, sendindbuf, sendnumbuf, sdispls, sendcnt, accnz, indexisvalue, SPA); // indacc/numacc deallocated, sendindbuf/sendnumbuf/sdispls allocated #ifdef TIMING double t3=MPI_Wtime(); cblas_localspmvtime += (t3-t2); #endif if(x.commGrid->GetGridCols() == 1) { y.ind.resize(sendcnt[0]); y.num.resize(sendcnt[0]); if(optbuf.totmax > 0 ) // graph500 optimization enabled { #ifdef THREADED #pragma omp parallel for #endif for(int i=0; i<sendcnt[0]; i++) { y.ind[i] = optbuf.inds[i]; y.num[i] = optbuf.nums[i]; } } else { #ifdef THREADED #pragma omp parallel for #endif for(int i=0; i<sendcnt[0]; i++) { y.ind[i] = sendindbuf[i]; y.num[i] = sendnumbuf[i]; } DeleteAll(sendindbuf, sendnumbuf,sdispls); } delete [] sendcnt; return; } int * rdispls = new int[rowneighs]; int * recvcnt = new int[rowneighs]; MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld); // share the request counts // receive displacements are exact whereas send displacements have slack rdispls[0] = 0; for(int i=0; i<rowneighs-1; ++i) { rdispls[i+1] = rdispls[i] + recvcnt[i]; } int totrecv = std::accumulate(recvcnt,recvcnt+rowneighs,0); int32_t * recvindbuf = new int32_t[totrecv]; OVT * recvnumbuf = new OVT[totrecv]; #ifdef TIMING double t4=MPI_Wtime(); #endif if(optbuf.totmax > 0 ) // graph500 optimization enabled { MPI_Alltoallv(optbuf.inds, sendcnt, optbuf.dspls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld); MPI_Alltoallv(optbuf.nums, sendcnt, optbuf.dspls, MPIType<OVT>(), recvnumbuf, recvcnt, rdispls, MPIType<OVT>(), RowWorld); delete [] sendcnt; } else { MPI_Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld); MPI_Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<OVT>(), recvnumbuf, recvcnt, rdispls, MPIType<OVT>(), RowWorld); DeleteAll(sendindbuf, sendnumbuf, sendcnt, sdispls); } #ifdef TIMING double t5=MPI_Wtime(); cblas_alltoalltime += (t5-t4); #endif #ifdef TIMING double t6=MPI_Wtime(); #endif //MergeContributions<SR>(y,recvcnt, rdispls, recvindbuf, recvnumbuf, rowneighs); // free memory of y, in case it was aliased std::vector<IU>().swap(y.ind); std::vector<OVT>().swap(y.num); std::vector<int32_t 
*> indsvec(rowneighs); std::vector<OVT *> numsvec(rowneighs); #ifdef THREADED #pragma omp parallel for #endif for(int i=0; i<rowneighs; i++) { indsvec[i] = recvindbuf+rdispls[i]; numsvec[i] = recvnumbuf+rdispls[i]; } #ifdef THREADED MergeContributions_threaded<SR>(recvcnt, indsvec, numsvec, y.ind, y.num, y.MyLocLength()); #else MergeContributions<SR>(recvcnt, indsvec, numsvec, y.ind, y.num); #endif DeleteAll(recvcnt, rdispls,recvindbuf, recvnumbuf); #ifdef TIMING double t7=MPI_Wtime(); cblas_mergeconttime += (t7-t6); #endif } template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER> void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, PreAllocatedSPA<OVT> & SPA) { OptBuf< int32_t, OVT > optbuf = OptBuf< int32_t,OVT >(); SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA); } template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER> void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue) { OptBuf< int32_t, OVT > optbuf = OptBuf< int32_t,OVT >(); PreAllocatedSPA<OVT> SPA; SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA); } template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER> void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, OptBuf<int32_t, OVT > & optbuf) { PreAllocatedSPA<OVT> SPA; SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA); } /** * Automatic type promotion is ONLY done here, all the callee functions (in Friends.h and below) are initialized with the promoted type * If indexisvalues = true, then we do not need to transfer values for x (happens for BFS iterations with boolean matrices and integer rhs vectors) **/ template <typename SR, typename IU, typename NUM, typename UDER> FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<int32_t, typename promote_trait<NUM,IU>::T_promote > & optbuf) { typedef typename promote_trait<NUM,IU>::T_promote T_promote; FullyDistSpVec<IU, T_promote> y ( x.getcommgrid(), A.getnrow()); // identity doesn't matter for sparse vectors SpMV<SR>(A, x, y, indexisvalue, optbuf); return y; } /** * Parallel dense SpMV **/ template <typename SR, typename IU, typename NUM, typename NUV, typename UDER> FullyDistVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistVec<IU,NUV> & x ) { typedef typename promote_trait<NUM,NUV>::T_promote T_promote; CheckSpMVCompliance(A, x); MPI_Comm World = x.commGrid->GetWorld(); MPI_Comm ColWorld = x.commGrid->GetColWorld(); MPI_Comm RowWorld = x.commGrid->GetRowWorld(); int xsize = (int) x.LocArrSize(); int trxsize = 0; int diagneigh = x.commGrid->GetComplementRank(); MPI_Status status; MPI_Sendrecv(&xsize, 1, MPI_INT, diagneigh, TRX, &trxsize, 1, MPI_INT, diagneigh, TRX, World, &status); NUV * trxnums = new NUV[trxsize]; MPI_Sendrecv(const_cast<NUV*>(SpHelper::p2a(x.arr)), xsize, MPIType<NUV>(), diagneigh, TRX, trxnums, trxsize, MPIType<NUV>(), diagneigh, TRX, World, &status); int colneighs, colrank; MPI_Comm_size(ColWorld, &colneighs); MPI_Comm_rank(ColWorld, &colrank); int * colsize = new int[colneighs]; colsize[colrank] = trxsize; MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colsize, 1, MPI_INT, ColWorld); int * dpls = new int[colneighs](); // displacements (zero initialized 
pid) std::partial_sum(colsize, colsize+colneighs-1, dpls+1); int accsize = std::accumulate(colsize, colsize+colneighs, 0); NUV * numacc = new NUV[accsize]; MPI_Allgatherv(trxnums, trxsize, MPIType<NUV>(), numacc, colsize, dpls, MPIType<NUV>(), ColWorld); delete [] trxnums; // serial SpMV with dense vector T_promote id = SR::id(); IU ysize = A.getlocalrows(); T_promote * localy = new T_promote[ysize]; std::fill_n(localy, ysize, id); #ifdef THREADED dcsc_gespmv_threaded<SR>(*(A.spSeq), numacc, localy); #else dcsc_gespmv<SR>(*(A.spSeq), numacc, localy); #endif DeleteAll(numacc,colsize, dpls); // FullyDistVec<IT,NT>(shared_ptr<CommGrid> grid, IT globallen, NT initval, NT id) FullyDistVec<IU, T_promote> y ( x.commGrid, A.getnrow(), id); int rowneighs; MPI_Comm_size(RowWorld, &rowneighs); IU begptr, endptr; for(int i=0; i< rowneighs; ++i) { begptr = y.RowLenUntil(i); if(i == rowneighs-1) { endptr = ysize; } else { endptr = y.RowLenUntil(i+1); } MPI_Reduce(localy+begptr, SpHelper::p2a(y.arr), endptr-begptr, MPIType<T_promote>(), SR::mpi_op(), i, RowWorld); } delete [] localy; return y; } /** * \TODO: Old version that is no longer considered optimal * Kept for legacy purposes * To be removed when other functionals are fully tested. **/ template <typename SR, typename IU, typename NUM, typename NUV, typename UDER> FullyDistSpVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,NUV> & x) { typedef typename promote_trait<NUM,NUV>::T_promote T_promote; CheckSpMVCompliance(A, x); MPI_Comm World = x.commGrid->GetWorld(); MPI_Comm ColWorld = x.commGrid->GetColWorld(); MPI_Comm RowWorld = x.commGrid->GetRowWorld(); int xlocnz = (int) x.getlocnnz(); int trxlocnz = 0; int roffst = x.RowLenUntil(); int offset; int diagneigh = x.commGrid->GetComplementRank(); MPI_Status status; MPI_Sendrecv(&xlocnz, 1, MPI_INT, diagneigh, TRX, &trxlocnz, 1, MPI_INT, diagneigh, TRX, World, &status); MPI_Sendrecv(&roffst, 1, MPI_INT, diagneigh, TROST, &offset, 1, MPI_INT, diagneigh, TROST, World, &status); IU * trxinds = new IU[trxlocnz]; NUV * trxnums = new NUV[trxlocnz]; MPI_Sendrecv(const_cast<IU*>(SpHelper::p2a(x.ind)), xlocnz, MPIType<IU>(), diagneigh, TRX, trxinds, trxlocnz, MPIType<IU>(), diagneigh, TRX, World, &status); MPI_Sendrecv(const_cast<NUV*>(SpHelper::p2a(x.num)), xlocnz, MPIType<NUV>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<NUV>(), diagneigh, TRX, World, &status); std::transform(trxinds, trxinds+trxlocnz, trxinds, std::bind2nd(std::plus<IU>(), offset)); // fullydist indexing (n pieces) -> matrix indexing (sqrt(p) pieces) int colneighs, colrank; MPI_Comm_size(ColWorld, &colneighs); MPI_Comm_rank(ColWorld, &colrank); int * colnz = new int[colneighs]; colnz[colrank] = trxlocnz; MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colnz, 1, MPI_INT, ColWorld); int * dpls = new int[colneighs](); // displacements (zero initialized pid) std::partial_sum(colnz, colnz+colneighs-1, dpls+1); int accnz = std::accumulate(colnz, colnz+colneighs, 0); IU * indacc = new IU[accnz]; NUV * numacc = new NUV[accnz]; // ABAB: Future issues here, colnz is of type int (MPI limitation) // What if the aggregate vector size along the processor row/column is not 32-bit addressible? 
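    // One possible mitigation (a sketch only, not wired in here; assumes an
    // MPI-4 capable library): the large-count variants lift the 32-bit limit,
    // e.g. MPI_Allgatherv_c(trxinds, (MPI_Count)trxlocnz, MPIType<IU>(), ...);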
MPI_Allgatherv(trxinds, trxlocnz, MPIType<IU>(), indacc, colnz, dpls, MPIType<IU>(), ColWorld); MPI_Allgatherv(trxnums, trxlocnz, MPIType<NUV>(), numacc, colnz, dpls, MPIType<NUV>(), ColWorld); DeleteAll(trxinds, trxnums); // serial SpMV with sparse vector std::vector< int32_t > indy; std::vector< T_promote > numy; int32_t * tmpindacc = new int32_t[accnz]; for(int i=0; i< accnz; ++i) tmpindacc[i] = indacc[i]; delete [] indacc; dcsc_gespmv<SR>(*(A.spSeq), tmpindacc, numacc, accnz, indy, numy); // actual multiplication DeleteAll(tmpindacc, numacc); DeleteAll(colnz, dpls); FullyDistSpVec<IU, T_promote> y ( x.commGrid, A.getnrow()); // identity doesn't matter for sparse vectors IU yintlen = y.MyRowLength(); int rowneighs; MPI_Comm_size(RowWorld,&rowneighs); std::vector< std::vector<IU> > sendind(rowneighs); std::vector< std::vector<T_promote> > sendnum(rowneighs); typename std::vector<int32_t>::size_type outnz = indy.size(); for(typename std::vector<IU>::size_type i=0; i< outnz; ++i) { IU locind; int rown = y.OwnerWithinRow(yintlen, static_cast<IU>(indy[i]), locind); sendind[rown].push_back(locind); sendnum[rown].push_back(numy[i]); } IU * sendindbuf = new IU[outnz]; T_promote * sendnumbuf = new T_promote[outnz]; int * sendcnt = new int[rowneighs]; int * sdispls = new int[rowneighs]; for(int i=0; i<rowneighs; ++i) sendcnt[i] = sendind[i].size(); int * rdispls = new int[rowneighs]; int * recvcnt = new int[rowneighs]; MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld); // share the request counts sdispls[0] = 0; rdispls[0] = 0; for(int i=0; i<rowneighs-1; ++i) { sdispls[i+1] = sdispls[i] + sendcnt[i]; rdispls[i+1] = rdispls[i] + recvcnt[i]; } int totrecv = std::accumulate(recvcnt,recvcnt+rowneighs,0); IU * recvindbuf = new IU[totrecv]; T_promote * recvnumbuf = new T_promote[totrecv]; for(int i=0; i<rowneighs; ++i) { std::copy(sendind[i].begin(), sendind[i].end(), sendindbuf+sdispls[i]); std::vector<IU>().swap(sendind[i]); } for(int i=0; i<rowneighs; ++i) { std::copy(sendnum[i].begin(), sendnum[i].end(), sendnumbuf+sdispls[i]); std::vector<T_promote>().swap(sendnum[i]); } MPI_Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<IU>(), recvindbuf, recvcnt, rdispls, MPIType<IU>(), RowWorld); MPI_Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<T_promote>(), recvnumbuf, recvcnt, rdispls, MPIType<T_promote>(), RowWorld); DeleteAll(sendindbuf, sendnumbuf); DeleteAll(sendcnt, recvcnt, sdispls, rdispls); // define a SPA-like data structure IU ysize = y.MyLocLength(); T_promote * localy = new T_promote[ysize]; bool * isthere = new bool[ysize]; std::vector<IU> nzinds; // nonzero indices std::fill_n(isthere, ysize, false); for(int i=0; i< totrecv; ++i) { if(!isthere[recvindbuf[i]]) { localy[recvindbuf[i]] = recvnumbuf[i]; // initial assignment nzinds.push_back(recvindbuf[i]); isthere[recvindbuf[i]] = true; } else { localy[recvindbuf[i]] = SR::add(localy[recvindbuf[i]], recvnumbuf[i]); } } DeleteAll(isthere, recvindbuf, recvnumbuf); sort(nzinds.begin(), nzinds.end()); int nnzy = nzinds.size(); y.ind.resize(nnzy); y.num.resize(nnzy); for(int i=0; i< nnzy; ++i) { y.ind[i] = nzinds[i]; y.num[i] = localy[nzinds[i]]; } delete [] localy; return y; } // Aydin (June 2021): // This currently duplicates the work of EWiseMult with exclude = true // However, this is the right way of implementing it because it allows set difference when // the types of two matrices do not have a valid multiplication operator defined // set difference should not require such an operator so we will move all code // bases that use 
EWiseMult(..., exclude=true) to this one template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU,NU1,UDERA> SetDifference(const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B) { if(*(A.commGrid) == *(B.commGrid)) { UDERA * result = new UDERA( SetDifference(*(A.spSeq),*(B.spSeq))); return SpParMat<IU, NU1, UDERA> (result, A.commGrid); } else { std::cout << "Grids are not comparable for set difference" << std::endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return SpParMat< IU,NU1,UDERA >(); } } template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU,typename promote_trait<NU1,NU2>::T_promote,typename promote_trait<UDERA,UDERB>::T_promote> EWiseMult (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B , bool exclude) { typedef typename promote_trait<NU1,NU2>::T_promote N_promote; typedef typename promote_trait<UDERA,UDERB>::T_promote DER_promote; if(*(A.commGrid) == *(B.commGrid)) { DER_promote * result = new DER_promote( EWiseMult(*(A.spSeq),*(B.spSeq),exclude) ); return SpParMat<IU, N_promote, DER_promote> (result, A.commGrid); } else { std::cout << "Grids are not comparable elementwise multiplication" << std::endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return SpParMat< IU,N_promote,DER_promote >(); } } template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation> SpParMat<IU,RETT,RETDER> EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, bool notB, const NU2& defaultBVal) { if(*(A.commGrid) == *(B.commGrid)) { RETDER * result = new RETDER( EWiseApply<RETT>(*(A.spSeq),*(B.spSeq), __binary_op, notB, defaultBVal) ); return SpParMat<IU, RETT, RETDER> (result, A.commGrid); } else { std::cout << "Grids are not comparable elementwise apply" << std::endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return SpParMat< IU,RETT,RETDER >(); } } template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation, typename _BinaryPredicate> SpParMat<IU,RETT,RETDER> EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect, const bool useExtendedBinOp) { if(*(A.commGrid) == *(B.commGrid)) { RETDER * result = new RETDER( EWiseApply<RETT>(*(A.spSeq),*(B.spSeq), __binary_op, do_op, allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect) ); return SpParMat<IU, RETT, RETDER> (result, A.commGrid); } else { std::cout << "Grids are not comparable elementwise apply" << std::endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return SpParMat< IU,RETT,RETDER >(); } } // plain adapter template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation, typename _BinaryPredicate> SpParMat<IU,RETT,RETDER> EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect = true) { return EWiseApply<RETT, RETDER>(A, B, EWiseExtToPlainAdapter<RETT, NU1, NU2, _BinaryOperation>(__binary_op), EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(do_op), allowANulls, allowBNulls, ANullVal, 
BNullVal, allowIntersect, true); } // end adapter /** * if exclude is true, then we prune all entries W[i] != zero from V * if exclude is false, then we perform a proper elementwise multiplication **/ template <typename IU, typename NU1, typename NU2> FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote> EWiseMult (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero) { typedef typename promote_trait<NU1,NU2>::T_promote T_promote; if(*(V.commGrid) == *(W.commGrid)) { FullyDistSpVec< IU, T_promote> Product(V.commGrid); if(V.glen != W.glen) { std::cerr << "Vector dimensions don't match for EWiseMult\n"; MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); } else { Product.glen = V.glen; IU size= V.getlocnnz(); if(exclude) { #if defined(_OPENMP) && defined(CBLAS_EXPERIMENTAL) // not faster than serial int actual_splits = cblas_splits * 1; // 1 is the parallel slackness std::vector <IU> tlosizes (actual_splits, 0); std::vector < std::vector<IU> > tlinds(actual_splits); std::vector < std::vector<T_promote> > tlnums(actual_splits); IU tlsize = size / actual_splits; #pragma omp parallel for //schedule(dynamic, 1) for(IU t = 0; t < actual_splits; ++t) { IU tlbegin = t*tlsize; IU tlend = (t==actual_splits-1)? size : (t+1)*tlsize; for(IU i=tlbegin; i<tlend; ++i) { if(W.arr[V.ind[i]] == zero) // keep only those { tlinds[t].push_back(V.ind[i]); tlnums[t].push_back(V.num[i]); tlosizes[t]++; } } } std::vector<IU> prefix_sum(actual_splits+1,0); std::partial_sum(tlosizes.begin(), tlosizes.end(), prefix_sum.begin()+1); Product.ind.resize(prefix_sum[actual_splits]); Product.num.resize(prefix_sum[actual_splits]); #pragma omp parallel for //schedule(dynamic, 1) for(IU t=0; t< actual_splits; ++t) { std::copy(tlinds[t].begin(), tlinds[t].end(), Product.ind.begin()+prefix_sum[t]); std::copy(tlnums[t].begin(), tlnums[t].end(), Product.num.begin()+prefix_sum[t]); } #else for(IU i=0; i<size; ++i) { if(W.arr[V.ind[i]] == zero) // keep only those { Product.ind.push_back(V.ind[i]); Product.num.push_back(V.num[i]); } } #endif } else { for(IU i=0; i<size; ++i) { if(W.arr[V.ind[i]] != zero) // keep only those { Product.ind.push_back(V.ind[i]); Product.num.push_back(V.num[i] * W.arr[V.ind[i]]); } } } } return Product; } else { std::cout << "Grids are not comparable elementwise multiplication" << std::endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return FullyDistSpVec< IU,T_promote>(); } } /** Threaded EWiseApply. Only called internally from EWiseApply. 
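 * Partitioning sketch (a restatement of the code below): with allowVNulls the
 * dense range [0, W.LocArrSize()) is split evenly across threads and each
 * thread locates its sparse starting offset via std::lower_bound on V.ind;
 * otherwise the nonzeros [0, V.getlocnnz()) are split directly. Per-thread
 * outputs are then concatenated using a prefix sum of their sizes.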
**/ template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate> FullyDistSpVec<IU,RET> EWiseApply_threaded (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp) { typedef RET T_promote; //typedef typename promote_trait<NU1,NU2>::T_promote T_promote; if(*(V.commGrid) == *(W.commGrid)) { FullyDistSpVec< IU, T_promote> Product(V.commGrid); if(V.TotalLength() != W.TotalLength()) { std::ostringstream outs; outs << "Vector dimensions don't match (" << V.TotalLength() << " vs " << W.TotalLength() << ") for EWiseApply (short version)\n"; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); } else { int nthreads=1; #ifdef _OPENMP #pragma omp parallel { nthreads = omp_get_num_threads(); } #endif Product.glen = V.glen; IU size= W.LocArrSize(); IU spsize = V.getlocnnz(); // temporary result vectors per thread std::vector<std::vector<IU>> tProductInd(nthreads); std::vector<std::vector<T_promote>> tProductVal(nthreads); IU perthread; //chunk of tProductInd or tProductVal allocated to each thread if (allowVNulls) perthread = size/nthreads; else perthread = spsize/nthreads; #ifdef _OPENMP #pragma omp parallel #endif { int curthread = 0; #ifdef _OPENMP curthread = omp_get_thread_num(); #endif IU tStartIdx = perthread * curthread; IU tNextIdx = perthread * (curthread+1); if (allowVNulls) { if(curthread == nthreads-1) tNextIdx = size; // get sparse part for the current thread auto it = std::lower_bound (V.ind.begin(), V.ind.end(), tStartIdx); IU tSpIdx = (IU) std::distance(V.ind.begin(), it); // iterate over the dense vector for(IU tIdx=tStartIdx; tIdx < tNextIdx; ++tIdx) { if(tSpIdx < spsize && V.ind[tSpIdx] < tNextIdx && V.ind[tSpIdx] == tIdx) { if (_doOp(V.num[tSpIdx], W.arr[tIdx], false, false)) { tProductInd[curthread].push_back(tIdx); tProductVal[curthread].push_back (_binary_op(V.num[tSpIdx], W.arr[tIdx], false, false)); } tSpIdx++; } else { if (_doOp(Vzero, W.arr[tIdx], true, false)) { tProductInd[curthread].push_back(tIdx); tProductVal[curthread].push_back (_binary_op(Vzero, W.arr[tIdx], true, false)); } } } } else // iterate over the sparse vector { if(curthread == nthreads-1) tNextIdx = spsize; for(IU tSpIdx=tStartIdx; tSpIdx < tNextIdx; ++tSpIdx) { if (_doOp(V.num[tSpIdx], W.arr[V.ind[tSpIdx]], false, false)) { tProductInd[curthread].push_back( V.ind[tSpIdx]); tProductVal[curthread].push_back (_binary_op(V.num[tSpIdx], W.arr[V.ind[tSpIdx]], false, false)); } } } } std::vector<IU> tdisp(nthreads+1); tdisp[0] = 0; for(int i=0; i<nthreads; ++i) { tdisp[i+1] = tdisp[i] + tProductInd[i].size(); } // copy results from temporary vectors Product.ind.resize(tdisp[nthreads]); Product.num.resize(tdisp[nthreads]); #ifdef _OPENMP #pragma omp parallel #endif { int curthread = 0; #ifdef _OPENMP curthread = omp_get_thread_num(); #endif std::copy(tProductInd[curthread].begin(), tProductInd[curthread].end(), Product.ind.data() + tdisp[curthread]); std::copy(tProductVal[curthread].begin() , tProductVal[curthread].end(), Product.num.data() + tdisp[curthread]); } } return Product; } else { std::cout << "Grids are not comparable for EWiseApply" << std::endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return FullyDistSpVec< IU,T_promote>(); } } /** * Performs an arbitrary binary operation _binary_op on the corresponding elements of two vectors with the result stored in a return vector ret. 
* The binary operatiation is only performed if the binary predicate _doOp returns true for those elements. Otherwise the binary operation is not * performed and ret does not contain an element at that position. * More formally the operation is defined as: * if (_doOp(V[i], W[i])) * ret[i] = _binary_op(V[i], W[i]) * else * // ret[i] is not set * Hence _doOp can be used to implement a filter on either of the vectors. * * The above is only defined if both V[i] and W[i] exist (i.e. an intersection). To allow a union operation (ex. when V[i] doesn't exist but W[i] does) * the allowVNulls flag is set to true and the Vzero argument is used as the missing V[i] value. * * The type of each element of ret must not necessarily be related to the types of V or W, so the return type must be explicitly specified as a template parameter: * FullyDistSpVec<int, double> r = EWiseApply<double>(V, W, plus, retTrue, false, 0) **/ template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate> FullyDistSpVec<IU,RET> EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp) { #ifdef _OPENMP return EWiseApply_threaded<RET>(V, W, _binary_op, _doOp, allowVNulls, Vzero, useExtendedBinOp); #else typedef RET T_promote; //typedef typename promote_trait<NU1,NU2>::T_promote T_promote; if(*(V.commGrid) == *(W.commGrid)) { FullyDistSpVec< IU, T_promote> Product(V.commGrid); //FullyDistVec< IU, NU1> DV (V); // Ariful: I am not sure why it was there?? if(V.TotalLength() != W.TotalLength()) { std::ostringstream outs; outs << "Vector dimensions don't match (" << V.TotalLength() << " vs " << W.TotalLength() << ") for EWiseApply (short version)\n"; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); } else { Product.glen = V.glen; IU size= W.LocArrSize(); IU spsize = V.getlocnnz(); IU sp_iter = 0; if (allowVNulls) { // iterate over the dense vector for(IU i=0; i<size; ++i) { if(sp_iter < spsize && V.ind[sp_iter] == i) { if (_doOp(V.num[sp_iter], W.arr[i], false, false)) { Product.ind.push_back(i); Product.num.push_back(_binary_op(V.num[sp_iter], W.arr[i], false, false)); } sp_iter++; } else { if (_doOp(Vzero, W.arr[i], true, false)) { Product.ind.push_back(i); Product.num.push_back(_binary_op(Vzero, W.arr[i], true, false)); } } } } else { // iterate over the sparse vector for(sp_iter = 0; sp_iter < spsize; ++sp_iter) { if (_doOp(V.num[sp_iter], W.arr[V.ind[sp_iter]], false, false)) { Product.ind.push_back(V.ind[sp_iter]); Product.num.push_back(_binary_op(V.num[sp_iter], W.arr[V.ind[sp_iter]], false, false)); } } } } return Product; } else { std::cout << "Grids are not comparable for EWiseApply" << std::endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return FullyDistSpVec< IU,T_promote>(); } #endif } /** * Performs an arbitrary binary operation _binary_op on the corresponding elements of two vectors with the result stored in a return vector ret. * The binary operatiation is only performed if the binary predicate _doOp returns true for those elements. Otherwise the binary operation is not * performed and ret does not contain an element at that position. * More formally the operation is defined as: * if (_doOp(V[i], W[i])) * ret[i] = _binary_op(V[i], W[i]) * else * // ret[i] is not set * Hence _doOp can be used to implement a filter on either of the vectors. * * The above is only defined if both V[i] and W[i] exist (i.e. 
an intersection). To allow a union operation (ex. when V[i] doesn't exist but W[i] does) * the allowVNulls flag is set to true and the Vzero argument is used as the missing V[i] value. * !allowVNulls && !allowWNulls => intersection * !allowVNulls && allowWNulls => operate on all elements of V * allowVNulls && !allowWNulls => operate on all elements of W * allowVNulls && allowWNulls => union * * The type of each element of ret must not necessarily be related to the types of V or W, so the return type must be explicitly specified as a template parameter: * FullyDistSpVec<int, double> r = EWiseApply<double>(V, W, plus, ...) * For intersection, Vzero and Wzero are irrelevant * ABAB: \todo: Should allowIntersect be "false" for all SetDifference uses? **/ template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate> FullyDistSpVec<IU,RET> EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect, const bool useExtendedBinOp) { typedef RET T_promote; // typename promote_trait<NU1,NU2>::T_promote T_promote; if(*(V.commGrid) == *(W.commGrid)) { FullyDistSpVec< IU, T_promote> Product(V.commGrid); if(V.glen != W.glen) { std::ostringstream outs; outs << "Vector dimensions don't match (" << V.glen << " vs " << W.glen << ") for EWiseApply (full version)\n"; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); } else { Product.glen = V.glen; typename std::vector< IU >::const_iterator indV = V.ind.begin(); typename std::vector< NU1 >::const_iterator numV = V.num.begin(); typename std::vector< IU >::const_iterator indW = W.ind.begin(); typename std::vector< NU2 >::const_iterator numW = W.num.begin(); while (indV < V.ind.end() && indW < W.ind.end()) { if (*indV == *indW) { // overlap if (allowIntersect) { if (_doOp(*numV, *numW, false, false)) { Product.ind.push_back(*indV); Product.num.push_back(_binary_op(*numV, *numW, false, false)); } } indV++; numV++; indW++; numW++; } else if (*indV < *indW) { // V has value but W does not if (allowWNulls) { if (_doOp(*numV, Wzero, false, true)) { Product.ind.push_back(*indV); Product.num.push_back(_binary_op(*numV, Wzero, false, true)); } } indV++; numV++; } else //(*indV > *indW) { // W has value but V does not if (allowVNulls) { if (_doOp(Vzero, *numW, true, false)) { Product.ind.push_back(*indW); Product.num.push_back(_binary_op(Vzero, *numW, true, false)); } } indW++; numW++; } } // clean up while (allowWNulls && indV < V.ind.end()) { if (_doOp(*numV, Wzero, false, true)) { Product.ind.push_back(*indV); Product.num.push_back(_binary_op(*numV, Wzero, false, true)); } indV++; numV++; } while (allowVNulls && indW < W.ind.end()) { if (_doOp(Vzero, *numW, true, false)) { Product.ind.push_back(*indW); Product.num.push_back(_binary_op(Vzero, *numW, true, false)); } indW++; numW++; } } return Product; } else { std::cout << "Grids are not comparable for EWiseApply" << std::endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return FullyDistSpVec< IU,T_promote>(); } } // plain callback versions template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate> FullyDistSpVec<IU,RET> EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero) { return EWiseApply<RET>(V, W, 
EWiseExtToPlainAdapter<RET, NU1, NU2, _BinaryOperation>(_binary_op), EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(_doOp), allowVNulls, Vzero, true); } template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate> FullyDistSpVec<IU,RET> EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect = true) { return EWiseApply<RET>(V, W, EWiseExtToPlainAdapter<RET, NU1, NU2, _BinaryOperation>(_binary_op), EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(_doOp), allowVNulls, allowWNulls, Vzero, Wzero, allowIntersect, true); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // sampling-based nnz estimation via SpMV // @OGUZ-NOTE This is not based on SUMMA, do not use. Estimates the number of // nonzeros in the final output matrix. #define NROUNDS 5 typedef std::array<float, NROUNDS> samparr_t; template <typename NZT> struct promote_trait<NZT, samparr_t> { typedef samparr_t T_promote; }; class SamplesSaveHandler { public: template<typename c, typename t, typename V> void save(std::basic_ostream<c, t> &os, std::array<V, NROUNDS> &sample_vec, int64_t index) { for (auto it = sample_vec.begin(); it != sample_vec.end(); ++it) os << *it << " "; } }; template<typename NZT> struct SelectMinxSR { static samparr_t id() { samparr_t arr; for (auto it = arr.begin(); it != arr.end(); ++it) *it = std::numeric_limits<float>::max(); return arr; } static bool returnedSAID() { return false; } static samparr_t add (const samparr_t &arg1, const samparr_t &arg2) { samparr_t out; for (int i = 0; i < NROUNDS; ++i) out[i] = std::min(arg1[i], arg2[i]); return out; } static samparr_t multiply (const NZT arg1, const samparr_t &arg2) { return arg2; } static void axpy (const NZT a, const samparr_t &x, samparr_t &y) { y = add(y, multiply(a, x)); } static MPI_Op mpi_op() { static MPI_Op mpiop; static bool exists = false; if (exists) return mpiop; else { MPI_Op_create(MPI_func, true, &mpiop); exists = true; return mpiop; } } static void MPI_func(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype) { samparr_t *in = static_cast<samparr_t *>(invec); samparr_t *inout = static_cast<samparr_t *>(inoutvec); for (int i = 0; i < *len; ++i) inout[i] = add(inout[i], in[i]); } }; template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> int64_t EstPerProcessNnzSpMV( SpParMat<IU, NU1, UDERA> &A, SpParMat<IU, NU2, UDERB> &B ) { int myrank; MPI_Comm_rank(MPI_COMM_WORLD, &myrank); float lambda = 1.0f; int nthds = 1; #ifdef THREADED #pragma omp parallel #endif { nthds = omp_get_num_threads(); } if (myrank == 0) std::cout << "taking transposes." << std::endl; A.Transpose(); B.Transpose(); if (myrank == 0) std::cout << "setting initial samples." 
<< std::endl; samparr_t sa; FullyDistVec<IU, samparr_t> samples_init(A.getcommgrid(), A.getncol(), sa); #ifdef THREADED #pragma omp parallel #endif { std::default_random_engine gen; std::exponential_distribution<float> exp_dist(lambda); #ifdef THREADED #pragma omp parallel for #endif for (IU i = 0; i < samples_init.LocArrSize(); ++i) { samparr_t tmp; for (auto it = tmp.begin(); it != tmp.end(); ++it) *it = exp_dist(gen); samples_init.SetLocalElement(i, tmp); } } // std::string fname("samples_init"); // samples_init.ParallelWrite(fname, 1, SamplesSaveHandler(), true); if (myrank == 0) std::cout << "computing mid samples." << std::endl; FullyDistVec<IU, samparr_t> samples_mid = SpMV<SelectMinxSR<NU1> > (A, samples_init); // fname = "samples_mid"; // samples_mid.ParallelWrite(fname, 1, SamplesSaveHandler(), true); if (myrank == 0) std::cout << "computing final samples." << std::endl; FullyDistVec<IU, samparr_t> samples_final = SpMV<SelectMinxSR<NU2> > (B, samples_mid); // fname = "samples_final"; // samples_final.ParallelWrite(fname, 1, SamplesSaveHandler(), true); if (myrank == 0) std::cout << "computing nnz estimation." << std::endl; float nnzest = 0.0f; std::cout << myrank << "samples_final loc size: " << samples_final.LocArrSize() << std::endl; const samparr_t *lsamples = samples_final.GetLocArr(); #ifdef THREADED #pragma omp parallel for reduction (+:nnzest) #endif for (IU i = 0; i < samples_final.LocArrSize(); ++i) { float tmp = 0.0f; for (auto it = lsamples[i].begin(); it != lsamples[i].end(); ++it) tmp += *it; nnzest += static_cast<float>(NROUNDS - 1) / tmp; } if (myrank == 0) std::cout << "taking transposes again." << std::endl; int64_t nnzC_est = nnzest; int64_t nnzC_tot = 0; MPI_Allreduce(&nnzC_est, &nnzC_tot, 1, MPIType<int64_t>(), MPI_SUM, (B.commGrid)->GetWorld()); if (myrank == 0) std::cout << "sampling-based spmv est tot: " << nnzC_tot << std::endl; // revert back A.Transpose(); B.Transpose(); return nnzC_tot; } template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDER1, typename UDER2> SpParMat3D<IU,NUO,UDERO> Mult_AnXBn_SUMMA3D(SpParMat3D<IU,NU1,UDER1> & A, SpParMat3D<IU,NU2,UDER2> & B){ int myrank; MPI_Comm_rank(MPI_COMM_WORLD, &myrank); typedef typename UDERO::LocalIT LIC; typedef typename UDER1::LocalIT LIA; typedef typename UDER2::LocalIT LIB; #ifdef TIMING double t0, t1, t2, t3; #endif /* * Check if A and B are multipliable * */ if(A.getncol() != B.getnrow()){ std::ostringstream outs; outs << "Can not multiply, dimensions does not match"<< std::endl; outs << A.getncol() << " != " << B.getnrow() << std::endl; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); } /* * Calculate, accross fibers, which process should get how many columns after redistribution * */ vector<LIB> divisions3d; // Calcuclate split boundaries as if all contents of the layer is being re-distributed along fiber // These boundaries will be used later on B.CalculateColSplitDistributionOfLayer(divisions3d); #ifdef TIMING t0 = MPI_Wtime(); #endif /* * SUMMA Starts * */ int stages, dummy; // last two parameters of ProductGrid are ignored for this multiplication std::shared_ptr<CommGrid> GridC = ProductGrid((A.GetLayerMat()->getcommgrid()).get(), (B.GetLayerMat()->getcommgrid()).get(), stages, dummy, dummy); IU C_m = A.GetLayerMat()->seqptr()->getnrow(); IU C_n = B.GetLayerMat()->seqptr()->getncol(); IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERO::esscount, stages); IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERO::esscount, stages); 
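// SUMMA broadcast loop below: in stage i, the owner of the ith piece
// broadcasts its local A along the process row and its local B along the
// process column; every process then multiplies the two received pieces
// (LocalSpGEMMHash) and collects the per-stage partial results in `tomerge`
// for a final multiway merge.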
SpParHelper::GetSetSizes( *(A.GetLayerMat()->seqptr()), ARecvSizes, (A.GetLayerMat()->getcommgrid())->GetRowWorld() ); SpParHelper::GetSetSizes( *(B.GetLayerMat()->seqptr()), BRecvSizes, (B.GetLayerMat()->getcommgrid())->GetColWorld() ); // Remotely fetched matrices are stored as pointers UDERO * ARecv; UDER2 * BRecv; std::vector< SpTuples<IU,NUO> *> tomerge; int Aself = (A.GetLayerMat()->getcommgrid())->GetRankInProcRow(); int Bself = (B.GetLayerMat()->getcommgrid())->GetRankInProcCol(); double Abcast_time = 0; double Bbcast_time = 0; double Local_multiplication_time = 0; for(int i = 0; i < stages; ++i) { std::vector<IU> ess; if(i == Aself){ ARecv = A.GetLayerMat()->seqptr(); // shallow-copy } else{ ess.resize(UDER1::esscount); for(int j=0; j<UDER1::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDER1(); // first, create the object } #ifdef TIMING t2 = MPI_Wtime(); #endif if (Aself != i) { ARecv->Create(ess); } Arr<IU,NU1> Aarrinfo = ARecv->GetArrays(); for(unsigned int idx = 0; idx < Aarrinfo.indarrs.size(); ++idx) { MPI_Bcast(Aarrinfo.indarrs[idx].addr, Aarrinfo.indarrs[idx].count, MPIType<IU>(), i, GridC->GetRowWorld()); } for(unsigned int idx = 0; idx < Aarrinfo.numarrs.size(); ++idx) { MPI_Bcast(Aarrinfo.numarrs[idx].addr, Aarrinfo.numarrs[idx].count, MPIType<NU1>(), i, GridC->GetRowWorld()); } #ifdef TIMING t3 = MPI_Wtime(); Abcast_time += (t3-t2); #endif ess.clear(); if(i == Bself){ BRecv = B.GetLayerMat()->seqptr(); // shallow-copy } else{ ess.resize(UDER2::esscount); for(int j=0; j<UDER2::esscount; ++j) { ess[j] = BRecvSizes[j][i]; } BRecv = new UDER2(); } MPI_Barrier(A.GetLayerMat()->getcommgrid()->GetWorld()); #ifdef TIMING t2 = MPI_Wtime(); #endif if (Bself != i) { BRecv->Create(ess); } Arr<IU,NU2> Barrinfo = BRecv->GetArrays(); for(unsigned int idx = 0; idx < Barrinfo.indarrs.size(); ++idx) { MPI_Bcast(Barrinfo.indarrs[idx].addr, Barrinfo.indarrs[idx].count, MPIType<IU>(), i, GridC->GetColWorld()); } for(unsigned int idx = 0; idx < Barrinfo.numarrs.size(); ++idx) { MPI_Bcast(Barrinfo.numarrs[idx].addr, Barrinfo.numarrs[idx].count, MPIType<NU2>(), i, GridC->GetColWorld()); } #ifdef TIMING t3 = MPI_Wtime(); Bbcast_time += (t3-t2); #endif #ifdef TIMING t2 = MPI_Wtime(); #endif SpTuples<IU,NUO> * C_cont = LocalSpGEMMHash<SR, NUO> (*ARecv, *BRecv, // parameters themselves i != Aself, // 'delete A' condition i != Bself, // 'delete B' condition false); // not to sort each column #ifdef TIMING t3 = MPI_Wtime(); Local_multiplication_time += (t3-t2); #endif if(!C_cont->isZero()) tomerge.push_back(C_cont); } SpHelper::deallocate2D(ARecvSizes, UDER1::esscount); SpHelper::deallocate2D(BRecvSizes, UDER2::esscount); #ifdef TIMING t2 = MPI_Wtime(); #endif SpTuples<IU,NUO> * C_tuples = MultiwayMergeHash<SR>(tomerge, C_m, C_n, true, false); // Delete input arrays and do not sort //SpTuples<IU,NUO> * C_tuples = MultiwayMergeHashSliding<SR>(tomerge, C_m, C_n, true, false); // Delete input arrays and do not sort #ifdef TIMING t3 = MPI_Wtime(); #endif #ifdef TIMING if(myrank == 0){ fprintf(stderr, "[SUMMA3D]\tAbcast_time: %lf\n", Abcast_time); fprintf(stderr, "[SUMMA3D]\tBbcast_time: %lf\n", Bbcast_time); fprintf(stderr, "[SUMMA3D]\tLocal_multiplication_time: %lf\n", Local_multiplication_time); fprintf(stderr, "[SUMMA3D]\tMerge_layer_time: %lf\n", (t3-t2)); } #endif /* * SUMMA Ends * */ #ifdef TIMING t1 = MPI_Wtime(); if(myrank == 0) fprintf(stderr, "[SUMMA3D]\tSUMMA time: %lf\n", (t1-t0)); #endif /* * 3d-reduction starts * */ #ifdef TIMING 
    //MPI_Barrier(getcommgrid3D()->GetWorld());
    t0 = MPI_Wtime();
#endif
    MPI_Datatype MPI_tuple;
    MPI_Type_contiguous(sizeof(std::tuple<LIC,LIC,NUO>), MPI_CHAR, &MPI_tuple);
    MPI_Type_commit(&MPI_tuple);
    /*
     * Create a profile with information regarding data to be sent and received between layers.
     * These allocations need to be `int` specifically because some of these arrays are used in communication.
     * This requirement comes from MPI, as MPI_Alltoallv takes counts and displacements exclusively as pointers to int.
     * */
    int * sendcnt  = new int[A.getcommgrid3D()->GetGridLayers()];
    int * sendprfl = new int[A.getcommgrid3D()->GetGridLayers()*3];
    int * sdispls  = new int[A.getcommgrid3D()->GetGridLayers()]();
    int * recvcnt  = new int[A.getcommgrid3D()->GetGridLayers()];
    int * recvprfl = new int[A.getcommgrid3D()->GetGridLayers()*3];
    int * rdispls  = new int[A.getcommgrid3D()->GetGridLayers()]();
    vector<IU> divisions3dPrefixSum(divisions3d.size());
    divisions3dPrefixSum[0] = 0;
    std::partial_sum(divisions3d.begin(), divisions3d.end()-1, divisions3dPrefixSum.begin()+1);
    ColLexiCompare<IU,NUO> comp;
    IU totsend = C_tuples->getnnz();
#pragma omp parallel for
    for(int i=0; i < A.getcommgrid3D()->GetGridLayers(); ++i){
        IU start_col = divisions3dPrefixSum[i];
        IU end_col = divisions3dPrefixSum[i] + divisions3d[i];
        std::tuple<IU, IU, NUO> search_tuple_start(0, start_col, NUO());
        std::tuple<IU, IU, NUO> search_tuple_end(0, end_col, NUO());
        std::tuple<IU, IU, NUO>* start_it = std::lower_bound(C_tuples->tuples, C_tuples->tuples + C_tuples->getnnz(), search_tuple_start, comp);
        std::tuple<IU, IU, NUO>* end_it = std::lower_bound(C_tuples->tuples, C_tuples->tuples + C_tuples->getnnz(), search_tuple_end, comp);
        // This type casting is important from a semantic point of view
        sendcnt[i] = (int)(end_it - start_it);
        sendprfl[i*3+0] = (int)(sendcnt[i]);                           // Number of nonzeros in ith chunk
        sendprfl[i*3+1] = (int)(A.GetLayerMat()->seqptr()->getnrow()); // Number of rows in ith chunk
        sendprfl[i*3+2] = (int)(divisions3d[i]);                       // Number of columns in ith chunk
    }
    std::partial_sum(sendcnt, sendcnt+A.getcommgrid3D()->GetGridLayers()-1, sdispls+1);
    // Send profile ready. Now need to update the tuples to reflect the correct column id after the column split.
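    // Column ids are global within the layer, but each receiving layer expects
    // chunk-local ids: subtracting divisions3dPrefixSum[i] from every tuple in
    // chunk i rebases its columns to start at zero before the Alltoallv exchange.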
for(int i=0; i < A.getcommgrid3D()->GetGridLayers(); ++i){ #pragma omp parallel for schedule(static) for(int j = 0; j < sendcnt[i]; j++){ std::get<1>(C_tuples->tuples[sdispls[i]+j]) = std::get<1>(C_tuples->tuples[sdispls[i]+j]) - divisions3dPrefixSum[i]; } } MPI_Alltoall(sendprfl, 3, MPI_INT, recvprfl, 3, MPI_INT, A.getcommgrid3D()->GetFiberWorld()); for(int i = 0; i < A.getcommgrid3D()->GetGridLayers(); i++) recvcnt[i] = recvprfl[i*3]; std::partial_sum(recvcnt, recvcnt+A.getcommgrid3D()->GetGridLayers()-1, rdispls+1); IU totrecv = std::accumulate(recvcnt,recvcnt+A.getcommgrid3D()->GetGridLayers(), static_cast<IU>(0)); std::tuple<LIC,LIC,NUO>* recvTuples = static_cast<std::tuple<LIC,LIC,NUO>*> (::operator new (sizeof(std::tuple<LIC,LIC,NUO>[totrecv]))); #ifdef TIMING t2 = MPI_Wtime(); #endif MPI_Alltoallv(C_tuples->tuples, sendcnt, sdispls, MPI_tuple, recvTuples, recvcnt, rdispls, MPI_tuple, A.getcommgrid3D()->GetFiberWorld()); delete C_tuples; #ifdef TIMING t3 = MPI_Wtime(); if(myrank == 0) fprintf(stderr, "[SUMMA3D]\tAlltoallv: %lf\n", (t3-t2)); #endif vector<SpTuples<IU, NUO>*> recvChunks(A.getcommgrid3D()->GetGridLayers()); #pragma omp parallel for for (int i = 0; i < A.getcommgrid3D()->GetGridLayers(); i++){ recvChunks[i] = new SpTuples<LIC, NUO>(recvcnt[i], recvprfl[i*3+1], recvprfl[i*3+2], recvTuples + rdispls[i], true, false); } // Free all memory except tempTuples; Because that memory is holding data of newly created local matrices after receiving. DeleteAll(sendcnt, sendprfl, sdispls); DeleteAll(recvcnt, recvprfl, rdispls); MPI_Type_free(&MPI_tuple); /* * 3d-reduction ends * */ #ifdef TIMING t1 = MPI_Wtime(); if(myrank == 0) fprintf(stderr, "[SUMMA3D]\tReduction time: %lf\n", (t1-t0)); #endif #ifdef TIMING t0 = MPI_Wtime(); #endif /* * 3d-merge starts * */ SpTuples<IU, NUO> * merged_tuples = MultiwayMergeHash<SR, IU, NUO>(recvChunks, recvChunks[0]->getnrow(), recvChunks[0]->getncol(), false, false); // Do not delete #ifdef TIMING t1 = MPI_Wtime(); if(myrank == 0) fprintf(stderr, "[SUMMA3D]\tMerge_fiber_time: %lf\n", (t1-t0)); #endif //Create SpDCCol and delete merged_tuples; UDERO * localResultant = new UDERO(*merged_tuples, false); delete merged_tuples; // Do not delete elements of recvChunks, because that would give segmentation fault due to double free //delete [] recvTuples; ::operator delete(recvTuples); for(int i = 0; i < recvChunks.size(); i++){ recvChunks[i]->tuples_deleted = true; // Temporary patch to avoid memory leak and segfault delete recvChunks[i]; } vector<SpTuples<IU,NUO>*>().swap(recvChunks); /* * 3d-merge ends * */ std::shared_ptr<CommGrid3D> grid3d; grid3d.reset(new CommGrid3D(A.getcommgrid3D()->GetWorld(), A.getcommgrid3D()->GetGridLayers(), A.getcommgrid3D()->GetGridRows(), A.getcommgrid3D()->GetGridCols(), A.isSpecial())); SpParMat3D<IU, NUO, UDERO> C(localResultant, grid3d, A.isColSplit(), A.isSpecial()); return C; } /* * Parameters: * - computationKernel: 1 for hash-based, 2 for heap-based * */ template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat3D<IU, NUO, UDERO> MemEfficientSpGEMM3D(SpParMat3D<IU, NU1, UDERA> & A, SpParMat3D<IU, NU2, UDERB> & B, int phases, NUO hardThreshold, IU selectNum, IU recoverNum, NUO recoverPct, int kselectVersion, int computationKernel, int64_t perProcessMemory){ int myrank; MPI_Comm_rank(MPI_COMM_WORLD,&myrank); typedef typename UDERA::LocalIT LIA; typedef typename UDERB::LocalIT LIB; typedef typename UDERO::LocalIT LIC; /* * Check if A and B are 
multipliable * */ if(A.getncol() != B.getnrow()){ std::ostringstream outs; outs << "Can not multiply, dimensions does not match"<< std::endl; outs << A.getncol() << " != " << B.getnrow() << std::endl; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); } /* * If provided number of phase is too low or too high then reset value of phase as 1 * */ if(phases < 1 || phases >= B.getncol()){ SpParHelper::Print("[MemEfficientSpGEMM3D]\tThe value of phases is too small or large. Resetting to 1.\n"); phases = 1; } double t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; // To time different parts of the function #ifdef TIMING MPI_Barrier(B.getcommgrid3D()->GetWorld()); t0 = MPI_Wtime(); #endif /* * If per process memory is provided then calculate number of phases * Otherwise, proceed to multiplication. * */ if(perProcessMemory > 0) { int p, calculatedPhases; MPI_Comm_size(A.getcommgrid3D()->GetLayerWorld(),&p); int64_t perNNZMem_in = sizeof(IU)*2 + sizeof(NU1); int64_t perNNZMem_out = sizeof(IU)*2 + sizeof(NUO); int64_t lannz = A.GetLayerMat()->getlocalnnz(); int64_t gannz = 0; // Get maximum number of nnz owned by one process MPI_Allreduce(&lannz, &gannz, 1, MPIType<int64_t>(), MPI_MAX, A.getcommgrid3D()->GetWorld()); //int64_t ginputMem = gannz * perNNZMem_in * 4; // Four pieces per process: one piece of own A and B, one piece of received A and B int64_t ginputMem = gannz * perNNZMem_in * 5; // One extra copy for safety // Estimate per layer nnz after multiplication. After this estimation each process would know an estimation of // how many nnz the corresponding layer will have after the layerwise operation. int64_t asquareNNZ = EstPerProcessNnzSUMMA(*(A.GetLayerMat()), *(B.GetLayerMat()), true); int64_t gasquareNNZ; MPI_Allreduce(&asquareNNZ, &gasquareNNZ, 1, MPIType<int64_t>(), MPI_MAX, A.getcommgrid3D()->GetFiberWorld()); // Atmost two copies, one of a process's own, another received from fiber reduction int64_t gasquareMem = gasquareNNZ * perNNZMem_out * 2; // Calculate estimated average degree after multiplication int64_t d = ceil( ( ( gasquareNNZ / B.getcommgrid3D()->GetGridLayers() ) * sqrt(p) ) / B.GetLayerMat()->getlocalcols() ); // Calculate per column nnz how left after k-select. Minimum of average degree and k-select parameters. 
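            // Phase count: the budget ginputMem + postKselectOutputMem + (gasquareMem + kselectMem)/phases
            // must fit in perProcessMemory (given in GB, hence the 1e9 factor below), so
            // phases >= (gasquareMem + kselectMem) / remainingMem.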
int64_t k = std::min(int64_t(std::max(selectNum, recoverNum)), d ); //estimate output memory int64_t postKselectOutputNNZ = ceil(( (B.GetLayerMat()->getlocalcols() / B.getcommgrid3D()->GetGridLayers() ) * k)/sqrt(p)); // If kselect is run int64_t postKselectOutputMem = postKselectOutputNNZ * perNNZMem_out * 2; double remainingMem = perProcessMemory*1000000000 - ginputMem - postKselectOutputMem; int64_t kselectMem = B.GetLayerMat()->getlocalcols() * k * sizeof(NUO) * 3; //inputMem + outputMem + asquareMem/phases + kselectmem/phases < memory if(remainingMem > 0){ calculatedPhases = ceil( (gasquareMem + kselectMem) / remainingMem ); // If kselect is run } else calculatedPhases = -1; int gCalculatedPhases; MPI_Allreduce(&calculatedPhases, &gCalculatedPhases, 1, MPI_INT, MPI_MAX, A.getcommgrid3D()->GetFiberWorld()); if(gCalculatedPhases > phases) phases = gCalculatedPhases; } else{ // Do nothing } #ifdef TIMING MPI_Barrier(B.getcommgrid3D()->GetWorld()); t1 = MPI_Wtime(); mcl3d_symbolictime+=(t1-t0); //if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tSymbolic stage time: %lf\n", (t1-t0)); #endif /* * Calculate, accross fibers, which process should get how many columns after redistribution * */ vector<LIB> divisions3d; // Calculate split boundaries as if all contents of the layer is being re-distributed along fiber // These boundaries will be used later on B.CalculateColSplitDistributionOfLayer(divisions3d); /* * Split B according to calculated number of phases * For better load balancing split B into nlayers*phases chunks * */ vector<UDERB*> PiecesOfB; vector<UDERB*> tempPiecesOfB; UDERB CopyB = *(B.GetLayerMat()->seqptr()); CopyB.ColSplit(divisions3d, tempPiecesOfB); // Split B into `nlayers` chunks at first for(int i = 0; i < tempPiecesOfB.size(); i++){ vector<UDERB*> temp; tempPiecesOfB[i]->ColSplit(phases, temp); // Split each chunk of B into `phases` chunks for(int j = 0; j < temp.size(); j++){ PiecesOfB.push_back(temp[j]); } } vector<UDERO> toconcatenate; //if(myrank == 0){ //fprintf(stderr, "[MemEfficientSpGEMM3D]\tRunning with phase: %d\n", phases); //} for(int p = 0; p < phases; p++){ /* * At the start of each phase take appropriate pieces from previously created pieces of local B matrix * Appropriate means correct pieces so that 3D-merge can be properly load balanced. 
* */ vector<LIB> lbDivisions3d; // load balance friendly division LIB totalLocalColumnInvolved = 0; vector<UDERB*> targetPiecesOfB; // Pieces of B involved in current phase for(int i = 0; i < PiecesOfB.size(); i++){ if(i % phases == p){ targetPiecesOfB.push_back(new UDERB(*(PiecesOfB[i]))); lbDivisions3d.push_back(PiecesOfB[i]->getncol()); totalLocalColumnInvolved += PiecesOfB[i]->getncol(); } } /* * Create new local matrix by concatenating appropriately picked pieces * */ UDERB * OnePieceOfB = new UDERB(0, (B.GetLayerMat())->seqptr()->getnrow(), totalLocalColumnInvolved, 0); OnePieceOfB->ColConcatenate(targetPiecesOfB); vector<UDERB*>().swap(targetPiecesOfB); /* * Create a new layer-wise distributed matrix with the newly created local matrix for this phase * This matrix is used in SUMMA multiplication of respective layer * */ SpParMat<IU, NU2, UDERB> OnePieceOfBLayer(OnePieceOfB, A.getcommgrid3D()->GetLayerWorld()); #ifdef TIMING t0 = MPI_Wtime(); #endif /* * SUMMA Starts * */ int stages, dummy; // last two parameters of ProductGrid are ignored for this multiplication std::shared_ptr<CommGrid> GridC = ProductGrid((A.GetLayerMat()->getcommgrid()).get(), (OnePieceOfBLayer.getcommgrid()).get(), stages, dummy, dummy); LIA C_m = A.GetLayerMat()->seqptr()->getnrow(); LIB C_n = OnePieceOfBLayer.seqptr()->getncol(); LIA ** ARecvSizes = SpHelper::allocate2D<LIA>(UDERA::esscount, stages); LIB ** BRecvSizes = SpHelper::allocate2D<LIB>(UDERB::esscount, stages); SpParHelper::GetSetSizes( *(A.GetLayerMat()->seqptr()), ARecvSizes, (A.GetLayerMat()->getcommgrid())->GetRowWorld() ); SpParHelper::GetSetSizes( *(OnePieceOfBLayer.seqptr()), BRecvSizes, (OnePieceOfBLayer.getcommgrid())->GetColWorld() ); // Remotely fetched matrices are stored as pointers UDERA * ARecv; UDERB * BRecv; std::vector< SpTuples<LIC,NUO> *> tomerge; int Aself = (A.GetLayerMat()->getcommgrid())->GetRankInProcRow(); int Bself = (OnePieceOfBLayer.getcommgrid())->GetRankInProcCol(); double Abcast_time = 0; double Bbcast_time = 0; double Local_multiplication_time = 0; for(int i = 0; i < stages; ++i) { std::vector<LIA> ess; if(i == Aself){ ARecv = A.GetLayerMat()->seqptr(); // shallow-copy } else{ ess.resize(UDERA::esscount); for(int j=0; j<UDERA::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDERA(); // first, create the object } #ifdef TIMING t2 = MPI_Wtime(); #endif if (Aself != i) { ARecv->Create(ess); } Arr<LIA,NU1> Aarrinfo = ARecv->GetArrays(); for(unsigned int idx = 0; idx < Aarrinfo.indarrs.size(); ++idx) { MPI_Bcast(Aarrinfo.indarrs[idx].addr, Aarrinfo.indarrs[idx].count, MPIType<IU>(), i, GridC->GetRowWorld()); } for(unsigned int idx = 0; idx < Aarrinfo.numarrs.size(); ++idx) { MPI_Bcast(Aarrinfo.numarrs[idx].addr, Aarrinfo.numarrs[idx].count, MPIType<NU1>(), i, GridC->GetRowWorld()); } #ifdef TIMING t3 = MPI_Wtime(); mcl3d_Abcasttime += (t3-t2); Abcast_time += (t3-t2); #endif ess.clear(); if(i == Bself){ BRecv = OnePieceOfBLayer.seqptr(); // shallow-copy } else{ ess.resize(UDERB::esscount); for(int j=0; j<UDERB::esscount; ++j) { ess[j] = BRecvSizes[j][i]; } BRecv = new UDERB(); } MPI_Barrier(A.GetLayerMat()->getcommgrid()->GetWorld()); #ifdef TIMING t2 = MPI_Wtime(); #endif if (Bself != i) { BRecv->Create(ess); } Arr<LIB,NU2> Barrinfo = BRecv->GetArrays(); for(unsigned int idx = 0; idx < Barrinfo.indarrs.size(); ++idx) { MPI_Bcast(Barrinfo.indarrs[idx].addr, Barrinfo.indarrs[idx].count, MPIType<IU>(), i, GridC->GetColWorld()); } for(unsigned int idx = 0; idx < 
Barrinfo.numarrs.size(); ++idx) { MPI_Bcast(Barrinfo.numarrs[idx].addr, Barrinfo.numarrs[idx].count, MPIType<NU2>(), i, GridC->GetColWorld()); } #ifdef TIMING t3 = MPI_Wtime(); mcl3d_Bbcasttime += (t3-t2); Bbcast_time += (t3-t2); #endif #ifdef TIMING t2 = MPI_Wtime(); #endif SpTuples<LIC,NUO> * C_cont; if(computationKernel == 1){ C_cont = LocalSpGEMMHash<SR, NUO> (*ARecv, *BRecv, // parameters themselves i != Aself, // 'delete A' condition i != Bself, // 'delete B' condition false); // not to sort each column } else if(computationKernel == 2){ C_cont = LocalSpGEMM<SR, NUO> (*ARecv, *BRecv, // parameters themselves i != Aself, // 'delete A' condition i != Bself); // 'delete B' condition } #ifdef TIMING t3 = MPI_Wtime(); mcl3d_localspgemmtime += (t3-t2); Local_multiplication_time += (t3-t2); #endif if(!C_cont->isZero()) tomerge.push_back(C_cont); } SpHelper::deallocate2D(ARecvSizes, UDERA::esscount); SpHelper::deallocate2D(BRecvSizes, UDERB::esscount); #ifdef TIMING t2 = MPI_Wtime(); #endif SpTuples<LIC,NUO> * C_tuples; if(computationKernel == 1) C_tuples = MultiwayMergeHash<SR>(tomerge, C_m, C_n, true, true); // Delete input arrays and sort else if(computationKernel == 2) C_tuples = MultiwayMerge<SR>(tomerge, C_m, C_n, true); // Delete input arrays and sort #ifdef TIMING t3 = MPI_Wtime(); mcl3d_SUMMAmergetime += (t3-t2); #endif #ifdef TIMING if(myrank == 0){ fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAbcast_time: %lf\n", p, Abcast_time); fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tBbcast_time: %lf\n", p, Bbcast_time); fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tLocal_multiplication_time: %lf\n", p, Local_multiplication_time); fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tSUMMA Merge time: %lf\n", p, (t3-t2)); } #endif /* * SUMMA Ends * */ #ifdef TIMING t1 = MPI_Wtime(); mcl3d_SUMMAtime += (t1-t0); if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tSUMMA time: %lf\n", p, (t1-t0)); #endif /* * 3d-reduction starts * */ #ifdef TIMING t0 = MPI_Wtime(); t2 = MPI_Wtime(); #endif MPI_Datatype MPI_tuple; MPI_Type_contiguous(sizeof(std::tuple<LIC,LIC,NUO>), MPI_CHAR, &MPI_tuple); MPI_Type_commit(&MPI_tuple); /* * Create a profile with information regarding data to be sent and received between layers * These memory allocation needs to be `int` specifically because some of these arrays would be used in communication * This is requirement is for MPI as MPI_Alltoallv takes pointer to integer exclusively as count and displacement * */ int * sendcnt = new int[A.getcommgrid3D()->GetGridLayers()]; int * sendprfl = new int[A.getcommgrid3D()->GetGridLayers()*3]; int * sdispls = new int[A.getcommgrid3D()->GetGridLayers()](); int * recvcnt = new int[A.getcommgrid3D()->GetGridLayers()]; int * recvprfl = new int[A.getcommgrid3D()->GetGridLayers()*3]; int * rdispls = new int[A.getcommgrid3D()->GetGridLayers()](); vector<LIC> lbDivisions3dPrefixSum(lbDivisions3d.size()); lbDivisions3dPrefixSum[0] = 0; std::partial_sum(lbDivisions3d.begin(), lbDivisions3d.end()-1, lbDivisions3dPrefixSum.begin()+1); ColLexiCompare<LIC,NUO> comp; LIC totsend = C_tuples->getnnz(); #ifdef TIMING t3 = MPI_Wtime(); if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAllocation of alltoall information: %lf\n", p, (t3-t2)); #endif #ifdef TIMING t2 = MPI_Wtime(); #endif #pragma omp parallel for for(int i=0; i < A.getcommgrid3D()->GetGridLayers(); ++i){ LIC start_col = lbDivisions3dPrefixSum[i]; LIC end_col = lbDivisions3dPrefixSum[i] + lbDivisions3d[i]; std::tuple<LIC, LIC, NUO> 
search_tuple_start(0, start_col, NUO()); std::tuple<LIC, LIC, NUO> search_tuple_end(0, end_col, NUO()); std::tuple<LIC, LIC, NUO>* start_it = std::lower_bound(C_tuples->tuples, C_tuples->tuples + C_tuples->getnnz(), search_tuple_start, comp); std::tuple<LIC, LIC, NUO>* end_it = std::lower_bound(C_tuples->tuples, C_tuples->tuples + C_tuples->getnnz(), search_tuple_end, comp); // This type casting is important from semantic point of view sendcnt[i] = (int)(end_it - start_it); sendprfl[i*3+0] = (int)(sendcnt[i]); // Number of nonzeros in ith chunk sendprfl[i*3+1] = (int)(A.GetLayerMat()->seqptr()->getnrow()); // Number of rows in ith chunk sendprfl[i*3+2] = (int)(lbDivisions3d[i]); // Number of columns in ith chunk } std::partial_sum(sendcnt, sendcnt+A.getcommgrid3D()->GetGridLayers()-1, sdispls+1); #ifdef TIMING t3 = MPI_Wtime(); if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tGetting Alltoall data ready: %lf\n", p, (t3-t2)); #endif // Send profile ready. Now need to update the tuples to reflect correct column id after column split. #ifdef TIMING t2 = MPI_Wtime(); #endif for(int i=0; i < A.getcommgrid3D()->GetGridLayers(); ++i){ #pragma omp parallel for schedule(static) for(int j = 0; j < sendcnt[i]; j++){ std::get<1>(C_tuples->tuples[sdispls[i]+j]) = std::get<1>(C_tuples->tuples[sdispls[i]+j]) - lbDivisions3dPrefixSum[i]; } } #ifdef TIMING t3 = MPI_Wtime(); if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tGetting Alltoallv data ready: %lf\n", p, (t3-t2)); #endif #ifdef TIMING t2 = MPI_Wtime(); #endif MPI_Alltoall(sendprfl, 3, MPI_INT, recvprfl, 3, MPI_INT, A.getcommgrid3D()->GetFiberWorld()); #ifdef TIMING t3 = MPI_Wtime(); if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAlltoall: %lf\n", p, (t3-t2)); #endif #ifdef TIMING t2 = MPI_Wtime(); #endif for(int i = 0; i < A.getcommgrid3D()->GetGridLayers(); i++) recvcnt[i] = recvprfl[i*3]; std::partial_sum(recvcnt, recvcnt+A.getcommgrid3D()->GetGridLayers()-1, rdispls+1); LIC totrecv = std::accumulate(recvcnt,recvcnt+A.getcommgrid3D()->GetGridLayers(), static_cast<IU>(0)); std::tuple<LIC,LIC,NUO>* recvTuples = static_cast<std::tuple<LIC,LIC,NUO>*> (::operator new (sizeof(std::tuple<LIC,LIC,NUO>[totrecv]))); #ifdef TIMING t3 = MPI_Wtime(); if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAllocation of receive data: %lf\n", p, (t3-t2)); #endif #ifdef TIMING t2 = MPI_Wtime(); #endif MPI_Alltoallv(C_tuples->tuples, sendcnt, sdispls, MPI_tuple, recvTuples, recvcnt, rdispls, MPI_tuple, A.getcommgrid3D()->GetFiberWorld()); delete C_tuples; #ifdef TIMING t3 = MPI_Wtime(); if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAlltoallv: %lf\n", p, (t3-t2)); #endif #ifdef TIMING t2 = MPI_Wtime(); #endif vector<SpTuples<LIC, NUO>*> recvChunks(A.getcommgrid3D()->GetGridLayers()); #pragma omp parallel for for (int i = 0; i < A.getcommgrid3D()->GetGridLayers(); i++){ recvChunks[i] = new SpTuples<LIC, NUO>(recvcnt[i], recvprfl[i*3+1], recvprfl[i*3+2], recvTuples + rdispls[i], true, false); } #ifdef TIMING t3 = MPI_Wtime(); if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\trecvChunks creation: %lf\n", p, (t3-t2)); #endif #ifdef TIMING t2 = MPI_Wtime(); #endif // Free all memory except tempTuples; Because that is holding data of newly created local matrices after receiving. 
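        // (The buffer referred to above is recvTuples: each SpTuples in recvChunks
        // aliases a slice of it, so it is released only after the fiber-level merge,
        // via ::operator delete(recvTuples), with tuples_deleted set to avoid a double free.)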
DeleteAll(sendcnt, sendprfl, sdispls); DeleteAll(recvcnt, recvprfl, rdispls); MPI_Type_free(&MPI_tuple); #ifdef TIMING t3 = MPI_Wtime(); if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tMemory freeing: %lf\n", p, (t3-t2)); #endif /* * 3d-reduction ends * */ #ifdef TIMING t1 = MPI_Wtime(); mcl3d_reductiontime += (t1-t0); if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tReduction time: %lf\n", p, (t1-t0)); #endif #ifdef TIMING t0 = MPI_Wtime(); #endif /* * 3d-merge starts * */ SpTuples<LIC, NUO> * merged_tuples; if(computationKernel == 1) merged_tuples = MultiwayMergeHash<SR, LIC, NUO>(recvChunks, recvChunks[0]->getnrow(), recvChunks[0]->getncol(), false, false); // Do not delete else if(computationKernel == 2) merged_tuples = MultiwayMerge<SR, LIC, NUO>(recvChunks, recvChunks[0]->getnrow(), recvChunks[0]->getncol(), false); // Do not delete #ifdef TIMING t1 = MPI_Wtime(); mcl3d_3dmergetime += (t1-t0); if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\t3D Merge time: %lf\n", p, (t1-t0)); #endif /* * 3d-merge ends * */ #ifdef TIMING t0 = MPI_Wtime(); #endif // Do not delete elements of recvChunks, because that would give segmentation fault due to double free ::operator delete(recvTuples); for(int i = 0; i < recvChunks.size(); i++){ recvChunks[i]->tuples_deleted = true; // Temporary patch to avoid memory leak and segfault delete recvChunks[i]; // As the patch is used, now delete each element of recvChunks } vector<SpTuples<LIC,NUO>*>().swap(recvChunks); // As the patch is used, now delete recvChunks // This operation is not needed if result can be used and discareded right away // This operation is being done because it is needed by MCLPruneRecoverySelect UDERO * phaseResultant = new UDERO(*merged_tuples, false); delete merged_tuples; SpParMat<IU, NUO, UDERO> phaseResultantLayer(phaseResultant, A.getcommgrid3D()->GetLayerWorld()); MCLPruneRecoverySelect(phaseResultantLayer, hardThreshold, selectNum, recoverNum, recoverPct, kselectVersion); #ifdef TIMING t1 = MPI_Wtime(); mcl3d_kselecttime += (t1-t0); if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tMCLPruneRecoverySelect time: %lf\n",p, (t1-t0)); #endif toconcatenate.push_back(phaseResultantLayer.seq()); #ifdef TIMING if(myrank == 0) fprintf(stderr, "***\n"); #endif } for(int i = 0; i < PiecesOfB.size(); i++) delete PiecesOfB[i]; std::shared_ptr<CommGrid3D> grid3d; grid3d.reset(new CommGrid3D(A.getcommgrid3D()->GetWorld(), A.getcommgrid3D()->GetGridLayers(), A.getcommgrid3D()->GetGridRows(), A.getcommgrid3D()->GetGridCols(), A.isSpecial())); UDERO * localResultant = new UDERO(0, A.GetLayerMat()->seqptr()->getnrow(), divisions3d[A.getcommgrid3D()->GetRankInFiber()], 0); localResultant->ColConcatenate(toconcatenate); SpParMat3D<IU, NUO, UDERO> C3D(localResultant, grid3d, A.isColSplit(), A.isSpecial()); return C3D; } } #endif
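A minimal usage sketch for the plain-callback EWiseApply overload defined above; the header path, the combblas namespace qualification, and the lambda callbacks are illustrative assumptions, not part of this file.

// Hypothetical driver (assumed header path and namespace): keeps each nonzero
// V[i] whose dense counterpart W[i] is positive, storing V[i] + W[i].
#include "CombBLAS/CombBLAS.h"

combblas::FullyDistSpVec<int64_t, double>
FilteredSum(const combblas::FullyDistSpVec<int64_t, double>& V,
            const combblas::FullyDistVec<int64_t, double>& W)
{
    auto op   = [](double v, double w) { return v + w; };   // _binary_op
    auto doOp = [](double, double w)   { return w > 0.0; }; // _doOp filter
    // allowVNulls = false: operate only on V's nonzeros (intersection with
    // the dense W), so the Vzero argument (0.0) is never consulted.
    return combblas::EWiseApply<double>(V, W, op, doOp, false, 0.0);
}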
omp_workshare1.c
/******************************************************************************
 * FILE: omp_workshare1.c
 * DESCRIPTION:
 *   OpenMP Example - Loop Work-sharing - C/C++ Version
 *   In this example, the iterations of a loop are divided among the team of
 *   threads in chunks of CHUNK iterations (static schedule; a dynamic variant
 *   is left commented out below). A thread performs CHUNK iterations at a
 *   time before moving on to its next CHUNK of work.
 * AUTHOR: Blaise Barney  5/99
 * LAST REVISED: 04/06/05
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define CHUNKSIZE 10
#define N 100

int main (int argc, char *argv[])
{
    int nthreads, tid, i, chunk;
    float a[N], b[N], c[N];

    /* Some initializations */
    for (i=0; i < N; i++)
        a[i] = b[i] = i * 1.0;
    chunk = CHUNKSIZE;

    #pragma omp parallel shared(a,b,c,nthreads,chunk) private(i,tid)
    {
        tid = omp_get_thread_num();
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
        printf("Thread %d starting...\n", tid);

        //#pragma omp for schedule(dynamic,chunk)
        #pragma omp for schedule(static,chunk)
        for (i=0; i<N; i++)
        {
            c[i] = a[i] + b[i];
            printf("Thread %d: c[%d]= %f\n", tid, i, c[i]);
        }
    }  /* end of parallel section */

    return 0;
}
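For contrast with the static schedule used above, a sketch of the same loop with dynamic scheduling and a reduction; this variant is illustrative and not part of the original example.

/* Sketch: same vector add, but with a dynamic schedule (idle threads grab the
 * next CHUNKSIZE iterations as they finish) and a reduction that totals c[]
 * without any critical section. Illustrative only. */
#include <omp.h>
#include <stdio.h>
#define CHUNKSIZE 10
#define N 100

int main(void)
{
    float a[N], b[N], c[N], total = 0.0f;
    for (int i = 0; i < N; i++)
        a[i] = b[i] = i * 1.0f;

    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) reduction(+:total)
    for (int i = 0; i < N; i++) {
        c[i] = a[i] + b[i];
        total += c[i];
    }
    printf("total = %f\n", total);
    return 0;
}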
convolution_pack4to1.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convolution_transform_kernel_pack4to1_neon(const Mat& weight_data, Mat& weight_data_pack4to1, int num_input, int num_output, int kernel_w, int kernel_h) { const int maxk = kernel_w * kernel_h; // src = kw-kh-inch-outch // dst = 4a-kw-kh-inch/4a-outch Mat weight_data_r2 = weight_data.reshape(maxk, num_input, num_output); weight_data_pack4to1.create(maxk, num_input / 4, num_output, (size_t)4 * 4, 4); for (int q = 0; q < num_output; q++) { const Mat k0 = weight_data_r2.channel(q); Mat g0 = weight_data_pack4to1.channel(q); for (int p = 0; p + 3 < num_input; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); float* g00 = g0.row(p / 4); for (int k = 0; k < maxk; k++) { g00[0] = k00[k]; g00[1] = k01[k]; g00[2] = k02[k]; g00[3] = k03[k]; g00 += 4; } } } } static void convolution_pack4to1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack4to1, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; // kernel offsets std::vector<int> _space_ofs(maxk); int* space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } const float* bias_data_ptr = bias_data; // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = 0.f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } const float* kptr = (const float*)weight_data_pack4to1 + maxk * channels * p * 4; // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const float* sptr = m.row(i * stride_h) + j * stride_w * 4; for (int k = 0; k < maxk; k++) // 29.23 { float32x4_t _val = vld1q_f32(sptr + space_ofs[k] * 4); float32x4_t _w = vld1q_f32(kptr); float32x4_t _s4 = vmulq_f32(_val, _w); #if __aarch64__ sum += vaddvq_f32(_s4); // dot #else float32x2_t _ss = vadd_f32(vget_low_f32(_s4), vget_high_f32(_s4)); _ss = vpadd_f32(_ss, _ss); sum += vget_lane_f32(_ss, 0); #endif kptr += 4; } } sum = activation_ss(sum, activation_type, activation_params); outptr[j] = sum; } outptr += outw; } } }
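As a standalone illustration of the space_ofs setup above (outside ncnn, with hypothetical kernel parameters): the table turns a dilated kernel window into flat input offsets, stepping by dilation_w within a row and skipping w*dilation_h - kernel_w*dilation_w between rows.

/* Sketch: dilated kernel offset table, mirroring the space_ofs computation
 * above. For a 3x3 kernel, dilation 2, and input width w=16, offsets step by
 * dilation_w within a row and by the gap between rows. */
#include <stdio.h>

int main(void)
{
    const int w = 16, kernel_w = 3, kernel_h = 3, dilation_w = 2, dilation_h = 2;
    int space_ofs[9];
    int p1 = 0, p2 = 0;
    const int gap = w * dilation_h - kernel_w * dilation_w;
    for (int i = 0; i < kernel_h; i++) {
        for (int j = 0; j < kernel_w; j++) {
            space_ofs[p1++] = p2;
            p2 += dilation_w;
        }
        p2 += gap;
    }
    for (int k = 0; k < 9; k++)
        printf("%d ", space_ofs[k]);  /* prints: 0 2 4 32 34 36 64 66 68 */
    printf("\n");
    return 0;
}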
convolution_1x1_pack1to16.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_pack1to16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; const int size = w * h; Mat bottom_im2col = bottom_blob; bottom_im2col.w = size; bottom_im2col.h = 1; im2col_sgemm_pack1to16_avx512(bottom_im2col, top_blob, kernel, _bias, opt); } static void conv1x1s2_sgemm_pack1to16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = w - 2 * outw + w; Mat bottom_blob_shrinked; bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < channels; p++) { const float* r0 = bottom_blob.channel(p); float* outptr = bottom_blob_shrinked.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { outptr[0] = r0[0]; r0 += 2; outptr += 1; } r0 += tailstep; } } conv1x1s1_sgemm_pack1to16_avx512(bottom_blob_shrinked, top_blob, kernel, _bias, opt); }
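A scalar sketch of the stride-2 shrink step above (single channel, illustrative): every second pixel of every second row is gathered into a dense buffer so the stride-1 sgemm kernel can be reused unchanged.

/* Sketch: stride-2 spatial shrink for one channel, mirroring
 * conv1x1s2_sgemm_pack1to16_avx512 above. After copying outw pixels with
 * stride 2 we have consumed 2*outw inputs; tailstep = (w - 2*outw) + w skips
 * the row remainder plus the following (unsampled) row. */
#include <stdio.h>

static void shrink_s2(const float* in, float* out, int w, int outw, int outh)
{
    const int tailstep = w - 2 * outw + w;
    const float* r0 = in;
    for (int i = 0; i < outh; i++) {
        for (int j = 0; j < outw; j++) {
            *out++ = r0[0];
            r0 += 2;
        }
        r0 += tailstep;
    }
}

int main(void)
{
    float in[4 * 4], out[2 * 2];
    for (int i = 0; i < 16; i++) in[i] = (float)i;
    shrink_s2(in, out, 4, 2, 2);
    for (int i = 0; i < 4; i++) printf("%g ", out[i]);  /* prints: 0 2 8 10 */
    printf("\n");
    return 0;
}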
lmfit.c
#include <stdlib.h> #include <stdio.h> #include <math.h> #include <float.h> #include "lmfit.h" #define MIN(a,b) (((a)<=(b)) ? (a) : (b)) #define MAX(a,b) (((a)>=(b)) ? (a) : (b)) #define SQR(x) (x)*(x) void lm_lmpar(const int n, double *const r, const int ldr, int *const ipvt, double *const diag, double *const qtb, double delta, double *const par, double *const x, double *const sdiag, double *const aux, double *const xdi ); void lm_qrfac(const int m, const int n, double *const a, int *const ipvt, double *const rdiag, double *const acnorm, double *const wa ); void lm_qrsolv(const int n, double *const r, const int ldr, int *const ipvt, double *const diag, double *const qtb, double *const x, double *const sdiag, double *const wa ); /* Numeric constants */ /* machine-dependent constants from float.h */ #define LM_MACHEP DBL_EPSILON /* resolution of arithmetic */ #define LM_DWARF DBL_MIN /* smallest nonzero number */ #define LM_SQRT_DWARF sqrt(DBL_MIN) /* square should not underflow */ #define LM_SQRT_GIANT sqrt(DBL_MAX) /* square should not overflow */ #define LM_USERTOL 30*LM_MACHEP /* users are recommended to require this */ /* If the above values do not work, the following seem good for an x86: LM_MACHEP .555e-16 LM_DWARF 9.9e-324 LM_SQRT_DWARF 1.e-160 LM_SQRT_GIANT 1.e150 LM_USER_TOL 1.e-14 The following values should work on any machine: LM_MACHEP 1.2e-16 LM_DWARF 1.0e-38 LM_SQRT_DWARF 3.834e-20 LM_SQRT_GIANT 1.304e19 LM_USER_TOL 1.e-14 */ const lm_control_struct lm_control_double = { LM_USERTOL, LM_USERTOL, LM_USERTOL, LM_USERTOL, 100., 100, 1, NULL, 0, -1, -1 }; const lm_control_struct lm_control_float = { 1.e-7, 1.e-7, 1.e-7, 1.e-7, 100., 100, 1, NULL, 0, -1, -1 }; /* Message texts (indexed by status.info) */ const char *lm_infmsg[] = { "found zero (sum of squares below underflow limit)", "converged (the relative error in the sum of squares is at most tol)", "converged (the relative error of the parameter vector is at most tol)", "converged (both errors are at most tol)", "trapped (by degeneracy; increasing epsilon might help)", "exhausted (number of function calls exceeding preset patience)", "failed (ftol<tol: cannot reduce sum of squares any further)", "failed (xtol<tol: cannot improve approximate solution any further)", "failed (gtol<tol: cannot improve approximate solution any further)", "crashed (not enough memory)", "exploded (fatal coding error: improper input parameters)", "stopped (break requested within function evaluation)", "found nan (function value is not-a-number or infinite)" }; const char *lm_shortmsg[] = { "found zero", "converged (f)", "converged (p)", "converged (2)", "degenerate", "call limit", "failed (f)", "failed (p)", "failed (o)", "no memory", "invalid input", "user break", "found nan" }; void lm_print_pars(const int nout, const double *par, FILE* fout) { for (int i = 0; i < nout; ++i) fprintf( fout, " %16.9g", par[i] ); fprintf( fout, "\n" ); } /** * Main minimization routine */ void lmmin(const int n, double* x, const int m, const void* data, void (*evaluate)(const double* par, const int m_dat, const void* data, double* fvec, int* userbreak), const lm_control_struct* C, lm_status_struct* S) { int j, i; double actred, dirder, fnorm, fnorm1, gnorm, pnorm, prered, ratio, sum, temp, temp1, temp2, temp3; static double p0001 = 1.0e-4; int maxfev = C->patience * (n + 1); int inner_success; /* flag for loop control */ double lmpar = 0; /* Levenberg-Marquardt parameter */ double delta = 0; double xnorm = 0; double eps = sqrt(MAX(C->epsilon, LM_MACHEP)); /* for 
forward differences */ int nout = C->n_maxpri == -1 ? n : MIN( C->n_maxpri, n ); /* The workaround msgfile=NULL is needed for default initialization */ FILE* msgfile = (FILE*)C->msgfile ? C->msgfile : stdout; /* Default status info; must be set ahead of first return statements */ S->outcome = 0; /* status code */ S->userbreak = 0; S->nfev = 0; /* function evaluation counter */ /* Check input parameters for errors. */ if ( n <= 0 ) { fprintf( stderr, "lmmin: invalid number of parameters %i\n", n ); S->outcome = 10; /* invalid parameter */ return; } if (m < n) { fprintf( stderr, "lmmin: number of data points (%i) " "smaller than number of parameters (%i)\n", m, n ); S->outcome = 10; return; } if (C->ftol < 0 || C->xtol < 0 || C->gtol < 0) { fprintf( stderr, "lmmin: negative tolerance (at least one of %g %g %g)\n", C->ftol, C->xtol, C->gtol ); S->outcome = 10; return; } if (maxfev <= 0) { fprintf( stderr, "lmmin: nonpositive function evaluations limit %i\n", maxfev ); S->outcome = 10; return; } if (C->stepbound <= 0) { fprintf( stderr, "lmmin: nonpositive stepbound %g\n", C->stepbound ); S->outcome = 10; return; } if (C->scale_diag != 0 && C->scale_diag != 1) { fprintf( stderr, "lmmin: logical variable scale_diag=%i, should be 0 or 1\n", C->scale_diag ); S->outcome = 10; return; } /* Allocate work space. */ /* Allocate total workspace with just one system call */ char *ws; if ( ( ws = malloc((2 * m + 5 * n + m * n) * sizeof(double) + n * sizeof(int) ) ) == NULL ) { S->outcome = 9; return; } /* where to store the params for all p + step */ double* par2 = (double*)malloc(n * n * sizeof(double)); double* steps = (double*)malloc(n * sizeof(double)); double* wfs = (double*)malloc(m * n * sizeof(double)); /* Assign workspace segments. */ char *pws = ws; double *fvec = (double*) pws; pws += m * sizeof(double) / sizeof(char); double *diag = (double*) pws; pws += n * sizeof(double) / sizeof(char); double *qtf = (double*) pws; pws += n * sizeof(double) / sizeof(char); double *fjac = (double*) pws; pws += n * m * sizeof(double) / sizeof(char); double *wa1 = (double*) pws; pws += n * sizeof(double) / sizeof(char); double *wa2 = (double*) pws; pws += n * sizeof(double) / sizeof(char); double *wa3 = (double*) pws; pws += n * sizeof(double) / sizeof(char); double *wf = (double*) pws; pws += m * sizeof(double) / sizeof(char); int *ipvt = (int*) pws; pws += n * sizeof(int) / sizeof(char); /* Initialize diag */ // TODO: check whether this is still needed if (!C->scale_diag) { for (j = 0; j < n; j++) diag[j] = 1.; } /* Evaluate function at starting point and calculate norm. */ if( C->verbosity ) { fprintf( msgfile, "lmmin start " ); lm_print_pars( nout, x, msgfile ); } //printf("evaluate: starting\n"); (*evaluate)( x, m, data, fvec, &(S->userbreak) ); if( C->verbosity > 4 ) for( i = 0; i < m; ++i ) fprintf( msgfile, " fvec[%4i] = %18.8g\n", i, fvec[i] ); S->nfev = 1; if ( S->userbreak ) goto terminate; fnorm = lm_enorm(m, fvec); if( C->verbosity ) fprintf( msgfile, " fnorm = %18.8g\n", fnorm ); if( !isfinite(fnorm) ) { if( C->verbosity ) fprintf( msgfile, "nan case 1\n" ); S->outcome = 12; /* nan */ goto terminate; } else if( fnorm <= LM_DWARF ) { S->outcome = 0; /* sum of squares almost zero, nothing to do */ goto terminate; } /* The outer loop: compute gradient, then descend. */ for( int outer = 0; ; ++outer ) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { steps[j] = MAX(eps * eps, eps * fabs(x[j])); par2[i * n + j] = x[j] + ((i == j) ? steps[j] : 0.0); } } /* [outer] Calculate the Jacobian. 
*/ #ifdef LMFIT_OPENMP #pragma omp parallel for #endif for (j = 0; j < n; j++) { (*evaluate)(par2 + n * j, m, data, wfs + m * j, &(S->userbreak)); for (i = 0; i < m; i++) fjac[j * m + i] = (wfs[m * j + i] - fvec[i]) / steps[j]; } S->nfev += n; if (S->userbreak) goto terminate; if (C->verbosity > 6) { /* print the entire matrix */ printf("\nlmmin Jacobian\n"); for (i = 0; i < m; i++) { printf(" "); for (j = 0; j < n; j++) printf("%.5e ", fjac[j * m + i]); printf("\n"); } } /* [outer] Compute the QR factorization of the Jacobian. */ /* fjac is an m by n array. The upper n by n submatrix of fjac * is made to contain an upper triangular matrix R with diagonal * elements of nonincreasing magnitude such that * * P^T*(J^T*J)*P = R^T*R * * (NOTE: ^T stands for matrix transposition), * * where P is a permutation matrix and J is the final calculated * Jacobian. Column j of P is column ipvt(j) of the identity matrix. * The lower trapezoidal part of fjac contains information generated * during the computation of R. * * ipvt is an integer array of length n. It defines a permutation * matrix P such that jac*P = Q*R, where jac is the final calculated * Jacobian, Q is orthogonal (not stored), and R is upper triangular * with diagonal elements of nonincreasing magnitude. Column j of P * is column ipvt(j) of the identity matrix. */ lm_qrfac(m, n, fjac, ipvt, wa1, wa2, wa3); /* return values are ipvt, wa1=rdiag, wa2=acnorm */ /* [outer] Form Q^T * fvec, and store first n components in qtf. */ for (i = 0; i < m; i++) wf[i] = fvec[i]; for (j = 0; j < n; j++) { temp3 = fjac[j * m + j]; if (temp3 != 0) { sum = 0; for (i = j; i < m; i++) sum += fjac[j * m + i] * wf[i]; temp = -sum / temp3; for (i = j; i < m; i++) wf[i] += fjac[j * m + i] * temp; } fjac[j * m + j] = wa1[j]; qtf[j] = wf[j]; } /* [outer] Compute norm of scaled gradient and detect degeneracy. */ gnorm = 0; for (j = 0; j < n; j++) { if (wa2[ipvt[j]] == 0) continue; sum = 0; for (i = 0; i <= j; i++) sum += fjac[j * m + i] * qtf[i]; gnorm = MAX( gnorm, fabs( sum / wa2[ipvt[j]] / fnorm ) ); } if (gnorm <= C->gtol) { S->outcome = 4; goto terminate; } /* [outer] Initialize / update diag and delta. */ if ( !outer ) { /* first iteration only */ if (C->scale_diag) { /* diag := norms of the columns of the initial Jacobian */ for (j = 0; j < n; j++) diag[j] = wa2[j] ? wa2[j] : 1; /* xnorm := || D x || */ for (j = 0; j < n; j++) wa3[j] = diag[j] * x[j]; xnorm = lm_enorm(n, wa3); if( C->verbosity >= 2 ) { fprintf( msgfile, "lmmin diag " ); lm_print_pars( nout, x, msgfile ); // xnorm fprintf( msgfile, " xnorm = %18.8g\n", xnorm ); } /* only now print the header for the loop table */ if( C->verbosity >= 3 ) { fprintf( msgfile, " #o #i lmpar prered ratio dirder delta pnorm fnorm" ); for (i = 0; i < nout; ++i) fprintf( msgfile, " p%i", i ); fprintf( msgfile, "\n" ); } } else { xnorm = lm_enorm(n, x); } if( !isfinite(xnorm) ) { if( C->verbosity ) fprintf( msgfile, "nan case 2\n" ); S->outcome = 12; /* nan */ goto terminate; } /* initialize the step bound delta. */ if ( xnorm ) delta = C->stepbound * xnorm; else delta = C->stepbound; } else { if (C->scale_diag) { for (j = 0; j < n; j++) diag[j] = MAX( diag[j], wa2[j] ); } } /* The inner loop. */ int inner = 0; do { /* [inner] Determine the Levenberg-Marquardt parameter. 
*/ lm_lmpar( n, fjac, m, ipvt, diag, qtf, delta, &lmpar, wa1, wa2, wf, wa3 ); /* used return values are fjac (partly), lmpar, wa1=x, wa3=diag*x */ /* predict scaled reduction */ pnorm = lm_enorm(n, wa3); if( !isfinite(pnorm) ) { if( C->verbosity ) fprintf( msgfile, "nan case 3\n" ); S->outcome = 12; /* nan */ goto terminate; } temp2 = lmpar * SQR( pnorm / fnorm ); for (j = 0; j < n; j++) { wa3[j] = 0; for (i = 0; i <= j; i++) wa3[i] -= fjac[j * m + i] * wa1[ipvt[j]]; } temp1 = SQR( lm_enorm(n, wa3) / fnorm ); if( !isfinite(temp1) ) { if( C->verbosity ) fprintf( msgfile, "nan case 4\n" ); S->outcome = 12; /* nan */ goto terminate; } prered = temp1 + 2 * temp2; dirder = -temp1 + temp2; /* scaled directional derivative */ /* at first call, adjust the initial step bound. */ if ( !outer && pnorm < delta ) delta = pnorm; /* [inner] Evaluate the function at x + p. */ for (j = 0; j < n; j++) wa2[j] = x[j] - wa1[j]; //printf("evaluate: inner\n"); (*evaluate)( wa2, m, data, wf, &(S->userbreak) ); ++(S->nfev); if ( S->userbreak ) goto terminate; fnorm1 = lm_enorm(m, wf); // exceptionally, for this norm we do not test for infinity // because we can deal with it without terminating. /* [inner] Evaluate the scaled reduction. */ /* actual scaled reduction (supports even the case fnorm1=infty) */ actred = fnorm1 < 10 * fnorm ? 1 - SQR(fnorm1 / fnorm) : -1; /* ratio of actual to predicted reduction */ ratio = prered ? actred / prered : 0; if( C->verbosity == 2 ) { fprintf( msgfile, "lmmin (%i:%i) ", outer, inner ); lm_print_pars( nout, wa2, msgfile ); // fnorm1, } else if( C->verbosity >= 3 ) { printf("%3i %2i %9.2g %9.2g %14.6g %9.2g %10.3e %10.3e %21.15e", outer, inner, lmpar, prered, ratio, dirder, delta, pnorm, fnorm1); for (i = 0; i < nout; ++i) fprintf( msgfile, " %16.9g", wa2[i] ); fprintf( msgfile, "\n" ); } /* update the step bound */ if ( ratio <= 0.25 ) { if ( actred >= 0 ) { temp = 0.5; } else if ( actred > -99 ) { /* -99 = 1-1/0.1^2 */ temp = MAX( dirder / (2 * dirder + actred), 0.1 ); } else { temp = 0.1; } delta = temp * MIN(delta, pnorm / 0.1); lmpar /= temp; } else if ( ratio >= 0.75 ) { delta = 2 * pnorm; lmpar *= 0.5; } else if ( !lmpar ) { delta = 2 * pnorm; } /* [inner] On success, update solution, and test for convergence. */ inner_success = ratio >= p0001; if ( inner_success ) { /* update x, fvec, and their norms */ if (C->scale_diag) { for (j = 0; j < n; j++) { x[j] = wa2[j]; wa2[j] = diag[j] * x[j]; } } else { for (j = 0; j < n; j++) x[j] = wa2[j]; } for (i = 0; i < m; i++) fvec[i] = wf[i]; xnorm = lm_enorm(n, wa2); if( !isfinite(xnorm) ) { if( C->verbosity ) fprintf( msgfile, "nan case 6\n" ); S->outcome = 12; /* nan */ goto terminate; } fnorm = fnorm1; } /* convergence tests */ S->outcome = 0; if( fnorm <= LM_DWARF ) goto terminate; /* success: sum of squares almost zero */ /* test two criteria (both may be fulfilled) */ if (fabs(actred) <= C->ftol && prered <= C->ftol && ratio <= 2) S->outcome = 1; /* success: x almost stable */ if (delta <= C->xtol * xnorm) S->outcome += 2; /* success: sum of squares almost stable */ if (S->outcome != 0) { goto terminate; } /* [inner] Tests for termination and stringent tolerances. */ if ( S->nfev >= maxfev ) { S->outcome = 5; goto terminate; } if ( fabs(actred) <= LM_MACHEP && prered <= LM_MACHEP && ratio <= 2 ) { S->outcome = 6; goto terminate; } if ( delta <= LM_MACHEP * xnorm ) { S->outcome = 7; goto terminate; } if ( gnorm <= LM_MACHEP ) { S->outcome = 8; goto terminate; } /* [inner] End of the loop. Repeat if iteration unsuccessful. 
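An unsuccessful iteration (ratio < p0001) leaves x, fvec and fnorm
untouched; delta has already been shrunk and lmpar enlarged above, so the
retry computes a shorter, more gradient-like step from the same point.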
*/ ++inner; } while ( !inner_success ); /* [outer] End of the loop. */ }; terminate: S->fnorm = lm_enorm(m, fvec); if ( C->verbosity >= 2 ) printf("lmmin outcome (%i) xnorm %g ftol %g xtol %g\n", S->outcome, xnorm, C->ftol, C->xtol ); if( C->verbosity & 1 ) { fprintf( msgfile, "lmmin final " ); lm_print_pars( nout, x, msgfile ); // S->fnorm, fprintf( msgfile, " fnorm = %18.8g\n", S->fnorm ); } if ( S->userbreak ) /* user-requested break */ S->outcome = 11; free(ws); free(steps); free(par2); } /** * Determine Levenberg-Marquardt parameter. * Given an m by n matrix A, an n by n nonsingular diagonal matrix D, * an m-vector b, and a positive number delta, the problem is to * determine a parameter value par such that if x solves the system * A*x = b and sqrt(par)*D*x = 0 * in the least squares sense, and dxnorm is the euclidean * norm of D*x, then either par=0 and (dxnorm-delta) < 0.1*delta, * or par>0 and abs(dxnorm-delta) < 0.1*delta. * * Using lm_qrsolv, this subroutine completes the solution of the * problem if it is provided with the necessary information from * the QR factorization, with column pivoting, of A. That is, if * A*P = Q*R, where P is a permutation matrix, Q has orthogonal * columns, and R is an upper triangular matrix with diagonal * elements of nonincreasing magnitude, then lmpar expects the * full upper triangle of R, the permutation matrix P, and the * first n components of Q^T*b. On output lmpar also provides an * upper triangular matrix S such that * * P^T*(A^T*A + par*D*D)*P = S^T*S. * * S is employed within lmpar and may be of separate interest. * * Only a few iterations are generally needed for convergence * of the algorithm. If, however, the limit of 10 iterations * is reached, then the output par will contain the best value * obtained so far. * * @param n positive integer INPUT variable set to the order of r. * * @param r n by n array. On INPUT the full upper triangle * must contain the full upper triangle of the matrix R. * On OUTPUT the full upper triangle is unaltered, and the * strict lower triangle contains the strict upper triangle * (transposed) of the upper triangular matrix S. * * @param ldr positive integer INPUT variable not less than n * which specifies the leading dimension of the array R. * * @param ipvt integer INPUT array of length n which defines the * permutation matrix P such that A*P = Q*R. Column j of P * is column ipvt(j) of the identity matrix. * * @param diag INPUT array of length n which must contain the * diagonal elements of the matrix D. * * @param qtb INPUT array of length n which must contain the first * n elements of the vector Q^T*b. * * @param delta positive INPUT variable which specifies an upper * bound on the euclidean norm of D*x. * * @param par nonnegative variable. On INPUT par contains an * initial estimate of the Levenberg-Marquardt parameter. * On OUTPUT par contains the final estimate. * * @param x OUTPUT array of length n which contains the least * squares solution of the system A*x = b, sqrt(par)*D*x = 0, * for the output par. * * @param sdiag array of length n needed as workspace; on OUTPUT * it contains the diagonal elements of the upper triangular * matrix S. * * @param aux multi-purpose work array of length n. * * @param xdi work array of length n. On OUTPUT: diag[j] * x[j]. 
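 *
 * Note: internally, the routine runs a safeguarded Newton iteration on the
 * secular function phi(par) = ||D*x(par)|| - delta, evaluating x(par) via
 * lm_qrsolv, keeping par bracketed in [parl, paru], and giving up after at
 * most 10 iterations.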
*/ void lm_lmpar(const int n, double *const r, const int ldr, int *const ipvt, double *const diag, double *const qtb, double delta, double *const par, double *const x, double *const sdiag, double *const aux, double *const xdi) { int i, iter, j, nsing; double dxnorm, fp, fp_old, gnorm, parc, parl, paru; double sum, temp; static double p1 = 0.1; /* lmpar: compute and store in x the gauss-newton direction. if the jacobian is rank-deficient, obtain a least squares solution. */ nsing = n; for (j = 0; j < n; j++) { aux[j] = qtb[j]; if (r[j * ldr + j] == 0 && nsing == n) nsing = j; if (nsing < n) aux[j] = 0; } for (j = nsing - 1; j >= 0; j--) { aux[j] = aux[j] / r[j + ldr * j]; temp = aux[j]; for (i = 0; i < j; i++) aux[i] -= r[j * ldr + i] * temp; } for (j = 0; j < n; j++) x[ipvt[j]] = aux[j]; /* lmpar: initialize the iteration counter, evaluate the function at the origin, and test for acceptance of the gauss-newton direction. */ for (j = 0; j < n; j++) xdi[j] = diag[j] * x[j]; dxnorm = lm_enorm(n, xdi); fp = dxnorm - delta; if (fp <= p1 * delta) { #ifdef LMFIT_DEBUG_MESSAGES printf("debug lmpar nsing %d n %d, terminate (fp<p1*delta)\n", nsing, n); #endif *par = 0; return; } /* lmpar: if the jacobian is not rank deficient, the newton step provides a lower bound, parl, for the zero of the function. otherwise set this bound to zero. */ parl = 0; if (nsing >= n) { for (j = 0; j < n; j++) aux[j] = diag[ipvt[j]] * xdi[ipvt[j]] / dxnorm; for (j = 0; j < n; j++) { sum = 0; for (i = 0; i < j; i++) sum += r[j * ldr + i] * aux[i]; aux[j] = (aux[j] - sum) / r[j + ldr * j]; } temp = lm_enorm(n, aux); parl = fp / delta / temp / temp; } /* lmpar: calculate an upper bound, paru, for the zero of the function. */ for (j = 0; j < n; j++) { sum = 0; for (i = 0; i <= j; i++) sum += r[j * ldr + i] * qtb[i]; aux[j] = sum / diag[ipvt[j]]; } gnorm = lm_enorm(n, aux); paru = gnorm / delta; if (paru == 0) paru = LM_DWARF / MIN(delta, p1); /* lmpar: if the input par lies outside of the interval (parl,paru), set par to the closer endpoint. */ *par = MAX(*par, parl); *par = MIN(*par, paru); if (*par == 0) *par = gnorm / dxnorm; /* lmpar: iterate. */ for (iter = 0; ; iter++) { /** evaluate the function at the current value of par. **/ if (*par == 0) *par = MAX(LM_DWARF, 0.001 * paru); temp = sqrt(*par); for (j = 0; j < n; j++) aux[j] = temp * diag[j]; lm_qrsolv( n, r, ldr, ipvt, aux, qtb, x, sdiag, xdi ); /* return values are r, x, sdiag */ for (j = 0; j < n; j++) xdi[j] = diag[j] * x[j]; /* used as output */ dxnorm = lm_enorm(n, xdi); fp_old = fp; fp = dxnorm - delta; /** if the function is small enough, accept the current value of par. Also test for the exceptional cases where parl is zero or the number of iterations has reached 10. **/ if (fabs(fp) <= p1 * delta || (parl == 0 && fp <= fp_old && fp_old < 0) || iter == 10) { #ifdef LMFIT_DEBUG_MESSAGES printf("debug lmpar nsing %d iter %d par %.4e [%.4e %.4e] delta %.4e fp %.4e\n", nsing, iter, *par, parl, paru, delta, fp); #endif break; /* the only exit from the iteration. */ } /** compute the Newton correction. **/ for (j = 0; j < n; j++) aux[j] = diag[ipvt[j]] * xdi[ipvt[j]] / dxnorm; for (j = 0; j < n; j++) { aux[j] = aux[j] / sdiag[j]; for (i = j + 1; i < n; i++) aux[i] -= r[j * ldr + i] * aux[j]; } temp = lm_enorm(n, aux); parc = fp / delta / temp / temp; /** depending on the sign of the function, update parl or paru. 
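Since increasing par shortens the step, fp = ||D*x|| - delta > 0 means par
is still too small and becomes the new lower bound parl; fp < 0 means par
is too large and tightens the upper bound paru.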
**/ if (fp > 0) parl = MAX(parl, *par); else if (fp < 0) paru = MIN(paru, *par); /* the case fp==0 is precluded by the break condition */ /** compute an improved estimate for par. **/ *par = MAX(parl, *par + parc); } } /** * QR factorization, from lapack. * This subroutine uses Householder transformations with column pivoting * to compute a QR factorization of the m by n matrix A. That is, qrfac * determines an orthogonal matrix Q, a permutation matrix P, and an * upper trapezoidal matrix R with diagonal elements of nonincreasing * magnitude, such that A*P = Q*R. The Householder transformation for * column k, k = 1,2,...,n, is of the form * * I - 2*w*wT/|w|^2 * * where w has zeroes in the first k-1 positions. * * @param m INPUT parameter set to the number of rows of A. * * @param n INPUT parameter set to the number of columns of A. * * @param A m by n array. On INPUT, A contains the matrix for * which the QR factorization is to be computed. On OUTPUT * the strict upper trapezoidal part of A contains the strict * upper trapezoidal part of R, and the lower trapezoidal * part of A contains a factored form of Q (the non-trivial * elements of the vectors w described above). * * @param Pivot integer OUTPUT array of length n that describes the * permutation matrix P: * Column j of P is column ipvt(j) of the identity matrix. * * @param Rdiag OUTPUT array of length n which contains the * diagonal elements of R. * * @param Acnorm OUTPUT array of length n which contains the norms * of the corresponding columns of the input matrix A. If this * information is not needed, then Acnorm can share storage with Rdiag. * * @param W work array of length n. */ void lm_qrfac(const int m, const int n, double *const A, int *const Pivot, double *const Rdiag, double *const Acnorm, double *const W) { int i, j, k, kmax; double ajnorm, sum, temp; #ifdef LMFIT_DEBUG_MESSAGES printf("debug qrfac\n"); #endif /* Compute initial column norms; initialize Pivot with identity permutation. */ for (j = 0; j < n; j++) { W[j] = Rdiag[j] = Acnorm[j] = lm_enorm(m, &A[j * m]); Pivot[j] = j; } /* Loop over columns of A. */ // assert( n <= m ); for (j = 0; j < n; j++) { /* Bring the column of largest norm into the pivot position. */ kmax = j; for (k = j + 1; k < n; k++) if (Rdiag[k] > Rdiag[kmax]) kmax = k; if (kmax != j) { /* Swap columns j and kmax. */ k = Pivot[j]; Pivot[j] = Pivot[kmax]; Pivot[kmax] = k; for (i = 0; i < m; i++) { temp = A[j * m + i]; A[j * m + i] = A[kmax * m + i]; A[kmax * m + i] = temp; } /* Half-swap: Rdiag[j], W[j] won't be needed any further. */ Rdiag[kmax] = Rdiag[j]; W[kmax] = W[j]; } /* Compute the Householder reflection vector w_j to reduce the j-th column of A to a multiple of the j-th unit vector. */ ajnorm = lm_enorm(m - j, &A[j * m + j]); if (ajnorm == 0) { Rdiag[j] = 0; continue; } /* Let the partial column vector A[j][j:] contain w_j := e_j+-a_j/|a_j|, where the sign +- is chosen to avoid cancellation in w_jj. */ if (A[j * m + j] < 0) ajnorm = -ajnorm; for (i = j; i < m; i++) A[j * m + i] /= ajnorm; A[j * m + j] += 1; /* Apply the Householder transformation U_w := 1 - 2*w_j.w_j/|w_j|^2 to the remaining columns, and update the norms. */ for (k = j + 1; k < n; k++) { /* Compute scalar product w_j * a_j. */ sum = 0; for (i = j; i < m; i++) sum += A[j * m + i] * A[k * m + i]; /* Normalization is simplified by the coincidence |w_j|^2=2w_jj. */ temp = sum / A[j * m + j]; /* Carry out transform U_w_j * a_k. */ for (i = j; i < m; i++) A[k * m + i] -= temp * A[j * m + i]; /* No idea what happens here. 
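Actually this is the classic MINPACK norm downdate: after the reflection,
the norm of the remainder of column k (below row j) is obtained by removing
row j's contribution from Rdiag[k], via the sqrt(1 - SQR(temp)) factor
below. When accumulated downdates risk cancellation -- the
0.05 * SQR(temp) <= LM_MACHEP test against W[k], the norm from the last
full computation -- Rdiag[k] is recomputed from scratch with lm_enorm.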
*/ if (Rdiag[k] != 0) { temp = A[m * k + j] / Rdiag[k]; if ( fabs(temp) < 1 ) { Rdiag[k] *= sqrt(1 - SQR(temp)); temp = Rdiag[k] / W[k]; } else temp = 0; if ( temp == 0 || 0.05 * SQR(temp) <= LM_MACHEP ) { Rdiag[k] = lm_enorm(m - j - 1, &A[m * k + j + 1]); W[k] = Rdiag[k]; } } } Rdiag[j] = -ajnorm; } } /** * Linear least-squares. * Given an m by n matrix A, an n by n diagonal matrix D, and an * m-vector b, the problem is to determine an x which solves the * system A*x = b and D*x = 0 in the least squares sense. * This subroutine completes the solution of the problem if it is * provided with the necessary information from the QR factorization, * with column pivoting, of A. That is, if A*P = Q*R, where P is a * permutation matrix, Q has orthogonal columns, and R is an upper * triangular matrix with diagonal elements of nonincreasing magnitude, * then qrsolv expects the full upper triangle of R, the permutation * matrix P, and the first n components of Q^T*b. The system * A*x = b, D*x = 0, is then equivalent to * * R*z = Q^T*b, P^T*D*P*z = 0, * * where x = P*z. If this system does not have full rank, then a least * squares solution is obtained. On output qrsolv also provides an upper * triangular matrix S such that * * P^T*(A^T*A + D*D)*P = S^T*S. * * S is computed within qrsolv and may be of separate interest. * * @param n a positive integer INPUT variable set to the order of R. * * @param r an n by n array. On INPUT the full upper triangle must * contain the full upper triangle of the matrix R. On OUTPUT * the full upper triangle is unaltered, and the strict lower * triangle contains the strict upper triangle (transposed) of * the upper triangular matrix S. * * @param ldr a positive integer INPUT variable not less than n * which specifies the leading dimension of the array R. * * @param ipvt an integer INPUT array of length n which defines the * permutation matrix P such that A*P = Q*R. Column j of P * is column ipvt(j) of the identity matrix. * * @param diag an INPUT array of length n which must contain the * diagonal elements of the matrix D. * * @param qtb an INPUT array of length n which must contain the first * n elements of the vector Q^T*b. * * @param x an OUTPUT array of length n which contains the least * squares solution of the system A*x = b, D*x = 0. * * @param sdiag an OUTPUT array of length n which contains the * diagonal elements of the upper triangular matrix S. * * @param wa a work array of length n. */ void lm_qrsolv(const int n, double *const r, const int ldr, int *const ipvt, double *const diag, double *const qtb, double *const x, double *const sdiag, double *const wa) { int i, kk, j, k, nsing; double qtbpj, sum, temp; double _sin, _cos, _tan, _cot; /* local variables, not functions */ /* qrsolv: copy R and Q^T*b to preserve input and initialize S. In particular, save the diagonal elements of R in x. */ for (j = 0; j < n; j++) { for (i = j; i < n; i++) r[j * ldr + i] = r[i * ldr + j]; x[j] = r[j * ldr + j]; wa[j] = qtb[j]; } /* qrsolv: eliminate the diagonal matrix D using a Givens rotation. */ for (j = 0; j < n; j++) { /* qrsolv: prepare the row of D to be eliminated, locating the diagonal element using P from the QR factorization. */ if (diag[ipvt[j]] == 0) goto L90; for (k = j; k < n; k++) sdiag[k] = 0; sdiag[j] = diag[ipvt[j]]; /* qrsolv: the transformations to eliminate the row of D modify only a single element of Q^T*b beyond the first n, which is initially 0. 
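That element is carried in qtbpj: each Givens rotation below mixes row k of
the triangle with the row of D being eliminated, and simultaneously rotates
the pair (wa[k], qtbpj) so that the right-hand side stays consistent.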
*/ qtbpj = 0; for (k = j; k < n; k++) { /* determine a Givens rotation which eliminates the appropriate element in the current row of D. */ if (sdiag[k] == 0) continue; kk = k + ldr * k; if (fabs(r[kk]) < fabs(sdiag[k])) { _cot = r[kk] / sdiag[k]; _sin = 1 / sqrt(1 + SQR(_cot)); _cos = _sin * _cot; } else { _tan = sdiag[k] / r[kk]; _cos = 1 / sqrt(1 + SQR(_tan)); _sin = _cos * _tan; } /* compute the modified diagonal element of R and the modified element of (Q^T*b,0). */ r[kk] = _cos * r[kk] + _sin * sdiag[k]; temp = _cos * wa[k] + _sin * qtbpj; qtbpj = -_sin * wa[k] + _cos * qtbpj; wa[k] = temp; /* accumulate the tranformation in the row of S. */ for (i = k + 1; i < n; i++) { temp = _cos * r[k * ldr + i] + _sin * sdiag[i]; sdiag[i] = -_sin * r[k * ldr + i] + _cos * sdiag[i]; r[k * ldr + i] = temp; } } L90: /* store the diagonal element of S and restore the corresponding diagonal element of R. */ sdiag[j] = r[j * ldr + j]; r[j * ldr + j] = x[j]; } /* qrsolv: solve the triangular system for z. If the system is singular, then obtain a least squares solution. */ nsing = n; for (j = 0; j < n; j++) { if (sdiag[j] == 0 && nsing == n) nsing = j; if (nsing < n) wa[j] = 0; } for (j = nsing - 1; j >= 0; j--) { sum = 0; for (i = j + 1; i < nsing; i++) sum += r[j * ldr + i] * wa[i]; wa[j] = (wa[j] - sum) / sdiag[j]; } /* qrsolv: permute the components of z back to components of x. */ for (j = 0; j < n; j++) x[ipvt[j]] = wa[j]; } /** * Calculate the Euclidean norm of an n-vector x. * * @param n a positive integer INPUT variable. * @param x an INPUT array of length n. */ double lm_enorm(const int n, const double* x) { int i; double agiant, s1, s2, s3, xabs, x1max, x3max, temp; s1 = 0; s2 = 0; s3 = 0; x1max = 0; x3max = 0; agiant = LM_SQRT_GIANT / n; for (i = 0; i < n; i++) { xabs = fabs(x[i]); if (xabs > LM_SQRT_DWARF) { if ( xabs < agiant ) { s2 += xabs * xabs; } else if ( xabs > x1max ) { temp = x1max / xabs; s1 = 1 + s1 * SQR(temp); x1max = xabs; } else { temp = xabs / x1max; s1 += SQR(temp); } } else if ( xabs > x3max ) { temp = x3max / xabs; s3 = 1 + s3 * SQR(temp); x3max = xabs; } else if (xabs != 0) { temp = xabs / x3max; s3 += SQR(temp); } } if (s1 != 0) return x1max * sqrt(s1 + (s2 / x1max) / x1max); else if (s2 != 0) if (s2 >= x3max) return sqrt(s2 * (1 + (x3max / s2) * (x3max * s3))); else return sqrt(x3max * ((s2 / x3max) + (x3max * s3))); else return x3max * sqrt(s3); } /* lmcurve */ typedef struct { const double *const t; const double *const y; double (*const f) (const double t, const double *par); } lmcurve_data_struct; void lmcurve_evaluate(const double *const par, const int m_dat, const void *const data, double *const fvec, int *const info) { for (int i = 0; i < m_dat; i++ ) fvec[i] = ((lmcurve_data_struct*)data)->y[i] - ((lmcurve_data_struct*)data)->f( ((lmcurve_data_struct*)data)->t[i], par ); } void lmcurve(const int n_par, double* par, const int m_dat, const double* t, const double* y, double (*f)(const double t, const double* par), const lm_control_struct* control, lm_status_struct* status) { lmcurve_data_struct data = {t, y, f}; lmmin( n_par, par, m_dat, (const void *) &data, lmcurve_evaluate, control, status ); } /* lmcurve_tyd */ typedef struct { const double* t; const double* y; const double* dy; double (*f)(const double t, const double* par); } lmcurve_tyd_data_struct; void lmcurve_tyd_evaluate(const double* par, const int m_dat, const void* data, double* fvec, int* info) { lmcurve_tyd_data_struct* D = (lmcurve_tyd_data_struct*)data; int i; for (i = 0; i < m_dat; i++) fvec[i] 
= ( D->y[i] - D->f(D->t[i], par) ) / D->dy[i]; } void lmcurve_tyd(const int n_par, double* par, const int m_dat, const double* t, const double* y, const double* dy, double (*f)(const double t, const double* par), const lm_control_struct* control, lm_status_struct* status) { lmcurve_tyd_data_struct data = { t, y, dy, f }; lmmin(n_par, par, m_dat, (const void*)&data, lmcurve_tyd_evaluate, control, status); }
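/*
 * Usage sketch (not part of the library): fitting a straight line with
 * lmcurve(). Guarded by LMFIT_USAGE_EXAMPLE so that this translation unit
 * still compiles without a stray main(). The header name "lmcurve.h" and
 * the preset lm_control_double are assumed from the usual lmfit
 * distribution; if your build differs, declare the prototypes and fill an
 * lm_control_struct by hand instead.
 */
#ifdef LMFIT_USAGE_EXAMPLE
#include <stdio.h>
#include "lmcurve.h" /* assumed lmfit header providing lmcurve and the structs */

/* model: y(t) = p0 + p1*t */
static double line_model(const double t, const double* p)
{
    return p[0] + p[1] * t;
}

int main(void)
{
    double par[2] = { 100, -1 };  /* deliberately poor starting guess */
    double t[6] = { 1, 2, 3, 4, 5, 6 };
    double y[6] = { 3.1, 4.9, 7.2, 9.1, 10.8, 13.2 }; /* roughly 1 + 2*t */

    lm_control_struct control = lm_control_double; /* assumed lmfit preset */
    control.verbosity = 0;

    lm_status_struct status;
    lmcurve(2, par, 6, t, y, line_model, &control, &status);

    /* status fields set by lmmin: outcome, nfev, fnorm */
    printf("outcome %i after %i evaluations, residual norm %g\n",
           status.outcome, status.nfev, status.fnorm);
    printf("fit: y = %g + %g * t\n", par[0], par[1]);
    return 0;
}
#endif /* LMFIT_USAGE_EXAMPLE */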
mapOptmization.h
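// Map optimization for LIO-SAM: scan-to-map registration of downsampled
// corner and surface features against a local keyframe map, keyframe
// selection, and GTSAM/iSAM2 factor-graph smoothing over odometry, GPS,
// and ICP loop-closure factors.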
#pragma once #include "utility.h" #include "lio_sam/cloud_info.h" #include <gtsam/geometry/Rot3.h> #include <gtsam/geometry/Pose3.h> #include <gtsam/slam/PriorFactor.h> #include <gtsam/slam/BetweenFactor.h> #include <gtsam/navigation/GPSFactor.h> #include <gtsam/navigation/ImuFactor.h> #include <gtsam/navigation/CombinedImuFactor.h> #include <gtsam/nonlinear/NonlinearFactorGraph.h> #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h> #include <gtsam/nonlinear/Marginals.h> #include <gtsam/nonlinear/Values.h> #include <gtsam/inference/Symbol.h> #include <gtsam/nonlinear/ISAM2.h> using namespace gtsam; using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y) using symbol_shorthand::V; // Vel (xdot,ydot,zdot) using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz) using symbol_shorthand::G; // GPS pose /* * A point cloud type that has 6D pose info ([x,y,z,roll,pitch,yaw] intensity is time stamp) */ struct PointXYZIRPYT { PCL_ADD_POINT4D PCL_ADD_INTENSITY; // preferred way of adding a XYZ+padding float roll; float pitch; float yaw; double time; EIGEN_MAKE_ALIGNED_OPERATOR_NEW // make sure our new allocators are aligned } EIGEN_ALIGN16; // enforce SSE padding for correct memory alignment POINT_CLOUD_REGISTER_POINT_STRUCT (PointXYZIRPYT, (float, x, x) (float, y, y) (float, z, z) (float, intensity, intensity) (float, roll, roll) (float, pitch, pitch) (float, yaw, yaw) (double, time, time)) typedef PointXYZIRPYT PointTypePose; class mapOptimization : public ParamServer { public: // gtsam NonlinearFactorGraph gtSAMgraph; Values initialEstimate; Values optimizedEstimate; ISAM2 *isam; Values isamCurrentEstimate; Eigen::MatrixXd poseCovariance; ros::Publisher pubLaserCloudSurround; ros::Publisher pubLaserOdometryGlobal; ros::Publisher pubLaserOdometryIncremental; ros::Publisher pubKeyPoses; ros::Publisher pubPath; ros::Publisher pubHistoryKeyFrames; ros::Publisher pubIcpKeyFrames; ros::Publisher pubRecentKeyFrames; ros::Publisher pubRecentKeyFrame; ros::Publisher pubCloudRegisteredRaw; ros::Publisher pubLoopConstraintEdge; ros::Subscriber subLaserCloudInfo; ros::Subscriber subGPS; std::deque<nav_msgs::Odometry> gpsQueue; lio_sam::cloud_info cloudInfo; vector<pcl::PointCloud<PointType>::Ptr> cornerCloudKeyFrames; vector<pcl::PointCloud<PointType>::Ptr> surfCloudKeyFrames; pcl::PointCloud<PointType>::Ptr cloudKeyPoses3D; pcl::PointCloud<PointTypePose>::Ptr cloudKeyPoses6D; pcl::PointCloud<PointType>::Ptr copy_cloudKeyPoses3D; pcl::PointCloud<PointTypePose>::Ptr copy_cloudKeyPoses6D; pcl::PointCloud<PointType>::Ptr laserCloudCornerLast; // corner feature set from odoOptimization pcl::PointCloud<PointType>::Ptr laserCloudSurfLast; // surf feature set from odoOptimization pcl::PointCloud<PointType>::Ptr laserCloudCornerLastDS; // downsampled corner featuer set from odoOptimization pcl::PointCloud<PointType>::Ptr laserCloudSurfLastDS; // downsampled surf featuer set from odoOptimization pcl::PointCloud<PointType>::Ptr laserCloudOri; pcl::PointCloud<PointType>::Ptr coeffSel; std::vector<PointType> laserCloudOriCornerVec; // corner point holder for parallel computation std::vector<PointType> coeffSelCornerVec; std::vector<bool> laserCloudOriCornerFlag; std::vector<PointType> laserCloudOriSurfVec; // surf point holder for parallel computation std::vector<PointType> coeffSelSurfVec; std::vector<bool> laserCloudOriSurfFlag; pcl::PointCloud<PointType>::Ptr laserCloudCornerFromMap; pcl::PointCloud<PointType>::Ptr laserCloudSurfFromMap; pcl::PointCloud<PointType>::Ptr laserCloudCornerFromMapDS; 
pcl::PointCloud<PointType>::Ptr laserCloudSurfFromMapDS; pcl::KdTreeFLANN<PointType>::Ptr kdtreeCornerFromMap; pcl::KdTreeFLANN<PointType>::Ptr kdtreeSurfFromMap; pcl::KdTreeFLANN<PointType>::Ptr kdtreeSurroundingKeyPoses; pcl::KdTreeFLANN<PointType>::Ptr kdtreeHistoryKeyPoses; pcl::PointCloud<PointType>::Ptr latestKeyFrameCloud; pcl::PointCloud<PointType>::Ptr nearHistoryKeyFrameCloud; pcl::VoxelGrid<PointType> downSizeFilterCorner; pcl::VoxelGrid<PointType> downSizeFilterSurf; pcl::VoxelGrid<PointType> downSizeFilterICP; pcl::VoxelGrid<PointType> downSizeFilterSurroundingKeyPoses; // for surrounding key poses of scan-to-map optimization ros::Time timeLaserInfoStamp; double timeLaserCloudInfoLast; float transformTobeMapped[6]; std::mutex mtx; bool isDegenerate = false; Eigen::Matrix<float, 6, 6> matP; int laserCloudCornerFromMapDSNum = 0; int laserCloudSurfFromMapDSNum = 0; int laserCloudCornerLastDSNum = 0; int laserCloudSurfLastDSNum = 0; bool aLoopIsClosed = false; map<int, int> loopIndexContainer; // from new to old vector<pair<int, int>> loopIndexQueue; vector<gtsam::Pose3> loopPoseQueue; vector<gtsam::noiseModel::Diagonal::shared_ptr> loopNoiseQueue; nav_msgs::Path globalPath; Eigen::Affine3f transPointAssociateToMap; Eigen::Affine3f incrementalOdometryAffineFront; Eigen::Affine3f incrementalOdometryAffineBack; mapOptimization() { ISAM2Params parameters; parameters.relinearizeThreshold = 0.1; parameters.relinearizeSkip = 1; isam = new ISAM2(parameters); pubKeyPoses = nh.advertise<sensor_msgs::PointCloud2>("lio_sam/mapping/trajectory", 1); pubLaserCloudSurround = nh.advertise<sensor_msgs::PointCloud2>("lio_sam/mapping/map_global", 1); pubLaserOdometryGlobal = nh.advertise<nav_msgs::Odometry> ("lio_sam/mapping/odometry", 1); pubLaserOdometryIncremental = nh.advertise<nav_msgs::Odometry> ("lio_sam/mapping/odometry_incremental", 1); pubPath = nh.advertise<nav_msgs::Path>("lio_sam/mapping/path", 1); subLaserCloudInfo = nh.subscribe<lio_sam::cloud_info>("lio_sam/feature/cloud_info", 1, &mapOptimization::laserCloudInfoHandler, this, ros::TransportHints().tcpNoDelay()); subGPS = nh.subscribe<nav_msgs::Odometry> (gpsTopic, 200, &mapOptimization::gpsHandler, this, ros::TransportHints().tcpNoDelay()); pubHistoryKeyFrames = nh.advertise<sensor_msgs::PointCloud2>("lio_sam/mapping/icp_loop_closure_history_cloud", 1); pubIcpKeyFrames = nh.advertise<sensor_msgs::PointCloud2>("lio_sam/mapping/icp_loop_closure_corrected_cloud", 1); pubLoopConstraintEdge = nh.advertise<visualization_msgs::MarkerArray>("/lio_sam/mapping/loop_closure_constraints", 1); pubRecentKeyFrames = nh.advertise<sensor_msgs::PointCloud2>("lio_sam/mapping/map_local", 1); pubRecentKeyFrame = nh.advertise<sensor_msgs::PointCloud2>("lio_sam/mapping/cloud_registered", 1); pubCloudRegisteredRaw = nh.advertise<sensor_msgs::PointCloud2>("lio_sam/mapping/cloud_registered_raw", 1); downSizeFilterCorner.setLeafSize(mappingCornerLeafSize, mappingCornerLeafSize, mappingCornerLeafSize); downSizeFilterSurf.setLeafSize(mappingSurfLeafSize, mappingSurfLeafSize, mappingSurfLeafSize); downSizeFilterICP.setLeafSize(mappingSurfLeafSize, mappingSurfLeafSize, mappingSurfLeafSize); downSizeFilterSurroundingKeyPoses.setLeafSize(surroundingKeyframeDensity, surroundingKeyframeDensity, surroundingKeyframeDensity); // for surrounding key poses of scan-to-map optimization allocateMemory(); } void allocateMemory() { cloudKeyPoses3D.reset(new pcl::PointCloud<PointType>()); cloudKeyPoses6D.reset(new pcl::PointCloud<PointTypePose>()); 
copy_cloudKeyPoses3D.reset(new pcl::PointCloud<PointType>()); copy_cloudKeyPoses6D.reset(new pcl::PointCloud<PointTypePose>()); kdtreeSurroundingKeyPoses.reset(new pcl::KdTreeFLANN<PointType>()); kdtreeHistoryKeyPoses.reset(new pcl::KdTreeFLANN<PointType>()); laserCloudCornerLast.reset(new pcl::PointCloud<PointType>()); // corner feature set from odoOptimization laserCloudSurfLast.reset(new pcl::PointCloud<PointType>()); // surf feature set from odoOptimization laserCloudCornerLastDS.reset(new pcl::PointCloud<PointType>()); // downsampled corner featuer set from odoOptimization laserCloudSurfLastDS.reset(new pcl::PointCloud<PointType>()); // downsampled surf featuer set from odoOptimization laserCloudOri.reset(new pcl::PointCloud<PointType>()); coeffSel.reset(new pcl::PointCloud<PointType>()); laserCloudOriCornerVec.resize(N_SCAN * Horizon_SCAN); coeffSelCornerVec.resize(N_SCAN * Horizon_SCAN); laserCloudOriCornerFlag.resize(N_SCAN * Horizon_SCAN); laserCloudOriSurfVec.resize(N_SCAN * Horizon_SCAN); coeffSelSurfVec.resize(N_SCAN * Horizon_SCAN); laserCloudOriSurfFlag.resize(N_SCAN * Horizon_SCAN); std::fill(laserCloudOriCornerFlag.begin(), laserCloudOriCornerFlag.end(), false); std::fill(laserCloudOriSurfFlag.begin(), laserCloudOriSurfFlag.end(), false); laserCloudCornerFromMap.reset(new pcl::PointCloud<PointType>()); laserCloudSurfFromMap.reset(new pcl::PointCloud<PointType>()); laserCloudCornerFromMapDS.reset(new pcl::PointCloud<PointType>()); laserCloudSurfFromMapDS.reset(new pcl::PointCloud<PointType>()); kdtreeCornerFromMap.reset(new pcl::KdTreeFLANN<PointType>()); kdtreeSurfFromMap.reset(new pcl::KdTreeFLANN<PointType>()); latestKeyFrameCloud.reset(new pcl::PointCloud<PointType>()); nearHistoryKeyFrameCloud.reset(new pcl::PointCloud<PointType>()); for (int i = 0; i < 6; ++i){ transformTobeMapped[i] = 0; } matP.setZero(); } void laserCloudInfoHandler(const lio_sam::cloud_infoConstPtr& msgIn) { // extract time stamp timeLaserInfoStamp = msgIn->header.stamp; timeLaserCloudInfoLast = msgIn->header.stamp.toSec(); // extract info and feature cloud cloudInfo = *msgIn; pcl::fromROSMsg(msgIn->cloud_corner, *laserCloudCornerLast); pcl::fromROSMsg(msgIn->cloud_surface, *laserCloudSurfLast); std::lock_guard<std::mutex> lock(mtx); static double timeLastProcessing = -1; if (timeLaserCloudInfoLast - timeLastProcessing >= mappingProcessInterval) { timeLastProcessing = timeLaserCloudInfoLast; updateInitialGuess(); extractSurroundingKeyFrames(); downsampleCurrentScan(); scan2MapOptimization(); saveKeyFramesAndFactor(); correctPoses(); publishOdometry(); publishFrames(); } } void gpsHandler(const nav_msgs::Odometry::ConstPtr& gpsMsg) { gpsQueue.push_back(*gpsMsg); } void pointAssociateToMap(PointType const * const pi, PointType * const po) { po->x = transPointAssociateToMap(0,0) * pi->x + transPointAssociateToMap(0,1) * pi->y + transPointAssociateToMap(0,2) * pi->z + transPointAssociateToMap(0,3); po->y = transPointAssociateToMap(1,0) * pi->x + transPointAssociateToMap(1,1) * pi->y + transPointAssociateToMap(1,2) * pi->z + transPointAssociateToMap(1,3); po->z = transPointAssociateToMap(2,0) * pi->x + transPointAssociateToMap(2,1) * pi->y + transPointAssociateToMap(2,2) * pi->z + transPointAssociateToMap(2,3); po->intensity = pi->intensity; } pcl::PointCloud<PointType>::Ptr transformPointCloud(pcl::PointCloud<PointType>::Ptr cloudIn, PointTypePose* transformIn) { pcl::PointCloud<PointType>::Ptr cloudOut(new pcl::PointCloud<PointType>()); PointType *pointFrom; int cloudSize = cloudIn->size(); 
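// Apply the 6-DOF keyframe pose as a rigid-body transform, expanding
// p' = R*p + t row by row from the 4x4 affine matrix built below;
// intensity is copied through unchanged.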
cloudOut->resize(cloudSize); Eigen::Affine3f transCur = pcl::getTransformation(transformIn->x, transformIn->y, transformIn->z, transformIn->roll, transformIn->pitch, transformIn->yaw); for (int i = 0; i < cloudSize; ++i){ pointFrom = &cloudIn->points[i]; cloudOut->points[i].x = transCur(0,0) * pointFrom->x + transCur(0,1) * pointFrom->y + transCur(0,2) * pointFrom->z + transCur(0,3); cloudOut->points[i].y = transCur(1,0) * pointFrom->x + transCur(1,1) * pointFrom->y + transCur(1,2) * pointFrom->z + transCur(1,3); cloudOut->points[i].z = transCur(2,0) * pointFrom->x + transCur(2,1) * pointFrom->y + transCur(2,2) * pointFrom->z + transCur(2,3); cloudOut->points[i].intensity = pointFrom->intensity; } return cloudOut; } gtsam::Pose3 pclPointTogtsamPose3(PointTypePose thisPoint) { return gtsam::Pose3(gtsam::Rot3::RzRyRx(double(thisPoint.roll), double(thisPoint.pitch), double(thisPoint.yaw)), gtsam::Point3(double(thisPoint.x), double(thisPoint.y), double(thisPoint.z))); } gtsam::Pose3 trans2gtsamPose(float transformIn[]) { return gtsam::Pose3(gtsam::Rot3::RzRyRx(transformIn[0], transformIn[1], transformIn[2]), gtsam::Point3(transformIn[3], transformIn[4], transformIn[5])); } Eigen::Affine3f pclPointToAffine3f(PointTypePose thisPoint) { return pcl::getTransformation(thisPoint.x, thisPoint.y, thisPoint.z, thisPoint.roll, thisPoint.pitch, thisPoint.yaw); } Eigen::Affine3f trans2Affine3f(float transformIn[]) { return pcl::getTransformation(transformIn[3], transformIn[4], transformIn[5], transformIn[0], transformIn[1], transformIn[2]); } PointTypePose trans2PointTypePose(float transformIn[]) { PointTypePose thisPose6D; thisPose6D.x = transformIn[3]; thisPose6D.y = transformIn[4]; thisPose6D.z = transformIn[5]; thisPose6D.roll = transformIn[0]; thisPose6D.pitch = transformIn[1]; thisPose6D.yaw = transformIn[2]; return thisPose6D; } void visualizeGlobalMapThread() { ros::Rate rate(0.2); while (ros::ok()){ rate.sleep(); publishGlobalMap(); } if (savePCD == false) return; cout << "****************************************************" << endl; cout << "Saving map to pcd files ..." 
<< endl; // create directory and remove old files; savePCDDirectory = std::getenv("HOME") + savePCDDirectory; int unused = system((std::string("exec rm -r ") + savePCDDirectory).c_str()); unused = system((std::string("mkdir ") + savePCDDirectory).c_str()); // save key frame transformations pcl::io::savePCDFileASCII(savePCDDirectory + "trajectory.pcd", *cloudKeyPoses3D); pcl::io::savePCDFileASCII(savePCDDirectory + "transformations.pcd", *cloudKeyPoses6D); // extract global point cloud map pcl::PointCloud<PointType>::Ptr globalCornerCloud(new pcl::PointCloud<PointType>()); pcl::PointCloud<PointType>::Ptr globalCornerCloudDS(new pcl::PointCloud<PointType>()); pcl::PointCloud<PointType>::Ptr globalSurfCloud(new pcl::PointCloud<PointType>()); pcl::PointCloud<PointType>::Ptr globalSurfCloudDS(new pcl::PointCloud<PointType>()); pcl::PointCloud<PointType>::Ptr globalMapCloud(new pcl::PointCloud<PointType>()); for (int i = 0; i < (int)cloudKeyPoses3D->size(); i++) { *globalCornerCloud += *transformPointCloud(cornerCloudKeyFrames[i], &cloudKeyPoses6D->points[i]); *globalSurfCloud += *transformPointCloud(surfCloudKeyFrames[i], &cloudKeyPoses6D->points[i]); cout << "\r" << std::flush << "Processing feature cloud " << i << " of " << cloudKeyPoses6D->size() << " ..."; } // down-sample and save corner cloud downSizeFilterCorner.setInputCloud(globalCornerCloud); downSizeFilterCorner.filter(*globalCornerCloudDS); pcl::io::savePCDFileASCII(savePCDDirectory + "cloudCorner.pcd", *globalCornerCloudDS); // down-sample and save surf cloud downSizeFilterSurf.setInputCloud(globalSurfCloud); downSizeFilterSurf.filter(*globalSurfCloudDS); pcl::io::savePCDFileASCII(savePCDDirectory + "cloudSurf.pcd", *globalSurfCloudDS); // down-sample and save global point cloud map *globalMapCloud += *globalCornerCloud; *globalMapCloud += *globalSurfCloud; pcl::io::savePCDFileASCII(savePCDDirectory + "cloudGlobal.pcd", *globalMapCloud); cout << "****************************************************" << endl; cout << "Saving map to pcd files completed" << endl; } void publishGlobalMap() { if (pubLaserCloudSurround.getNumSubscribers() == 0) return; if (cloudKeyPoses3D->points.empty() == true) return; pcl::KdTreeFLANN<PointType>::Ptr kdtreeGlobalMap(new pcl::KdTreeFLANN<PointType>());; pcl::PointCloud<PointType>::Ptr globalMapKeyPoses(new pcl::PointCloud<PointType>()); pcl::PointCloud<PointType>::Ptr globalMapKeyPosesDS(new pcl::PointCloud<PointType>()); pcl::PointCloud<PointType>::Ptr globalMapKeyFrames(new pcl::PointCloud<PointType>()); pcl::PointCloud<PointType>::Ptr globalMapKeyFramesDS(new pcl::PointCloud<PointType>()); // kd-tree to find near key frames to visualize std::vector<int> pointSearchIndGlobalMap; std::vector<float> pointSearchSqDisGlobalMap; // search near key frames to visualize mtx.lock(); kdtreeGlobalMap->setInputCloud(cloudKeyPoses3D); kdtreeGlobalMap->radiusSearch(cloudKeyPoses3D->back(), globalMapVisualizationSearchRadius, pointSearchIndGlobalMap, pointSearchSqDisGlobalMap, 0); mtx.unlock(); for (int i = 0; i < (int)pointSearchIndGlobalMap.size(); ++i) globalMapKeyPoses->push_back(cloudKeyPoses3D->points[pointSearchIndGlobalMap[i]]); // downsample near selected key frames pcl::VoxelGrid<PointType> downSizeFilterGlobalMapKeyPoses; // for global map visualization downSizeFilterGlobalMapKeyPoses.setLeafSize(globalMapVisualizationPoseDensity, globalMapVisualizationPoseDensity, globalMapVisualizationPoseDensity); // for global map visualization downSizeFilterGlobalMapKeyPoses.setInputCloud(globalMapKeyPoses); 
downSizeFilterGlobalMapKeyPoses.filter(*globalMapKeyPosesDS); // extract visualized and downsampled key frames for (int i = 0; i < (int)globalMapKeyPosesDS->size(); ++i){ if (pointDistance(globalMapKeyPosesDS->points[i], cloudKeyPoses3D->back()) > globalMapVisualizationSearchRadius) continue; int thisKeyInd = (int)globalMapKeyPosesDS->points[i].intensity; *globalMapKeyFrames += *transformPointCloud(cornerCloudKeyFrames[thisKeyInd], &cloudKeyPoses6D->points[thisKeyInd]); *globalMapKeyFrames += *transformPointCloud(surfCloudKeyFrames[thisKeyInd], &cloudKeyPoses6D->points[thisKeyInd]); } // downsample visualized points pcl::VoxelGrid<PointType> downSizeFilterGlobalMapKeyFrames; // for global map visualization downSizeFilterGlobalMapKeyFrames.setLeafSize(globalMapVisualizationLeafSize, globalMapVisualizationLeafSize, globalMapVisualizationLeafSize); // for global map visualization downSizeFilterGlobalMapKeyFrames.setInputCloud(globalMapKeyFrames); downSizeFilterGlobalMapKeyFrames.filter(*globalMapKeyFramesDS); publishCloud(&pubLaserCloudSurround, globalMapKeyFramesDS, timeLaserInfoStamp, odometryFrame); } void loopClosureThread() { if (loopClosureEnableFlag == false) return; ros::Rate rate(1); while (ros::ok()) { rate.sleep(); performLoopClosure(); visualizeLoopClosure(); } } bool detectLoopClosure(int *latestID, int *closestID) { int latestFrameIDLoopCloure = copy_cloudKeyPoses3D->size() - 1; int closestHistoryFrameID = -1; // check loop constraint added before auto it = loopIndexContainer.find(latestFrameIDLoopCloure); if (it != loopIndexContainer.end()) return false; latestKeyFrameCloud->clear(); nearHistoryKeyFrameCloud->clear(); // find the closest history key frame std::vector<int> pointSearchIndLoop; std::vector<float> pointSearchSqDisLoop; kdtreeHistoryKeyPoses->setInputCloud(copy_cloudKeyPoses3D); kdtreeHistoryKeyPoses->radiusSearch(copy_cloudKeyPoses3D->back(), historyKeyframeSearchRadius, pointSearchIndLoop, pointSearchSqDisLoop, 0); for (int i = 0; i < (int)pointSearchIndLoop.size(); ++i) { int id = pointSearchIndLoop[i]; if (abs(copy_cloudKeyPoses6D->points[id].time - timeLaserCloudInfoLast) > historyKeyframeSearchTimeDiff) { closestHistoryFrameID = id; break; } } if (closestHistoryFrameID == -1) return false; if (latestFrameIDLoopCloure == closestHistoryFrameID) return false; // save latest key frames *latestKeyFrameCloud += *transformPointCloud(cornerCloudKeyFrames[latestFrameIDLoopCloure], &copy_cloudKeyPoses6D->points[latestFrameIDLoopCloure]); *latestKeyFrameCloud += *transformPointCloud(surfCloudKeyFrames[latestFrameIDLoopCloure], &copy_cloudKeyPoses6D->points[latestFrameIDLoopCloure]); // save history near key frames bool nearFrameAvailable = false; for (int j = -historyKeyframeSearchNum; j <= historyKeyframeSearchNum; ++j) { if (closestHistoryFrameID + j < 0 || closestHistoryFrameID + j > latestFrameIDLoopCloure) continue; *nearHistoryKeyFrameCloud += *transformPointCloud(cornerCloudKeyFrames[closestHistoryFrameID+j], &copy_cloudKeyPoses6D->points[closestHistoryFrameID+j]); *nearHistoryKeyFrameCloud += *transformPointCloud(surfCloudKeyFrames[closestHistoryFrameID+j], &copy_cloudKeyPoses6D->points[closestHistoryFrameID+j]); nearFrameAvailable = true; } if (nearFrameAvailable == false) return false; *latestID = latestFrameIDLoopCloure; *closestID = closestHistoryFrameID; return true; } void performLoopClosure() { if (cloudKeyPoses3D->points.empty() == true) return; mtx.lock(); *copy_cloudKeyPoses3D = *cloudKeyPoses3D; *copy_cloudKeyPoses6D = *cloudKeyPoses6D; mtx.unlock(); 
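// Loop closure runs in its own thread (see loopClosureThread); it works on
// copies of the key poses taken under the mutex above, so the scan-to-map
// thread is blocked only for the duration of the copy.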
int latestFrameIDLoopCloure; int closestHistoryFrameID; if (detectLoopClosure(&latestFrameIDLoopCloure, &closestHistoryFrameID) == false) return; // ICP Settings static pcl::IterativeClosestPoint<PointType, PointType> icp; icp.setMaxCorrespondenceDistance(historyKeyframeSearchRadius*2); icp.setMaximumIterations(100); icp.setTransformationEpsilon(1e-6); icp.setEuclideanFitnessEpsilon(1e-6); icp.setRANSACIterations(0); // Downsample map cloud pcl::PointCloud<PointType>::Ptr cloud_temp(new pcl::PointCloud<PointType>()); downSizeFilterICP.setInputCloud(nearHistoryKeyFrameCloud); downSizeFilterICP.filter(*cloud_temp); *nearHistoryKeyFrameCloud = *cloud_temp; // publish history near key frames publishCloud(&pubHistoryKeyFrames, nearHistoryKeyFrameCloud, timeLaserInfoStamp, odometryFrame); // Align clouds icp.setInputSource(latestKeyFrameCloud); icp.setInputTarget(nearHistoryKeyFrameCloud); pcl::PointCloud<PointType>::Ptr unused_result(new pcl::PointCloud<PointType>()); icp.align(*unused_result); // std::cout << "ICP converg flag:" << icp.hasConverged() << ". Fitness score: " << icp.getFitnessScore() << std::endl; if (icp.hasConverged() == false || icp.getFitnessScore() > historyKeyframeFitnessScore) return; // publish corrected cloud if (pubIcpKeyFrames.getNumSubscribers() != 0){ pcl::PointCloud<PointType>::Ptr closed_cloud(new pcl::PointCloud<PointType>()); pcl::transformPointCloud(*latestKeyFrameCloud, *closed_cloud, icp.getFinalTransformation()); publishCloud(&pubIcpKeyFrames, closed_cloud, timeLaserInfoStamp, odometryFrame); } // Get pose transformation float x, y, z, roll, pitch, yaw; Eigen::Affine3f correctionLidarFrame; correctionLidarFrame = icp.getFinalTransformation(); // transform from world origin to wrong pose Eigen::Affine3f tWrong = pclPointToAffine3f(copy_cloudKeyPoses6D->points[latestFrameIDLoopCloure]); // transform from world origin to corrected pose Eigen::Affine3f tCorrect = correctionLidarFrame * tWrong;// pre-multiplying -> successive rotation about a fixed frame pcl::getTranslationAndEulerAngles (tCorrect, x, y, z, roll, pitch, yaw); gtsam::Pose3 poseFrom = Pose3(Rot3::RzRyRx(roll, pitch, yaw), Point3(x, y, z)); gtsam::Pose3 poseTo = pclPointTogtsamPose3(copy_cloudKeyPoses6D->points[closestHistoryFrameID]); gtsam::Vector Vector6(6); float noiseScore = icp.getFitnessScore(); Vector6 << noiseScore, noiseScore, noiseScore, noiseScore, noiseScore, noiseScore; noiseModel::Diagonal::shared_ptr constraintNoise = noiseModel::Diagonal::Variances(Vector6); // Add pose constraint mtx.lock(); loopIndexQueue.push_back(make_pair(latestFrameIDLoopCloure, closestHistoryFrameID)); loopPoseQueue.push_back(poseFrom.between(poseTo)); loopNoiseQueue.push_back(constraintNoise); mtx.unlock(); // add loop constriant loopIndexContainer[latestFrameIDLoopCloure] = closestHistoryFrameID; } void visualizeLoopClosure() { visualization_msgs::MarkerArray markerArray; // loop nodes visualization_msgs::Marker markerNode; markerNode.header.frame_id = odometryFrame; markerNode.header.stamp = timeLaserInfoStamp; markerNode.action = visualization_msgs::Marker::ADD; markerNode.type = visualization_msgs::Marker::SPHERE_LIST; markerNode.ns = "loop_nodes"; markerNode.id = 0; markerNode.pose.orientation.w = 1; markerNode.scale.x = 0.3; markerNode.scale.y = 0.3; markerNode.scale.z = 0.3; markerNode.color.r = 0; markerNode.color.g = 0.8; markerNode.color.b = 1; markerNode.color.a = 1; // loop edges visualization_msgs::Marker markerEdge; markerEdge.header.frame_id = odometryFrame; markerEdge.header.stamp = 
timeLaserInfoStamp; markerEdge.action = visualization_msgs::Marker::ADD; markerEdge.type = visualization_msgs::Marker::LINE_LIST; markerEdge.ns = "loop_edges"; markerEdge.id = 1; markerEdge.pose.orientation.w = 1; markerEdge.scale.x = 0.1; markerEdge.scale.y = 0.1; markerEdge.scale.z = 0.1; markerEdge.color.r = 0.9; markerEdge.color.g = 0.9; markerEdge.color.b = 0; markerEdge.color.a = 1; for (auto it = loopIndexContainer.begin(); it != loopIndexContainer.end(); ++it) { int key_cur = it->first; int key_pre = it->second; geometry_msgs::Point p; p.x = copy_cloudKeyPoses6D->points[key_cur].x; p.y = copy_cloudKeyPoses6D->points[key_cur].y; p.z = copy_cloudKeyPoses6D->points[key_cur].z; markerNode.points.push_back(p); markerEdge.points.push_back(p); p.x = copy_cloudKeyPoses6D->points[key_pre].x; p.y = copy_cloudKeyPoses6D->points[key_pre].y; p.z = copy_cloudKeyPoses6D->points[key_pre].z; markerNode.points.push_back(p); markerEdge.points.push_back(p); } markerArray.markers.push_back(markerNode); markerArray.markers.push_back(markerEdge); pubLoopConstraintEdge.publish(markerArray); } void updateInitialGuess() { // save current transformation before any processing incrementalOdometryAffineFront = trans2Affine3f(transformTobeMapped); static Eigen::Affine3f lastImuTransformation; // initialization if (cloudKeyPoses3D->points.empty()) { transformTobeMapped[0] = cloudInfo.imuRollInit; transformTobeMapped[1] = cloudInfo.imuPitchInit; transformTobeMapped[2] = cloudInfo.imuYawInit; if (!useImuHeadingInitialization) transformTobeMapped[2] = 0; lastImuTransformation = pcl::getTransformation(0, 0, 0, cloudInfo.imuRollInit, cloudInfo.imuPitchInit, cloudInfo.imuYawInit); // save imu before return; return; } // use imu pre-integration estimation for pose guess static bool lastImuPreTransAvailable = false; static Eigen::Affine3f lastImuPreTransformation; if (cloudInfo.odomAvailable == true) { Eigen::Affine3f transBack = pcl::getTransformation(cloudInfo.initialGuessX, cloudInfo.initialGuessY, cloudInfo.initialGuessZ, cloudInfo.initialGuessRoll, cloudInfo.initialGuessPitch, cloudInfo.initialGuessYaw); if (lastImuPreTransAvailable == false) { lastImuPreTransformation = transBack; lastImuPreTransAvailable = true; } else { Eigen::Affine3f transIncre = lastImuPreTransformation.inverse() * transBack; Eigen::Affine3f transTobe = trans2Affine3f(transformTobeMapped); Eigen::Affine3f transFinal = transTobe * transIncre; pcl::getTranslationAndEulerAngles(transFinal, transformTobeMapped[3], transformTobeMapped[4], transformTobeMapped[5], transformTobeMapped[0], transformTobeMapped[1], transformTobeMapped[2]); lastImuPreTransformation = transBack; lastImuTransformation = pcl::getTransformation(0, 0, 0, cloudInfo.imuRollInit, cloudInfo.imuPitchInit, cloudInfo.imuYawInit); // save imu before return; return; } } // use imu incremental estimation for pose guess (only rotation) if (cloudInfo.imuAvailable == true) { Eigen::Affine3f transBack = pcl::getTransformation(0, 0, 0, cloudInfo.imuRollInit, cloudInfo.imuPitchInit, cloudInfo.imuYawInit); Eigen::Affine3f transIncre = lastImuTransformation.inverse() * transBack; Eigen::Affine3f transTobe = trans2Affine3f(transformTobeMapped); Eigen::Affine3f transFinal = transTobe * transIncre; pcl::getTranslationAndEulerAngles(transFinal, transformTobeMapped[3], transformTobeMapped[4], transformTobeMapped[5], transformTobeMapped[0], transformTobeMapped[1], transformTobeMapped[2]); lastImuTransformation = pcl::getTransformation(0, 0, 0, cloudInfo.imuRollInit, cloudInfo.imuPitchInit, 
cloudInfo.imuYawInit); // save imu before return; return; } } void extractForLoopClosure() { pcl::PointCloud<PointType>::Ptr cloudToExtract(new pcl::PointCloud<PointType>()); int numPoses = cloudKeyPoses3D->size(); for (int i = numPoses-1; i >= 0; --i) { if ((int)cloudToExtract->size() <= surroundingKeyframeSize) cloudToExtract->push_back(cloudKeyPoses3D->points[i]); else break; } extractCloud(cloudToExtract); } void extractNearby() { pcl::PointCloud<PointType>::Ptr surroundingKeyPoses(new pcl::PointCloud<PointType>()); pcl::PointCloud<PointType>::Ptr surroundingKeyPosesDS(new pcl::PointCloud<PointType>()); std::vector<int> pointSearchInd; std::vector<float> pointSearchSqDis; // extract all the nearby key poses and downsample them kdtreeSurroundingKeyPoses->setInputCloud(cloudKeyPoses3D); // create kd-tree kdtreeSurroundingKeyPoses->radiusSearch(cloudKeyPoses3D->back(), (double)surroundingKeyframeSearchRadius, pointSearchInd, pointSearchSqDis); for (int i = 0; i < (int)pointSearchInd.size(); ++i) { int id = pointSearchInd[i]; surroundingKeyPoses->push_back(cloudKeyPoses3D->points[id]); } downSizeFilterSurroundingKeyPoses.setInputCloud(surroundingKeyPoses); downSizeFilterSurroundingKeyPoses.filter(*surroundingKeyPosesDS); // also extract some latest key frames in case the robot rotates in one position int numPoses = cloudKeyPoses3D->size(); for (int i = numPoses-1; i >= 0; --i) { if (timeLaserCloudInfoLast - cloudKeyPoses6D->points[i].time < 10.0) surroundingKeyPosesDS->push_back(cloudKeyPoses3D->points[i]); else break; } extractCloud(surroundingKeyPosesDS); } void extractCloud(pcl::PointCloud<PointType>::Ptr cloudToExtract) { std::vector<pcl::PointCloud<PointType>> laserCloudCornerSurroundingVec; std::vector<pcl::PointCloud<PointType>> laserCloudSurfSurroundingVec; laserCloudCornerSurroundingVec.resize(cloudToExtract->size()); laserCloudSurfSurroundingVec.resize(cloudToExtract->size()); // extract surrounding map #pragma omp parallel for num_threads(numberOfCores) for (int i = 0; i < (int)cloudToExtract->size(); ++i) { if (pointDistance(cloudToExtract->points[i], cloudKeyPoses3D->back()) > surroundingKeyframeSearchRadius) continue; int thisKeyInd = (int)cloudToExtract->points[i].intensity; laserCloudCornerSurroundingVec[i] = *transformPointCloud(cornerCloudKeyFrames[thisKeyInd], &cloudKeyPoses6D->points[thisKeyInd]); laserCloudSurfSurroundingVec[i] = *transformPointCloud(surfCloudKeyFrames[thisKeyInd], &cloudKeyPoses6D->points[thisKeyInd]); } // fuse the map laserCloudCornerFromMap->clear(); laserCloudSurfFromMap->clear(); for (int i = 0; i < (int)cloudToExtract->size(); ++i) { *laserCloudCornerFromMap += laserCloudCornerSurroundingVec[i]; *laserCloudSurfFromMap += laserCloudSurfSurroundingVec[i]; } // Downsample the surrounding corner key frames (or map) downSizeFilterCorner.setInputCloud(laserCloudCornerFromMap); downSizeFilterCorner.filter(*laserCloudCornerFromMapDS); laserCloudCornerFromMapDSNum = laserCloudCornerFromMapDS->size(); // Downsample the surrounding surf key frames (or map) downSizeFilterSurf.setInputCloud(laserCloudSurfFromMap); downSizeFilterSurf.filter(*laserCloudSurfFromMapDS); laserCloudSurfFromMapDSNum = laserCloudSurfFromMapDS->size(); } void extractSurroundingKeyFrames() { if (cloudKeyPoses3D->points.empty() == true) return; if (loopClosureEnableFlag == true) { extractForLoopClosure(); } else { extractNearby(); } } void downsampleCurrentScan() { // Downsample cloud from current scan laserCloudCornerLastDS->clear(); 
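// Voxel-grid filtering to the mapping leaf sizes keeps the per-scan feature
// count bounded; the resulting DS counts also gate scan2MapOptimization(),
// which requires enough edge and planar features before running LM.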
downSizeFilterCorner.setInputCloud(laserCloudCornerLast); downSizeFilterCorner.filter(*laserCloudCornerLastDS); laserCloudCornerLastDSNum = laserCloudCornerLastDS->size(); laserCloudSurfLastDS->clear(); downSizeFilterSurf.setInputCloud(laserCloudSurfLast); downSizeFilterSurf.filter(*laserCloudSurfLastDS); laserCloudSurfLastDSNum = laserCloudSurfLastDS->size(); } void updatePointAssociateToMap() { transPointAssociateToMap = trans2Affine3f(transformTobeMapped); } void cornerOptimization() { updatePointAssociateToMap(); #pragma omp parallel for num_threads(numberOfCores) for (int i = 0; i < laserCloudCornerLastDSNum; i++) { PointType pointOri, pointSel, coeff; std::vector<int> pointSearchInd; std::vector<float> pointSearchSqDis; pointOri = laserCloudCornerLastDS->points[i]; pointAssociateToMap(&pointOri, &pointSel); kdtreeCornerFromMap->nearestKSearch(pointSel, 5, pointSearchInd, pointSearchSqDis); cv::Mat matA1(3, 3, CV_32F, cv::Scalar::all(0)); cv::Mat matD1(1, 3, CV_32F, cv::Scalar::all(0)); cv::Mat matV1(3, 3, CV_32F, cv::Scalar::all(0)); if (pointSearchSqDis[4] < 1.0) { float cx = 0, cy = 0, cz = 0; for (int j = 0; j < 5; j++) { cx += laserCloudCornerFromMapDS->points[pointSearchInd[j]].x; cy += laserCloudCornerFromMapDS->points[pointSearchInd[j]].y; cz += laserCloudCornerFromMapDS->points[pointSearchInd[j]].z; } cx /= 5; cy /= 5; cz /= 5; float a11 = 0, a12 = 0, a13 = 0, a22 = 0, a23 = 0, a33 = 0; for (int j = 0; j < 5; j++) { float ax = laserCloudCornerFromMapDS->points[pointSearchInd[j]].x - cx; float ay = laserCloudCornerFromMapDS->points[pointSearchInd[j]].y - cy; float az = laserCloudCornerFromMapDS->points[pointSearchInd[j]].z - cz; a11 += ax * ax; a12 += ax * ay; a13 += ax * az; a22 += ay * ay; a23 += ay * az; a33 += az * az; } a11 /= 5; a12 /= 5; a13 /= 5; a22 /= 5; a23 /= 5; a33 /= 5; matA1.at<float>(0, 0) = a11; matA1.at<float>(0, 1) = a12; matA1.at<float>(0, 2) = a13; matA1.at<float>(1, 0) = a12; matA1.at<float>(1, 1) = a22; matA1.at<float>(1, 2) = a23; matA1.at<float>(2, 0) = a13; matA1.at<float>(2, 1) = a23; matA1.at<float>(2, 2) = a33; cv::eigen(matA1, matD1, matV1); if (matD1.at<float>(0, 0) > 3 * matD1.at<float>(0, 1)) { float x0 = pointSel.x; float y0 = pointSel.y; float z0 = pointSel.z; float x1 = cx + 0.1 * matV1.at<float>(0, 0); float y1 = cy + 0.1 * matV1.at<float>(0, 1); float z1 = cz + 0.1 * matV1.at<float>(0, 2); float x2 = cx - 0.1 * matV1.at<float>(0, 0); float y2 = cy - 0.1 * matV1.at<float>(0, 1); float z2 = cz - 0.1 * matV1.at<float>(0, 2); float a012 = sqrt(((x0 - x1)*(y0 - y2) - (x0 - x2)*(y0 - y1)) * ((x0 - x1)*(y0 - y2) - (x0 - x2)*(y0 - y1)) + ((x0 - x1)*(z0 - z2) - (x0 - x2)*(z0 - z1)) * ((x0 - x1)*(z0 - z2) - (x0 - x2)*(z0 - z1)) + ((y0 - y1)*(z0 - z2) - (y0 - y2)*(z0 - z1)) * ((y0 - y1)*(z0 - z2) - (y0 - y2)*(z0 - z1))); float l12 = sqrt((x1 - x2)*(x1 - x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2)); float la = ((y1 - y2)*((x0 - x1)*(y0 - y2) - (x0 - x2)*(y0 - y1)) + (z1 - z2)*((x0 - x1)*(z0 - z2) - (x0 - x2)*(z0 - z1))) / a012 / l12; float lb = -((x1 - x2)*((x0 - x1)*(y0 - y2) - (x0 - x2)*(y0 - y1)) - (z1 - z2)*((y0 - y1)*(z0 - z2) - (y0 - y2)*(z0 - z1))) / a012 / l12; float lc = -((x1 - x2)*((x0 - x1)*(z0 - z2) - (x0 - x2)*(z0 - z1)) + (y1 - y2)*((y0 - y1)*(z0 - z2) - (y0 - y2)*(z0 - z1))) / a012 / l12; float ld2 = a012 / l12; float s = 1 - 0.9 * fabs(ld2); coeff.x = s * la; coeff.y = s * lb; coeff.z = s * lc; coeff.intensity = s * ld2; if (s > 0.1) { laserCloudOriCornerVec[i] = pointOri; coeffSelCornerVec[i] = coeff; laserCloudOriCornerFlag[i] = 
true; } } } } } void surfOptimization() { updatePointAssociateToMap(); #pragma omp parallel for num_threads(numberOfCores) for (int i = 0; i < laserCloudSurfLastDSNum; i++) { PointType pointOri, pointSel, coeff; std::vector<int> pointSearchInd; std::vector<float> pointSearchSqDis; pointOri = laserCloudSurfLastDS->points[i]; pointAssociateToMap(&pointOri, &pointSel); kdtreeSurfFromMap->nearestKSearch(pointSel, 5, pointSearchInd, pointSearchSqDis); Eigen::Matrix<float, 5, 3> matA0; Eigen::Matrix<float, 5, 1> matB0; Eigen::Vector3f matX0; matA0.setZero(); matB0.fill(-1); matX0.setZero(); if (pointSearchSqDis[4] < 1.0) { for (int j = 0; j < 5; j++) { matA0(j, 0) = laserCloudSurfFromMapDS->points[pointSearchInd[j]].x; matA0(j, 1) = laserCloudSurfFromMapDS->points[pointSearchInd[j]].y; matA0(j, 2) = laserCloudSurfFromMapDS->points[pointSearchInd[j]].z; } matX0 = matA0.colPivHouseholderQr().solve(matB0); float pa = matX0(0, 0); float pb = matX0(1, 0); float pc = matX0(2, 0); float pd = 1; float ps = sqrt(pa * pa + pb * pb + pc * pc); pa /= ps; pb /= ps; pc /= ps; pd /= ps; bool planeValid = true; for (int j = 0; j < 5; j++) { if (fabs(pa * laserCloudSurfFromMapDS->points[pointSearchInd[j]].x + pb * laserCloudSurfFromMapDS->points[pointSearchInd[j]].y + pc * laserCloudSurfFromMapDS->points[pointSearchInd[j]].z + pd) > 0.2) { planeValid = false; break; } } if (planeValid) { float pd2 = pa * pointSel.x + pb * pointSel.y + pc * pointSel.z + pd; float s = 1 - 0.9 * fabs(pd2) / sqrt(sqrt(pointSel.x * pointSel.x + pointSel.y * pointSel.y + pointSel.z * pointSel.z)); coeff.x = s * pa; coeff.y = s * pb; coeff.z = s * pc; coeff.intensity = s * pd2; if (s > 0.1) { laserCloudOriSurfVec[i] = pointOri; coeffSelSurfVec[i] = coeff; laserCloudOriSurfFlag[i] = true; } } } } } void combineOptimizationCoeffs() { // combine corner coeffs for (int i = 0; i < laserCloudCornerLastDSNum; ++i){ if (laserCloudOriCornerFlag[i] == true){ laserCloudOri->push_back(laserCloudOriCornerVec[i]); coeffSel->push_back(coeffSelCornerVec[i]); } } // combine surf coeffs for (int i = 0; i < laserCloudSurfLastDSNum; ++i){ if (laserCloudOriSurfFlag[i] == true){ laserCloudOri->push_back(laserCloudOriSurfVec[i]); coeffSel->push_back(coeffSelSurfVec[i]); } } // reset flag for next iteration std::fill(laserCloudOriCornerFlag.begin(), laserCloudOriCornerFlag.end(), false); std::fill(laserCloudOriSurfFlag.begin(), laserCloudOriSurfFlag.end(), false); } bool LMOptimization(int iterCount) { // This optimization is from the original loam_velodyne by Ji Zhang, need to cope with coordinate transformation // lidar <- camera --- camera <- lidar // x = z --- x = y // y = x --- y = z // z = y --- z = x // roll = yaw --- roll = pitch // pitch = roll --- pitch = yaw // yaw = pitch --- yaw = roll // lidar -> camera float srx = sin(transformTobeMapped[1]); float crx = cos(transformTobeMapped[1]); float sry = sin(transformTobeMapped[2]); float cry = cos(transformTobeMapped[2]); float srz = sin(transformTobeMapped[0]); float crz = cos(transformTobeMapped[0]); int laserCloudSelNum = laserCloudOri->size(); if (laserCloudSelNum < 50) { return false; } cv::Mat matA(laserCloudSelNum, 6, CV_32F, cv::Scalar::all(0)); cv::Mat matAt(6, laserCloudSelNum, CV_32F, cv::Scalar::all(0)); cv::Mat matAtA(6, 6, CV_32F, cv::Scalar::all(0)); cv::Mat matB(laserCloudSelNum, 1, CV_32F, cv::Scalar::all(0)); cv::Mat matAtB(6, 1, CV_32F, cv::Scalar::all(0)); cv::Mat matX(6, 1, CV_32F, cv::Scalar::all(0)); cv::Mat matP(6, 6, CV_32F, cv::Scalar::all(0)); PointType pointOri, coeff; for 
(int i = 0; i < laserCloudSelNum; i++) { // lidar -> camera pointOri.x = laserCloudOri->points[i].y; pointOri.y = laserCloudOri->points[i].z; pointOri.z = laserCloudOri->points[i].x; // lidar -> camera coeff.x = coeffSel->points[i].y; coeff.y = coeffSel->points[i].z; coeff.z = coeffSel->points[i].x; coeff.intensity = coeffSel->points[i].intensity; // in camera float arx = (crx*sry*srz*pointOri.x + crx*crz*sry*pointOri.y - srx*sry*pointOri.z) * coeff.x + (-srx*srz*pointOri.x - crz*srx*pointOri.y - crx*pointOri.z) * coeff.y + (crx*cry*srz*pointOri.x + crx*cry*crz*pointOri.y - cry*srx*pointOri.z) * coeff.z; float ary = ((cry*srx*srz - crz*sry)*pointOri.x + (sry*srz + cry*crz*srx)*pointOri.y + crx*cry*pointOri.z) * coeff.x + ((-cry*crz - srx*sry*srz)*pointOri.x + (cry*srz - crz*srx*sry)*pointOri.y - crx*sry*pointOri.z) * coeff.z; float arz = ((crz*srx*sry - cry*srz)*pointOri.x + (-cry*crz-srx*sry*srz)*pointOri.y)*coeff.x + (crx*crz*pointOri.x - crx*srz*pointOri.y) * coeff.y + ((sry*srz + cry*crz*srx)*pointOri.x + (crz*sry-cry*srx*srz)*pointOri.y)*coeff.z; // lidar -> camera matA.at<float>(i, 0) = arz; matA.at<float>(i, 1) = arx; matA.at<float>(i, 2) = ary; matA.at<float>(i, 3) = coeff.z; matA.at<float>(i, 4) = coeff.x; matA.at<float>(i, 5) = coeff.y; matB.at<float>(i, 0) = -coeff.intensity; } cv::transpose(matA, matAt); matAtA = matAt * matA; matAtB = matAt * matB; cv::solve(matAtA, matAtB, matX, cv::DECOMP_QR); if (iterCount == 0) { cv::Mat matE(1, 6, CV_32F, cv::Scalar::all(0)); cv::Mat matV(6, 6, CV_32F, cv::Scalar::all(0)); cv::Mat matV2(6, 6, CV_32F, cv::Scalar::all(0)); cv::eigen(matAtA, matE, matV); matV.copyTo(matV2); isDegenerate = false; float eignThre[6] = {100, 100, 100, 100, 100, 100}; for (int i = 5; i >= 0; i--) { if (matE.at<float>(0, i) < eignThre[i]) { for (int j = 0; j < 6; j++) { matV2.at<float>(i, j) = 0; } isDegenerate = true; } else { break; } } matP = matV.inv() * matV2; } if (isDegenerate) { cv::Mat matX2(6, 1, CV_32F, cv::Scalar::all(0)); matX.copyTo(matX2); matX = matP * matX2; } transformTobeMapped[0] += matX.at<float>(0, 0); transformTobeMapped[1] += matX.at<float>(1, 0); transformTobeMapped[2] += matX.at<float>(2, 0); transformTobeMapped[3] += matX.at<float>(3, 0); transformTobeMapped[4] += matX.at<float>(4, 0); transformTobeMapped[5] += matX.at<float>(5, 0); float deltaR = sqrt( pow(pcl::rad2deg(matX.at<float>(0, 0)), 2) + pow(pcl::rad2deg(matX.at<float>(1, 0)), 2) + pow(pcl::rad2deg(matX.at<float>(2, 0)), 2)); float deltaT = sqrt( pow(matX.at<float>(3, 0) * 100, 2) + pow(matX.at<float>(4, 0) * 100, 2) + pow(matX.at<float>(5, 0) * 100, 2)); if (deltaR < 0.05 && deltaT < 0.05) { return true; // converged } return false; // keep optimizing } void scan2MapOptimization() { if (cloudKeyPoses3D->points.empty()) return; if (laserCloudCornerLastDSNum > edgeFeatureMinValidNum && laserCloudSurfLastDSNum > surfFeatureMinValidNum) { kdtreeCornerFromMap->setInputCloud(laserCloudCornerFromMapDS); kdtreeSurfFromMap->setInputCloud(laserCloudSurfFromMapDS); for (int iterCount = 0; iterCount < 30; iterCount++) { laserCloudOri->clear(); coeffSel->clear(); cornerOptimization(); surfOptimization(); combineOptimizationCoeffs(); if (LMOptimization(iterCount) == true) break; } transformUpdate(); } else { ROS_WARN("Not enough features! 
Only %d edge and %d planar features available.", laserCloudCornerLastDSNum, laserCloudSurfLastDSNum); } } void transformUpdate() { if (cloudInfo.imuAvailable == true) { if (std::abs(cloudInfo.imuPitchInit) < 1.4) { double imuWeight = 0.01; tf::Quaternion imuQuaternion; tf::Quaternion transformQuaternion; double rollMid, pitchMid, yawMid; // slerp roll transformQuaternion.setRPY(transformTobeMapped[0], 0, 0); imuQuaternion.setRPY(cloudInfo.imuRollInit, 0, 0); tf::Matrix3x3(transformQuaternion.slerp(imuQuaternion, imuWeight)).getRPY(rollMid, pitchMid, yawMid); transformTobeMapped[0] = rollMid; // slerp pitch transformQuaternion.setRPY(0, transformTobeMapped[1], 0); imuQuaternion.setRPY(0, cloudInfo.imuPitchInit, 0); tf::Matrix3x3(transformQuaternion.slerp(imuQuaternion, imuWeight)).getRPY(rollMid, pitchMid, yawMid); transformTobeMapped[1] = pitchMid; } } transformTobeMapped[0] = constraintTransformation(transformTobeMapped[0], rotation_tollerance); transformTobeMapped[1] = constraintTransformation(transformTobeMapped[1], rotation_tollerance); transformTobeMapped[5] = constraintTransformation(transformTobeMapped[5], z_tollerance); incrementalOdometryAffineBack = trans2Affine3f(transformTobeMapped); } float constraintTransformation(float value, float limit) { if (value < -limit) value = -limit; if (value > limit) value = limit; return value; } bool saveFrame() { if (cloudKeyPoses3D->points.empty()) return true; Eigen::Affine3f transStart = pclPointToAffine3f(cloudKeyPoses6D->back()); Eigen::Affine3f transFinal = pcl::getTransformation(transformTobeMapped[3], transformTobeMapped[4], transformTobeMapped[5], transformTobeMapped[0], transformTobeMapped[1], transformTobeMapped[2]); Eigen::Affine3f transBetween = transStart.inverse() * transFinal; float x, y, z, roll, pitch, yaw; pcl::getTranslationAndEulerAngles(transBetween, x, y, z, roll, pitch, yaw); if (abs(roll) < surroundingkeyframeAddingAngleThreshold && abs(pitch) < surroundingkeyframeAddingAngleThreshold && abs(yaw) < surroundingkeyframeAddingAngleThreshold && sqrt(x*x + y*y + z*z) < surroundingkeyframeAddingDistThreshold) return false; return true; } void addOdomFactor() { if (cloudKeyPoses3D->points.empty()) { noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Variances((Vector(6) << 1e-2, 1e-2, M_PI*M_PI, 1e8, 1e8, 1e8).finished()); // rad*rad, meter*meter gtSAMgraph.add(PriorFactor<Pose3>(0, trans2gtsamPose(transformTobeMapped), priorNoise)); initialEstimate.insert(0, trans2gtsamPose(transformTobeMapped)); }else{ noiseModel::Diagonal::shared_ptr odometryNoise = noiseModel::Diagonal::Variances((Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished()); gtsam::Pose3 poseFrom = pclPointTogtsamPose3(cloudKeyPoses6D->points.back()); gtsam::Pose3 poseTo = trans2gtsamPose(transformTobeMapped); gtSAMgraph.add(BetweenFactor<Pose3>(cloudKeyPoses3D->size()-1, cloudKeyPoses3D->size(), poseFrom.between(poseTo), odometryNoise)); initialEstimate.insert(cloudKeyPoses3D->size(), poseTo); } } void addGPSFactor() { if (gpsQueue.empty()) return; // wait for system initialized and settles down if (cloudKeyPoses3D->points.empty()) return; else { if (pointDistance(cloudKeyPoses3D->front(), cloudKeyPoses3D->back()) < 5.0) return; } // pose covariance small, no need to correct if (poseCovariance(3,3) < poseCovThreshold && poseCovariance(4,4) < poseCovThreshold) return; // last gps position static PointType lastGPSPoint; while (!gpsQueue.empty()) { if (gpsQueue.front().header.stamp.toSec() < timeLaserCloudInfoLast - 0.2) { // message too old 
gpsQueue.pop_front(); } else if (gpsQueue.front().header.stamp.toSec() > timeLaserCloudInfoLast + 0.2) { // message too new break; } else { nav_msgs::Odometry thisGPS = gpsQueue.front(); gpsQueue.pop_front(); // GPS too noisy, skip float noise_x = thisGPS.pose.covariance[0]; float noise_y = thisGPS.pose.covariance[7]; float noise_z = thisGPS.pose.covariance[14]; if (noise_x > gpsCovThreshold || noise_y > gpsCovThreshold) continue; float gps_x = thisGPS.pose.pose.position.x; float gps_y = thisGPS.pose.pose.position.y; float gps_z = thisGPS.pose.pose.position.z; if (!useGpsElevation) { gps_z = transformTobeMapped[5]; noise_z = 0.01; } // GPS not properly initialized (0,0,0) if (abs(gps_x) < 1e-6 && abs(gps_y) < 1e-6) continue; // Add GPS every a few meters PointType curGPSPoint; curGPSPoint.x = gps_x; curGPSPoint.y = gps_y; curGPSPoint.z = gps_z; if (pointDistance(curGPSPoint, lastGPSPoint) < 5.0) continue; else lastGPSPoint = curGPSPoint; gtsam::Vector Vector3(3); Vector3 << max(noise_x, 1.0f), max(noise_y, 1.0f), max(noise_z, 1.0f); noiseModel::Diagonal::shared_ptr gps_noise = noiseModel::Diagonal::Variances(Vector3); gtsam::GPSFactor gps_factor(cloudKeyPoses3D->size(), gtsam::Point3(gps_x, gps_y, gps_z), gps_noise); gtSAMgraph.add(gps_factor); aLoopIsClosed = true; break; } } } void addLoopFactor() { if (loopIndexQueue.empty()) return; for (int i = 0; i < (int)loopIndexQueue.size(); ++i) { int indexFrom = loopIndexQueue[i].first; int indexTo = loopIndexQueue[i].second; gtsam::Pose3 poseBetween = loopPoseQueue[i]; gtsam::noiseModel::Diagonal::shared_ptr noiseBetween = loopNoiseQueue[i]; gtSAMgraph.add(BetweenFactor<Pose3>(indexFrom, indexTo, poseBetween, noiseBetween)); } loopIndexQueue.clear(); loopPoseQueue.clear(); loopNoiseQueue.clear(); aLoopIsClosed = true; } void saveKeyFramesAndFactor() { if (saveFrame() == false) return; // odom factor addOdomFactor(); // gps factor addGPSFactor(); // loop factor addLoopFactor(); // cout << "****************************************************" << endl; // gtSAMgraph.print("GTSAM Graph:\n"); // update iSAM isam->update(gtSAMgraph, initialEstimate); isam->update(); if (aLoopIsClosed == true) { isam->update(); isam->update(); isam->update(); isam->update(); isam->update(); } gtSAMgraph.resize(0); initialEstimate.clear(); //save key poses PointType thisPose3D; PointTypePose thisPose6D; Pose3 latestEstimate; isamCurrentEstimate = isam->calculateEstimate(); latestEstimate = isamCurrentEstimate.at<Pose3>(isamCurrentEstimate.size()-1); // cout << "****************************************************" << endl; // isamCurrentEstimate.print("Current estimate: "); thisPose3D.x = latestEstimate.translation().x(); thisPose3D.y = latestEstimate.translation().y(); thisPose3D.z = latestEstimate.translation().z(); thisPose3D.intensity = cloudKeyPoses3D->size(); // this can be used as index cloudKeyPoses3D->push_back(thisPose3D); thisPose6D.x = thisPose3D.x; thisPose6D.y = thisPose3D.y; thisPose6D.z = thisPose3D.z; thisPose6D.intensity = thisPose3D.intensity ; // this can be used as index thisPose6D.roll = latestEstimate.rotation().roll(); thisPose6D.pitch = latestEstimate.rotation().pitch(); thisPose6D.yaw = latestEstimate.rotation().yaw(); thisPose6D.time = timeLaserCloudInfoLast; cloudKeyPoses6D->push_back(thisPose6D); // cout << "****************************************************" << endl; // cout << "Pose covariance:" << endl; // cout << isam->marginalCovariance(isamCurrentEstimate.size()-1) << endl << endl; poseCovariance = 
isam->marginalCovariance(isamCurrentEstimate.size()-1); // save updated transform transformTobeMapped[0] = latestEstimate.rotation().roll(); transformTobeMapped[1] = latestEstimate.rotation().pitch(); transformTobeMapped[2] = latestEstimate.rotation().yaw(); transformTobeMapped[3] = latestEstimate.translation().x(); transformTobeMapped[4] = latestEstimate.translation().y(); transformTobeMapped[5] = latestEstimate.translation().z(); // save all the received edge and surf points pcl::PointCloud<PointType>::Ptr thisCornerKeyFrame(new pcl::PointCloud<PointType>()); pcl::PointCloud<PointType>::Ptr thisSurfKeyFrame(new pcl::PointCloud<PointType>()); pcl::copyPointCloud(*laserCloudCornerLastDS, *thisCornerKeyFrame); pcl::copyPointCloud(*laserCloudSurfLastDS, *thisSurfKeyFrame); // save key frame cloud cornerCloudKeyFrames.push_back(thisCornerKeyFrame); surfCloudKeyFrames.push_back(thisSurfKeyFrame); // save path for visualization updatePath(thisPose6D); } void correctPoses() { if (cloudKeyPoses3D->points.empty()) return; if (aLoopIsClosed == true) { // clear path globalPath.poses.clear(); // update key poses int numPoses = isamCurrentEstimate.size(); for (int i = 0; i < numPoses; ++i) { cloudKeyPoses3D->points[i].x = isamCurrentEstimate.at<Pose3>(i).translation().x(); cloudKeyPoses3D->points[i].y = isamCurrentEstimate.at<Pose3>(i).translation().y(); cloudKeyPoses3D->points[i].z = isamCurrentEstimate.at<Pose3>(i).translation().z(); cloudKeyPoses6D->points[i].x = cloudKeyPoses3D->points[i].x; cloudKeyPoses6D->points[i].y = cloudKeyPoses3D->points[i].y; cloudKeyPoses6D->points[i].z = cloudKeyPoses3D->points[i].z; cloudKeyPoses6D->points[i].roll = isamCurrentEstimate.at<Pose3>(i).rotation().roll(); cloudKeyPoses6D->points[i].pitch = isamCurrentEstimate.at<Pose3>(i).rotation().pitch(); cloudKeyPoses6D->points[i].yaw = isamCurrentEstimate.at<Pose3>(i).rotation().yaw(); updatePath(cloudKeyPoses6D->points[i]); } aLoopIsClosed = false; } } void updatePath(const PointTypePose& pose_in) { geometry_msgs::PoseStamped pose_stamped; pose_stamped.header.stamp = ros::Time().fromSec(pose_in.time); pose_stamped.header.frame_id = odometryFrame; pose_stamped.pose.position.x = pose_in.x; pose_stamped.pose.position.y = pose_in.y; pose_stamped.pose.position.z = pose_in.z; tf::Quaternion q = tf::createQuaternionFromRPY(pose_in.roll, pose_in.pitch, pose_in.yaw); pose_stamped.pose.orientation.x = q.x(); pose_stamped.pose.orientation.y = q.y(); pose_stamped.pose.orientation.z = q.z(); pose_stamped.pose.orientation.w = q.w(); globalPath.poses.push_back(pose_stamped); } void publishOdometry() { // Publish odometry for ROS (global) nav_msgs::Odometry laserOdometryROS; laserOdometryROS.header.stamp = timeLaserInfoStamp; laserOdometryROS.header.frame_id = odometryFrame; laserOdometryROS.child_frame_id = "odom_mapping"; laserOdometryROS.pose.pose.position.x = transformTobeMapped[3]; laserOdometryROS.pose.pose.position.y = transformTobeMapped[4]; laserOdometryROS.pose.pose.position.z = transformTobeMapped[5]; laserOdometryROS.pose.pose.orientation = tf::createQuaternionMsgFromRollPitchYaw(transformTobeMapped[0], transformTobeMapped[1], transformTobeMapped[2]); pubLaserOdometryGlobal.publish(laserOdometryROS); // Publish odometry for ROS (incremental) static bool lastIncreOdomPubFlag = false; static nav_msgs::Odometry laserOdomIncremental; // incremental odometry msg static Eigen::Affine3f increOdomAffine; // incremental odometry in affine if (lastIncreOdomPubFlag == false) { lastIncreOdomPubFlag = true; laserOdomIncremental = 
laserOdometryROS; increOdomAffine = trans2Affine3f(transformTobeMapped); } else { Eigen::Affine3f affineIncre = incrementalOdometryAffineFront.inverse() * incrementalOdometryAffineBack; increOdomAffine = increOdomAffine * affineIncre; float x, y, z, roll, pitch, yaw; pcl::getTranslationAndEulerAngles (increOdomAffine, x, y, z, roll, pitch, yaw); if (cloudInfo.imuAvailable == true) { if (std::abs(cloudInfo.imuPitchInit) < 1.4) { double imuWeight = 0.01; tf::Quaternion imuQuaternion; tf::Quaternion transformQuaternion; double rollMid, pitchMid, yawMid; transformQuaternion.setRPY(roll, pitch, 0); imuQuaternion.setRPY(cloudInfo.imuRollInit, cloudInfo.imuPitchInit, 0); tf::Matrix3x3(transformQuaternion.slerp(imuQuaternion, imuWeight)).getRPY(rollMid, pitchMid, yawMid); roll = rollMid; pitch = pitchMid; } } laserOdomIncremental.header.stamp = timeLaserInfoStamp; laserOdomIncremental.header.frame_id = odometryFrame; laserOdomIncremental.child_frame_id = "odom_mapping"; laserOdomIncremental.pose.pose.position.x = x; laserOdomIncremental.pose.pose.position.y = y; laserOdomIncremental.pose.pose.position.z = z; laserOdomIncremental.pose.pose.orientation = tf::createQuaternionMsgFromRollPitchYaw(roll, pitch, yaw); } pubLaserOdometryIncremental.publish(laserOdomIncremental); } void publishFrames() { if (cloudKeyPoses3D->points.empty()) return; // publish key poses publishCloud(&pubKeyPoses, cloudKeyPoses3D, timeLaserInfoStamp, odometryFrame); // Publish surrounding key frames publishCloud(&pubRecentKeyFrames, laserCloudSurfFromMapDS, timeLaserInfoStamp, odometryFrame); // publish registered key frame if (pubRecentKeyFrame.getNumSubscribers() != 0) { pcl::PointCloud<PointType>::Ptr cloudOut(new pcl::PointCloud<PointType>()); PointTypePose thisPose6D = trans2PointTypePose(transformTobeMapped); *cloudOut += *transformPointCloud(laserCloudCornerLastDS, &thisPose6D); *cloudOut += *transformPointCloud(laserCloudSurfLastDS, &thisPose6D); publishCloud(&pubRecentKeyFrame, cloudOut, timeLaserInfoStamp, odometryFrame); } // publish registered high-res raw cloud if (pubCloudRegisteredRaw.getNumSubscribers() != 0) { pcl::PointCloud<PointType>::Ptr cloudOut(new pcl::PointCloud<PointType>()); pcl::fromROSMsg(cloudInfo.cloud_deskewed, *cloudOut); PointTypePose thisPose6D = trans2PointTypePose(transformTobeMapped); *cloudOut = *transformPointCloud(cloudOut, &thisPose6D); publishCloud(&pubCloudRegisteredRaw, cloudOut, timeLaserInfoStamp, odometryFrame); } // publish path if (pubPath.getNumSubscribers() != 0) { globalPath.header.stamp = timeLaserInfoStamp; globalPath.header.frame_id = odometryFrame; pubPath.publish(globalPath); } } }; /* int main(int argc, char** argv) { ros::init(argc, argv, "lio_sam"); mapOptimization MO; ROS_INFO("\033[1;32m----> Map Optimization Started.\033[0m"); std::thread loopthread(&mapOptimization::loopClosureThread, &MO); std::thread visualizeMapThread(&mapOptimization::visualizeGlobalMapThread, &MO); ros::spin(); loopthread.join(); visualizeMapThread.join(); return 0; } */
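// ----------------------------------------------------------------------------
// A minimal sketch (not part of LIO-SAM): the edge residual assembled in
// cornerOptimization() above is the point-to-line distance
// d = |(p0 - p1) x (p0 - p2)| / |p1 - p2|, written out component-wise as
// a012 / l12.  The same geometry in Eigen (already used by this file);
// pointToLineDistance is an illustrative name, not an upstream function:
#include <Eigen/Dense>

static float pointToLineDistance(const Eigen::Vector3f &p0,   // query point
                                 const Eigen::Vector3f &p1,   // line endpoint 1
                                 const Eigen::Vector3f &p2)   // line endpoint 2
{
    // |cross| is the area of the parallelogram spanned by (p0-p1) and (p0-p2);
    // dividing by the base length |p1-p2| leaves the height, i.e. the distance.
    return ((p0 - p1).cross(p0 - p2)).norm() / (p1 - p2).norm();
}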
functions.h
//
// Created by xaris on 22/01/2021.
//

#pragma once

#include <algorithm>   // std::sort, std::lower_bound, std::merge
#include <cstdio>      // printf
#include <cstdlib>     // srand, rand
#include <cstring>     // std::memcpy
#include <ctime>       // time
#include <functional>  // std::less
#include <iterator>    // std::iterator_traits
#include <vector>
#include <omp.h>       // omp_get_max_threads

namespace omp_par {

    template <class T,class StrictWeakOrdering>
    void merge(T A_,T A_last,T B_,T B_last,T C_,int p,StrictWeakOrdering comp);

    template <class T,class StrictWeakOrdering>
    void merge_sort(T A,T A_last,StrictWeakOrdering comp);

    template <class T>
    void merge_sort(T A,T A_last);

    template <class T>
    void merge_sort_ptrs(T A,T A_last);

    template <class T, class I>
    T reduce(T* A, I cnt);

    template <class T, class I>
    void scan(T* A, T* B,I cnt);
}

template <class T,class StrictWeakOrdering>
void omp_par::merge(T A_,T A_last,T B_,T B_last,T C_,int p,StrictWeakOrdering comp){
  typedef typename std::iterator_traits<T>::difference_type _DiffType;
  typedef typename std::iterator_traits<T>::value_type _ValType;

  _DiffType N1=A_last-A_;
  _DiffType N2=B_last-B_;
  if(N1==0 && N2==0) return;
  if(N1==0 || N2==0){
    _ValType* A=(N1==0? &B_[0]: &A_[0]);
    _DiffType N=(N1==0? N2 : N1 );
    #pragma omp parallel for
    for(int i=0;i<p;i++){
      _DiffType indx1=( i *N)/p;
      _DiffType indx2=((i+1)*N)/p;
      std::memcpy(&C_[indx1], &A[indx1], (indx2-indx1)*sizeof(_ValType));
    }
    return;
  }

  //Split both arrays ( A and B ) into n equal parts.
  //Find the position of each split in the final merged array.
  int n=10;
  _ValType* split=new _ValType[p*n*2];
  _DiffType* split_size=new _DiffType[p*n*2];
  #pragma omp parallel for
  for(int i=0;i<p;i++){
    for(int j=0;j<n;j++){
      int indx=i*n+j;
      _DiffType indx1=(indx*N1)/(p*n);
      split     [indx]=A_[indx1];
      split_size[indx]=indx1+(std::lower_bound(B_,B_last,split[indx],comp)-B_);

      indx1=(indx*N2)/(p*n);
      indx+=p*n;
      split     [indx]=B_[indx1];
      split_size[indx]=indx1+(std::lower_bound(A_,A_last,split[indx],comp)-A_);
    }
  }

  //Find the closest split position for each thread that will
  //divide the final array equally between the threads.
  _DiffType* split_indx_A=new _DiffType[p+1];
  _DiffType* split_indx_B=new _DiffType[p+1];
  split_indx_A[0]=0; split_indx_B[0]=0;
  split_indx_A[p]=N1; split_indx_B[p]=N2;
  #pragma omp parallel for
  for(int i=1;i<p;i++){
    _DiffType req_size=(i*(N1+N2))/p;

    int j=std::lower_bound(&split_size[0],&split_size[p*n],req_size,std::less<_DiffType>())-&split_size[0];
    if(j>=p*n) j=p*n-1;
    _ValType  split1     =split     [j];
    _DiffType split_size1=split_size[j];

    j=(std::lower_bound(&split_size[p*n],&split_size[p*n*2],req_size,std::less<_DiffType>())-&split_size[p*n])+p*n;
    if(j>=2*p*n) j=2*p*n-1;
    if(std::abs(split_size[j]-req_size)<std::abs(split_size1-req_size)){
      split1     =split     [j];
      split_size1=split_size[j];
    }

    split_indx_A[i]=std::lower_bound(A_,A_last,split1,comp)-A_;
    split_indx_B[i]=std::lower_bound(B_,B_last,split1,comp)-B_;
  }
  delete[] split;
  delete[] split_size;

  //Merge for each thread independently.
#pragma omp parallel for for(int i=0;i<p;i++){ T C=C_+split_indx_A[i]+split_indx_B[i]; //std::merge(A_+split_indx_A[i],A_+split_indx_A[i+1],B_+split_indx_B[i],B_+split_indx_B[i+1],C,comp); //sse<_ValType>::merge(A_+split_indx_A[i],A_+split_indx_A[i+1],B_+split_indx_B[i],B_+split_indx_B[i+1],C); std::merge(A_+split_indx_A[i],A_+split_indx_A[i+1],B_+split_indx_B[i],B_+split_indx_B[i+1],C); } delete[] split_indx_A; delete[] split_indx_B; } template <class T,class StrictWeakOrdering> void omp_par::merge_sort(T A,T A_last,StrictWeakOrdering comp){ typedef typename std::iterator_traits<T>::difference_type _DiffType; typedef typename std::iterator_traits<T>::value_type _ValType; int p=omp_get_max_threads(); _DiffType N=A_last-A; if(N<2*p || p==1){ std::sort(A,A_last,comp); return; } //Split the array A into p equal parts. _DiffType* split=new _DiffType[p+1]; split[p]=N; #pragma omp parallel for for(int id=0;id<p;id++){ split[id]=(id*N)/p; } //Sort each part independently. #pragma omp parallel for for(int id=0;id<p;id++){ std::sort(A+split[id],A+split[id+1],comp); } //Merge two parts at a time. _ValType* B=new _ValType[N]; _ValType* A_=&A[0]; _ValType* B_=&B[0]; for(int j=1;j<p;j=j*2){ for(int i=0;i<p;i=i+2*j){ if(i+j<p){ omp_par::merge(A_+split[i],A_+split[i+j],A_+split[i+j],A_+split[(i+2*j<=p?i+2*j:p)],B_+split[i],p,comp); }else{ #pragma omp parallel for for(int k=split[i];k<split[p];k++) B_[k]=A_[k]; } } _ValType* tmp_swap=A_; A_=B_; B_=tmp_swap; } //The final result should be in A. if(A_!=&A[0]){ #pragma omp parallel for for(int i=0;i<N;i++) A[i]=A_[i]; } //Free memory. delete[] split; delete[] B; } template <class T> void omp_par::merge_sort(T A,T A_last){ typedef typename std::iterator_traits<T>::value_type _ValType; if(sizeof(_ValType)<=8*sizeof(_ValType*)) omp_par::merge_sort(A,A_last,std::less<_ValType>()); else omp_par::merge_sort_ptrs(A,A_last); } template <class T> struct DataPtr{ public: T* elem; inline bool operator < ( DataPtr<T> const &other) const { return ((*elem)<(*(other.elem))); } }; template <class T> void omp_par::merge_sort_ptrs(T A,T A_last){ //std::cout<<"Using Pointer sort.\n"; int p=omp_get_max_threads(); typedef typename std::iterator_traits<T>::difference_type _DiffType; typedef typename std::iterator_traits<T>::value_type _ValType; _DiffType N=A_last-A; // Make copy and init pointer array to be sorted. DataPtr<_ValType>* B=new DataPtr<_ValType>[N]; _ValType* C=new _ValType[N]; #pragma omp parallel for for(int i=0;i<p;i++){ _DiffType start=(N*i)/p; _DiffType end=(N*(i+1))/p; std::memcpy(&C[start], &A[start], (end-start)*sizeof(_ValType)); for(_DiffType j=start;j<end;j++) B[j].elem=&C[0]+j; } // Sort pointers. omp_par::merge_sort(B,B+N,std::less<DataPtr<_ValType> >()); // Copy data to its sorted position. 
#pragma omp parallel for for(int i=0;i<p;i++){ _DiffType start=(N*i)/p; _DiffType end=(N*(i+1))/p; for(size_t j=start;j<end;j++) A[j]=*(B[j].elem); } delete[] B; delete[] C; } template <class T, class I> T omp_par::reduce(T* A, I cnt){ T sum=0; #pragma omp parallel for reduction(+:sum) for(I i = 0; i < cnt; i++) sum+=A[i]; return sum; } template <class T, class I> void omp_par::scan(T* A, T* B,I cnt){ int p=omp_get_max_threads(); if(cnt<100*p){ for(I i=1;i<cnt;i++) B[i]=B[i-1]+A[i-1]; return; } I step_size=cnt/p; #pragma omp parallel for for(int i=0; i<p; i++){ int start=i*step_size; int end=start+step_size; if(i==p-1) end=cnt; if(i!=0)B[start]=0; for(I j=start+1; j<end; j++) B[j]=B[j-1]+A[j-1]; } T* sum=new T[p]; sum[0]=0; for(int i=1;i<p;i++) sum[i]=sum[i-1]+B[i*step_size-1]+A[i*step_size-1]; #pragma omp parallel for for(int i=1; i<p; i++){ int start=i*step_size; int end=start+step_size; if(i==p-1) end=cnt; T sum_=sum[i]; for(I j=start; j<end; j++) B[j]+=sum_; } delete[] sum; } std::vector<unsigned int> init_array(int size,int task) { /* Intializes random number generator */ srand((unsigned) time(NULL)+task*100); std::vector<unsigned int> v(size); // contains size number of 0's std::generate (v.begin(), v.end(), []{ return rand() % 100; }); return v; } void print_array(std::vector<unsigned int> v, int size) { //This program prints the n values of an array int i; // printf("["); for(int i=0; i<size-1; i++){printf("%2d, ",v[i]);} printf("%2d]",v[size-1]); } void print_array_in_process(std::vector<unsigned int> v, int n, int p, int rank ) { printf("Array in process %d of %d with %d elements : ",rank,p,n); //This program prints the n values of an array int i; // printf("["); for(int i=0; i<n-1; i++){printf("%2d, ",v[i]);} printf("%2d]",v[n-1]); printf ( "\n\n"); } void scan(int *A, int* B, int cnt){ int p=omp_get_max_threads(); if(cnt<100*p){ for(int i=1;i<cnt;i++) B[i]=B[i-1]+A[i-1]; return; } int step_size=cnt/p; #pragma omp parallel for for(int i=0; i<p; i++){ int start=i*step_size; int end=start+step_size; if(i==p-1) end=cnt; if(i!=0)B[start]=0; for(int j=start+1; j<end; j++) B[j]=B[j-1]+A[j-1]; } int* sum=new int[p]; sum[0]=0; for(int i=1;i<p;i++) sum[i]=sum[i-1]+B[i*step_size-1]+A[i*step_size-1]; #pragma omp parallel for for(int i=1; i<p; i++){ int start=i*step_size; int end=start+step_size; if(i==p-1) end=cnt; int sum_=sum[i]; for(int j=start; j<end; j++) B[j]+=sum_; } delete[] sum; } void MergeLists( std::vector<unsigned int> &listA, std::vector<unsigned int> &listB) { unsigned int _low, _high; _low = ( (listA[0] > listB[0]) ? listA[0] : listB[0]); _high = ( (listA[listA.size()-1] < listB[listB.size()-1]) ? listA[listA.size()-1] : listB[listB.size()-1]); // We will do a full merge first ... size_t list_size = listA.size() + listB.size(); std::vector<unsigned int> scratch_list(list_size); unsigned int index1 = 0; unsigned int index2 = 0; for (size_t i = 0; i < list_size; i++) { //The order of (A || B) is important here, //so that index2 remains within bounds if ( (index1 < listA.size()) && ( (index2 >= listB.size()) || (listA[index1] <= listB[index2]) ) ) { scratch_list[i] = listA[index1]; index1++; } else { scratch_list[i] = listB[index2]; index2++; } } listA.clear(); listB.clear(); int ii=0; while ( ( (scratch_list[ii] < _low) || (ii < (list_size/2)) ) && (scratch_list[ii] <= _high) ) { ii++; } if(ii) { listA.insert(listA.end(), scratch_list.begin(), (scratch_list.begin() + ii)); } scratch_list.clear(); }
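// A minimal usage sketch for the helpers above (illustrative only, not part
// of the original header; compile with -fopenmp). Note that scan() computes
// an exclusive prefix sum and never writes B[0], so B must start zeroed.
#include <cstdio>
#include <vector>

inline int functions_usage_example()
{
    std::vector<unsigned int> v = init_array(1000, /*task=*/0);
    omp_par::merge_sort(v.begin(), v.end());        // parallel merge sort

    std::vector<int> a(1000, 1), b(1000, 0);
    scan(a.data(), b.data(), 1000);                 // b[i] = a[0] + ... + a[i-1]
    std::printf("b[999] = %d\n", b[999]);           // prints 999
    return 0;
}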
GB_unop__isinf_bool_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__isinf_bool_fp64 // op(A') function: GB_unop_tran__isinf_bool_fp64 // C type: bool // A type: double // cast: double cij = (aij) // unaryop: cij = isinf (aij) #define GB_ATYPE \ double #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = isinf (x) ; // casting #define GB_CAST(z, aij) \ double z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (aij) ; \ Cx [pC] = isinf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISINF || GxB_NO_BOOL || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__isinf_bool_fp64 ( bool *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = (aij) ; Cx [p] = isinf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = (aij) ; Cx [p] = isinf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__isinf_bool_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
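// A standalone analogue of the pattern generated above (illustrative only;
// demo_isinf_apply is not a GraphBLAS function): one dense loop when A is
// full (Ab == NULL) and one guarded loop over A->b when A is bitmap.
#include <math.h>
#include <stdbool.h>
#include <stdint.h>

static void demo_isinf_apply (bool *Cx, const double *Ax,
                              const int8_t *Ab, int64_t anz)
{
    int64_t p ;
    #pragma omp parallel for schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   // skip unset bitmap entries
        Cx [p] = isinf (Ax [p]) ;               // cij = isinf (aij)
    }
}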
Nonlocal_TV_core.c
/* * This work is part of the Core Imaging Library developed by * Visual Analytics and Imaging System Group of the Science Technology * Facilities Council, STFC and Diamond Light Source Ltd. * * Copyright 2017 Daniil Kazantsev * Copyright 2017 Srikanth Nagella, Edoardo Pasca * Copyright 2018 Diamond Light Source Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "Nonlocal_TV_core.h" /* C-OMP implementation of non-local regulariser * Weights and associated indices must be given as an input. * Gauss-Seidel fixed point iteration requires ~ 3 iterations, so the main effort * goes in pre-calculation of weights and selection of patches * * * Input Parameters: * 1. 2D/3D grayscale image/volume * 2. AR_i - indeces of i neighbours * 3. AR_j - indeces of j neighbours * 4. AR_k - indeces of k neighbours (0 - for 2D case) * 5. Weights_ij(k) - associated weights * 6. regularisation parameter * 7. iterations number * * Output: * 1. denoised image/volume * Elmoataz, Abderrahim, Olivier Lezoray, and Sébastien Bougleux. "Nonlocal discrete regularization on weighted graphs: a framework for image and manifold processing." IEEE Trans. Image Processing 17, no. 7 (2008): 1047-1060. * */ /*****************************************************************************/ float Nonlocal_TV_CPU_main(float *A_orig, float *Output, unsigned short *H_i, unsigned short *H_j, unsigned short *H_k, float *Weights, int dimX, int dimY, int dimZ, int NumNeighb, float lambdaReg, int IterNumb, int switchM) { long i, j, k; int iter; lambdaReg = 1.0f/lambdaReg; /*****2D INPUT *****/ if (dimZ == 0) { copyIm(A_orig, Output, (long)(dimX), (long)(dimY), 1l); /* for each pixel store indeces of the most similar neighbours (patches) */ for(iter=0; iter<IterNumb; iter++) { #pragma omp parallel for shared (A_orig, Output, Weights, H_i, H_j, iter) private(i,j) for(j=0; j<(long)(dimY); j++) { for(i=0; i<(long)(dimX); i++) { /*NLM_H1_2D(Output, A_orig, H_i, H_j, Weights, i, j, (long)(dimX), (long)(dimY), NumNeighb, lambdaReg);*/ /* NLM - H1 penalty */ if (switchM == 1) { NLM_TV_2D(Output, A_orig, H_j, H_i, Weights, i, j, (long)(dimX), (long)(dimY), NumNeighb, lambdaReg); /* NLM - TV penalty */ } else { NLM_TV_2D(Output, A_orig, H_i, H_j, Weights, i, j, (long)(dimX), (long)(dimY), NumNeighb, lambdaReg); /* NLM - TV penalty */ } }} } } else { /*****3D INPUT *****/ copyIm(A_orig, Output, (long)(dimX), (long)(dimY), (long)(dimZ)); /* for each pixel store indeces of the most similar neighbours (patches) */ for(iter=0; iter<IterNumb; iter++) { #pragma omp parallel for shared (A_orig, Output, Weights, H_i, H_j, H_k, iter) private(i,j,k) for(k=0; k<(long)(dimZ); k++) { for(j=0; j<(long)(dimY); j++) { for(i=0; i<(long)(dimX); i++) { /* NLM_H1_3D(Output, A_orig, H_i, H_j, H_k, Weights, i, j, k, dimX, dimY, dimZ, NumNeighb, lambdaReg); */ /* NLM - H1 penalty */ NLM_TV_3D(Output, A_orig, H_i, H_j, H_k, Weights, i, j, k, (long)(dimX), (long)(dimY), (long)(dimZ), NumNeighb, lambdaReg); /* NLM - TV penalty */ }}} } } return *Output; } /***********<<<<Main Function 
for NLM - H1 penalty>>>>**********/ float NLM_H1_2D(float *A, float *A_orig, unsigned short *H_i, unsigned short *H_j, float *Weights, long i, long j, long dimX, long dimY, int NumNeighb, float lambdaReg) { long x, i1, j1, index, index_m; float value = 0.0f, normweight = 0.0f; index_m = j*dimX+i; for(x=0; x < NumNeighb; x++) { index = (dimX*dimY*x) + j*dimX+i; i1 = H_i[index]; j1 = H_j[index]; value += A[j1*dimX+i1]*Weights[index]; normweight += Weights[index]; } A[index_m] = (lambdaReg*A_orig[index_m] + value)/(lambdaReg + normweight); return *A; } /*3D version*/ float NLM_H1_3D(float *A, float *A_orig, unsigned short *H_i, unsigned short *H_j, unsigned short *H_k, float *Weights, long i, long j, long k, long dimX, long dimY, long dimZ, int NumNeighb, float lambdaReg) { long x, i1, j1, k1, index; float value = 0.0f, normweight = 0.0f; for(x=0; x < NumNeighb; x++) { index = dimX*dimY*dimZ*x + (dimX*dimY*k) + j*dimX+i; i1 = H_i[index]; j1 = H_j[index]; k1 = H_k[index]; value += A[(dimX*dimY*k1) + j1*dimX+i1]*Weights[index]; normweight += Weights[index]; } A[(dimX*dimY*k) + j*dimX+i] = (lambdaReg*A_orig[(dimX*dimY*k) + j*dimX+i] + value)/(lambdaReg + normweight); return *A; } /***********<<<<Main Function for NLM - TV penalty>>>>**********/ float NLM_TV_2D(float *A, float *A_orig, unsigned short *H_i, unsigned short *H_j, float *Weights, long i, long j, long dimX, long dimY, int NumNeighb, float lambdaReg) { long x, i1, j1, index, index_m; float value = 0.0f, normweight = 0.0f, NLgrad_magn = 0.0f, NLCoeff; index_m = j*dimX+i; for(x=0; x < NumNeighb; x++) { index = (dimX*dimY*x) + j*dimX+i; /*c*/ i1 = H_i[index]; j1 = H_j[index]; NLgrad_magn += powf((A[j1*dimX+i1] - A[index_m]),2)*Weights[index]; } NLgrad_magn = sqrtf(NLgrad_magn); /*Non Local Gradients Magnitude */ NLCoeff = 2.0f*(1.0f/(NLgrad_magn + EPS)); for(x=0; x < NumNeighb; x++) { index = (dimX*dimY*x) + j*dimX+i; /*c*/ i1 = H_i[index]; j1 = H_j[index]; value += A[j1*dimX+i1]*NLCoeff*Weights[index]; normweight += Weights[index]*NLCoeff; } A[index_m] = (lambdaReg*A_orig[index_m] + value)/(lambdaReg + normweight); return *A; } /*3D version*/ float NLM_TV_3D(float *A, float *A_orig, unsigned short *H_i, unsigned short *H_j, unsigned short *H_k, float *Weights, long i, long j, long k, long dimX, long dimY, long dimZ, int NumNeighb, float lambdaReg) { long x, i1, j1, k1, index; float value = 0.0f, normweight = 0.0f, NLgrad_magn = 0.0f, NLCoeff; for(x=0; x < NumNeighb; x++) { index = dimX*dimY*dimZ*x + (dimX*dimY*k) + j*dimX+i; i1 = H_i[index]; j1 = H_j[index]; k1 = H_k[index]; NLgrad_magn += powf((A[(dimX*dimY*k1) + j1*dimX+i1] - A[(dimX*dimY*k1) + j*dimX+i]),2)*Weights[index]; } NLgrad_magn = sqrtf(NLgrad_magn); /*Non Local Gradients Magnitude */ NLCoeff = 2.0f*(1.0f/(NLgrad_magn + EPS)); for(x=0; x < NumNeighb; x++) { index = dimX*dimY*dimZ*x + (dimX*dimY*k) + j*dimX+i; i1 = H_i[index]; j1 = H_j[index]; k1 = H_k[index]; value += A[(dimX*dimY*k1) + j1*dimX+i1]*NLCoeff*Weights[index]; normweight += Weights[index]*NLCoeff; } A[(dimX*dimY*k) + j*dimX+i] = (lambdaReg*A_orig[(dimX*dimY*k) + j*dimX+i] + value)/(lambdaReg + normweight); return *A; }
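/* A minimal smoke test for Nonlocal_TV_CPU_main() above (illustrative only;
   assumes the rest of the toolkit, e.g. copyIm(), is linked in).  With
   NumNeighb = 1 and every pixel listed as its own neighbour with weight 1,
   the non-local gradient is zero, so the fixed point leaves the image
   essentially unchanged: */
#include <stdlib.h>

void nltv_smoke_test(void)
{
    const int dimX = 4, dimY = 4, N = dimX * dimY;
    float *A   = calloc(N, sizeof(float));
    float *Out = calloc(N, sizeof(float));
    unsigned short *Hi = calloc(N, sizeof(unsigned short));
    unsigned short *Hj = calloc(N, sizeof(unsigned short));
    float *W = malloc(N * sizeof(float));
    for (long j = 0; j < dimY; j++) {
        for (long i = 0; i < dimX; i++) {
            Hi[j*dimX + i] = (unsigned short)i;  /* each pixel is its own  */
            Hj[j*dimX + i] = (unsigned short)j;  /* one and only neighbour */
            W [j*dimX + i] = 1.0f;
            A [j*dimX + i] = (float)(i + j);
        }
    }
    /* dimZ = 0 selects the 2D branch; H_k is unused there */
    Nonlocal_TV_CPU_main(A, Out, Hi, Hj, NULL, W, dimX, dimY, 0,
                         1 /*NumNeighb*/, 0.1f /*lambdaReg*/, 3 /*iters*/, 0);
    free(A); free(Out); free(Hi); free(Hj); free(W);
}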
GB_unop__identity_uint16_uint16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: (none) // op(A') function: GB_unop_tran__identity_uint16_uint16 // C type: uint16_t // A type: uint16_t // cast: uint16_t cij = aij // unaryop: cij = aij #define GB_ATYPE \ uint16_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint16_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint16_t z = aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( uint16_t *Cx, // Cx and Ax may be aliased const uint16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t aij = Ax [p] ; uint16_t z = aij ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_uint16_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
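// The op(A) kernel above is compiled out (#if 0): identity with no typecast
// never needs an element-wise apply, because Cx = op (Ax) degenerates to a
// flat copy.  A standalone illustration (plain C, not library code):
#include <stdint.h>
#include <string.h>

static void demo_identity_apply_uint16 (uint16_t *Cx, const uint16_t *Ax,
                                        int64_t anz)
{
    // equivalent to the per-element loop "Cx [p] = Ax [p]"
    memcpy (Cx, Ax, (size_t) anz * sizeof (uint16_t)) ;
}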
compact_qed.h
/// \file /// /// \brief Naive compact QED action /// \details The naice compact QED action implemented here does not know about /// expansions. #pragma once #include "../gaugegroups/u1.h" #include "../lattice.h" #include "action.h" #include "expansion.h" #include <algorithm> #include <cassert> #include <cstddef> #include <limits> #include <numeric> #ifndef M_PI #define M_PI 3.14159265358979323846 // Double precision pi #endif using size_t = std::size_t; template <size_t dim> class QEDAction { private: LinkLattice<U1, dim> const *lattice; double beta; public: // ********************************************************************** // Constructors // ********************************************************************** // Default constructor QEDAction() : beta(1.0) { lattice = nullptr; } // Construct with given lattice QEDAction(LinkLattice<U1, dim> &orig, const double &coupling = 1.0) : lattice(&orig), beta(coupling) { } // ********************************************************************** // Deconstructor // ********************************************************************** ~QEDAction() = default; // ********************************************************************** // Member Functions // ********************************************************************** /// Setter for beta void setbeta(const double &coupling) { beta = coupling; } /// Getter for beta double getbeta() { return this->beta; } template <typename P = double> double atLink( size_t const &idx, size_t const &dir, const BoundaryCondition<P, dim> &bc = BoundaryCondition<P, dim>()) const { auto lat = *lattice; LinkLatticeIterator<const LinkLattice<U1, dim>> lli(idx, lat); if (idx >= lattice->volume()) { throw std::runtime_error("Site out of range"); } if (dir >= lattice->dimensions()) { throw std::runtime_error("Direction out of range"); } return beta * (static_cast<double>((dim - 1)) * 2. 
- std::real((lli.link(dir)).value() * staples(idx, dir, bc))); } template <typename P = double> std::complex<double> staples( size_t const &idx, size_t const &dir, const BoundaryCondition<P, dim> &bc = BoundaryCondition<P, dim>()) const { auto lat = *lattice; LinkLatticeIterator<const LinkLattice<U1, dim>> lli(idx, lat); std::complex<double> A = std::complex<double>(0.0); for (auto nu = 0LU; nu < dim; ++nu) { if (nu != dir) { U1 plus(std::complex<double>(1., 0.)); U1 minus(std::complex<double>(1., 0.)); // U_ν(x+µ)dagger(U_µ(n+ν))dagger(U_ν(x)) plus *= lli.neighborLink(dir, nu, Direction::FORWARD, bc); plus *= dagger(lli.neighborLink(nu, dir, Direction::FORWARD, bc)); plus *= dagger(lli.link(nu)); // access to site (x-ν) auto neib = lli.neighbor(nu, -1); // dagger(U_ν(x+µ-ν))dagger(U_µ(n-ν))U_ν(x-ν) minus *= dagger(neib.neighborLink(dir, nu, Direction::FORWARD, bc)); minus *= dagger(neib.link(dir)); minus *= neib.link(nu); A += plus.value() + minus.value(); } } return A; } /// Returns (the real part of the trace of ) the plaquette template <typename P = double> auto plaquette( size_t site, size_t mu, size_t nu, BoundaryCondition<P, dim> const &bc = BoundaryCondition<P, dim>()) const -> decltype(std::real(trace(U1(1.)))) { if (site >= lattice->volume()) { throw std::runtime_error("Site out of range"); } if (mu >= dim) { throw std::runtime_error("mu out of range"); } if (nu >= dim) { throw std::runtime_error("nu out of range"); } if (mu == nu) { throw std::runtime_error("mu must not equal nu"); } auto it = lattice->begin(); it = it[site]; // U_µν(x) =U_µ(x)U_ν(x+µ)dagger(U_µ(n+ν))dagger(U_ν(x)) auto plaq = it.link(mu); plaq *= it.neighborLink(mu, nu, Direction::FORWARD, bc); plaq *= dagger(it.neighborLink(nu, mu, Direction::FORWARD, bc)); plaq *= dagger(it.link(nu)); return std::real(trace(plaq)); } /// Compute mean plaquette in the mu-nu plane template <typename P = double> auto meanPlaquette(size_t mu, size_t nu, BoundaryCondition<P, dim> const &bc = BoundaryCondition<P, dim>()) const -> decltype(std::real(trace(U1(1.)))) { auto res = plaquette(0lu, mu, nu, bc); for (auto i = 1ul; i < lattice->volume(); ++i) { res += plaquette(i, mu, nu); } return res / static_cast<double>(lattice->volume()); } /// Compute global plaquette average template <typename P = double> auto meanPlaquette( BoundaryCondition<P, dim> const &bc = BoundaryCondition<P, dim>()) const -> decltype(std::real(trace(U1(1.)))) { decltype(std::real(trace(U1(1.)))) res = 0.0; for (auto mu = 0lu; mu < dim; ++mu) { for (auto nu = mu + 1lu; nu < dim; ++nu) { auto res_mu_nu = plaquette(0ul, mu, nu); for (auto i = 1ul; i < lattice->volume(); ++i) { res_mu_nu += plaquette(i, mu, nu, bc); } res_mu_nu *= 1. / static_cast<double>(lattice->volume()); res += res_mu_nu; } } return 2.0 * res / (static_cast<double>(dim * (dim - 1))); } template <typename P = double> auto energyDensity( BoundaryCondition<P, dim> const &bc = BoundaryCondition<P, dim>()) const -> decltype(std::real(trace(U1(1.)))) { return (1. 
- meanPlaquette(bc)); } auto GaugeFieldSpatialSum() -> std::vector<std::array<double, dim>> { auto dims = lattice->dimensionsArray(); std::array<double, dim> A; std::vector<std::array<double, dim>> Avec; A.fill(0.); // Output vector has size of temporal lattice extend for (auto l = 0LU; l < dims[0]; ++l) { Avec.push_back(A); // Init with "zero" } // Loop over all lattice sites // std::vector<std::size_t> tvec; // tvec.resize(Avec.size()); for (auto fli = lattice->begin(); fli != lattice->end(); ++fli) { auto cord = lattice->linearIndexToCoord(fli.index()); auto t = cord[0]; // tvec[t]++; for (auto mu = 0LU; mu < dim; ++mu) { (Avec[t])[mu] += std::sin((fli.link(mu)).phase()); } } // No normalisation !!! return Avec; } }; template <size_t dim, typename P = double> class Compact_QED_update { private: LinkLattice<U1, dim> *const lattice; double beta; BoundaryCondition<P, dim> link_bc; public: // ********************************************************************** // Constructors // ********************************************************************** Compact_QED_update() : lattice(nullptr), beta(1.0), link_bc(BoundaryCondition<double, dim>()) { } Compact_QED_update( LinkLattice<U1, dim> *const l, double const &coupling, BoundaryCondition<P, dim> const &lb = BoundaryCondition<P, dim>()) : lattice(l), beta(coupling), link_bc(lb) { } // ********************************************************************** // Destructor // ********************************************************************** ~Compact_QED_update() = default; // ********************************************************************** // Member functions // ********************************************************************** /// Helper function for MC update template <typename generator> bool accept(double const &res, generator &gen) const { // Always accept smaller new action if (res > 1.) { return true; } // MC for larger new action std::uniform_real_distribution<double> dist(0, 1.0); double rnd = dist(gen); return (rnd <= res); } /// Multi-hit MC template <typename generator> std::size_t multihit_MC(generator &gen, const std::size_t &hits = 1, const double &eps = 1.e-2) const { std::size_t accepted = 0; // Iterate over lattice sites for (LinkLatticeIterator<LinkLattice<U1, dim>> it = this->lattice->begin(); it != this->lattice->end(); ++it) { // Loop over links for (auto mu = 0LU; mu < this->lattice->dimensions(); ++mu) { // Get Link auto U_old = it.link(mu); // Compute staples std::complex<double> A = std::complex<double>(0.0); for (auto nu = 0LU; nu < this->lattice->dimensions(); ++nu) { if (nu != mu) { /// \todo Check boundary conditions in staples U1 plus(std::complex<double>(1., 0.)); U1 minus(std::complex<double>(1., 0.)); // U_ν(x+µ)dagger(U_µ(n+ν))dagger(U_ν(x)) plus *= it.neighborLink(mu, nu, Direction::FORWARD, this->link_bc); plus *= dagger( it.neighborLink(nu, mu, Direction::FORWARD, this->link_bc)); plus *= dagger(it.link(nu)); // access to site (x-ν) auto neib = it.neighbor(nu, -1); // dagger(U_ν(x+µ-ν))dagger(U_µ(n-ν))U_ν(x-ν) minus *= dagger( neib.neighborLink(mu, nu, Direction::FORWARD, this->link_bc)); minus *= dagger( it.neighborLink(nu, mu, Direction::BACKWARD, this->link_bc)); minus *= it.neighborLink(nu, nu, Direction::BACKWARD, this->link_bc); A += plus.value() + minus.value(); } } // MC part for (auto h = 0LU; h < hits; ++h) { auto U_new = update(U_old, gen, eps); auto delta_S = -beta * std::real((U_new.value() - U_old.value()) * A); auto boltzmann = std::exp(-1. 
* delta_S); auto change = this->accept(boltzmann, gen); // std::cout << delta_S << "\t" << boltzmann << "\t" << change << // std::endl; if (change) { ++accepted; U_old = U_new; (*it)[mu] = U_new; } } } } // Return the number of accepted changes for acceptance tuning and // book keeping return accepted; } /// Overrelaxation void overrelaxation(size_t const &sweeps = 1) const { for (auto s = 0LU; s < sweeps; ++s) { // Iterate over lattice sites for (LinkLatticeIterator<LinkLattice<U1, dim>> it = this->lattice->begin(); it != this->lattice->end(); ++it) { // Loop over links for (auto mu = 0LU; mu < this->lattice->dimensions(); ++mu) { // Compute staples std::complex<double> A = std::complex<double>(0.0); for (auto nu = 0LU; nu < this->lattice->dimensions(); ++nu) { if (nu != mu) { /// \todo Check boundary conditions in staples U1 plus(std::complex<double>(1., 0.)); U1 minus(std::complex<double>(1., 0.)); // U_ν(x+µ)dagger(U_µ(n+ν))dagger(U_ν(x)) plus *= it.neighborLink(mu, nu, Direction::FORWARD, this->link_bc); plus *= dagger( it.neighborLink(nu, mu, Direction::FORWARD, this->link_bc)); plus *= dagger(it.link(nu)); // access to site (x-ν) auto neib = it.neighbor(nu, -1); // dagger(U_ν(x+µ-ν))dagger(U_µ(n-ν))U_ν(x-ν) minus *= dagger( neib.neighborLink(mu, nu, Direction::FORWARD, this->link_bc)); minus *= dagger( it.neighborLink(nu, mu, Direction::BACKWARD, this->link_bc)); minus *= it.neighborLink(nu, nu, Direction::BACKWARD, this->link_bc); A += plus.value() + minus.value(); } } auto alpha = std::arg(A); auto phi = std::arg((it.link(mu)).value()); // update phase phi = 2 * M_PI - 2 * alpha - phi; (*it)[mu] = std::polar(1., phi); } } } } template <typename generator> double Sweep(generator &gen, const double &eps = 0.5, size_t nsweep = 1, size_t nhit = 1, size_t nor = 1) const { auto accepted = 0LU; for (auto sw = 0ul; sw < nsweep; ++sw) { accepted += multihit_MC(gen, nhit, eps); overrelaxation(nor); } return static_cast<double>(accepted) / static_cast<double>(nhit * nsweep * lattice->volume() * lattice->dimensions()); } template <typename generator> void prepare_hot(generator &gen) const { // Iterate over lattice sites for (LinkLatticeIterator<LinkLattice<U1, dim>> it = this->lattice->begin(); it != this->lattice->end(); ++it) { // Loop over links for (auto mu = 0LU; mu < this->lattice->dimensions(); ++mu) { (*it)[mu] = randomU1(gen); } } } }; template <size_t dim> class QEDGaugeFix { private: LinkLattice<U1, dim> *const lattice; double beta; public: // ********************************************************************** // Constructors // ********************************************************************** /// Default constructor QEDGaugeFix() : beta(1.0) { lattice = nullptr; } /// Construct with given lattice QEDGaugeFix(LinkLattice<U1, dim> &orig, const double &coupling = 1.0) : lattice(&orig), beta(coupling) { } // ********************************************************************** // Deconstructor // ********************************************************************** ~QEDGaugeFix() = default; // ********************************************************************** // Member Functions // ********************************************************************** void randomGaugeTransformation() { auto &lat = *lattice; LinkLatticeIterator<LinkLattice<U1, dim>> lli(0, lat); // Init ranlux with true random number (Well, at least we try. 
The C++ // standard does not guaranty std::random_device() gives a true random // number) std::random_device rd; std::seed_seq sseq({ rd(), rd(), rd() }); // std::seed_seq sseq ({1,2,3,4}); std::ranlux48 generator(sseq); // std::ostream_iterator<unsigned> out (std::cout," "); // sseq.param(out); std::cout << "\n" << generator << "\n" << std::endl; // Uniformly pick a phase from [-pi,pi) // Note that this is consistent with the std::arg function from std::complex std::uniform_real_distribution<double> dist(-1.0, 1.0); for (lli = lat.begin(); lli != lat.end(); ++lli) { double rphase = M_PI * dist(generator); std::complex<double> ii(0, 1); for (auto i = 0UL; i < dim; ++i) { auto n_dir = lli.neighbor(i, -1); (*lli)[i] *= std::exp(-ii * rphase); (*n_dir)[i] *= std::exp(ii * rphase); } } } double LandauGaugeFunctional() const { double res = 0; std::size_t i; #pragma omp parallel for private(i) reduction(+ : res) for (auto idx = 0LU; idx < lattice->volume(); ++idx) { auto links = (*lattice)[idx]; for (i = 0; i < lattice->dimensions(); ++i) { res += std::cos(links[i].phase()); } } return res; } void localLandauGauge(size_t const &idx, const double or_param = 1.0) { auto &lat = *lattice; LinkLatticeIterator<LinkLattice<U1, dim>> lli(idx, lat); // Calculate the phase to locally fix the config to Landau gauge double num = 0.0; double den = 0.0; for (auto i = 0UL; i < dim; ++i) { auto n_dir = lli.neighbor(i, -1); auto here = (lli.link(i)).phase(); auto neib = (n_dir.link(i)).phase(); num += (std::sin(here) - std::sin(neib)); den += (std::cos(here) + std::cos(neib)); } auto phase = or_param * std::atan(num / den); // Apply gauge transformation to all adjacent links std::complex<double> ii(0, 1); for (auto i = 0UL; i < dim; ++i) { auto n_dir = lli.neighbor(i, -1); (*lli)[i] *= std::exp(-ii * phase); (*n_dir)[i] *= std::exp(ii * phase); } } double localLandauGaugeQuality(size_t const &idx) { auto &lat = *lattice; LinkLatticeIterator<LinkLattice<U1, dim>> lli(idx, lat); double res = 0.0; for (auto i = 0UL; i < dim; ++i) { auto n_dir = lli.neighbor(i, -1); auto here = (lli.link(i)).phase(); auto neib = (n_dir.link(i)).phase(); res += (std::sin(here) - std::sin(neib)); } return res * res; } /// \Todo Implement boundary condition void LandauGaugeSweep(const double or_param = 1.0) { auto lat = *lattice; // FullLatticeIterator<FullLattice<CPtype,U1, dim>> fli(lat); // std::size_t count[] = {0,0}; // Loop over all even and odd sites separately for (auto eo = 0; eo < 2; ++eo) { #pragma omp parallel for for (auto idx = 0LU; idx < lat.volume(); ++idx) { auto coords = lat.linearIndexToCoord(idx); auto csum = std::accumulate(coords.begin(), coords.end(), 0); if (eo == csum % 2) { localLandauGauge(idx, or_param); // count[eo]++; } } } // std::cout << "Sites: " << count[0] << "/" << count[1] << std::endl; // std::cout << "Volume: " << lat.volume() << std::endl; } double LandauGaugeQuality() { auto lat = *lattice; double res = 0.0; #pragma omp parallel for reduction(+ : res) for (auto idx = 0LU; idx < lat.volume(); ++idx) { res += localLandauGaugeQuality(idx); } return res / static_cast<double>(lat.volume()); } std::size_t LandauGaugeDriver(const size_t gc_num, const size_t sw_num, const double or_param = 1.0) { double minus_inf = std::numeric_limits<double>::lowest(); LinkLattice<U1, dim> gauge_copy(*lattice), best_copy(*lattice); std::size_t iter = 0LU; QEDGaugeFix<dim> gc_gf(gauge_copy, this->beta); // double best= gc_gf.LandauGaugeQuality(); // double last= std::numeric_limits<double>::max(); double best = 
minus_inf; // Init with large negative number double last = 0; for (auto i = 0UL; i < gc_num; ++i) { if (0LU != i) // Use the original config once { gc_gf.randomGaugeTransformation(); } // double last_LF=gc_gf.LandauGaugeFunctional(); for (auto s = 0UL; s < sw_num; ++s) { gc_gf.LandauGaugeSweep(or_param); // last=gc_gf.LandauGaugeQuality(); ++iter; if (gc_gf.LandauGaugeQuality() < 1.e-9) { // //last_LF = new_LF; // last=gc_gf.LandauGaugeQuality(); last = gc_gf.LandauGaugeFunctional(); // // std::cout << "\t\t Best local LF: " << last_LF << "\t (best // global: " << best << ")" // // << "(" << ++s << " sweeps)" << std::endl; break; } // last_LF = new_LF; } // last = last_LF; if (last > best) { best_copy = gauge_copy; best = last; } } *lattice = best_copy; return iter; } };
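// A minimal sketch of the Metropolis rule used by accept() in multihit_MC()
// above: accept a proposed link update with probability min(1, exp(-dS)).
// Illustrative only; metropolis_accept is not part of this header.
#include <cmath>
#include <random>

template <typename Generator>
bool metropolis_accept(double delta_S, Generator &gen)
{
    if (delta_S <= 0.0) { return true; }       // downhill moves always accepted
    std::uniform_real_distribution<double> dist(0.0, 1.0);
    return dist(gen) <= std::exp(-delta_S);    // uphill: accept w.p. exp(-dS)
}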
5.c
/* Write a program that declares a two-dimensional array d[6][8] and
   initialises its elements using a random number generator.
   Using the sections...section directive, define three sections that perform
   the following operations:
   - the first section computes the arithmetic mean of the array elements,
   - the second section computes the minimum and maximum element values,
   - the third section counts the elements whose values are multiples of 3.
   Each section must determine and print the number of the executing thread
   and the result of its computation. */

#include <stdio.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>
#include <limits.h>

int main(int argc, char *argv[])
{
    srand(time(NULL));
    int d[6][8];

    /* Fill the array with random values from 0 to 49 (rand() % 50) */
    for (int i = 0; i < 6; i++) {
        for (int j = 0; j < 8; j++) {
            d[i][j] = rand() % 50;
            printf("%d, ", d[i][j]);
        }
        printf("\n");
    }

    #pragma omp parallel sections num_threads(3)
    {
        #pragma omp section
        {
            int sum = 0, length = 0;
            for (int i = 0; i < 6; i++) {
                printf("Thread %d is computing the arithmetic mean\n", omp_get_thread_num());
                for (int j = 0; j < 8; j++) {
                    sum += d[i][j];
                    length += 1;
                }
            }
            printf("Arithmetic mean: %d\n", sum / length);
        }
        #pragma omp section
        {
            int min = INT_MAX;
            int max = INT_MIN;
            for (int i = 0; i < 6; i++) {
                printf("Thread %d is computing the minimum and maximum\n", omp_get_thread_num());
                for (int j = 0; j < 8; j++) {
                    if (d[i][j] < min) min = d[i][j];
                    if (d[i][j] > max) max = d[i][j];
                }
            }
            printf("Minimum: %d, Maximum: %d\n", min, max);
        }
        #pragma omp section
        {
            int count3 = 0;
            for (int i = 0; i < 6; i++) {
                printf("Thread %d is counting the elements divisible by 3\n", omp_get_thread_num());
                for (int j = 0; j < 8; j++) {
                    if (d[i][j] % 3 == 0) count3 += 1;
                }
            }
            printf("Number of elements divisible by 3: %d\n", count3);
        }
    }

    return 0;
}
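/* An alternative sketch (not required by the assignment, which asks for
   sections): the same three computations expressed with OpenMP reduction
   clauses.  Relies on the headers already included above; reduction(min:)
   and reduction(max:) need OpenMP 3.1 or newer. */
void reductions_variant(int d[6][8])
{
    int sum = 0, minv = INT_MAX, maxv = INT_MIN, count3 = 0;
    #pragma omp parallel for reduction(+:sum,count3) \
                             reduction(min:minv) reduction(max:maxv)
    for (int i = 0; i < 6; i++) {
        for (int j = 0; j < 8; j++) {
            sum += d[i][j];
            if (d[i][j] < minv) minv = d[i][j];
            if (d[i][j] > maxv) maxv = d[i][j];
            if (d[i][j] % 3 == 0) count3++;
        }
    }
    printf("mean=%d min=%d max=%d multiples_of_3=%d\n",
           sum / 48, minv, maxv, count3);
}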
filter.c
/* This program was modified (incrementally simplified) in 2013-2014
   by Grigori Fursin to understand performance regressions
   for different images ... */

/* This program detects the edges in a 256 gray-level 128 x 128 pixel image.
   The program relies on a 2D-convolution routine to convolve the image with
   kernels (Sobel operators) that expose horizontal and vertical edge
   information. The following is a block diagram of the steps performed in
   edge detection:

             +---------+       +----------+
     Input   |Smoothing|       |Horizontal|-------+
     Image -->| Filter  |---+-->| Gradient |       |
             +---------+   |   +----------+  +----x-----+   +---------+  Binary
                           |                 | Gradient |   |  Apply  |  Edge
                           |                 | Combining|-->|Threshold|->Detected
                           |   +----------+  +----x-----+   +----x----+  Output
                           |   | Vertical |       |              |
                           +-->| Gradient |-------+              |
                               +----------+               Threshold Value

   This program is based on the routines and algorithms found in the book
   "C Language Algorithms for Digital Signal Processing" by P.M. Embree and
   B. Kimble.

   Copyright (c) 1992 -- Mazen A.R. Saghir -- University of Toronto */

#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   /* strcmp() is used for the CT_TIME feature below */
/* #include "traps.h" */
#include "filter.h"

FILE *input_fp=NULL;
FILE *output_fp=NULL;

int input_dsp();              /* input_dsp (dest, char*) */
void output_dsp();            /* output_dsp (source) */
void filter_codelet_day();
void filter_codelet_night();

int image_buffer1[N][N];
int image_buffer2[N][N];
int image_buffer3[N][N];
int filter[K][K];

int main(int argc, const char **argv)
{
  int *matrix_ptr1;
  int *matrix_ptr2;
  int *matrix_ptr3;
  int *filter_ptr;
  int temp1;
  int temp2;
  int temp3;
  int v1;
  int v2;
  int v3;
  int i;
  int j;

  void convolve2d();

  long rr=0, r=1;
  int tm=0;
  char *stm;

  if (argc<2)
  {
    printf("Usage: ./a.out <input file name with an image in a raw format>\n");
    return 1;
  }

  /* FGG adding kernel repetition */
  if (getenv("CT_REPEAT_MAIN")!=NULL) r=atol(getenv("CT_REPEAT_MAIN"));

  /* FGG adding "time of the day" feature */
  stm=getenv("CT_TIME");
  if (stm!=NULL)
  {
    if (strcmp(stm, "day")==0) tm=0;
    else if (strcmp(stm, "night")==0) tm=1;
  }

  /* Read input image. */
  input_dsp(image_buffer1, argv[1]);

  /* Initialize image_buffer2. */
  for (i = 0; i < N; i++)
  {
    for (j = 0; j < N; ++j)
    {
      image_buffer2[i][j] = 0;
    }
  }

  printf("Time of the day feature: %d\n",tm);

  #pragma omp parallel for
  for (rr=0; rr<r; rr++)
  {
    if (tm==0) filter_codelet_day(image_buffer1, image_buffer2);
    else filter_codelet_night(image_buffer1, image_buffer2);
  }

  /* Store binary image. */
  output_dsp(image_buffer2, N*N);

  return 0;
}

/* This function convolves the input image by the kernel
   and stores the result in the output image. */
void convolve2d(input_image, kernel, output_image)
int *input_image;
int *kernel;
int *output_image;
{
  int *kernel_ptr;
  int *input_image_ptr;
  int *output_image_ptr;
  int *kernel_offset;
  int *input_image_offset;
  int *output_image_offset;
  int i;
  int j;
  int c;
  int r;
  int row;
  int col;
  int normal_factor;
  int sum;
  int temp1;
  int temp2;
  int dead_rows;
  int dead_cols;

  /* Set the number of dead rows and columns. These represent the band of
     rows and columns around the edge of the image whose pixels must be
     formed from less than a full kernel-sized complement of input image
     pixels. No output values for these dead rows and columns since they
     would tend to have less than full amplitude values and would exhibit
     a "washed-out" look known as convolution edge effects. */
  dead_rows = K / 2;
  dead_cols = K / 2;

  /* Calculate the normalization factor of the kernel matrix.
*/ normal_factor = 0; kernel_ptr = kernel; for (r = 0; r < K; r++) { kernel_offset = kernel_ptr; temp1 = *kernel_offset++; for (c = 1; c < K; c++) { normal_factor += abs(temp1); temp1 = *kernel_offset++; } normal_factor += abs(temp1); kernel_ptr += K; } if (normal_factor == 0) normal_factor = 1; /* Convolve the input image with the kernel. */ row = 0; output_image_ptr = output_image; output_image_ptr += (N * dead_rows); for (r = 0; r < N - K + 1; r++) { output_image_offset = output_image_ptr; output_image_offset += dead_cols; col = 0; for (c = 0; c < N - K + 1; c++) { input_image_ptr = input_image; input_image_ptr += (N * row); kernel_ptr = kernel; sum = 0; for (i = 0; i < K; i++) { input_image_offset = input_image_ptr; input_image_offset += col; kernel_offset = kernel_ptr; temp1 = *input_image_offset++; temp2 = *kernel_offset++; for (j = 1; j < K; j++) { sum += temp1 * temp2; temp1 = *input_image_offset++; temp2 = *kernel_offset++; } sum += temp1 * temp2; kernel_ptr += K; input_image_ptr += N; } *output_image_offset++ = (sum / normal_factor); col++; } output_image_ptr += N; row++; } } int input_dsp (int *dest, char *fgg_file) { int success; input_fp=fopen(fgg_file,"rb"); if (input_fp==NULL) { printf ("Error: cannot open input image file %s ...\n", fgg_file); exit(1); } success=fread(dest, N, N*sizeof(int), input_fp); fclose(input_fp); return success; } void output_dsp (int *src) { output_fp=fopen("image_output.bin","wb"); fwrite(src, N, N*sizeof(int), output_fp); fclose(output_fp); }
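The trimmed main() above delegates the filtering to the external filter_codelet_day/filter_codelet_night routines, but the header comment describes the classic pipeline: convolve with horizontal and vertical Sobel kernels, combine the gradients, and threshold. The sketch below shows how convolve2d() could drive that pipeline using the file's own buffers; the Sobel coefficients are standard, but the threshold value and the |Gh|+|Gv| gradient-combining step are illustrative assumptions, not taken from this benchmark.

/* Hedged sketch of the block diagram using convolve2d(); assumes K == 3 and
   relies on the globals and includes of this file. The threshold constant and
   the |Gh|+|Gv| magnitude approximation are illustrative choices. */
static int sobel_h[3][3] = { {-1, -2, -1}, { 0,  0,  0}, { 1,  2,  1} };
static int sobel_v[3][3] = { {-1,  0,  1}, {-2,  0,  2}, {-1,  0,  1} };

void edge_detect_sketch(void)
{
  int i, j, g;
  const int threshold = 100;  /* hypothetical value */

  convolve2d((int *)image_buffer1, (int *)sobel_h, (int *)image_buffer2);
  convolve2d((int *)image_buffer1, (int *)sobel_v, (int *)image_buffer3);

  for (i = 0; i < N; i++)
    for (j = 0; j < N; j++)
    {
      g = abs(image_buffer2[i][j]) + abs(image_buffer3[i][j]);
      image_buffer2[i][j] = (g > threshold) ? 255 : 0;  /* binary edge map */
    }
}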
GB_unop__identity_int8_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int8_fp64) // op(A') function: GB (_unop_tran__identity_int8_fp64) // C type: int8_t // A type: double // cast: int8_t cij = GB_cast_to_int8_t ((double) (aij)) // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int8_t z = GB_cast_to_int8_t ((double) (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int8_t z = GB_cast_to_int8_t ((double) (aij)) ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int8_fp64) ( int8_t *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; int8_t z = GB_cast_to_int8_t ((double) (aij)) ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; int8_t z = GB_cast_to_int8_t ((double) (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int8_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
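The generated kernel above funnels every element through GB_cast_to_int8_t for the double-to-int8_t typecast. GraphBLAS defines that cast elsewhere; as a rough, hedged sketch of what such a saturating cast typically does (NaN to zero, clamping at the type limits), consider:

// Hedged sketch of a saturating double -> int8_t cast in the spirit of
// GB_cast_to_int8_t; the real GraphBLAS definition may differ in detail.
#include <cmath>
#include <cstdint>

static inline int8_t cast_to_int8_sketch (double x)
{
    if (std::isnan (x)) return 0 ;                  // NaN maps to zero
    if (x <= (double) INT8_MIN) return INT8_MIN ;   // clamp low
    if (x >= (double) INT8_MAX) return INT8_MAX ;   // clamp high
    return (int8_t) x ;                             // in range: truncate toward zero
}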
peel.h
#include "count.h" //buffers/arrays for histogramming edges/edge workload for wing decomposition std::vector<std::array<intB, locBuffSizeLarge>> thdBloomBuff; std::vector<std::array<intE, locBuffSizeLarge>> thdEdgeBuff; std::vector<std::vector<intE>> histCountPerThread; std::vector<intE> histCountGlobal; std::vector<intE> histAccGlobal; std::vector<std::vector<intB>> histWorkPerThread; std::vector<intB> histWorkGlobal; std::vector<intB> histWorkAccGlobal; //for static load balancing std::vector<intB> workBloomSchedule; std::vector<intB> accWorkBloomSchedule; std::vector<intB> partBloomStart; /***************************************************************************** Re-count and generate updates for 2-hop neighbors of deleted vertices Inputs: 1. G-> graph object 2. labels -> vertices not deleted yet 3. activeList -> vertices peeled in this round 4. isActive -> boolean vector mapping a vertex ID to its active status 5. currSupport -> current support of vertices 5. nonNativeSupport -> support from other vertices not included in G 6. wedgeCnt -> 2D array for threads to store wedges while counting Outputs: 1. updateVertexList -> list of vertices whose support values are updated 2. updateValueList -> corresponding values by which support should be reduced ******************************************************************************/ void return_updates_by_counting(Graph &G, std::vector<intV> &labels, std::vector<intV> &activeList, std::vector<uint8_t> &isActive, std::vector<intB> &currSupport, std::vector<intB> &nonNativeSupport, std::vector<intV> &updateVertexList, std::vector<intB> &updateValueList, std::vector<std::vector<intV>> &wedgeCnt) { std::vector<intB> bcnt; count_per_vertex (G, labels, bcnt, wedgeCnt); std::vector<uint8_t> differs(labels.size()); #pragma omp parallel for for (intV i=0; i<labels.size(); i++) { auto v = labels[i]; differs[i] = (((bcnt[v] + nonNativeSupport[v]) != currSupport[v]) && (!G.is_deleted(v)) && (!isActive[v])) ? 1 : 0; } parallel_compact<intV, intV>(labels, differs, updateVertexList); updateValueList.resize(updateVertexList.size()); #pragma omp parallel for for (intV i=0; i<updateVertexList.size(); i++) { auto v = updateVertexList[i]; updateValueList[i] = currSupport[v]-bcnt[v]-nonNativeSupport[v]; } } /***************************************************************************** Peel active vertices and generate updates for 2-hop neighbors of deleted vertices Inputs: 1. G-> graph object 2. labels -> vertices to be peeled but not deleted yet 3. activeList -> vertices peeled in this round 4. isActive -> boolean vector mapping a vertex ID to its active status 5. currSupport -> current support of vertices 6. wedgeCnt -> 2D array for threads to store wedges while counting Outputs: 1. updateVertexList -> list of vertices whose support values are updated 2. updateValueList -> corresponding values by which support should be reduced Other args(they must be initialized to all "falses/zeros"): //can be replaced by sparseAdditiveSet from ligra 1. isUpdated -> boolean vector that maps vertices to their "support updated" status in the current peeling round 2. 
peelCnts -> store the running count of #butterflies deleted for vertices during peeling ******************************************************************************/ void return_updates_by_peeling(Graph &G, std::vector<intV> &labels, std::vector<intV> &activeList, std::vector<uint8_t> &isActive, std::vector<intB> &currSupport, std::vector<intV> &updateVertexList, std::vector<intB> &updateValueList, std::vector<std::vector<intV>> &wedgeCnt, std::vector<uint8_t> &isUpdated, std::vector<intB> &peelCnts) { std::vector<intV> updatesPerThread (NUM_THREADS, 0); std::vector<intV> offset (NUM_THREADS+1, 0); intV numActiveVertices = activeList.size(); int numActiveThreads = std::min((unsigned int)(numActiveVertices>>1) + 1, NUM_THREADS); intV BS = ((numActiveVertices-1)/numActiveThreads + 1); BS = (BS > 5) ? 5 : BS; #pragma omp parallel num_threads(numActiveThreads) { size_t tid = omp_get_thread_num(); std::vector<intV> tmpVertexList; std::vector<intV> &numW = wedgeCnt[tid]; std::vector<intV> hop2Neighs; hop2Neighs.reserve(8096); #pragma omp for schedule(dynamic, BS) for (intV i=0; i<numActiveVertices; i++) { intV delV = activeList[i]; intV deg; std::vector<intV> &neighList = G.get_neigh(delV, deg); for (intV j=0; j<deg; j++) { intV neigh = neighList[j]; intV neighDeg; std::vector<intV> &neighOfNeighList = G.get_neigh(neigh, neighDeg); for (intV k=0; k<neighDeg; k++) { intV neighOfNeigh = neighOfNeighList[k]; if(isActive[neighOfNeigh] || G.is_deleted(neighOfNeigh)) continue; if (numW[neighOfNeigh]==0) hop2Neighs.push_back(neighOfNeigh); numW[neighOfNeigh] = numW[neighOfNeigh] + 1; } } for (auto x:hop2Neighs) { if (numW[x] >= 2) { intB butterflies = choose2<intB, intV>(numW[x]); if (__sync_bool_compare_and_swap(&isUpdated[x], 0, 1)) tmpVertexList.push_back(x); __sync_fetch_and_add(&peelCnts[x], butterflies); } numW[x] = 0; } hop2Neighs.clear(); } updatesPerThread[tid] = tmpVertexList.size(); #pragma omp barrier #pragma omp single { serial_prefix_sum(offset, updatesPerThread); updateVertexList.clear(); updateValueList.clear(); updateVertexList.resize(offset[NUM_THREADS]); updateValueList.resize(offset[NUM_THREADS]); } #pragma omp barrier for (intV i=0; i<tmpVertexList.size(); i++) { intV vId = tmpVertexList[i]; updateVertexList[offset[tid]+i] = vId; updateValueList[offset[tid]+i] = peelCnts[vId]; } #pragma omp barrier #pragma omp for for (intV i=0; i<offset[NUM_THREADS]; i++) { intV vId = updateVertexList[i]; isUpdated[vId] = 0; peelCnts[vId] = 0; } } } /***************************************************************************** Update the deleted status of vertices peeled in current round Inputs: 1. G-> graph object 2. activeList -> List of vertices peeled Outputs: 1. isActive -> array that maps vertex IDs to their "active" status ******************************************************************************/ void delete_active_vertices(Graph &G, std::vector<intV> &activeList, std::vector<uint8_t> &isActive) { intV numActiveVertices = activeList.size(); #pragma omp parallel for for (intV i=0; i<numActiveVertices; i++) { isActive[activeList[i]] = false; G.delete_vertex(activeList[i]); } } /***************************************************************************** Peel the active vertices and generated count updates to their 2-hop neighbors Choose either re-counting or peeling for update generation Inputs: 1. G-> graph object 2. labels -> vertices to be peeled but not deleted yet 3. activeList -> vertices peeled in this round 4. 
isActive -> boolean vector mapping a vertex ID to its active status 5. supp -> current support of vertices 6. nonNativeSupport -> support from other vertices not included in G 7. countComplexity -> work required to do a re-count 8. peelWork -> per-vertex work for peeling 9. wedgeCnt -> 2D array for threads to store wedges while counting Outputs: 1. updateVertexList -> list of vertices whose support values are updated 2. updateValueList -> corresponding values by which support should be reduced Other args(they must be initialized to all "falses/zeros"): //can be replaced by sparseAdditiveSet from ligra 1. isUpdated -> boolean vector that maps vertices to their "support updated" status in the current peeling round 2. peelCnts -> store the running count of #butterflies deleted for vertices during peeling ******************************************************************************/ int update_count (Graph &G, std::vector<intV> &labels, std::vector<intV> &activeList, std::vector<uint8_t> &isActive, std::vector<intB> &supp, std::vector<intB> &nonNativeSupport, std::vector<intV> &updateVertexList, std::vector<intB> &updateSupportVal, intB countComplexity, std::vector<intE> &peelWork, std::vector<std::vector<intV>> &wedgeCnt, std::vector<uint8_t> &isUpdated, std::vector<intB> &peelCnts) { intB peelComplexity = 0; #pragma omp parallel for reduction(+:peelComplexity) for (intV i=0; i<activeList.size(); i++) peelComplexity += peelWork[activeList[i]]; bool dontPeel = (countComplexity < peelComplexity); //dontPeel = false; if (dontPeel) { delete_active_vertices(G, activeList, isActive); return_updates_by_counting(G, labels, activeList, isActive, supp, nonNativeSupport, updateVertexList, updateSupportVal, wedgeCnt); return 0; } else { return_updates_by_peeling(G, labels, activeList, isActive, supp, updateVertexList, updateSupportVal, wedgeCnt, isUpdated, peelCnts); delete_active_vertices(G, activeList, isActive); return 1; } } /***************************************************************************** Construct (in parallel) a list of vertices whose support lies between 'lo' and 'hi' Inputs: 1. G-> graph object 2. candidates -> vector of potential vertices that can be activated 3. lo, hi -> range of support to be activated 4. supp -> current support of vertices Outputs: 1. activeList -> list of active vertices 2. isActive -> boolean vector mapping a vertex ID to its active status ******************************************************************************/ void construct_active_list (Graph &G, std::vector<intV> &candidates, intB lo, intB hi, std::vector<intV> &activeList, std::vector<uint8_t> &isActive, std::vector<intB> &supp) { #pragma omp parallel for for (int i=0; i<candidates.size(); i++) { auto v = candidates[i]; if((supp[v]<hi) && (supp[v]>=lo) && (!G.is_deleted(v))) { supp[v] = lo; isActive[v] = 1; } } parallel_compact_kv<intV, intV>(candidates, isActive, activeList); } /***************************************************************************** Peel vertices whose support is in the given range and update support of other vertices Arguments: 1. G-> graph object 2. vertices -> candidate (remaining) vertices on the side being peeled 3. lo, hi -> range of support values to be peeled 4. isActive -> boolean vector mapping a vertex ID to its "active" status 5. supp -> support vector 5. nonNativeSupport -> support from other vertices not included in G 6. countComplexity -> work required to do a re-count 7. peelWork -> per-vertex work for peeling 8.
wedgeCnt -> 2D array for threads to store wedges while counting 9. isUpdated -> boolean vector that maps vertices to their "support updated" status in the current peeling round 10. peelCnts -> store the running count of #butterflies deleted for vertices during peeling ******************************************************************************/ intV peel_range (Graph &G, std::vector<intV> &vertices, intB lo, intB hi, std::vector<uint8_t> &isActive, std::vector<intB> &supp, std::vector<intB> &nonNativeSupport, intB countComplexity, std::vector<intE> &peelWork, std::vector<std::vector<intV>> &wedgeCnt, std::vector<uint8_t> &isUpdated, std::vector<intB> &peelCnts) { intV numDeleted = 0; std::vector<intV> activeList; for (auto x : vertices) { assert((supp[x] >= lo) || G.is_deleted(x)); } construct_active_list(G, vertices, lo, hi, activeList, isActive, supp); numDeleted += activeList.size(); //iteratively delete all vertices with tip values in this range// //////////////////////////////////////////////////////////////// std::vector<intV> updateVertexList; std::vector<intB> updateSupportVal; intB edgeDelThresh = ((intB)G.numE*((intB)std::log2(double(G.numV)))); intB peelWorkDone = 0; intV numRounds = 0; intV numPeeled = 0; while(activeList.size() > 0) { //for (auto x:activeList) // printf("deleting %u with support %llu\n", x, supp[x]); numPeeled += update_count(G, vertices, activeList, isActive, supp, nonNativeSupport, updateVertexList, updateSupportVal, countComplexity, peelWork, wedgeCnt, isUpdated, peelCnts); intV numUpdates = updateVertexList.size(); #pragma omp parallel for for (intV i=0; i<numUpdates; i++) { intV v = updateVertexList[i]; intB updateVal = std::min(updateSupportVal[i], supp[v]-lo); supp[v] -= updateVal; } activeList.clear(); construct_active_list(G, updateVertexList, lo, hi, activeList, isActive, supp); numDeleted += activeList.size(); numRounds++; updateVertexList.clear(); updateSupportVal.clear(); } //printf("number of rounds required = %d, peeled = %d, counted = %d\n", numRounds, numPeeled, numRounds-numPeeled); return numDeleted; } /***************************************************************************** Remove deleted vertices from the candidate list and return peeling complexity of remaining vertices Arguments: 1. G -> graph object 2. vertices -> current vertex list (will be updated) 3. peelComplexity -> peeling work required for each vertex (vector) 4. keep -> helper boolean vector to be used in parallel compaction ******************************************************************************/ intB remove_deleted_vertices(Graph &G, std::vector<intV> &vertices, std::vector<intE> &peelComplexity, std::vector<uint8_t>& keep) { keep.resize(vertices.size()); intB remPeelComplexity = 0; #pragma omp parallel for reduction (+:remPeelComplexity) for (intV i=0; i<vertices.size(); i++) { keep[i] = (G.is_deleted(vertices[i])) ? 0 : 1; remPeelComplexity += (keep[i]) ? 
peelComplexity[vertices[i]] : 0; } parallel_compact_in_place<intV, intV>(vertices, keep); return remPeelComplexity; } /***************************************************************************** //overloaded function definition //cleans "vertices" vector and also creates a list of deleted vertices //Additional argument - "delVertices" vector ******************************************************************************/ intB remove_deleted_vertices(Graph &G, std::vector<intV> &vertices, std::vector<intE> &peelComplexity, std::vector<uint8_t>& keep, std::vector<intV> &delVertices) { keep.resize(vertices.size()); intB remPeelComplexity = 0; #pragma omp parallel for reduction (+:remPeelComplexity) for (intV i=0; i<vertices.size(); i++) { keep[i] = (G.is_deleted(vertices[i])) ? 0 : 1; remPeelComplexity += (keep[i]) ? peelComplexity[vertices[i]] : 0; } parallel_compact_in_place<intV, intV>(vertices, keep, delVertices); return remPeelComplexity; } /***************************************************************************** 2-approximate tip-decomposition. Peel a range of support values that doubles every round Arguments: 1. G -> graph object 2. tipVal -> half approximation of tip values of vertices must be initialized with the per-vertex butterfly counts 3. peelSide -> 0 implies peeling vertices in U, 1 means V 4. wedgeCnt -> 2D helper array for threads to store wedges ******************************************************************************/ /* void approx_tip_decomposition(Graph &G, std::vector<intB> &tipVal, int peelSide, std::vector<std::vector<intV>> &wedgeCnt) { std::vector<intV> vertices; G.get_labels(vertices, peelSide); std::vector<uint8_t> keep; //std::vector<uint8_t> keep(vertices.size()); printf("number of vertices to peel = %u\n", vertices.size()); //printf("vertices are"); //print_list_horizontal(vertices); std::vector<intE> countWork; std::vector<intE> peelWork; printf("estimating workloads\n"); intB totalCountComplexity = estimate_total_workload(G, countWork, peelWork); std::vector<uint8_t> isActive (G.numT); std::vector<uint8_t> isUpdated(G.numT); std::vector<intB> peelCnts(G.numT); std::vector<intB> nonNativeSupport(G.numT); #pragma omp parallel for for (intV i=0; i<G.numT; i++) { isActive[i] = 0; isUpdated[i] = 0; peelCnts[i] = 0; nonNativeSupport[i] = 0; } intB lo = 0; intB range = 1; intV numDeleted = 0; intV targetDeletion = (peelSide) ? G.numV : G.numU; printf("starting decomposition\n"); while(numDeleted < targetDeletion) { numDeleted = numDeleted + peel_range(G, vertices, lo, lo+range, isActive, tipVal, nonNativeSupport, totalCountComplexity, peelWork, wedgeCnt, isUpdated, peelCnts); //update range lo = lo + range; range = range*2; intB remPeelComplexity = remove_deleted_vertices(G, vertices, peelWork, keep); } } */ /***************************************************************************** Find the target range to create a partition with the desired peeling complexity Arguments: 1. vertices -> candidate vertices 2. tipVal -> support value of vertices 3. targetPeelComplexity -> desired amount of work required to peel the partition 4. lowerBound -> lowest tip value 5.
peelWork -> work required to peel the vertices ******************************************************************************/ std::tuple<intB, intV, intV> find_range (std::vector<intV> &vertices, std::vector<intB>&tipVal, intB targetPeelComplexity, intB lowerBound, std::vector<intE> &peelWork) { parallel_sort_kv_increasing<intV, intB>(vertices, tipVal); //sort vertices on their current support //compute workload for each bucket - map, prefix sum, scatter //find bucket id for each vertex using map and prefix sum //scatter with atomic add to compute workload for the buckets std::vector<uint8_t> suppIsUniq(vertices.size()); suppIsUniq[suppIsUniq.size()-1] = 1; #pragma omp parallel for for (intV i=0; i<vertices.size()-1; i++) suppIsUniq[i] = (tipVal[vertices[i]]==tipVal[vertices[i+1]]) ? 0 : 1; std::vector<intV> wrOffset; parallel_prefix_sum(wrOffset, suppIsUniq); intV numUniqSuppVals = wrOffset.back(); //last element in the offset vector std::vector<intB> workPerSuppVal(numUniqSuppVals); //work to peel all vertices in a given bucket std::vector<intB> suppVal(numUniqSuppVals); //support value corresponding to the individual buckets #pragma omp parallel { #pragma omp for for (intV i=0; i<numUniqSuppVals; i++) workPerSuppVal[i] = 0; #pragma omp barrier #pragma omp for for (intV i=0; i<vertices.size(); i++) { intV v = vertices[i]; intB work = peelWork[v]; suppVal[wrOffset[i]] = tipVal[v]; __sync_fetch_and_add(&workPerSuppVal[wrOffset[i]], work); } } //none of the vertices with support < lo should've survived assert(suppVal[0] >= lowerBound); //prefix sum to compute work required to peel all vertices till a particular bucket parallel_prefix_sum_inclusive(workPerSuppVal, workPerSuppVal); //find the first bucket at which the accumulated work reaches the target value intV tgtBktId = std::lower_bound(workPerSuppVal.begin(), workPerSuppVal.end(), targetPeelComplexity) - workPerSuppVal.begin(); if (tgtBktId >= numUniqSuppVals) tgtBktId = numUniqSuppVals-1; //lower_bound may return end() if the target exceeds the total work intB hi = std::max(suppVal[tgtBktId], suppVal[0]+1); //hi should be greater than the support of the first bucket to ensure non-zero vertex peeling return std::make_tuple(hi, tgtBktId, numUniqSuppVals); } /***************************************************************************** Coarse-grained decomposition with (targeted) equal workload partitions. Arguments: 1. G -> graph object 2. tipVal -> (output) support of vertices when their partition begins peeling; must be initialized with the per-vertex butterfly counts 3. peelSide -> 0 implies peeling vertices in U, 1 means V 4. wedgeCnt -> 2D helper array for threads to store wedges 5. numParts -> number of partitions to create; final partitions may be smaller 6. partTipVals -> output vector containing support ranges of the partitions 7. partVertices -> 2D array to store vertices for each partition 8.
partPeelWork -> work done to peel the entire partition (considering no re-counting) ******************************************************************************/ int create_balanced_partitions(Graph &G, std::vector<intB> &tipVal, int peelSide, std::vector<std::vector<intV>> &wedgeCnt, int numParts, std::vector<std::pair<intB, intB>> &partTipVals, std::vector<std::vector<intV>> &partVertices, std::vector<intB> &partPeelWork) { std::vector<intV> vertices; G.get_labels(vertices, peelSide); intV targetDeletion = vertices.size(); std::vector<uint8_t> keep; std::vector<uint8_t> isActive(G.numT); std::vector<uint8_t> isUpdated(G.numT); std::vector<intB> peelCnts(G.numT); std::vector<intB> nonNativeSupport(G.numT); #pragma omp parallel for for (intV i=0; i<G.numT; i++) { isActive[i] = 0; isUpdated[i] = 0; peelCnts[i] = 0; nonNativeSupport[i] = 0; } std::vector<intE> countWork; //work required per vertex to count std::vector<intE> peelWork; //work required per vertex to peel, 2-hop neighborhood size intB totalCountComplexity = estimate_total_workload(G, countWork, peelWork); partTipVals.resize(numParts); partPeelWork.resize(numParts); std::vector<intV> verticesPerPart (numParts); std::vector<intB> partTipValInit; //initial values of vertices when their corresponding partitions starts peeling parallel_vec_copy(partTipValInit, tipVal); intB totalPeelComplexity = 0; #pragma omp parallel for reduction(+:totalPeelComplexity) for (intV i=0; i<vertices.size(); i++) totalPeelComplexity += peelWork[vertices[i]]; intB avgPeelComplexityRequired = totalPeelComplexity/numParts; printf("total peel complexity = %lld, count complexity = %lld\n", totalPeelComplexity, totalCountComplexity); intB remPeelComplexity = totalPeelComplexity; intB lo = 0; int numPartsCreated = 0; int numPartsPerThread = numParts/NUM_THREADS; intV numDeleted = 0; //if lot of work done, remove deleted edges to speedup further processing intB edgeDelThresh = ((intB)G.numE)*((intB)std::log2((double)G.numV)); intB peelWorkDone = 0; //helps in adapting the targetWorkComplexity if the partitions become too heavy double scaleFactor = 1.0; //till there is something to peel or only last partition remains while((remPeelComplexity > 0) && (numPartsCreated < numParts-1) && (numDeleted < targetDeletion)) { if (peelWorkDone > edgeDelThresh) { G.delete_edges(); peelWorkDone = 0; } double bktPeelStart = omp_get_wtime(); intB targetPeelComplexity = (intB)((scaleFactor*(double)remPeelComplexity)/(numParts-numPartsCreated)); //figure out target complexity to cover intB desiredPeelComplexity = remPeelComplexity/(numParts-numPartsCreated); intB hi; intV tgtBktId, numUniqSuppVals; std::tie(hi, tgtBktId, numUniqSuppVals) = find_range(vertices, tipVal, targetPeelComplexity, lo, peelWork); //peel the range verticesPerPart[numPartsCreated] = peel_range(G, vertices, lo, hi, isActive, tipVal, nonNativeSupport, totalCountComplexity, peelWork, wedgeCnt, isUpdated, peelCnts); //logistics, track # deleted vertices, record range of the partition numDeleted += verticesPerPart[numPartsCreated]; partTipVals[numPartsCreated] = std::make_pair(lo, hi); intB prevRemPeelComplexity = remPeelComplexity; std::vector<intV> delVertices; remPeelComplexity = remove_deleted_vertices(G, vertices, peelWork, keep, delVertices); partPeelWork[numPartsCreated] = prevRemPeelComplexity - remPeelComplexity; peelWorkDone += partPeelWork[numPartsCreated]; double bktPeelEnd = omp_get_wtime(); #ifdef DEBUG printf("partition id = %d, time taken = %lf, vertices deleted = %u, range from %lld to %lld, 
desired complexity = %lld, target complexity = %lld, actual work done = %lld\n", numPartsCreated, (bktPeelEnd-bktPeelStart)*1000, verticesPerPart[numPartsCreated], lo, hi, desiredPeelComplexity, targetPeelComplexity, partPeelWork[numPartsCreated]); #endif //adapt, if too much work in this bucket, make targets smaller for the next partition scaleFactor = std::min(((double)targetPeelComplexity)/((double)partPeelWork[numPartsCreated]), 1.0); partVertices.push_back(delVertices); numPartsCreated++; parallel_vec_elems_copy(partTipValInit, tipVal, vertices); //prep for next partition creation lo = hi; } intV remVertices = vertices.size(); intB maxRemSupp = 0; //put anything remaining in the last partition if (remVertices > 0) { partPeelWork[numPartsCreated] = remPeelComplexity; partVertices.push_back(vertices); #pragma omp parallel for reduction(max:maxRemSupp) for (intV i=0; i<remVertices; i++) { maxRemSupp = std::max(tipVal[vertices[i]], maxRemSupp); tipVal[vertices[i]] = lo; } partTipVals[numPartsCreated++] = std::make_pair(lo, maxRemSupp+1); } tipVal.swap(partTipValInit); #pragma omp parallel for for (intV i=0; i<G.numU; i++) G.restore_vertex(G.uLabels[i]); G.restore_edges(); G.sort_adj(); return numPartsCreated; } /***************************************************************************** Print Coarse-grained decomposition details into a binary file Arguments: 1. filename -> name of the output binary file 2. G -> graph object 3. tipVal -> support of vertices when their partition begins peeling 4. numParts -> number of partitions 5. partTipVals -> vector containing support ranges of the partitions 6. partVertices -> 2D array storing vertices for each partition 7. partPeelWork -> work done to peel the entire partition (considering no re-counting) ******************************************************************************/ void print_partitioning_details (std::string &filename, Graph &G, std::vector<intB> &tipVal, int numParts, std::vector<std::pair<intB, intB>> &partTipVals, std::vector<std::vector<intV>> &partVertices, std::vector<intB> &partPeelWork) { std::vector<intV> vOut; std::vector<int> pOut; std::vector<intB> tOut; std::vector<intB> pRangeLo; std::vector<intB> pRangeHi; intV numVOut = 0; for (int i=0; i<numParts; i++) { pRangeLo.push_back(partTipVals[i].first); pRangeHi.push_back(partTipVals[i].second); for (intV j=0; j<partVertices[i].size(); j++) { numVOut++; vOut.push_back(partVertices[i][j]); pOut.push_back(i); tOut.push_back(tipVal[partVertices[i][j]]); } } assert(G.numU==numVOut); FILE* fpart = fopen(filename.c_str(), "wb"); //binary mode, honoring the filename argument fwrite(&numParts, sizeof(int), 1, fpart); fwrite(&partPeelWork[0], sizeof(intB), numParts, fpart); fwrite(&pRangeLo[0], sizeof(intB), numParts, fpart); fwrite(&pRangeHi[0], sizeof(intB), numParts, fpart); fwrite(&vOut[0], sizeof(intV), numVOut, fpart); fwrite(&pOut[0], sizeof(int), numVOut, fpart); fwrite(&tOut[0], sizeof(intB), numVOut, fpart); fclose (fpart); } /***************************************************************************** Read Coarse-grained decomposition details from a binary file Arguments: 1. filename -> name of the binary file to read 2. G -> graph object 3. tipVal -> (output) support of vertices when their partition begins peeling 4. numParts -> (output) number of partitions 5. partTipVals -> (output) vector containing support ranges of the partitions 6. partVertices -> (output) 2D array storing vertices for each partition 7. partPeelWork -> (output) work done to peel the entire partition (considering no re-counting) ******************************************************************************/ void read_partitioning_details (std::string &filename, Graph &G, std::vector<intB> &tipVal, int &numParts, std::vector<std::pair<intB, intB>> &partTipVals, std::vector<std::vector<intV>> &partVertices, std::vector<intB> &partPeelWork) { FILE* fcd = fopen(filename.c_str(), "rb"); if (fcd==NULL) { fputs("file error\n", stderr); exit(EXIT_FAILURE); } printf("file opened\n"); int np; fread(&np, sizeof(int), 1, fcd); numParts = np; printf("number of partitions = %d\n", numParts); partPeelWork.resize(numParts); partTipVals.resize(numParts); partVertices.resize(numParts); fread(&partPeelWork[0], sizeof(intB), numParts, fcd); printf("read peel work\n"); for (int i=0; i<numParts; i++) fread(&partTipVals[i].first, sizeof(intB), 1, fcd); for (int i=0; i<numParts; i++) fread(&partTipVals[i].second, sizeof(intB), 1, fcd); printf("read partition ranges\n"); std::vector<intV> vIn (G.numU); std::vector<int> pIn (G.numU); //partition ids are written as int std::vector<intB> tIn (G.numU); intV elemsRead = fread(&vIn[0], sizeof(intV), G.numU, fcd); //fread returns an element count printf("number of vertices read = %u\n", elemsRead); assert(elemsRead==G.numU); elemsRead = fread(&pIn[0], sizeof(int), G.numU, fcd); assert(elemsRead==G.numU); printf("read partition map\n"); elemsRead = fread(&tIn[0], sizeof(intB), G.numU, fcd); assert(elemsRead==G.numU); printf("read tipvals\n"); for (intV i=0; i<G.numU; i++) { intV v = vIn[i]; tipVal[v] = tIn[i]; partVertices[pIn[i]].push_back(v); } } /***************************************************************************** Compute upper bound on the maximum wing number Inputs: 1. eIds -> edge indices sorted on current support 2. tipVal -> vector of current support of edges 3. nEdgesRem -> number of not yet peeled edges in eIds vector Outputs: returns an upper bound on max wing number ******************************************************************************/ intE find_upper_bound_wing(std::vector<intE> &eIds, std::vector<intE> &tipVal, intE nEdgesRem) { parallel_unstable_sort_kv_increasing(eIds, tipVal); intE ub = 0; if (nEdgesRem > 10*NUM_THREADS) { intE BS = (nEdgesRem-1)/NUM_THREADS + 1; #pragma omp parallel num_threads(NUM_THREADS) reduction (max:ub) { unsigned tid = omp_get_thread_num(); intE start = tid*BS; intE end = std::min(nEdgesRem, start+BS); for (intE i = end-1; i>=start; i--) { intE currSupp = tipVal[eIds[i]]; intE numEdgesWithHigherSupp = nEdgesRem - i; if (numEdgesWithHigherSupp >= currSupp) { ub = std::max(ub, currSupp); break; } else ub = std::max(std::min(currSupp, numEdgesWithHigherSupp), ub); } } } else { for (intE i=nEdgesRem-1; i>=0; i--) { intE currSupp = tipVal[eIds[i]]; intE numEdgesWithHigherSupp = nEdgesRem - i; if (numEdgesWithHigherSupp >= currSupp) { ub = std::max(ub, currSupp); break; } else ub = std::max(std::min(currSupp, numEdgesWithHigherSupp), ub); } } return ub; } /***************************************************************************** Re-compute upper bound on the maximum wing number and populate histograms for range determination Inputs: 1. eIds -> edge indices sorted on current support 2. tipVal -> vector of current support of edges 3. nEdgesRem -> number of not yet peeled edges in eIds vector 4. minTipVal -> lower bound based on edges peeled so far 5.
currUb -> previous upper bound Outputs: returns an upper bound on max wing number ******************************************************************************/ intE update_upper_bound_wing(std::vector<intE> &eIds, intE nEdgesRem, std::vector<intE> &tipVal, intE minTipVal, intE currUb) { intE range = currUb - minTipVal + 1; if (histCountGlobal.size() < range) histCountGlobal.resize(range); if (histAccGlobal.size() < range) histAccGlobal.resize(range); if (histCountPerThread.size() < NUM_THREADS) histCountPerThread.resize(NUM_THREADS); intE edgesPerThread = (nEdgesRem-1)/NUM_THREADS + 1; intE BS = (range-1)/NUM_THREADS + 1; intE newUb = minTipVal; #pragma omp parallel num_threads(NUM_THREADS) { intE tid = omp_get_thread_num(); #pragma omp for for (intE i=0; i<range; i++) histCountGlobal[i] = 0; std::vector<intE> &locHistCount = histCountPerThread[tid]; if (locHistCount.size() < range) locHistCount.resize(range); for (intE i=0; i<range; i++) locHistCount[i] = 0; #pragma omp for for (intE i=0; i<nEdgesRem; i++) { intE val = std::min(tipVal[eIds[i]], currUb) - minTipVal; assert(val < range); //reverse for suffix sum locHistCount[range-val-1]++; } intE ptr = rand()%range; for (intE i=0; i<range; i++) { intE idx = (ptr+i)%range; __sync_fetch_and_add(&histCountGlobal[idx], locHistCount[idx]); } #pragma omp barrier //PREFIX SUM intE start = BS*tid; intE end = std::min((intE)(start+BS), range); if (range > NUM_THREADS*10) { histAccGlobal[start] = histCountGlobal[start]; for (intE i=start+1; i<end; i++) histAccGlobal[i] = histAccGlobal[i-1] + histCountGlobal[i]; #pragma omp barrier #pragma omp single { for (size_t i=1; i<NUM_THREADS; i++) { intE prevEnd = BS*i; if (prevEnd >= range) continue; intE tend = std::min(prevEnd + BS, range); histAccGlobal[tend-1] += histAccGlobal[prevEnd-1]; } } #pragma omp barrier if (tid > 0) { intB blockScan = histAccGlobal[start-1]; for (intE i=start; i<end-1; i++) histAccGlobal[i] += blockScan; } intE locMax = 0; if (end > start) { for (intE i=start; i<end; i++) { intE supp = (range - i - 1) + minTipVal; if (histAccGlobal[i] >= supp) { locMax = supp; break; } } #pragma omp critical { if (locMax > newUb) newUb = locMax; } } } else { #pragma omp single { histAccGlobal[0] = histCountGlobal[0]; for (intE i=1; i<range; i++) histAccGlobal[i] = histAccGlobal[i-1] + histCountGlobal[i]; for (intE i=0; i<range; i++) { intE supp = (range - i - 1) + minTipVal; if (histAccGlobal[i] >= supp) { newUb = supp; break; } } assert(histAccGlobal[range-1]==nEdgesRem); } } } return newUb; } /***************************************************************************** Compute upper bound for the range of a partition Inputs: 1. eIds -> edge indices sorted on current support 2. tipVal -> vector of current support of edges 3. nPartsRem -> number of partitions remaining to be created 4. nEdgesRem -> number of not yet peeled edges in eIds vector 5. scaling -> scaling factor to apply 6. tipMin -> lower bound based on partition's wing number range 7. tipMax -> recently updated upper bound 8. oldMax -> previous upper bound Outputs: 1. range upper bound for the partition 2. 
estimated work value for the partition based on current edge support ******************************************************************************/ std::tuple<intE, intB> find_upper_bound_part(std::vector<intE> &eIds, std::vector<intE> &tipVal, intE nPartsRem, intE nEdgesRem, double scaling, intE tipMin, intE tipMax, intE oldMax) { intE range = tipMax - tipMin + 1; intE oldRange = oldMax - tipMin + 1; std::vector<intB> &workPerSupp = histWorkGlobal; if (workPerSupp.size() < range) workPerSupp.resize(range); std::vector<intB> &accWork = histWorkAccGlobal; if (accWork.size() < range) accWork.resize(range); intE BS = (range-1)/NUM_THREADS + 1; intE newMaxCount = histCountGlobal[oldMax-tipMax]; #pragma omp parallel num_threads (NUM_THREADS) { unsigned tid = omp_get_thread_num(); //count edges with higher support than new max into the bin of new max value //histCountGlobal[i] is the no. of edges with support oldMax - (tipMin + i) #pragma omp for reduction (+:newMaxCount) for (intE i=0; i<oldMax-tipMax; i++) newMaxCount += histCountGlobal[i]; #pragma omp single { histCountGlobal[oldMax-tipMax] = newMaxCount; } #pragma omp for for (intE i=0; i<range; i++) { intB val = i + tipMin; intE countIdx = oldMax - val; intB edgeCnt = histCountGlobal[countIdx]; workPerSupp[i] = edgeCnt*val; } //PREFIX SUM counts to compute write offsets for each support value if (range < 10*NUM_THREADS) { #pragma omp single { accWork[0] = workPerSupp[0]; for (intE i=1; i<range; i++) accWork[i] = accWork[i-1]+workPerSupp[i]; } } else { intE start = BS*tid; intE end = std::min((intE)(start+BS), range); if (start < range) accWork[start] = workPerSupp[start]; for (intE i=start+1; i<end; i++) accWork[i] = accWork[i-1] + workPerSupp[i]; #pragma omp barrier #pragma omp single { for (size_t i=1; i<NUM_THREADS; i++) { intE prevEnd = BS*i; if (prevEnd >= range) continue; intE tend = std::min(prevEnd + BS, range); accWork[tend-1] += accWork[prevEnd-1]; } } #pragma omp barrier if (tid > 0) { intB blockScan = accWork[start-1]; for (intE i=start; i<end-1; i++) accWork[i] += blockScan; } } } //dynamic average with scaling intB tgtWorkVal = (long long int)(double(accWork[range-1]/nPartsRem)*scaling); //find smallest support value at which work is greater than average intE partUB = (std::lower_bound(accWork.begin(), accWork.begin()+range, tgtWorkVal) - accWork.begin()) + tipMin + 1; tgtWorkVal = accWork[std::min(partUB-tipMin-1, range-1)]; return std::make_tuple(partUB, tgtWorkVal); } //compute scaling factor double compute_scale(std::vector<intE> &partEdges, std::vector<intE> &initSupp, intE maxSupp, intB tgtWork) { intB actualWork = 0; #pragma omp parallel for num_threads(NUM_THREADS) reduction (+:actualWork) for (intE i=0; i<partEdges.size(); i++) actualWork += std::min(initSupp[partEdges[i]], maxSupp); if (actualWork == 0) return 1.0; assert(actualWork >= tgtWork); double scaling = ((double)tgtWork)/((double)actualWork); return scaling; } //find active edges for the first peeling iteration of a partition void find_active_edges(std::vector<intE> &eIds, std::vector<intE> &tipVal, std::vector<uint8_t> &isActive, intE nEdgesRem, intE kLo, intE kHi, std::vector<intE> &activeEdges, intE &activeEdgePtr) { if (thdEdgeBuff.size() < NUM_THREADS) thdEdgeBuff.resize(NUM_THREADS); #pragma omp parallel num_threads(NUM_THREADS) { size_t tid = omp_get_thread_num(); std::array<intE, locBuffSizeLarge> &locBuff = thdEdgeBuff[tid]; unsigned locBuffPtr = 0; #pragma omp for for (intE i=0; i<nEdgesRem; i++) { intE e = eIds[i]; assert(tipVal[e] >= kLo); if 
(tipVal[e] < kHi) { locBuff[locBuffPtr++] = e; locBuffPtr = updateGlobalQueue(locBuffPtr, locBuffSizeLarge, activeEdgePtr, locBuff, activeEdges); isActive[e] = true; } } if (locBuffPtr > 0) locBuffPtr = updateGlobalQueue(locBuffPtr, locBuffPtr, activeEdgePtr, locBuff, activeEdges); } } /***************************************************************************** Update support of edges in a peeling iteration Inputs: 1. BEG -> BE-Index 2. tipVal -> vector of support of edges 3. kLo, kHi -> partition range 4. activeEdges, activeEdgePtr, activeEdgeStartOffset -> set of edges to Peel 5. isActive -> boolean array to indicate if an edge is active 6. isPeeled -> boolean array to indicate if an edge is already peeled Outputs: 1. updated edge supports 2. updated list of active edges 3. returns a pointer to indicate the newly added active edges in activeEdges[] array Arguments: 1. bloomUpdates -> vector to accumulate updates at blooms 2. activeBlooms -> array to store blooms with non-zero updates ******************************************************************************/ intE update_edge_supp(BEGraphLoMem& BEG, std::vector<intE> &tipVal, intE kLo, intE kHi, std::vector<intE> &activeEdges, intE activeEdgePtr, intE activeEdgeStartOffset, std::vector<intE> &bloomUpdates, std::vector<intB> &activeBlooms, std::vector<uint8_t> &isActive, std::vector<uint8_t> &isPeeled) { intE prevActiveEdgePtr = activeEdgePtr; intB activeBloomPtr = 0; if (thdBloomBuff.size() < NUM_THREADS) thdBloomBuff.resize(NUM_THREADS); if (thdEdgeBuff.size() < NUM_THREADS) thdEdgeBuff.resize(NUM_THREADS); unsigned numBloomParts = NUM_THREADS*50; if (partBloomStart.size() < numBloomParts+1) partBloomStart.resize(numBloomParts+1); if (workBloomSchedule.size() == 0) { workBloomSchedule.resize(BEG.numV); accWorkBloomSchedule.resize(BEG.numV + 1); } #pragma omp parallel num_threads(NUM_THREADS) { size_t tid = omp_get_thread_num(); std::array<intB, locBuffSizeLarge> &locBloomBuff = thdBloomBuff[tid]; unsigned locBloomBuffPtr = 0; std::array<intE, locBuffSizeLarge> &locEdgeBuff = thdEdgeBuff[tid]; unsigned locEdgeBuffPtr = 0; //Explore active edges and activate blooms #pragma omp for schedule (dynamic) for (intE i=activeEdgeStartOffset; i<prevActiveEdgePtr; i++) { intE e = activeEdges[i]; assert(!isPeeled[e]); assert(isActive[e]); intE NeI = BEG.edgeDegree[e]; for (intE j=0; j<NeI; j++) { intB belink = BEG.edgeVI[e]+j; intB bloomId = BEG.edgeEI[belink].first; intE neighEdgeId = BEG.edgeEI[belink].second; if (isPeeled[neighEdgeId] || (BEG.bloomDegree[bloomId]<2)) continue; if (isActive[neighEdgeId] && (neighEdgeId>e)) continue; intE updateVal = BEG.bloomDegree[bloomId]-1; //update neighbor edge intE prevTipVal = tipVal[neighEdgeId]; if (prevTipVal >= kHi) { prevTipVal = __sync_fetch_and_sub(&tipVal[neighEdgeId], updateVal); if ((prevTipVal < kHi + updateVal) && (prevTipVal >= kHi)) { locEdgeBuff[locEdgeBuffPtr++] = neighEdgeId; locEdgeBuffPtr = updateGlobalQueue(locEdgeBuffPtr, locBuffSizeLarge, activeEdgePtr, locEdgeBuff, activeEdges); } } //update bloom intE numDels = __sync_fetch_and_add(&bloomUpdates[bloomId], (intE)1); if (numDels==0) { locBloomBuff[locBloomBuffPtr++] = bloomId; locBloomBuffPtr = updateGlobalQueue(locBloomBuffPtr, locBuffSizeLarge, activeBloomPtr, locBloomBuff, activeBlooms); } } } if (locBloomBuffPtr > 0) locBloomBuffPtr = updateGlobalQueue(locBloomBuffPtr, locBloomBuffPtr, activeBloomPtr, locBloomBuff, activeBlooms); #pragma omp barrier #pragma omp for for (intE i=activeEdgeStartOffset; i<prevActiveEdgePtr; i++) { 
intE e = activeEdges[i]; isActive[e] = false; isPeeled[e] = true; } //LOAD BALANCING #pragma omp for for (intB i=0; i<activeBloomPtr; i++) workBloomSchedule[i] = BEG.bloomDegree[activeBlooms[i]]; //compute prefix scan int bloomsPerThd = (activeBloomPtr-1)/NUM_THREADS + 1; if (tid==0) accWorkBloomSchedule[0] = 0; #pragma omp barrier if (bloomsPerThd < 10) { #pragma omp single { for (intB i=0; i<activeBloomPtr; i++) accWorkBloomSchedule[i+1] = accWorkBloomSchedule[i] + workBloomSchedule[i]; } } else { intB startBloomIdx = bloomsPerThd*tid+1; intB endBloomIdx = std::min(startBloomIdx + bloomsPerThd, activeBloomPtr+1); accWorkBloomSchedule[startBloomIdx] = workBloomSchedule[startBloomIdx-1]; for (intB i=startBloomIdx+1; i<endBloomIdx; i++) accWorkBloomSchedule[i] = accWorkBloomSchedule[i-1] + workBloomSchedule[i-1]; #pragma omp barrier #pragma omp single { for (size_t i=1; i<NUM_THREADS; i++) { intB prevEnd = bloomsPerThd*i + 1; if (prevEnd > activeBloomPtr) continue; intB tend = std::min(prevEnd + bloomsPerThd, activeBloomPtr+1); accWorkBloomSchedule[tend-1] += accWorkBloomSchedule[prevEnd-1]; } partBloomStart[0] = 0; } #pragma omp barrier if (tid>0) { intB blockScan = accWorkBloomSchedule[startBloomIdx-1]; for (intB i=startBloomIdx; i<endBloomIdx-1; i++) accWorkBloomSchedule[i] += blockScan; } } #pragma omp barrier intB workPerPart = (accWorkBloomSchedule[activeBloomPtr]-1)/numBloomParts + 1; //find task offsets #pragma omp for for (intB i=0; i<numBloomParts; i++) { intB ptrOff = std::lower_bound(accWorkBloomSchedule.begin(), accWorkBloomSchedule.begin()+activeBloomPtr+1, workPerPart*(i+1)) - accWorkBloomSchedule.begin(); partBloomStart[i+1] = std::min(ptrOff, activeBloomPtr); } #pragma omp barrier #pragma omp for for (intB i=0; i<numBloomParts; i++) { assert(partBloomStart[i+1] >= partBloomStart[i]); } #pragma omp barrier //explore active blooms and update edge supports #pragma omp for schedule (dynamic,5) for (intB i=0; i<activeBloomPtr; i++) { intB bloomId = activeBlooms[i]; intE numDels = bloomUpdates[bloomId]; bloomUpdates[bloomId] = 0; intB baseIndex = BEG.bloomVI[bloomId]; for (intE j=0; j<BEG.bloomDegree[bloomId]; j++) { intE e1Id = BEG.bloomEI[baseIndex + j].first; intE e2Id = BEG.bloomEI[baseIndex + j].second; if (isPeeled[e1Id] || isPeeled[e2Id]) { std::swap(BEG.bloomEI[baseIndex+j], BEG.bloomEI[baseIndex+BEG.bloomDegree[bloomId]-1]); j--; BEG.bloomDegree[bloomId]--; continue; } intE prevTipVal = tipVal[e1Id]; if (prevTipVal >= kHi) { prevTipVal = __sync_fetch_and_sub(&tipVal[e1Id], numDels); if ((prevTipVal < kHi + numDels) && (prevTipVal >= kHi)) { locEdgeBuff[locEdgeBuffPtr++] = e1Id; locEdgeBuffPtr = updateGlobalQueue(locEdgeBuffPtr, locBuffSizeLarge, activeEdgePtr, locEdgeBuff, activeEdges); } //else if (prevTipVal < kHi) __sync_fetch_and_add(&tipVal[e1Id], numDels); } prevTipVal = tipVal[e2Id]; if (prevTipVal >= kHi) { prevTipVal = __sync_fetch_and_sub(&tipVal[e2Id], numDels); if ((prevTipVal < kHi + numDels) && (prevTipVal >= kHi)) { locEdgeBuff[locEdgeBuffPtr++] = e2Id; locEdgeBuffPtr = updateGlobalQueue(locEdgeBuffPtr, locBuffSizeLarge, activeEdgePtr, locEdgeBuff, activeEdges); } //else if (prevTipVal < kHi) __sync_fetch_and_add(&tipVal[e2Id], numDels); } } } if (locEdgeBuffPtr > 0) locEdgeBuffPtr = updateGlobalQueue(locEdgeBuffPtr, locEdgeBuffPtr, activeEdgePtr, locEdgeBuff, activeEdges); #pragma omp barrier #pragma omp for for (intE i=prevActiveEdgePtr; i<activeEdgePtr; i++) isActive[activeEdges[i]] = true; } return activeEdgePtr; }
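peel.h leans on several helpers whose definitions live elsewhere (presumably count.h or a shared utility header): choose2, serial_prefix_sum, and the parallel_compact family. The sketches below are hedged reconstructions consistent with how those helpers are called above; the exact signatures and the intV/intB typedefs are assumptions, and the compaction is shown sequentially for clarity even though the real version is presumably parallel (one representative of the parallel_compact family is shown).

// Hedged sketches of helpers used above; the real definitions may differ.
#include <cstdint>
#include <vector>

// n-choose-2: number of unordered pairs, used for butterfly counts.
template <typename T, typename V>
T choose2(V n) { return (n < 2) ? (T)0 : ((T)n * (T)(n - 1)) / 2; }

// Exclusive prefix sum over per-thread counts: offset[t+1] = counts[0] + ... + counts[t].
// Assumes offset is already sized to counts.size() + 1, as in return_updates_by_peeling.
void serial_prefix_sum(std::vector<intV> &offset, std::vector<intV> &counts)
{
    offset[0] = 0;
    for (size_t t = 0; t < counts.size(); t++)
        offset[t + 1] = offset[t] + counts[t];
}

// Keep in[i] wherever flags[i] != 0; sequential sketch of the parallel version.
template <typename TI, typename TO>
void parallel_compact(std::vector<TI> &in, std::vector<uint8_t> &flags,
                      std::vector<TO> &out)
{
    out.clear();
    for (size_t i = 0; i < in.size(); i++)
        if (flags[i]) out.push_back((TO)in[i]);
}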
GB_unop__cimag_fp32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__cimag_fp32_fc32) // op(A') function: GB (_unop_tran__cimag_fp32_fc32) // C type: float // A type: GxB_FC32_t // cast: GxB_FC32_t cij = (aij) // unaryop: cij = cimagf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = cimagf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = (aij) ; \ Cx [pC] = cimagf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_CIMAG || GxB_NO_FP32 || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__cimag_fp32_fc32) ( float *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = cimagf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = cimagf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__cimag_fp32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
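Stripped of the GraphBLAS scaffolding (the GB name mangling, GB_DISABLE, and the generated macros), the apply kernel above reduces to one elementwise loop with an optional bitmap mask. A hedged stand-alone sketch of the same computation:

// Plain sketch of what the kernel computes, minus the GraphBLAS scaffolding:
// Cx[p] = imag(Ax[p]) for full (Ab == NULL) or bitmap inputs.
#include <complex>
#include <cstdint>

void cimag_apply_sketch(float *Cx, const std::complex<float> *Ax,
                        const int8_t *Ab, int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++)
    {
        if (Ab != NULL && !Ab[p]) continue;   // bitmap: skip absent entries
        Cx[p] = Ax[p].imag();
    }
}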
grid.c
// ----------------------------------------------------------------------------- // // "00_AccelGraph" // // ----------------------------------------------------------------------------- // Copyright (c) 2014-2019 All rights reserved // ----------------------------------------------------------------------------- // Author : Abdullah Mughrabi // Email : atmughra@ncsu.edu||atmughrabi@gmail.com // File : grid.c // Create : 2019-06-21 17:15:17 // Revise : 2019-09-28 15:36:13 // Editor : Abdullah Mughrabi // ----------------------------------------------------------------------------- #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <stdint.h> #include <omp.h> #include "grid.h" #include "edgeList.h" #include "vertex.h" #include "myMalloc.h" #include "graphConfig.h" #include "bitmap.h" #include "timer.h" void gridPrint(struct Grid *grid) { printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Grid Properties"); printf(" -----------------------------------------------------\n"); #if WEIGHTED printf("| %-51s | \n", "WEIGHTED"); #else printf("| %-51s | \n", "UN-WEIGHTED"); #endif #if DIRECTED printf("| %-51s | \n", "DIRECTED"); #else printf("| %-51s | \n", "UN-DIRECTED"); #endif printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Number of Vertices (V)"); printf("| %-51u | \n", grid->num_vertices); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Number of Edges (E)"); printf("| %-51u | \n", grid->num_edges); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Number of Partitions (P)"); printf("| %-51u | \n", grid->num_partitions); printf(" -----------------------------------------------------\n"); // _u32 i; // for ( i = 0; i < grid->num_vertices; ++i) // { // uint32_t begin = getPartitionRangeBegin(); // uint32_t end = getPartitionRangeEnd(); // } // uint32_t i; // for ( i = 0; i < (grid->num_partitions*grid->num_partitions); ++i) // { // uint32_t x = i % grid->num_partitions; // % is the "modulo operator", the remainder of i / width; // uint32_t y = i / grid->num_partitions; // printf("| %-11s (%u,%u) | \n", "Partition: ", y, x); // printf("| %-11s %-40u | \n", "Edges: ", grid->partitions[i].num_edges); // printf("| %-11s %-40u | \n", "Vertices: ", grid->partitions[i].num_vertices); // edgeListPrint(grid->partitions[i].edgeList); // } } void graphGridResetActivePartitions(struct Grid *grid) { uint32_t totalPartitions = 0; totalPartitions = grid->num_partitions * grid->num_partitions; uint32_t i; #pragma omp parallel for default(none) shared(grid,totalPartitions) private(i) for (i = 0; i < totalPartitions; ++i) { grid->activePartitions[i] = 0; } } void graphGridResetActivePartitionsMap(struct Grid *grid) { clearBitmap(grid->activePartitionsMap); } void graphGridSetActivePartitionsMap(struct Grid *grid, uint32_t vertex) { uint32_t row = getPartitionID(grid->num_vertices, grid->num_partitions, vertex); uint32_t Partition_idx = 0; uint32_t i; uint32_t totalPartitions = 0; totalPartitions = grid->num_partitions; // #pragma omp parallel for default(none) shared(grid,totalPartitions,row) private(i,Partition_idx) for ( i = 0; i < totalPartitions; ++i) { Partition_idx = (row * totalPartitions) + i; if(grid->partitions[Partition_idx].edgeList->num_edges) { if(!getBit(grid->activePartitionsMap, Partition_idx)) { setBitAtomic(grid->activePartitionsMap, Partition_idx); } } } } void graphGridSetActivePartitions(struct Grid *grid, 
uint32_t vertex) { uint32_t row = getPartitionID(grid->num_vertices, grid->num_partitions, vertex); uint32_t Partition_idx = 0; uint32_t i; uint32_t totalPartitions = 0; totalPartitions = grid->num_partitions; // #pragma omp parallel for default(none) shared(grid,totalPartitions,row) private(i,Partition_idx) for ( i = 0; i < totalPartitions; ++i) { Partition_idx = (row * totalPartitions) + i; if(grid->partitions[Partition_idx].edgeList->num_edges) { grid->activePartitions[Partition_idx] = 1; } } } struct Grid *gridNew(struct EdgeList *edgeList, uint32_t cache_size) { uint32_t totalPartitions = 0; struct Grid *grid = (struct Grid *) my_malloc( sizeof(struct Grid)); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); grid->num_edges = edgeList->num_edges; grid->num_vertices = edgeList->num_vertices; grid->num_partitions = gridCalculatePartitions(edgeList, cache_size); totalPartitions = grid->num_partitions * grid->num_partitions; grid->partitions = (struct Partition *) my_malloc(totalPartitions * sizeof(struct Partition)); grid->activePartitions = (uint32_t *) my_malloc(totalPartitions * sizeof(uint32_t)); grid->out_degree = (uint32_t *) my_malloc(grid->num_vertices * sizeof(uint32_t)); grid->in_degree = (uint32_t *) my_malloc(grid->num_vertices * sizeof(uint32_t)); // grid->activeVertices = newBitmap(grid->num_vertices); grid->activePartitionsMap = newBitmap(totalPartitions); uint32_t i; #pragma omp parallel for default(none) private(i) shared(totalPartitions,grid) for (i = 0; i < totalPartitions; ++i) { grid->partitions[i].num_edges = 0; grid->partitions[i].num_vertices = 0; /* code */ grid->activePartitions[i] = 0; } #pragma omp parallel for default(none) private(i) shared(grid) for (i = 0; i < grid->num_vertices ; ++i) { grid->out_degree[i] = 0; grid->in_degree[i] = 0; } Start(timer); grid = graphGridProcessInOutDegrees(grid, edgeList); Stop(timer); gridPrintMessageWithtime("Grid Process In Out Degrees (Seconds)", Seconds(timer)); Start(timer); grid = gridPartitionEdgeListSizePreprocessing(grid, edgeList); Stop(timer); gridPrintMessageWithtime("Partition EdgeList Size (Seconds)", Seconds(timer)); Start(timer); grid = gridPartitionsMemoryAllocations(grid); Stop(timer); gridPrintMessageWithtime("Partitions Memory Allocations (Seconds)", Seconds(timer)); Start(timer); grid = gridPartitionEdgePopulation(grid, edgeList); Stop(timer); gridPrintMessageWithtime("Partition Edge Population (Seconds)", Seconds(timer)); Start(timer); grid = gridPartitionVertexSizePreprocessing(grid); Stop(timer); gridPrintMessageWithtime("Partition Vertex Size (Seconds)", Seconds(timer)); return grid; } void gridFree(struct Grid *grid) { if(grid) { uint32_t totalPartitions = grid->num_partitions * grid->num_partitions; uint32_t i; for (i = 0; i < totalPartitions; ++i) { freeEdgeList(grid->partitions[i].edgeList); } freeBitmap(grid->activePartitionsMap); if(grid->activePartitions) free(grid->activePartitions); if(grid->out_degree) free(grid->out_degree); if(grid->in_degree) free(grid->in_degree); if(grid->partitions) free(grid->partitions); free(grid); } } struct Grid *graphGridProcessInOutDegrees(struct Grid *grid, struct EdgeList *edgeList) { uint32_t i; uint32_t src; uint32_t dest; #pragma omp parallel for default(none) private(i,src,dest) shared(edgeList,grid) for(i = 0; i < edgeList->num_edges; i++) { src = edgeList->edges_array_src[i]; dest = edgeList->edges_array_dest[i]; #pragma omp atomic update grid->out_degree[src]++; #pragma omp atomic update grid->in_degree[dest]++; } return grid; } 
struct Grid *gridPartitionVertexSizePreprocessing(struct Grid *grid) { uint32_t i; uint32_t j; uint32_t src; uint32_t dest; uint32_t num_vertices = 0; uint32_t totalPartitions = grid->num_partitions * grid->num_partitions; // #pragma omp parallel for default(none) private(i) shared(totalPartitions,grid) #pragma omp parallel for default(none) private(i,src,dest,num_vertices) shared(totalPartitions,grid) schedule(dynamic,1024) for ( j = 0; j < totalPartitions; ++j) { num_vertices = 0; // #pragma omp parallel for default(none) private(i,src,dest) shared(j,grid) schedule(dynamic,1024) reduction(max:num_vertices) for(i = 0; i < grid->partitions[j].edgeList->num_edges; i++) { src = grid->partitions[j].edgeList->edges_array_src[i]; dest = grid->partitions[j].edgeList->edges_array_dest[i]; num_vertices = maxTwoIntegers(num_vertices, maxTwoIntegers(src, dest)); } grid->partitions[j].num_vertices = num_vertices; grid->partitions[j].edgeList->num_vertices = num_vertices; } return grid; } struct Grid *gridPartitionEdgeListSizePreprocessing(struct Grid *grid, struct EdgeList *edgeList) { uint32_t i; uint32_t src; uint32_t dest; uint32_t Partition_idx; uint32_t num_partitions = grid->num_partitions; uint32_t num_vertices = grid->num_vertices; uint32_t row; uint32_t col; #pragma omp parallel for default(none) private(i,row,col,src,dest,Partition_idx) shared(num_vertices, num_partitions,edgeList,grid) for(i = 0; i < edgeList->num_edges; i++) { src = edgeList->edges_array_src[i]; dest = edgeList->edges_array_dest[i]; // __sync_fetch_and_add(&grid->out_degree[src],1); // __sync_fetch_and_add(&grid->in_degree[dest],1); row = getPartitionID(num_vertices, num_partitions, src); col = getPartitionID(num_vertices, num_partitions, dest); Partition_idx = (row * num_partitions) + col; // __sync_fetch_and_add(&grid->partitions[Partition_idx].num_edges,1); #pragma omp atomic update grid->partitions[Partition_idx].num_edges++; } return grid; } struct Grid *gridPartitionEdgePopulation(struct Grid *grid, struct EdgeList *edgeList) { uint32_t i; uint32_t src; uint32_t dest; uint32_t Partition_idx; uint32_t Edge_idx; uint32_t num_partitions = grid->num_partitions; uint32_t num_vertices = grid->num_vertices; uint32_t row; uint32_t col; #pragma omp parallel for default(none) private(Edge_idx,i,row,col,src,dest,Partition_idx) shared(num_vertices, num_partitions,edgeList,grid) for(i = 0; i < edgeList->num_edges; i++) { src = edgeList->edges_array_src[i]; dest = edgeList->edges_array_dest[i]; row = getPartitionID(num_vertices, num_partitions, src); col = getPartitionID(num_vertices, num_partitions, dest); Partition_idx = (row * num_partitions) + col; Edge_idx = __sync_fetch_and_add(&grid->partitions[Partition_idx].num_edges, 1); grid->partitions[Partition_idx].edgeList->edges_array_src[Edge_idx] = edgeList->edges_array_src[i]; grid->partitions[Partition_idx].edgeList->edges_array_dest[Edge_idx] = edgeList->edges_array_dest[i]; #if WEIGHTED grid->partitions[Partition_idx].edgeList->edges_array_weight[Edge_idx] = edgeList->edges_array_weight[i]; #endif } return grid; } struct Grid *gridPartitionsMemoryAllocations(struct Grid *grid) { uint32_t i; uint32_t totalPartitions = grid->num_partitions * grid->num_partitions; #pragma omp parallel for default(none) private(i) shared(totalPartitions,grid) for ( i = 0; i < totalPartitions; ++i) { grid->partitions[i].edgeList = newEdgeList(grid->partitions[i].num_edges); grid->partitions[i].edgeList->num_vertices = grid->partitions[i].num_vertices; grid->partitions[i].num_edges = 0; } return 
grid; } uint32_t gridCalculatePartitions(struct EdgeList *edgeList, uint32_t cache_size) { /* Heuristic: estimate how many cache-sized chunks the per-vertex state needs (~8 bytes per vertex, cache_size presumably in KB), then clamp to a sane range. */ uint32_t num_vertices = edgeList->num_vertices; uint32_t num_Partitions = (num_vertices * 8 / 1024) / cache_size; if(num_Partitions > 512) num_Partitions = 256; if(num_Partitions == 0) num_Partitions = 4; return num_Partitions; } /* Map a vertex id to its partition: the first (vertices % partitions) partitions hold one extra vertex, so partition sizes differ by at most one. */ inline uint32_t getPartitionID(uint32_t vertices, uint32_t partitions, uint32_t vertex_id) { uint32_t partition_size = vertices / partitions; if (vertices % partitions == 0) { return vertex_id / partition_size; } partition_size += 1; uint32_t split_point = vertices % partitions * partition_size; return (vertex_id < split_point) ? vertex_id / partition_size : (vertex_id - split_point) / (partition_size - 1) + (vertices % partitions); } void gridPrintMessageWithtime(const char *msg, double time) { printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", msg); printf(" -----------------------------------------------------\n"); printf("| %-51f | \n", time); printf(" -----------------------------------------------------\n"); }
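A note on the two helpers that close grid.c: gridCalculatePartitions only sizes the grid, while getPartitionID does the vertex-to-partition mapping, giving the first vertices % partitions partitions one extra vertex each. The standalone harness below is a sketch for sanity-checking that arithmetic; it re-implements the same mapping (it is not part of AccelGraph) and asserts that every id lands in [0, P) and that partition sizes differ by at most one:

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

// Same arithmetic as getPartitionID in grid.c: the first (vertices % partitions)
// partitions are one vertex larger than the rest.
static uint32_t partition_id(uint32_t vertices, uint32_t partitions, uint32_t v) {
    uint32_t size = vertices / partitions;
    if (vertices % partitions == 0) return v / size;
    size += 1;                                        // size of the "large" partitions
    uint32_t split = (vertices % partitions) * size;  // first id owned by a "small" partition
    return (v < split) ? v / size
                       : (v - split) / (size - 1) + (vertices % partitions);
}

int main() {
    const uint32_t V = 1000003, P = 17;               // deliberately non-divisible
    std::vector<uint32_t> count(P, 0);
    for (uint32_t v = 0; v < V; ++v) {
        uint32_t p = partition_id(V, P, v);
        assert(p < P);                                // every vertex maps into the grid
        ++count[p];
    }
    uint32_t lo = count[0], hi = count[0];
    for (uint32_t c : count) { if (c < lo) lo = c; if (c > hi) hi = c; }
    printf("partition sizes in [%u, %u]\n", lo, hi);
    assert(hi - lo <= 1);                             // balance property
}

Since Partition_idx = row * P + col indexes the P x P grid row-major, a balanced vertex split keeps the per-partition edge buckets comparable for graphs with near-uniform degree.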
population.h
#pragma once #include <algorithm> #include <atomic> #include <cassert> #include <cstdlib> #include <iostream> #include <functional> #include <memory> #include <vector> #ifdef _OPENMP #include <omp.h> #endif #include "random_wrapper.h" enum class Origin { Initialization = 0, Recombination = 1, Mutation = 2, }; static inline std::ostream& operator<< (std::ostream& stream, const Origin& o) { switch (o) { case Origin::Initialization: stream << "Initialization"; break; case Origin::Recombination: stream << "Recombination"; break; case Origin::Mutation: stream << "Mutation"; break; } return stream; } /** A data structure for representing the population in a genetic algorithm * * All functionality except non-modifying iteration and insertion of new * children is not thread safe. * * The template parameter `elem_t` is the type of the individuals in the * population. The following member types have to be defined: * - `elem_t::Fitness` for a representation of its fitness * - `elem_t::EvalInfo` for data that are required for evaluation */ template<typename elem_t> class Population { public: struct Entry { elem_t elem; typename elem_t::Fitness fitness_val; size_t accumulated_position = 0; // used for rank-combined sorting float accumulated_value = 0.0; // used for ratio-combined sorting bool evaluated = false; size_t birth_generation; Origin origin; Entry(size_t birth_generation, Origin origin) : birth_generation(birth_generation), origin(origin) { } void evaluate(const typename elem_t::EvalInfo &eval_info) { if (evaluated) { return; } elem.evaluate(this->fitness_val, eval_info); evaluated = true; } // friend bool operator< ( Entry const& a, Entry const& b) { // return a.fitness_val < b.fitness_val; // } friend std::ostream& operator<< (std::ostream& stream, const Entry& entry) { stream << entry.elem << "\n# with fitness value "; stream << entry.fitness_val; stream << "\n# created in generation " << entry.birth_generation << " from " << entry.origin; stream << "\n"; return stream; } }; using iterator_t = typename std::vector<Entry*>::iterator; static typename std::unique_ptr<Population<elem_t>> create(size_t pop_size, size_t child_num) { return std::unique_ptr<Population>(new Population(pop_size, child_num)); } ~Population(void) { // creepy things we have to do because of placement new for (size_t i = 0; i < first_free_pop; ++i) { Arena[i]->~Entry(); } for (size_t i = start_dead_zone; i < first_free_child; ++i) { Arena[i]->~Entry(); } free(Storage); } void shuffle(RandomWrapper &rw) { assert(finalized); rw.shuffle(Arena.begin(), Arena.begin() + first_free_pop); } /// Sort the entire population (children and residual individuals). /// Sorting works as follows: /// The fitness type is expected to define a set of groups of fitness /// orderings (based on serveral different fitness aspects). /// The population is sorted wrt. each of these fitness ordering groups and /// for each sorting, the respective position is annotated to the /// individuals. The final sorting is done wrt. the sums of positions. /// Effectively, individuals are sorted wrt. the arithmetic mean of their /// performance in all ordering groups. void rank_sort(void) { assert(finalized); assert(first_free_pop == start_dead_zone); auto end_it = Arena.begin() + first_free_child; // initialize the accumulated sum of all positions for each individual for (auto it = Arena.begin(); it != end_it; ++it) { (*it)->accumulated_position = 0; } for (int group_idx = 0; group_idx <= elem_t::Fitness::getMaxGroup(); ++group_idx) { // sort wrt. 
each ordering group... std::sort(Arena.begin(), end_it, [&](const auto& a, const auto &b){ return elem_t::Fitness::compare(a->fitness_val, b->fitness_val, group_idx) == -1; } ); // ...and accumulate the respective positions for each individual size_t idx = 0; for (auto it = Arena.begin(); it != end_it; ++it) { (*it)->accumulated_position += idx; ++idx; } } // final sorting according to the accumulated positions std::sort(Arena.begin(), end_it, [](const auto& a, const auto &b){ return a->accumulated_position < b->accumulated_position; } ); } /// Sort the entire population like rank_sort, however do not use the /// positions after sorting but use for each individual the sum of all /// component fitness values after applying a linear scale to a fixed /// interval. /// This puts more focus the magnitude of fitness improvements. void ratio_sort(void) { assert(finalized); assert(first_free_pop == start_dead_zone); auto end_it = Arena.begin() + first_free_child; // initialize the accumulated sum of all positions for each individual for (auto it = Arena.begin(); it != end_it; ++it) { (*it)->accumulated_value = 0.0; } float range_min = 1; float range_max = 1000; for (int group_idx = 0; group_idx <= elem_t::Fitness::getMaxGroup(); ++group_idx) { float max_val = 0.0; float min_val = 0.0; for (auto it = Arena.begin(); it != end_it; ++it) { float val = (*it)->fitness_val.getComponentValue(group_idx); max_val = std::max(max_val, val); min_val = std::min(min_val, val); } for (auto it = Arena.begin(); it != end_it; ++it) { float val = (*it)->fitness_val.getComponentValue(group_idx); float x; // apply a linear transform to map it into [range_min, range_max] if (max_val == min_val) { x = range_min; } else { x = (((range_max - range_min) * (val - min_val)) / (max_val - min_val)) + 1; } (*it)->accumulated_value += x; } } // final sorting according to the accumulated positions std::sort(Arena.begin(), end_it, [](const auto& a, const auto &b){ return a->accumulated_value < b->accumulated_value; } ); } void swap(size_t idx1, size_t idx2) { assert(finalized); assert(0 <= idx1 && idx1 < first_free_child); assert(0 <= idx2 && idx2 < first_free_child); auto tmp = Arena[idx1]; Arena[idx1] = Arena[idx2]; Arena[idx2] = tmp; } size_t getPopEndIdx(void) { return first_free_pop; } size_t getChildrenEndIdx(void) { return first_free_child; } void purge(void) { assert(finalized); // creepy things we have to do because of placement new for (size_t i = start_dead_zone; i < first_free_child; ++i) { Arena[i]->~Entry(); } first_free_child = start_dead_zone; current_generation += 1; } Entry *insertPop(Origin origin=Origin::Initialization) { assert(not finalized); size_t pos = first_free_pop.fetch_add(1); assert(pos < start_dead_zone); Entry *res = Arena[pos]; //placement new new (res) Entry(current_generation, origin); return res; } Entry *replacePop(size_t idx, Origin origin=Origin::Initialization) { auto* e = Arena[idx]; e->~Entry(); new (e) Entry(current_generation, origin); return e; } Entry *insertChild(Origin origin) { assert(finalized); size_t pos = first_free_child.fetch_add(1); assert(pos < num_elements); Entry *res = Arena[pos]; //placement new new (res) Entry(current_generation, origin); return res; } typename std::vector<Entry*>::iterator getPopBegin(void) { assert(finalized); return Arena.begin(); } typename std::vector<Entry*>::const_iterator getPopBegin(void) const { assert(finalized); return Arena.begin(); } typename std::vector<Entry*>::iterator getPopEnd(void) { assert(finalized); return Arena.begin() + 
first_free_pop; } typename std::vector<Entry*>::const_iterator getPopEnd(void) const { assert(finalized); return Arena.begin() + first_free_pop; } void forall_entries(std::function<void(Entry&)> stmt) { size_t pop_size = first_free_pop; #pragma omp parallel for for (size_t i = 0; i < pop_size; ++i) { auto &entry = this->getPopAt(i); stmt(entry); } } void forall_chunks(std::function<void(typename std::vector<Entry*>::iterator, typename std::vector<Entry*>::iterator)> stmt) { auto pop_begin = this->getPopBegin(); #pragma omp parallel for for (size_t i = 0; i < num_chunks; ++i) { /* clamp the start so a rounded-up chunk_size cannot run past the population */ size_t chunk_start = std::min(i * chunk_size, (size_t)first_free_pop); size_t chunk_end = std::min(chunk_start + chunk_size, (size_t)first_free_pop); auto start_it = pop_begin + chunk_start; auto end_it = pop_begin + chunk_end; stmt(start_it, end_it); } } Entry& getPopAt(size_t idx) { assert(finalized); assert((0 <= idx) && (idx < first_free_pop)); return *Arena.at(idx); } /** Finalize construction phase of the initial population * * This checks several invariants that have to hold for the population at * any point after calling this function. This method has to be called * before new children can be added or iterated. */ void finalize(void) { assert(num_elements > start_dead_zone); assert(first_free_pop == start_dead_zone); assert(first_free_child == start_dead_zone); assert(start_dead_zone > 0); current_generation += 1; finalized = true; } void printJournal(std::ostream& journal, int indent) { add_indent(journal, indent); journal << "[\n"; bool first = true; for (auto it = this->getPopBegin(); it != this->getPopEnd(); ++it) { if (!first) { journal << ",\n"; } first = false; /* was "first = true", which never emitted a separator */ add_indent(journal, indent + 2); journal << "\""; journal << (*it)->fitness_val; journal << "\""; } journal << "\n"; add_indent(journal, indent); journal << "]\n"; } double computeDiversity(void) const { double result = 0; auto pop_end = getPopEnd(); for (auto it_a = getPopBegin(); it_a < pop_end; ++it_a) { for (auto it_b = it_a + 1; it_b < pop_end; ++it_b) { result += elem_t::distance((*it_a)->elem, (*it_b)->elem); } } return result / (double)first_free_pop; } size_t getCurrentGeneration(void) { return current_generation; } private: Population(size_t pop_size, size_t child_num) : num_elements(pop_size + child_num), Arena(num_elements) { start_dead_zone = pop_size; Storage = (Entry*)malloc(num_elements * sizeof(Entry)); for (size_t i = 0; i < num_elements; ++i) { Arena[i] = &Storage[i]; } first_free_pop = 0; first_free_child = start_dead_zone; #ifdef _OPENMP num_chunks = omp_get_max_threads(); #else num_chunks = 1; #endif chunk_size = (pop_size + num_chunks - 1) / num_chunks; /* round up so forall_chunks also covers the trailing partial chunk */ } void add_indent(std::ostream& stream, int indent) { for (int i = 0; i < indent; ++i) { stream << " "; } } size_t num_elements; size_t start_dead_zone; std::atomic<size_t> first_free_pop; std::atomic<size_t> first_free_child; size_t num_chunks = 1; size_t chunk_size; bool finalized = false; size_t current_generation = 0; std::vector<Entry*> Arena; Entry *Storage; };
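To make the interface contract documented above concrete, here is a minimal usage sketch, assuming the header is available as population.h (together with its random_wrapper.h dependency). The Individual type and all of its member names below are hypothetical, written only to satisfy the members the class documentation demands (elem_t::Fitness, elem_t::EvalInfo, evaluate, distance):

#include <cmath>
#include <iostream>
#include "population.h"

// A hypothetical individual: one real number, scored by distance to a target.
struct Individual {
    double value = 0.0;

    struct Fitness {
        double score = 0.0;
        static int getMaxGroup() { return 0; }   // a single ordering group
        static int compare(const Fitness &a, const Fitness &b, int /*group*/) {
            return a.score < b.score ? -1 : (a.score > b.score ? 1 : 0);
        }
        float getComponentValue(int /*group*/) const { return (float)score; }
        friend std::ostream &operator<<(std::ostream &os, const Fitness &f) {
            return os << f.score;
        }
    };
    struct EvalInfo { double target = 42.0; };

    void evaluate(Fitness &out, const EvalInfo &info) const {
        out.score = std::fabs(value - info.target);      // smaller is better
    }
    static double distance(const Individual &a, const Individual &b) {
        return std::fabs(a.value - b.value);
    }
    friend std::ostream &operator<<(std::ostream &os, const Individual &i) {
        return os << "x=" << i.value;
    }
};

int main() {
    const size_t pop_size = 8, child_num = 4;
    auto pop = Population<Individual>::create(pop_size, child_num);
    for (size_t i = 0; i < pop_size; ++i)                // fill every slot, then finalize
        pop->insertPop()->elem.value = (double)i * 10.0;
    pop->finalize();

    Individual::EvalInfo info;
    pop->forall_entries([&](auto &e) { e.evaluate(info); });

    auto *child = pop->insertChild(Origin::Mutation);    // thread-safe slot grab
    child->elem.value = 40.0;
    child->evaluate(info);

    pop->rank_sort();                                    // lowest accumulated rank first
    std::cout << pop->getPopAt(0);
    pop->purge();                                        // drop the dead zone, next generation
}

Note the order the asserts enforce: every population slot must be filled with insertPop before finalize, and only after finalize may children be inserted (insertChild grabs its slot with an atomic fetch_add, which is why concurrent child insertion is safe while the rest of the class is not). After a sort, the best entries sit in the population prefix, so purge destroys exactly the losers that were swapped into the dead zone.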
NeighborhoodGraph.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #ifndef _SPTAG_COMMON_NG_H_ #define _SPTAG_COMMON_NG_H_ #include "../VectorIndex.h" #include "CommonUtils.h" #include "Dataset.h" #include "FineGrainedLock.h" #include "QueryResultSet.h" namespace SPTAG { namespace COMMON { class NeighborhoodGraph { public: NeighborhoodGraph(): m_iTPTNumber(32), m_iTPTLeafSize(2000), m_iSamples(1000), m_numTopDimensionTPTSplit(5), m_iNeighborhoodSize(32), m_iNeighborhoodScale(2), m_iCEFScale(2), m_iRefineIter(0), m_iCEF(1000), m_iMaxCheckForRefineGraph(10000) { m_pNeighborhoodGraph.SetName("Graph"); } ~NeighborhoodGraph() {} virtual void InsertNeighbors(VectorIndex* index, const SizeType node, SizeType insertNode, float insertDist) = 0; virtual void RebuildNeighbors(VectorIndex* index, const SizeType node, SizeType* nodes, const BasicResult* queryResults, const int numResults) = 0; virtual float GraphAccuracyEstimation(VectorIndex* index, const SizeType samples, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) = 0; template <typename T> void BuildGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) { std::cout << "build RNG graph!" << std::endl; m_iGraphSize = index->GetNumSamples(); m_iNeighborhoodSize = m_iNeighborhoodSize * m_iNeighborhoodScale; m_pNeighborhoodGraph.Initialize(m_iGraphSize, m_iNeighborhoodSize); m_dataUpdateLock.resize(m_iGraphSize); if (m_iGraphSize < 1000) { RefineGraph<T>(index, idmap); std::cout << "Build RNG Graph end!" << std::endl; return; } { COMMON::Dataset<float> NeighborhoodDists(m_iGraphSize, m_iNeighborhoodSize); std::vector<std::vector<SizeType>> TptreeDataIndices(m_iTPTNumber, std::vector<SizeType>(m_iGraphSize)); std::vector<std::vector<std::pair<SizeType, SizeType>>> TptreeLeafNodes(m_iTPTNumber, std::vector<std::pair<SizeType, SizeType>>()); for (SizeType i = 0; i < m_iGraphSize; i++) for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) (NeighborhoodDists)[i][j] = MaxDist; std::cout << "Parallel TpTree Partition begin " << std::endl; #pragma omp parallel for schedule(dynamic) for (int i = 0; i < m_iTPTNumber; i++) { Sleep(i * 100); std::srand(clock()); for (SizeType j = 0; j < m_iGraphSize; j++) TptreeDataIndices[i][j] = j; std::random_shuffle(TptreeDataIndices[i].begin(), TptreeDataIndices[i].end()); PartitionByTptree<T>(index, TptreeDataIndices[i], 0, m_iGraphSize - 1, TptreeLeafNodes[i]); std::cout << "Finish Getting Leaves for Tree " << i << std::endl; } std::cout << "Parallel TpTree Partition done" << std::endl; for (int i = 0; i < m_iTPTNumber; i++) { #pragma omp parallel for schedule(dynamic) for (SizeType j = 0; j < (SizeType)TptreeLeafNodes[i].size(); j++) { SizeType start_index = TptreeLeafNodes[i][j].first; SizeType end_index = TptreeLeafNodes[i][j].second; if (omp_get_thread_num() == 0) std::cout << "\rProcessing Tree " << i << ' ' << j * 100 / TptreeLeafNodes[i].size() << '%'; for (SizeType x = start_index; x < end_index; x++) { for (SizeType y = x + 1; y <= end_index; y++) { SizeType p1 = TptreeDataIndices[i][x]; SizeType p2 = TptreeDataIndices[i][y]; float dist = index->ComputeDistance(index->GetSample(p1), index->GetSample(p2)); if (idmap != nullptr) { p1 = (idmap->find(p1) == idmap->end()) ? p1 : idmap->at(p1); p2 = (idmap->find(p2) == idmap->end()) ? 
p2 : idmap->at(p2); } COMMON::Utils::AddNeighbor(p2, dist, (m_pNeighborhoodGraph)[p1], (NeighborhoodDists)[p1], m_iNeighborhoodSize); COMMON::Utils::AddNeighbor(p1, dist, (m_pNeighborhoodGraph)[p2], (NeighborhoodDists)[p2], m_iNeighborhoodSize); } } } TptreeDataIndices[i].clear(); TptreeLeafNodes[i].clear(); std::cout << std::endl; } TptreeDataIndices.clear(); TptreeLeafNodes.clear(); } if (m_iMaxCheckForRefineGraph > 0) { RefineGraph<T>(index, idmap); } } template <typename T> void RefineGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) { std::cout << "before RNG, graph acc:" << GraphAccuracyEstimation(index, 100, idmap) << std::endl; m_iCEF *= m_iCEFScale; m_iMaxCheckForRefineGraph *= m_iCEFScale; #pragma omp parallel for schedule(dynamic) for (SizeType i = 0; i < m_iGraphSize; i++) { RefineNode<T>(index, i, false); if (i % 1000 == 0) std::cout << "\rRefine 1 " << (i * 100 / m_iGraphSize) << "%"; } std::cout << "Refine RNG, graph acc:" << GraphAccuracyEstimation(index, 100, idmap) << std::endl; m_iCEF /= m_iCEFScale; m_iMaxCheckForRefineGraph /= m_iCEFScale; m_iNeighborhoodSize /= m_iNeighborhoodScale; #pragma omp parallel for schedule(dynamic) for (SizeType i = 0; i < m_iGraphSize; i++) { RefineNode<T>(index, i, false); if (i % 1000 == 0) std::cout << "\rRefine 2 " << (i * 100 / m_iGraphSize) << "%"; } std::cout << "Refine RNG, graph acc:" << GraphAccuracyEstimation(index, 100, idmap) << std::endl; if (idmap != nullptr) { for (auto iter = idmap->begin(); iter != idmap->end(); iter++) if (iter->first < 0) { m_pNeighborhoodGraph[-1 - iter->first][m_iNeighborhoodSize - 1] = -2 - iter->second; } } } template <typename T> ErrorCode RefineGraph(VectorIndex* index, std::vector<SizeType>& indices, std::vector<SizeType>& reverseIndices, std::ostream& output, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) { SizeType R = (SizeType)indices.size(); #pragma omp parallel for schedule(dynamic) for (SizeType i = 0; i < R; i++) { RefineNode<T>(index, indices[i], false); SizeType* nodes = m_pNeighborhoodGraph[indices[i]]; for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) { if (nodes[j] < 0) nodes[j] = -1; else nodes[j] = reverseIndices[nodes[j]]; } if (idmap == nullptr || idmap->find(-1 - indices[i]) == idmap->end()) continue; nodes[m_iNeighborhoodSize - 1] = -2 - idmap->at(-1 - indices[i]); } m_pNeighborhoodGraph.Refine(indices, output); return ErrorCode::Success; } template <typename T> void RefineNode(VectorIndex* index, const SizeType node, bool updateNeighbors) { COMMON::QueryResultSet<T> query((const T*)index->GetSample(node), m_iCEF + 1); index->SearchIndex(query); RebuildNeighbors(index, node, m_pNeighborhoodGraph[node], query.GetResults(), m_iCEF + 1); if (updateNeighbors) { // update neighbors for (int j = 0; j <= m_iCEF; j++) { BasicResult* item = query.GetResult(j); if (item->VID < 0) break; if (item->VID == node) continue; std::lock_guard<std::mutex> lock(m_dataUpdateLock[item->VID]); InsertNeighbors(index, item->VID, node, item->Dist); } } } template <typename T> void PartitionByTptree(VectorIndex* index, std::vector<SizeType>& indices, const SizeType first, const SizeType last, std::vector<std::pair<SizeType, SizeType>> & leaves) { if (last - first <= m_iTPTLeafSize) { leaves.push_back(std::make_pair(first, last)); } else { std::vector<float> Mean(index->GetFeatureDim(), 0); int iIteration = 100; SizeType end = min(first + m_iSamples, last); SizeType count = end - first + 1; // calculate the mean of each dimension for (SizeType j 
= first; j <= end; j++) { const T* v = (const T*)index->GetSample(indices[j]); for (DimensionType k = 0; k < index->GetFeatureDim(); k++) { Mean[k] += v[k]; } } for (DimensionType k = 0; k < index->GetFeatureDim(); k++) { Mean[k] /= count; } std::vector<BasicResult> Variance; Variance.reserve(index->GetFeatureDim()); for (DimensionType j = 0; j < index->GetFeatureDim(); j++) { Variance.push_back(BasicResult(j, 0)); } // calculate the variance of each dimension for (SizeType j = first; j <= end; j++) { const T* v = (const T*)index->GetSample(indices[j]); for (DimensionType k = 0; k < index->GetFeatureDim(); k++) { float dist = v[k] - Mean[k]; Variance[k].Dist += dist*dist; } } std::sort(Variance.begin(), Variance.end(), COMMON::Compare); std::vector<SizeType> indexs(m_numTopDimensionTPTSplit); std::vector<float> weight(m_numTopDimensionTPTSplit), bestweight(m_numTopDimensionTPTSplit); float bestvariance = Variance[index->GetFeatureDim() - 1].Dist; for (int i = 0; i < m_numTopDimensionTPTSplit; i++) { indexs[i] = Variance[index->GetFeatureDim() - 1 - i].VID; bestweight[i] = 0; } bestweight[0] = 1; float bestmean = Mean[indexs[0]]; std::vector<float> Val(count); for (int i = 0; i < iIteration; i++) { float sumweight = 0; for (int j = 0; j < m_numTopDimensionTPTSplit; j++) { weight[j] = float(rand() % 10000) / 5000.0f - 1.0f; sumweight += weight[j] * weight[j]; } sumweight = sqrt(sumweight); for (int j = 0; j < m_numTopDimensionTPTSplit; j++) { weight[j] /= sumweight; } float mean = 0; for (SizeType j = 0; j < count; j++) { Val[j] = 0; const T* v = (const T*)index->GetSample(indices[first + j]); for (int k = 0; k < m_numTopDimensionTPTSplit; k++) { Val[j] += weight[k] * v[indexs[k]]; } mean += Val[j]; } mean /= count; float var = 0; for (SizeType j = 0; j < count; j++) { float dist = Val[j] - mean; var += dist * dist; } if (var > bestvariance) { bestvariance = var; bestmean = mean; for (int j = 0; j < m_numTopDimensionTPTSplit; j++) { bestweight[j] = weight[j]; } } } SizeType i = first; SizeType j = last; // decide which child one point belongs while (i <= j) { float val = 0; const T* v = (const T*)index->GetSample(indices[i]); for (int k = 0; k < m_numTopDimensionTPTSplit; k++) { val += bestweight[k] * v[indexs[k]]; } if (val < bestmean) { i++; } else { std::swap(indices[i], indices[j]); j--; } } // if all the points in the node are equal,equally split the node into 2 if ((i == first) || (i == last + 1)) { i = (first + last + 1) / 2; } Mean.clear(); Variance.clear(); Val.clear(); indexs.clear(); weight.clear(); bestweight.clear(); PartitionByTptree<T>(index, indices, first, i - 1, leaves); PartitionByTptree<T>(index, indices, i, last, leaves); } } inline std::uint64_t BufferSize() const { return m_pNeighborhoodGraph.BufferSize(); } bool LoadGraph(std::string sGraphFilename) { if (!m_pNeighborhoodGraph.Load(sGraphFilename)) return false; m_iGraphSize = m_pNeighborhoodGraph.R(); m_iNeighborhoodSize = m_pNeighborhoodGraph.C(); m_dataUpdateLock.resize(m_iGraphSize); return true; } bool LoadGraph(char* pGraphMemFile) { m_pNeighborhoodGraph.Load(pGraphMemFile); m_iGraphSize = m_pNeighborhoodGraph.R(); m_iNeighborhoodSize = m_pNeighborhoodGraph.C(); m_dataUpdateLock.resize(m_iGraphSize); return true; } bool SaveGraph(std::string sGraphFilename) const { return m_pNeighborhoodGraph.Save(sGraphFilename); } bool SaveGraph(std::ostream& output) const { return m_pNeighborhoodGraph.Save(output); } inline ErrorCode AddBatch(SizeType num) { ErrorCode ret = m_pNeighborhoodGraph.AddBatch(num); if (ret != 
ErrorCode::Success) return ret; m_iGraphSize += num; m_dataUpdateLock.resize(m_iGraphSize); return ErrorCode::Success; } inline SizeType* operator[](SizeType index) { return m_pNeighborhoodGraph[index]; } inline const SizeType* operator[](SizeType index) const { return m_pNeighborhoodGraph[index]; } inline void SetR(SizeType rows) { m_pNeighborhoodGraph.SetR(rows); m_iGraphSize = rows; m_dataUpdateLock.resize(m_iGraphSize); } inline SizeType R() const { return m_iGraphSize; } static std::shared_ptr<NeighborhoodGraph> CreateInstance(std::string type); protected: // Graph structure SizeType m_iGraphSize; COMMON::Dataset<SizeType> m_pNeighborhoodGraph; COMMON::FineGrainedLock m_dataUpdateLock; // protect one row of the graph public: int m_iTPTNumber, m_iTPTLeafSize, m_iSamples, m_numTopDimensionTPTSplit; DimensionType m_iNeighborhoodSize; int m_iNeighborhoodScale, m_iCEFScale, m_iRefineIter, m_iCEF, m_iMaxCheckForRefineGraph; }; } } #endif
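The heart of BuildGraph is PartitionByTptree: it estimates per-dimension variance over a sample, restricts itself to the m_numTopDimensionTPTSplit highest-variance dimensions, draws random weight vectors over them, and keeps the direction whose 1-D projection spreads the sample the most; points are then routed left or right of the projected mean. The sketch below condenses that direction search into a self-contained function over plain float vectors; the Split and chooseSplit names are mine, not SPTAG's:

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <numeric>
#include <vector>

struct Split {
    std::vector<int> dims;       // the k chosen dimensions
    std::vector<float> weights;  // unit direction over those dimensions
    float mean;                  // split threshold in projected space
};

static Split chooseSplit(const std::vector<std::vector<float>> &points,
                         int k, int iterations)
{
    assert(!points.empty() && k <= (int)points[0].size());
    const size_t n = points.size(), d = points[0].size();

    // per-dimension mean and (unnormalized) variance, as in the original
    std::vector<float> mean(d, 0.f), var(d, 0.f);
    for (const auto &p : points)
        for (size_t j = 0; j < d; ++j) mean[j] += p[j];
    for (size_t j = 0; j < d; ++j) mean[j] /= n;
    for (const auto &p : points)
        for (size_t j = 0; j < d; ++j) { float t = p[j] - mean[j]; var[j] += t * t; }

    // indices of the k highest-variance dimensions
    std::vector<int> dims(d);
    std::iota(dims.begin(), dims.end(), 0);
    std::partial_sort(dims.begin(), dims.begin() + k, dims.end(),
                      [&](int a, int b) { return var[a] > var[b]; });
    dims.resize(k);

    // seed with the axis-aligned direction along the top-variance dimension
    Split best{dims, std::vector<float>(k, 0.f), mean[dims[0]]};
    best.weights[0] = 1.f;
    float bestVar = var[dims[0]];

    std::vector<float> w(k), proj(n);
    for (int it = 0; it < iterations; ++it) {
        float norm = 0.f;
        for (int j = 0; j < k; ++j) {
            w[j] = float(rand() % 10000) / 5000.f - 1.f;  // same RNG scheme as the original
            norm += w[j] * w[j];
        }
        norm = std::sqrt(norm);
        if (norm == 0.f) continue;                        // degenerate draw, retry
        float m = 0.f;
        for (size_t i = 0; i < n; ++i) {
            proj[i] = 0.f;
            for (int j = 0; j < k; ++j) proj[i] += (w[j] / norm) * points[i][dims[j]];
            m += proj[i];
        }
        m /= n;
        float v = 0.f;
        for (size_t i = 0; i < n; ++i) { float t = proj[i] - m; v += t * t; }
        if (v > bestVar) {                                // most spread-out projection wins
            bestVar = v; best.mean = m;
            for (int j = 0; j < k; ++j) best.weights[j] = w[j] / norm;
        }
    }
    return best;  // callers partition points by projection < best.mean
}

The recursion then applies the same search to each half until a node holds at most m_iTPTLeafSize points, and only point pairs inside the same leaf are ever compared, which keeps neighbor-candidate generation far below all-pairs cost.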
tree-vect-loop.c
/* Loop Vectorization Copyright (C) 2003-2018 Free Software Foundation, Inc. Contributed by Dorit Naishlos <dorit@il.ibm.com> and Ira Rosen <irar@il.ibm.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "backend.h" #include "target.h" #include "rtl.h" #include "tree.h" #include "gimple.h" #include "cfghooks.h" #include "tree-pass.h" #include "ssa.h" #include "optabs-tree.h" #include "diagnostic-core.h" #include "fold-const.h" #include "stor-layout.h" #include "cfganal.h" #include "gimplify.h" #include "gimple-iterator.h" #include "gimplify-me.h" #include "tree-ssa-loop-ivopts.h" #include "tree-ssa-loop-manip.h" #include "tree-ssa-loop-niter.h" #include "tree-ssa-loop.h" #include "cfgloop.h" #include "params.h" #include "tree-scalar-evolution.h" #include "tree-vectorizer.h" #include "gimple-fold.h" #include "cgraph.h" #include "tree-cfg.h" #include "tree-if-conv.h" #include "internal-fn.h" #include "tree-vector-builder.h" #include "vec-perm-indices.h" #include "tree-eh.h" /* Loop Vectorization Pass. This pass tries to vectorize loops. For example, the vectorizer transforms the following simple loop: short a[N]; short b[N]; short c[N]; int i; for (i=0; i<N; i++){ a[i] = b[i] + c[i]; } as if it was manually vectorized by rewriting the source code into: typedef int __attribute__((mode(V8HI))) v8hi; short a[N]; short b[N]; short c[N]; int i; v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c; v8hi va, vb, vc; for (i=0; i<N/8; i++){ vb = pb[i]; vc = pc[i]; va = vb + vc; pa[i] = va; } The main entry to this pass is vectorize_loops(), in which the vectorizer applies a set of analyses on a given set of loops, followed by the actual vectorization transformation for the loops that had successfully passed the analysis phase. Throughout this pass we make a distinction between two types of data: scalars (which are represented by SSA_NAMES), and memory references ("data-refs"). These two types of data require different handling both during analysis and transformation. The types of data-refs that the vectorizer currently supports are ARRAY_REFS which base is an array DECL (not a pointer), and INDIRECT_REFS through pointers; both array and pointer accesses are required to have a simple (consecutive) access pattern. Analysis phase: =============== The driver for the analysis phase is vect_analyze_loop(). It applies a set of analyses, some of which rely on the scalar evolution analyzer (scev) developed by Sebastian Pop. During the analysis phase the vectorizer records some information per stmt in a "stmt_vec_info" struct which is attached to each stmt in the loop, as well as general information about the loop as a whole, which is recorded in a "loop_vec_info" struct attached to each loop. 
Transformation phase: ===================== The loop transformation phase scans all the stmts in the loop, and creates a vector stmt (or a sequence of stmts) for each scalar stmt S in the loop that needs to be vectorized. It inserts the vector code sequence just before the scalar stmt S, and records a pointer to the vector code in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct attached to S). This pointer will be used for the vectorization of following stmts which use the def of stmt S. Stmt S is removed if it writes to memory; otherwise, we rely on dead code elimination for removing it. For example, say stmt S1 was vectorized into stmt VS1: VS1: vb = px[i]; S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1 S2: a = b; To vectorize stmt S2, the vectorizer first finds the stmt that defines the operand 'b' (S1), and gets the relevant vector def 'vb' from the vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The resulting sequence would be: VS1: vb = px[i]; S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1 VS2: va = vb; S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2 Operands that are not SSA_NAMEs, are data-refs that appear in load/store operations (like 'x[i]' in S1), and are handled differently. Target modeling: ================= Currently the only target specific information that is used is the size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". Targets that can support different sizes of vectors, for now will need to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More flexibility will be added in the future. Since we only vectorize operations which vector form can be expressed using existing tree codes, to verify that an operation is supported, the vectorizer checks the relevant optab at the relevant machine_mode (e.g, optab_handler (add_optab, V8HImode)). If the value found is CODE_FOR_nothing, then there's no target support, and we can't vectorize the stmt. For additional information on this project see: http://gcc.gnu.org/projects/tree-ssa/vectorization.html */ static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *); /* Function vect_determine_vectorization_factor Determine the vectorization factor (VF). VF is the number of data elements that are operated upon in parallel in a single iteration of the vectorized loop. For example, when vectorizing a loop that operates on 4byte elements, on a target with vector size (VS) 16byte, the VF is set to 4, since 4 elements can fit in a single vector register. We currently support vectorization of loops in which all types operated upon are of the same size. Therefore this function currently sets VF according to the size of the types operated upon, and fails if there are multiple sizes in the loop. 
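That is: with 16-byte vectors, a loop operating only on 2-byte shorts yields VF = 8, matching the v8hi example at the top of this file, whereas statements mixing 2-byte short and 4-byte int elements would suggest conflicting factors (8 vs. 4).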
VF is also the factor by which the loop iterations are strip-mined, e.g.: original loop: for (i=0; i<N; i++){ a[i] = b[i] + c[i]; } vectorized loop: for (i=0; i<N; i+=VF){ a[i:VF] = b[i:VF] + c[i:VF]; } */ static bool vect_determine_vectorization_factor (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); unsigned nbbs = loop->num_nodes; poly_uint64 vectorization_factor = 1; tree scalar_type = NULL_TREE; gphi *phi; tree vectype; stmt_vec_info stmt_info; unsigned i; HOST_WIDE_INT dummy; gimple *stmt, *pattern_stmt = NULL; gimple_seq pattern_def_seq = NULL; gimple_stmt_iterator pattern_def_si = gsi_none (); bool analyze_pattern_stmt = false; bool bool_result; auto_vec<stmt_vec_info> mask_producers; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_determine_vectorization_factor ===\n"); for (i = 0; i < nbbs; i++) { basic_block bb = bbs[i]; for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) { phi = si.phi (); stmt_info = vinfo_for_stmt (phi); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); } gcc_assert (stmt_info); if (STMT_VINFO_RELEVANT_P (stmt_info) || STMT_VINFO_LIVE_P (stmt_info)) { gcc_assert (!STMT_VINFO_VECTYPE (stmt_info)); scalar_type = TREE_TYPE (PHI_RESULT (phi)); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "get vectype for scalar type: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); dump_printf (MSG_NOTE, "\n"); } vectype = get_vectype_for_scalar_type (scalar_type); if (!vectype) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unsupported " "data-type "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } STMT_VINFO_VECTYPE (stmt_info) = vectype; if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype); dump_printf (MSG_NOTE, "\n"); } if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "nunits = "); dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype)); dump_printf (MSG_NOTE, "\n"); } vect_update_max_nunits (&vectorization_factor, vectype); } } for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si) || analyze_pattern_stmt;) { tree vf_vectype; if (analyze_pattern_stmt) stmt = pattern_stmt; else stmt = gsi_stmt (si); stmt_info = vinfo_for_stmt (stmt); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); } gcc_assert (stmt_info); /* Skip stmts which do not need to be vectorized. 
*/ if ((!STMT_VINFO_RELEVANT_P (stmt_info) && !STMT_VINFO_LIVE_P (stmt_info)) || gimple_clobber_p (stmt)) { if (STMT_VINFO_IN_PATTERN_P (stmt_info) && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info)) && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) { stmt = pattern_stmt; stmt_info = vinfo_for_stmt (pattern_stmt); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "==> examining pattern statement: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); } } else { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "skip.\n"); gsi_next (&si); continue; } } else if (STMT_VINFO_IN_PATTERN_P (stmt_info) && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info)) && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) analyze_pattern_stmt = true; /* If a pattern statement has def stmts, analyze them too. */ if (is_pattern_stmt_p (stmt_info)) { if (pattern_def_seq == NULL) { pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info); pattern_def_si = gsi_start (pattern_def_seq); } else if (!gsi_end_p (pattern_def_si)) gsi_next (&pattern_def_si); if (pattern_def_seq != NULL) { gimple *pattern_def_stmt = NULL; stmt_vec_info pattern_def_stmt_info = NULL; while (!gsi_end_p (pattern_def_si)) { pattern_def_stmt = gsi_stmt (pattern_def_si); pattern_def_stmt_info = vinfo_for_stmt (pattern_def_stmt); if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info) || STMT_VINFO_LIVE_P (pattern_def_stmt_info)) break; gsi_next (&pattern_def_si); } if (!gsi_end_p (pattern_def_si)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "==> examining pattern def stmt: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0); } stmt = pattern_def_stmt; stmt_info = pattern_def_stmt_info; } else { pattern_def_si = gsi_none (); analyze_pattern_stmt = false; } } else analyze_pattern_stmt = false; } if (gimple_get_lhs (stmt) == NULL_TREE /* MASK_STORE has no lhs, but is ok. */ && (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt) || gimple_call_internal_fn (stmt) != IFN_MASK_STORE)) { if (is_gimple_call (stmt)) { /* Ignore calls with no lhs. These must be calls to #pragma omp simd functions, and what vectorization factor it really needs can't be determined until vectorizable_simd_clone_call. */ if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si)) { pattern_def_seq = NULL; gsi_next (&si); } continue; } if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: irregular stmt."); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); } return false; } if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt)))) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: vector stmt in loop:"); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); } return false; } bool_result = false; if (STMT_VINFO_VECTYPE (stmt_info)) { /* The only case when a vectype had been already set is for stmts that contain a dataref, or for "pattern-stmts" (stmts generated by the vectorizer to represent/replace a certain idiom). 
*/ gcc_assert (STMT_VINFO_DATA_REF (stmt_info) || is_pattern_stmt_p (stmt_info) || !gsi_end_p (pattern_def_si)); vectype = STMT_VINFO_VECTYPE (stmt_info); } else { gcc_assert (!STMT_VINFO_DATA_REF (stmt_info)); if (gimple_call_internal_p (stmt, IFN_MASK_STORE)) scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3)); else scalar_type = TREE_TYPE (gimple_get_lhs (stmt)); /* Bool ops don't participate in vectorization factor computation. For comparison use compared types to compute a factor. */ if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type) && is_gimple_assign (stmt) && gimple_assign_rhs_code (stmt) != COND_EXPR) { if (STMT_VINFO_RELEVANT_P (stmt_info) || STMT_VINFO_LIVE_P (stmt_info)) mask_producers.safe_push (stmt_info); bool_result = true; if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))) scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt)); else { if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si)) { pattern_def_seq = NULL; gsi_next (&si); } continue; } } if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "get vectype for scalar type: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); dump_printf (MSG_NOTE, "\n"); } vectype = get_vectype_for_scalar_type (scalar_type); if (!vectype) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unsupported " "data-type "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } if (!bool_result) STMT_VINFO_VECTYPE (stmt_info) = vectype; if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype); dump_printf (MSG_NOTE, "\n"); } } /* Don't try to compute VF out scalar types if we stmt produces boolean vector. Use result vectype instead. */ if (VECTOR_BOOLEAN_TYPE_P (vectype)) vf_vectype = vectype; else { /* The vectorization factor is according to the smallest scalar type (or the largest vector size, but we only support one vector size per loop). 
*/ if (!bool_result) scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "get vectype for scalar type: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); dump_printf (MSG_NOTE, "\n"); } vf_vectype = get_vectype_for_scalar_type (scalar_type); } if (!vf_vectype) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unsupported data-type "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)), GET_MODE_SIZE (TYPE_MODE (vf_vectype)))) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: different sized vector " "types in statement, "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype); dump_printf (MSG_MISSED_OPTIMIZATION, " and "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vf_vectype); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype); dump_printf (MSG_NOTE, "\n"); } if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "nunits = "); dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vf_vectype)); dump_printf (MSG_NOTE, "\n"); } vect_update_max_nunits (&vectorization_factor, vf_vectype); if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si)) { pattern_def_seq = NULL; gsi_next (&si); } } } /* TODO: Analyze cost. Decide if worth while to vectorize. */ if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = "); dump_dec (MSG_NOTE, vectorization_factor); dump_printf (MSG_NOTE, "\n"); } if (known_le (vectorization_factor, 1U)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unsupported data-type\n"); return false; } LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor; for (i = 0; i < mask_producers.length (); i++) { tree mask_type = NULL; stmt = STMT_VINFO_STMT (mask_producers[i]); if (is_gimple_assign (stmt) && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))) { scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt)); mask_type = get_mask_type_for_scalar_type (scalar_type); if (!mask_type) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unsupported mask\n"); return false; } } else { tree rhs; ssa_op_iter iter; gimple *def_stmt; enum vect_def_type dt; FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE) { if (!vect_is_simple_use (rhs, mask_producers[i]->vinfo, &def_stmt, &dt, &vectype)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: can't compute mask type " "for statement, "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); } return false; } /* No vectype probably means external definition. Allow it in case there is another operand which allows to determine mask type. 
*/ if (!vectype) continue; if (!mask_type) mask_type = vectype; else if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type), TYPE_VECTOR_SUBPARTS (vectype))) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: different sized masks " "types in statement, "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_type); dump_printf (MSG_MISSED_OPTIMIZATION, " and "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } else if (VECTOR_BOOLEAN_TYPE_P (mask_type) != VECTOR_BOOLEAN_TYPE_P (vectype)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: mixed mask and " "nonmask vector types in statement, "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_type); dump_printf (MSG_MISSED_OPTIMIZATION, " and "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } } /* We may compare boolean value loaded as vector of integers. Fix mask_type in such case. */ if (mask_type && !VECTOR_BOOLEAN_TYPE_P (mask_type) && gimple_code (stmt) == GIMPLE_ASSIGN && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison) mask_type = build_same_sized_truth_vector_type (mask_type); } /* No mask_type should mean loop invariant predicate. This is probably a subject for optimization in if-conversion. */ if (!mask_type) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: can't compute mask type " "for statement, "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); } return false; } STMT_VINFO_VECTYPE (mask_producers[i]) = mask_type; } return true; } /* Function vect_is_simple_iv_evolution. FORNOW: A simple evolution of an induction variables in the loop is considered a polynomial evolution. */ static bool vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init, tree * step) { tree init_expr; tree step_expr; tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb); basic_block bb; /* When there is no evolution in this loop, the evolution function is not "simple". */ if (evolution_part == NULL_TREE) return false; /* When the evolution is a polynomial of degree >= 2 the evolution function is not "simple". */ if (tree_is_chrec (evolution_part)) return false; step_expr = evolution_part; init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb)); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "step: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr); dump_printf (MSG_NOTE, ", init: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr); dump_printf (MSG_NOTE, "\n"); } *init = init_expr; *step = step_expr; if (TREE_CODE (step_expr) != INTEGER_CST && (TREE_CODE (step_expr) != SSA_NAME || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr))) && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb)) || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr)) && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)) || !flag_associative_math))) && (TREE_CODE (step_expr) != REAL_CST || !flag_associative_math)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "step unknown.\n"); return false; } return true; } /* Function vect_analyze_scalar_cycles_1. Examine the cross iteration def-use cycles of scalar variables in LOOP. 
LOOP_VINFO represents the loop that is now being considered for vectorization (can be LOOP, or an outer-loop enclosing LOOP). */ static void vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop) { basic_block bb = loop->header; tree init, step; auto_vec<gimple *, 64> worklist; gphi_iterator gsi; bool double_reduc; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_scalar_cycles ===\n"); /* First - identify all inductions. Reduction detection assumes that all the inductions have been identified, therefore, this order must not be changed. */ for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gphi *phi = gsi.phi (); tree access_fn = NULL; tree def = PHI_RESULT (phi); stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); } /* Skip virtual phi's. The data dependences that are associated with virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */ if (virtual_operand_p (def)) continue; STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type; /* Analyze the evolution function. */ access_fn = analyze_scalar_evolution (loop, def); if (access_fn) { STRIP_NOPS (access_fn); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Access function of PHI: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn); dump_printf (MSG_NOTE, "\n"); } STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo) = initial_condition_in_loop_num (access_fn, loop->num); STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) = evolution_part_in_loop_num (access_fn, loop->num); } if (!access_fn || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step) || (LOOP_VINFO_LOOP (loop_vinfo) != loop && TREE_CODE (step) != INTEGER_CST)) { worklist.safe_push (phi); continue; } gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo) != NULL_TREE); gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n"); STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def; } /* Second - identify all reductions and nested cycles. 
*/ while (worklist.length () > 0) { gimple *phi = worklist.pop (); tree def = PHI_RESULT (phi); stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi); gimple *reduc_stmt; if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); } gcc_assert (!virtual_operand_p (def) && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type); reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, &double_reduc, false); if (reduc_stmt) { if (double_reduc) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Detected double reduction.\n"); STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def; STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) = vect_double_reduction_def; } else { if (loop != LOOP_VINFO_LOOP (loop_vinfo)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Detected vectorizable nested cycle.\n"); STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle; STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) = vect_nested_cycle; } else { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Detected reduction.\n"); STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def; STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) = vect_reduction_def; /* Store the reduction cycles for possible vectorization in loop-aware SLP if it was not detected as reduction chain. */ if (! GROUP_FIRST_ELEMENT (vinfo_for_stmt (reduc_stmt))) LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt); } } } else if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "Unknown def-use cycle pattern.\n"); } } /* Function vect_analyze_scalar_cycles. Examine the cross iteration def-use cycles of scalar variables, by analyzing the loop-header PHIs of scalar variables. Classify each cycle as one of the following: invariant, induction, reduction, unknown. We do that for the loop represented by LOOP_VINFO, and also to its inner-loop, if exists. Examples for scalar cycles: Example1: reduction: loop1: for (i=0; i<N; i++) sum += a[i]; Example2: induction: loop2: for (i=0; i<N; i++) a[i] = i; */ static void vect_analyze_scalar_cycles (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); vect_analyze_scalar_cycles_1 (loop_vinfo, loop); /* When vectorizing an outer-loop, the inner-loop is executed sequentially. Reductions in such inner-loop therefore have different properties than the reductions in the nest that gets vectorized: 1. When vectorized, they are executed in the same order as in the original scalar loop, so we can't change the order of computation when vectorizing them. 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the current checks are too strict. */ if (loop->inner) vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner); } /* Transfer group and reduction information from STMT to its pattern stmt. 
*/ static void vect_fixup_reduc_chain (gimple *stmt) { gimple *firstp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)); gimple *stmtp; gcc_assert (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (firstp)) && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))); GROUP_SIZE (vinfo_for_stmt (firstp)) = GROUP_SIZE (vinfo_for_stmt (stmt)); do { stmtp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)); GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmtp)) = firstp; stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)); if (stmt) GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmtp)) = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)); } while (stmt); STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmtp)) = vect_reduction_def; } /* Fixup scalar cycles that now have their stmts detected as patterns. */ static void vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo) { gimple *first; unsigned i; FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first) if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (first))) { gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first)); while (next) { if (! STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))) break; next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next)); } /* If not all stmt in the chain are patterns try to handle the chain without patterns. */ if (! next) { vect_fixup_reduc_chain (first); LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i] = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first)); } } } /* Function vect_get_loop_niters. Determine how many iterations the loop is executed and place it in NUMBER_OF_ITERATIONS. Place the number of latch iterations in NUMBER_OF_ITERATIONSM1. Place the condition under which the niter information holds in ASSUMPTIONS. Return the loop exit condition. */ static gcond * vect_get_loop_niters (struct loop *loop, tree *assumptions, tree *number_of_iterations, tree *number_of_iterationsm1) { edge exit = single_exit (loop); struct tree_niter_desc niter_desc; tree niter_assumptions, niter, may_be_zero; gcond *cond = get_loop_exit_condition (loop); *assumptions = boolean_true_node; *number_of_iterationsm1 = chrec_dont_know; *number_of_iterations = chrec_dont_know; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== get_loop_niters ===\n"); if (!exit) return cond; niter = chrec_dont_know; may_be_zero = NULL_TREE; niter_assumptions = boolean_true_node; if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL) || chrec_contains_undetermined (niter_desc.niter)) return cond; niter_assumptions = niter_desc.assumptions; may_be_zero = niter_desc.may_be_zero; niter = niter_desc.niter; if (may_be_zero && integer_zerop (may_be_zero)) may_be_zero = NULL_TREE; if (may_be_zero) { if (COMPARISON_CLASS_P (may_be_zero)) { /* Try to combine may_be_zero with assumptions, this can simplify computation of niter expression. 
*/ if (niter_assumptions && !integer_nonzerop (niter_assumptions)) niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, niter_assumptions, fold_build1 (TRUTH_NOT_EXPR, boolean_type_node, may_be_zero)); else niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero, build_int_cst (TREE_TYPE (niter), 0), rewrite_to_non_trapping_overflow (niter)); may_be_zero = NULL_TREE; } else if (integer_nonzerop (may_be_zero)) { *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0); *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1); return cond; } else return cond; } *assumptions = niter_assumptions; *number_of_iterationsm1 = niter; /* We want the number of loop header executions which is the number of latch executions plus one. ??? For UINT_MAX latch executions this number overflows to zero for loops like do { n++; } while (n != 0); */ if (niter && !chrec_contains_undetermined (niter)) niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter), build_int_cst (TREE_TYPE (niter), 1)); *number_of_iterations = niter; return cond; } /* Function bb_in_loop_p Used as predicate for dfs order traversal of the loop bbs. */ static bool bb_in_loop_p (const_basic_block bb, const void *data) { const struct loop *const loop = (const struct loop *)data; if (flow_bb_inside_loop_p (loop, bb)) return true; return false; } /* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as stmt_vec_info structs for all the stmts in LOOP_IN. */ _loop_vec_info::_loop_vec_info (struct loop *loop_in) : vec_info (vec_info::loop, init_cost (loop_in)), loop (loop_in), bbs (XCNEWVEC (basic_block, loop->num_nodes)), num_itersm1 (NULL_TREE), num_iters (NULL_TREE), num_iters_unchanged (NULL_TREE), num_iters_assumptions (NULL_TREE), th (0), versioning_threshold (0), vectorization_factor (0), max_vectorization_factor (0), mask_skip_niters (NULL_TREE), mask_compare_type (NULL_TREE), unaligned_dr (NULL), peeling_for_alignment (0), ptr_mask (0), ivexpr_map (NULL), slp_unrolling_factor (1), single_scalar_iteration_cost (0), vectorizable (false), can_fully_mask_p (true), fully_masked_p (false), peeling_for_gaps (false), peeling_for_niter (false), operands_swapped (false), no_data_dependencies (false), has_mask_store (false), scalar_loop (NULL), orig_loop_info (NULL) { /* Create/Update stmt_info for all stmts in the loop. */ basic_block *body = get_loop_body (loop); for (unsigned int i = 0; i < loop->num_nodes; i++) { basic_block bb = body[i]; gimple_stmt_iterator si; for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) { gimple *phi = gsi_stmt (si); gimple_set_uid (phi, 0); set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, this)); } for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) { gimple *stmt = gsi_stmt (si); gimple_set_uid (stmt, 0); set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, this)); } } free (body); /* CHECKME: We want to visit all BBs before their successors (except for latch blocks, for which this assertion wouldn't hold). In the simple case of the loop forms we allow, a dfs order of the BBs would the same as reversed postorder traversal, so we are safe. */ unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p, bbs, loop->num_nodes, loop); gcc_assert (nbbs == loop->num_nodes); } /* Free all levels of MASKS. 
*/ void release_vec_loop_masks (vec_loop_masks *masks) { rgroup_masks *rgm; unsigned int i; FOR_EACH_VEC_ELT (*masks, i, rgm) rgm->masks.release (); masks->release (); } /* Free all memory used by the _loop_vec_info, as well as all the stmt_vec_info structs of all the stmts in the loop. */ _loop_vec_info::~_loop_vec_info () { int nbbs; gimple_stmt_iterator si; int j; nbbs = loop->num_nodes; for (j = 0; j < nbbs; j++) { basic_block bb = bbs[j]; for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) free_stmt_vec_info (gsi_stmt (si)); for (si = gsi_start_bb (bb); !gsi_end_p (si); ) { gimple *stmt = gsi_stmt (si); /* We may have broken canonical form by moving a constant into RHS1 of a commutative op. Fix such occurrences. */ if (operands_swapped && is_gimple_assign (stmt)) { enum tree_code code = gimple_assign_rhs_code (stmt); if ((code == PLUS_EXPR || code == POINTER_PLUS_EXPR || code == MULT_EXPR) && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt))) swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt), gimple_assign_rhs2_ptr (stmt)); else if (code == COND_EXPR && CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt))) { tree cond_expr = gimple_assign_rhs1 (stmt); enum tree_code cond_code = TREE_CODE (cond_expr); if (TREE_CODE_CLASS (cond_code) == tcc_comparison) { bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0)); cond_code = invert_tree_comparison (cond_code, honor_nans); if (cond_code != ERROR_MARK) { TREE_SET_CODE (cond_expr, cond_code); swap_ssa_operands (stmt, gimple_assign_rhs2_ptr (stmt), gimple_assign_rhs3_ptr (stmt)); } } } } /* Free stmt_vec_info. */ free_stmt_vec_info (stmt); gsi_next (&si); } } free (bbs); release_vec_loop_masks (&masks); delete ivexpr_map; loop->aux = NULL; } /* Return an invariant or register for EXPR and emit necessary computations in the LOOP_VINFO loop preheader. */ tree cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr) { if (is_gimple_reg (expr) || is_gimple_min_invariant (expr)) return expr; if (! loop_vinfo->ivexpr_map) loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>; tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr); if (! cached) { gimple_seq stmts = NULL; cached = force_gimple_operand (unshare_expr (expr), &stmts, true, NULL_TREE); if (stmts) { edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo)); gsi_insert_seq_on_edge_immediate (e, stmts); } } return cached; } /* Return true if we can use CMP_TYPE as the comparison type to produce all masks required to mask LOOP_VINFO. */ static bool can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type) { rgroup_masks *rgm; unsigned int i; FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm) if (rgm->mask_type != NULL_TREE && !direct_internal_fn_supported_p (IFN_WHILE_ULT, cmp_type, rgm->mask_type, OPTIMIZE_FOR_SPEED)) return false; return true; } /* Calculate the maximum number of scalars per iteration for every rgroup in LOOP_VINFO. */ static unsigned int vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo) { unsigned int res = 1; unsigned int i; rgroup_masks *rgm; FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm) res = MAX (res, rgm->max_nscalars_per_iter); return res; } /* Each statement in LOOP_VINFO can be masked where necessary. Check whether we can actually generate the masks required. Return true if so, storing the type of the scalar IV in LOOP_VINFO_MASK_COMPARE_TYPE. 
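For example (illustrative numbers): a loop with at most 1000 iterations whose largest rgroup handles 4 scalars per iteration must count up to 4000, so the comparison type needs at least 12 bits; any wider mode that supports WHILE_ULT for all rgroups also works, and widening up to Pmode is preferred because such operands are more easily reused in address calculations. 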
*/ static bool vect_verify_full_masking (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); unsigned int min_ni_width; /* Use a normal loop if there are no statements that need masking. This only happens in rare degenerate cases: it means that the loop has no loads, no stores, and no live-out values. */ if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ()) return false; /* Get the maximum number of iterations that is representable in the counter type. */ tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo)); widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1; /* Get a more refined estimate for the number of iterations. */ widest_int max_back_edges; if (max_loop_iterations (loop, &max_back_edges)) max_ni = wi::smin (max_ni, max_back_edges + 1); /* Account for rgroup masks, in which each bit is replicated N times. */ max_ni *= vect_get_max_nscalars_per_iter (loop_vinfo); /* Work out how many bits we need to represent the limit. */ min_ni_width = wi::min_precision (max_ni, UNSIGNED); /* Find a scalar mode for which WHILE_ULT is supported. */ opt_scalar_int_mode cmp_mode_iter; tree cmp_type = NULL_TREE; FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT) { unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ()); if (cmp_bits >= min_ni_width && targetm.scalar_mode_supported_p (cmp_mode_iter.require ())) { tree this_type = build_nonstandard_integer_type (cmp_bits, true); if (this_type && can_produce_all_loop_masks_p (loop_vinfo, this_type)) { /* Although we could stop as soon as we find a valid mode, it's often better to continue until we hit Pmode, since the operands to the WHILE are more likely to be reusable in address calculations. */ cmp_type = this_type; if (cmp_bits >= GET_MODE_BITSIZE (Pmode)) break; } } } if (!cmp_type) return false; LOOP_VINFO_MASK_COMPARE_TYPE (loop_vinfo) = cmp_type; return true; } /* Calculate the cost of one scalar iteration of the loop. */ static void vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); int nbbs = loop->num_nodes, factor; int innerloop_iters, i; /* Gather costs for statements in the scalar loop. */ /* FORNOW. */ innerloop_iters = 1; if (loop->inner) innerloop_iters = 50; /* FIXME */ for (i = 0; i < nbbs; i++) { gimple_stmt_iterator si; basic_block bb = bbs[i]; if (bb->loop_father == loop->inner) factor = innerloop_iters; else factor = 1; for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) { gimple *stmt = gsi_stmt (si); stmt_vec_info stmt_info = vinfo_for_stmt (stmt); if (!is_gimple_assign (stmt) && !is_gimple_call (stmt)) continue; /* Skip stmts that are not vectorized inside the loop. */ if (stmt_info && !STMT_VINFO_RELEVANT_P (stmt_info) && (!STMT_VINFO_LIVE_P (stmt_info) || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info))) && !STMT_VINFO_IN_PATTERN_P (stmt_info)) continue; vect_cost_for_stmt kind; if (STMT_VINFO_DATA_REF (stmt_info)) { if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info))) kind = scalar_load; else kind = scalar_store; } else kind = scalar_stmt; record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), factor, kind, stmt_info, 0, vect_prologue); } } /* Now accumulate cost. */ void *target_cost_data = init_cost (loop); stmt_info_for_cost *si; int j; FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si) { struct _stmt_vec_info *stmt_info = si->stmt ? 
vinfo_for_stmt (si->stmt) : NULL; (void) add_stmt_cost (target_cost_data, si->count, si->kind, stmt_info, si->misalign, vect_body); } unsigned dummy, body_cost = 0; finish_cost (target_cost_data, &dummy, &body_cost, &dummy); destroy_cost_data (target_cost_data); LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = body_cost; } /* Function vect_analyze_loop_form_1. Verify that certain CFG restrictions hold, including: - the loop has a pre-header - the loop has a single entry and exit - the loop exit condition is simple enough - the number of iterations can be analyzed, i.e, a countable loop. The niter could be analyzed under some assumptions. */ bool vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond, tree *assumptions, tree *number_of_iterationsm1, tree *number_of_iterations, gcond **inner_loop_cond) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_loop_form ===\n"); /* Different restrictions apply when we are considering an inner-most loop, vs. an outer (nested) loop. (FORNOW. May want to relax some of these restrictions in the future). */ if (!loop->inner) { /* Inner-most loop. We currently require that the number of BBs is exactly 2 (the header and latch). Vectorizable inner-most loops look like this: (pre-header) | header <--------+ | | | | +--> latch --+ | (exit-bb) */ if (loop->num_nodes != 2) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: control flow in loop.\n"); return false; } if (empty_block_p (loop->header)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: empty loop.\n"); return false; } } else { struct loop *innerloop = loop->inner; edge entryedge; /* Nested loop. We currently require that the loop is doubly-nested, contains a single inner loop, and the number of BBs is exactly 5. Vectorizable outer-loops look like this: (pre-header) | header <---+ | | inner-loop | | | tail ------+ | (exit-bb) The inner-loop has the properties expected of inner-most loops as described above. */ if ((loop->inner)->inner || (loop->inner)->next) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: multiple nested loops.\n"); return false; } if (loop->num_nodes != 5) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: control flow in loop.\n"); return false; } entryedge = loop_preheader_edge (innerloop); if (entryedge->src != loop->header || !single_exit (innerloop) || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unsupported outerloop form.\n"); return false; } /* Analyze the inner-loop. */ tree inner_niterm1, inner_niter, inner_assumptions; if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond, &inner_assumptions, &inner_niterm1, &inner_niter, NULL) /* Don't support analyzing niter under assumptions for inner loop. 
*/ || !integer_onep (inner_assumptions)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: Bad inner loop.\n"); return false; } if (!expr_invariant_in_loop_p (loop, inner_niter)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: inner-loop count not" " invariant.\n"); return false; } if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Considering outer-loop vectorization.\n"); } if (!single_exit (loop) || EDGE_COUNT (loop->header->preds) != 2) { if (dump_enabled_p ()) { if (!single_exit (loop)) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: multiple exits.\n"); else if (EDGE_COUNT (loop->header->preds) != 2) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: too many incoming edges.\n"); } return false; } /* We assume that the loop exit condition is at the end of the loop. i.e, that the loop is represented as a do-while (with a proper if-guard before the loop if needed), where the loop header contains all the executable statements, and the latch is empty. */ if (!empty_block_p (loop->latch) || !gimple_seq_empty_p (phi_nodes (loop->latch))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: latch block not empty.\n"); return false; } /* Make sure the exit is not abnormal. */ edge e = single_exit (loop); if (e->flags & EDGE_ABNORMAL) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: abnormal loop exit edge.\n"); return false; } *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations, number_of_iterationsm1); if (!*loop_cond) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: complicated exit condition.\n"); return false; } if (integer_zerop (*assumptions) || !*number_of_iterations || chrec_contains_undetermined (*number_of_iterations)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: number of iterations cannot be " "computed.\n"); return false; } if (integer_zerop (*number_of_iterations)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: number of iterations = 0.\n"); return false; } return true; } /* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */ loop_vec_info vect_analyze_loop_form (struct loop *loop) { tree assumptions, number_of_iterations, number_of_iterationsm1; gcond *loop_cond, *inner_loop_cond = NULL; if (! vect_analyze_loop_form_1 (loop, &loop_cond, &assumptions, &number_of_iterationsm1, &number_of_iterations, &inner_loop_cond)) return NULL; loop_vec_info loop_vinfo = new _loop_vec_info (loop); LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1; LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations; LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations; if (!integer_onep (assumptions)) { /* We consider to vectorize this loop by versioning it under some assumptions. In order to do this, we need to clear existing information computed by scev and niter analyzer. */ scev_reset_htab (); free_numbers_of_iterations_estimates (loop); /* Also set flag for this loop so that following scev and niter analysis are done under the assumptions. */ loop_constraint_set (loop, LOOP_C_FINITE); /* Also record the assumptions for versioning. 
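At transform time these assumptions become the runtime guard of the versioned loop: the vectorized copy runs when they hold, the original scalar loop otherwise. 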
*/ LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions; } if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Symbolic number of iterations is "); dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations); dump_printf (MSG_NOTE, "\n"); } } STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type; if (inner_loop_cond) STMT_VINFO_TYPE (vinfo_for_stmt (inner_loop_cond)) = loop_exit_ctrl_vec_info_type; gcc_assert (!loop->aux); loop->aux = loop_vinfo; return loop_vinfo; } /* Scan the loop stmts and dependent on whether there are any (non-)SLP statements update the vectorization factor. */ static void vect_update_vf_for_slp (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); int nbbs = loop->num_nodes; poly_uint64 vectorization_factor; int i; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_update_vf_for_slp ===\n"); vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); gcc_assert (known_ne (vectorization_factor, 0U)); /* If all the stmts in the loop can be SLPed, we perform only SLP, and vectorization factor of the loop is the unrolling factor required by the SLP instances. If that unrolling factor is 1, we say, that we perform pure SLP on loop - cross iteration parallelism is not exploited. */ bool only_slp_in_loop = true; for (i = 0; i < nbbs; i++) { basic_block bb = bbs[i]; for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) { gimple *stmt = gsi_stmt (si); stmt_vec_info stmt_info = vinfo_for_stmt (stmt); if (STMT_VINFO_IN_PATTERN_P (stmt_info) && STMT_VINFO_RELATED_STMT (stmt_info)) { stmt = STMT_VINFO_RELATED_STMT (stmt_info); stmt_info = vinfo_for_stmt (stmt); } if ((STMT_VINFO_RELEVANT_P (stmt_info) || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info))) && !PURE_SLP_STMT (stmt_info)) /* STMT needs both SLP and loop-based vectorization. */ only_slp_in_loop = false; } } if (only_slp_in_loop) { dump_printf_loc (MSG_NOTE, vect_location, "Loop contains only SLP stmts\n"); vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo); } else { dump_printf_loc (MSG_NOTE, vect_location, "Loop contains SLP and non-SLP stmts\n"); /* Both the vectorization factor and unroll factor have the form current_vector_size * X for some rational X, so they must have a common multiple. */ vectorization_factor = force_common_multiple (vectorization_factor, LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo)); } LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor; if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Updating vectorization factor to "); dump_dec (MSG_NOTE, vectorization_factor); dump_printf (MSG_NOTE, ".\n"); } } /* Return true if STMT_INFO describes a double reduction phi and if the other phi in the reduction is also relevant for vectorization. This rejects cases such as: outer1: x_1 = PHI <x_3(outer2), ...>; ... inner: x_2 = ...; ... outer2: x_3 = PHI <x_2(inner)>; if nothing in x_2 or elsewhere makes x_1 relevant. */ static bool vect_active_double_reduction_p (stmt_vec_info stmt_info) { if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def) return false; gimple *other_phi = STMT_VINFO_REDUC_DEF (stmt_info); return STMT_VINFO_RELEVANT_P (vinfo_for_stmt (other_phi)); } /* Function vect_analyze_loop_operations. Scan the loop stmts and make sure they are all vectorizable. 
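A single relevant statement that none of the vectorizable_* routines can handle makes the whole loop unvectorizable. 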
*/ static bool vect_analyze_loop_operations (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); int nbbs = loop->num_nodes; int i; stmt_vec_info stmt_info; bool need_to_vectorize = false; bool ok; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_loop_operations ===\n"); for (i = 0; i < nbbs; i++) { basic_block bb = bbs[i]; for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) { gphi *phi = si.phi (); ok = true; stmt_info = vinfo_for_stmt (phi); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "examining phi: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); } if (virtual_operand_p (gimple_phi_result (phi))) continue; /* Inner-loop loop-closed exit phi in outer-loop vectorization (i.e., a phi in the tail of the outer-loop). */ if (! is_loop_header_bb_p (bb)) { /* FORNOW: we currently don't support the case that these phis are not used in the outerloop (unless it is double reduction, i.e., this phi is vect_reduction_def), cause this case requires to actually do something here. */ if (STMT_VINFO_LIVE_P (stmt_info) && !vect_active_double_reduction_p (stmt_info)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "Unsupported loop-closed phi in " "outer-loop.\n"); return false; } /* If PHI is used in the outer loop, we check that its operand is defined in the inner loop. */ if (STMT_VINFO_RELEVANT_P (stmt_info)) { tree phi_op; gimple *op_def_stmt; if (gimple_phi_num_args (phi) != 1) return false; phi_op = PHI_ARG_DEF (phi, 0); if (TREE_CODE (phi_op) != SSA_NAME) return false; op_def_stmt = SSA_NAME_DEF_STMT (phi_op); if (gimple_nop_p (op_def_stmt) || !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt)) || !vinfo_for_stmt (op_def_stmt)) return false; if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt)) != vect_used_in_outer && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt)) != vect_used_in_outer_by_reduction) return false; } continue; } gcc_assert (stmt_info); if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope || STMT_VINFO_LIVE_P (stmt_info)) && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def) { /* A scalar-dependence cycle that we don't support. */ if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: scalar dependence cycle.\n"); return false; } if (STMT_VINFO_RELEVANT_P (stmt_info)) { need_to_vectorize = true; if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def && ! PURE_SLP_STMT (stmt_info)) ok = vectorizable_induction (phi, NULL, NULL, NULL); else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle) && ! PURE_SLP_STMT (stmt_info)) ok = vectorizable_reduction (phi, NULL, NULL, NULL, NULL); } /* SLP PHIs are tested by vect_slp_analyze_node_operations. 
*/ if (ok && STMT_VINFO_LIVE_P (stmt_info) && !PURE_SLP_STMT (stmt_info)) ok = vectorizable_live_operation (phi, NULL, NULL, -1, NULL); if (!ok) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: relevant phi not " "supported: "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0); } return false; } } for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) { gimple *stmt = gsi_stmt (si); if (!gimple_clobber_p (stmt) && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL, NULL)) return false; } } /* bbs */ /* All operations in the loop are either irrelevant (deal with loop control, or dead), or only used outside the loop and can be moved out of the loop (e.g. invariants, inductions). The loop can be optimized away by scalar optimizations. We're better off not touching this loop. */ if (!need_to_vectorize) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "All the computation can be taken out of the loop.\n"); if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: redundant loop. no profit to " "vectorize.\n"); return false; } return true; } /* Analyze the cost of the loop described by LOOP_VINFO. Decide if it is worthwhile to vectorize. Return 1 if definitely yes, 0 if definitely no, or -1 if it's worth retrying. */ static int vect_analyze_loop_costing (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo); /* Only fully-masked loops can have iteration counts less than the vectorization factor. */ if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) { HOST_WIDE_INT max_niter; if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo); else max_niter = max_stmt_executions_int (loop); if (max_niter != -1 && (unsigned HOST_WIDE_INT) max_niter < assumed_vf) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: iteration count smaller than " "vectorization factor.\n"); return 0; } } int min_profitable_iters, min_profitable_estimate; vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters, &min_profitable_estimate); if (min_profitable_iters < 0) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: vectorization not profitable.\n"); if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: vector version will never be " "profitable.\n"); return -1; } int min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND) * assumed_vf); /* Use the cost model only if it is more conservative than user specified threshold. 
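For example (illustrative numbers): with --param min-vect-loop-bound=4 and an assumed VF of 8, the user bound contributes 32 iterations; if the cost model computes a break-even point of 20 iterations, the final threshold is the larger value, 32. 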
*/ unsigned int th = (unsigned) MAX (min_scalar_loop_bound, min_profitable_iters); LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th; if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && LOOP_VINFO_INT_NITERS (loop_vinfo) < th) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: vectorization not profitable.\n"); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "not vectorized: iteration count smaller than user " "specified loop bound parameter or minimum profitable " "iterations (whichever is more conservative).\n"); return 0; } HOST_WIDE_INT estimated_niter = estimated_stmt_executions_int (loop); if (estimated_niter == -1) estimated_niter = likely_max_stmt_executions_int (loop); if (estimated_niter != -1 && ((unsigned HOST_WIDE_INT) estimated_niter < MAX (th, (unsigned) min_profitable_estimate))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: estimated iteration count too " "small.\n"); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "not vectorized: estimated iteration count smaller " "than specified loop bound parameter or minimum " "profitable iterations (whichever is more " "conservative).\n"); return -1; } return 1; } /* Function vect_analyze_loop_2. Apply a set of analyses on LOOP, and create a loop_vec_info struct for it. The different analyses will record information in the loop_vec_info struct. */ static bool vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal) { bool ok; int res; unsigned int max_vf = MAX_VECTORIZATION_FACTOR; poly_uint64 min_vf = 2; unsigned int n_stmts = 0; /* The first group of checks is independent of the vector size. */ fatal = true; /* Find all data references in the loop (which correspond to vdefs/vuses) and analyze their evolution in the loop. */ basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); loop_p loop = LOOP_VINFO_LOOP (loop_vinfo); if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: loop nest containing two " "or more consecutive inner loops cannot be " "vectorized\n"); return false; } for (unsigned i = 0; i < loop->num_nodes; i++) for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); if (is_gimple_debug (stmt)) continue; ++n_stmts; if (!find_data_references_in_stmt (loop, stmt, &LOOP_VINFO_DATAREFS (loop_vinfo))) { if (is_gimple_call (stmt) && loop->safelen) { tree fndecl = gimple_call_fndecl (stmt), op; if (fndecl != NULL_TREE) { cgraph_node *node = cgraph_node::get (fndecl); if (node != NULL && node->simd_clones != NULL) { unsigned int j, n = gimple_call_num_args (stmt); for (j = 0; j < n; j++) { op = gimple_call_arg (stmt, j); if (DECL_P (op) || (REFERENCE_CLASS_P (op) && get_base_address (op))) break; } op = gimple_call_lhs (stmt); /* Ignore #pragma omp declare simd functions if they don't have data references in the call stmt itself. */ if (j == n && !(op && (DECL_P (op) || (REFERENCE_CLASS_P (op) && get_base_address (op))))) continue; } } } if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: loop contains function " "calls or data references that cannot " "be analyzed\n"); return false; } } /* Analyze the data references and also adjust the minimal vectorization factor according to the loads and stores. 
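E.g. a V16QI data reference raises the minimal VF to 16; if dependence analysis later caps the maximal VF below that, the loop is rejected as having a bad data dependence. 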
*/ ok = vect_analyze_data_refs (loop_vinfo, &min_vf); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data references.\n"); return false; } /* Classify all cross-iteration scalar data-flow cycles. Cross-iteration cycles caused by virtual phis are analyzed separately. */ vect_analyze_scalar_cycles (loop_vinfo); vect_pattern_recog (loop_vinfo); vect_fixup_scalar_cycles_with_patterns (loop_vinfo); /* Analyze the access patterns of the data-refs in the loop (consecutive, complex, etc.). FORNOW: Only handle consecutive access pattern. */ ok = vect_analyze_data_ref_accesses (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data access.\n"); return false; } /* Data-flow analysis to detect stmts that do not need to be vectorized. */ ok = vect_mark_stmts_to_be_vectorized (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unexpected pattern.\n"); return false; } /* While the rest of the analysis below depends on it in some way. */ fatal = false; /* Analyze data dependences between the data-refs in the loop and adjust the maximum vectorization factor according to the dependences. FORNOW: fail at the first data dependence that we encounter. */ ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf); if (!ok || (max_vf != MAX_VECTORIZATION_FACTOR && maybe_lt (max_vf, min_vf))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data dependence.\n"); return false; } LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf; ok = vect_determine_vectorization_factor (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "can't determine vectorization factor.\n"); return false; } if (max_vf != MAX_VECTORIZATION_FACTOR && maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data dependence.\n"); return false; } /* Compute the scalar iteration cost. */ vect_compute_single_scalar_iteration_cost (loop_vinfo); poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); unsigned th; /* Check the SLP opportunities in the loop, analyze and build SLP trees. */ ok = vect_analyze_slp (loop_vinfo, n_stmts); if (!ok) return false; /* If there are any SLP instances mark them as pure_slp. */ bool slp = vect_make_slp_decision (loop_vinfo); if (slp) { /* Find stmts that need to be both vectorized and SLPed. */ vect_detect_hybrid_slp (loop_vinfo); /* Update the vectorization factor based on the SLP decision. */ vect_update_vf_for_slp (loop_vinfo); } bool saved_can_fully_mask_p = LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo); /* We don't expect to have to roll back to anything other than an empty set of rgroups. */ gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ()); /* This is the point where we can re-start analysis with SLP forced off. */ start_over: /* Now the vectorization factor is final. 
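Everything from here on may execute twice: once with the SLP decision made above, and once more via start_over after a recoverable failure forces SLP off. 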
*/ poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); gcc_assert (known_ne (vectorization_factor, 0U)); if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "vectorization_factor = "); dump_dec (MSG_NOTE, vectorization_factor); dump_printf (MSG_NOTE, ", niters = " HOST_WIDE_INT_PRINT_DEC "\n", LOOP_VINFO_INT_NITERS (loop_vinfo)); } HOST_WIDE_INT max_niter = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo)); /* Analyze the alignment of the data-refs in the loop. Fail if a data reference is found that cannot be vectorized. */ ok = vect_analyze_data_refs_alignment (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data alignment.\n"); return false; } /* Prune the list of ddrs to be tested at run-time by versioning for alias. It is important to call pruning after vect_analyze_data_ref_accesses, since we use grouping information gathered by interleaving analysis. */ ok = vect_prune_runtime_alias_test_list (loop_vinfo); if (!ok) return false; /* Do not invoke vect_enhance_data_refs_alignment for epilogue vectorization. */ if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo)) { /* This pass will decide on using loop versioning and/or loop peeling in order to enhance the alignment of data references in the loop. */ ok = vect_enhance_data_refs_alignment (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data alignment.\n"); return false; } } if (slp) { /* Analyze operations in the SLP instances. Note this may remove unsupported SLP instances, which makes the above SLP kind detection invalid. */ unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length (); vect_slp_analyze_operations (loop_vinfo); if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size) goto again; } /* Scan all the remaining operations in the loop that are not subject to SLP and make sure they are vectorizable. */ ok = vect_analyze_loop_operations (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad operation or unsupported loop bound.\n"); return false; } /* Decide whether to use a fully-masked loop for this vectorization factor. */ LOOP_VINFO_FULLY_MASKED_P (loop_vinfo) = (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) && vect_verify_full_masking (loop_vinfo)); if (dump_enabled_p ()) { if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) dump_printf_loc (MSG_NOTE, vect_location, "using a fully-masked loop.\n"); else dump_printf_loc (MSG_NOTE, vect_location, "not using a fully-masked loop.\n"); } /* If an epilogue loop is required because of data accesses with gaps, one additional iteration needs to be peeled. Check if there are enough iterations for vectorization. */ if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) { poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo); if (known_lt (wi::to_widest (scalar_niters), vf)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "loop does not have enough iterations to" " support peeling for gaps.\n"); return false; } } /* Check that the costings of the loop make vectorizing worthwhile. 
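A result of -1 below sends us to the again: path, which re-tries the whole analysis with SLP disabled. 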
*/ res = vect_analyze_loop_costing (loop_vinfo); if (res < 0) goto again; if (!res) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "Loop costings not worthwhile.\n"); return false; } /* Decide whether we need to create an epilogue loop to handle remaining scalar iterations. */ th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo); unsigned HOST_WIDE_INT const_vf; if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) /* The main loop handles all iterations. */ LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false; else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0) { if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo) - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo), LOOP_VINFO_VECT_FACTOR (loop_vinfo))) LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true; } else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf) || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo)) < (unsigned) exact_log2 (const_vf)) /* In case of versioning, check if the maximum number of iterations is greater than th. If they are identical, the epilogue is unnecessary. */ && (!LOOP_REQUIRES_VERSIONING (loop_vinfo) || ((unsigned HOST_WIDE_INT) max_niter > (th / const_vf) * const_vf)))) LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true; /* If an epilogue loop is required make sure we can create one. */ if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n"); if (!vect_can_advance_ivs_p (loop_vinfo) || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo), single_exit (LOOP_VINFO_LOOP (loop_vinfo)))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: can't create required " "epilog loop\n"); goto again; } } /* During peeling, we need to check if number of loop iterations is enough for both peeled prolog loop and vector loop. This check can be merged along with threshold check of loop versioning, so increase threshold for this case if necessary. */ if (LOOP_REQUIRES_VERSIONING (loop_vinfo)) { poly_uint64 niters_th = 0; if (!vect_use_loop_mask_for_alignment_p (loop_vinfo)) { /* Niters for peeled prolog loop. */ if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0) { struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo); tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))); niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1; } else niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo); } /* Niters for at least one iteration of vectorized loop. */ if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo); /* One additional iteration because of peeling for gap. */ if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)) niters_th += 1; LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th; } gcc_assert (known_eq (vectorization_factor, LOOP_VINFO_VECT_FACTOR (loop_vinfo))); /* Ok to vectorize! */ return true; again: /* Try again with SLP forced off but if we didn't do any SLP there is no point in re-trying. */ if (!slp) return false; /* If there are reduction chains re-trying will fail anyway. */ if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ()) return false; /* Likewise if the grouped loads or stores in the SLP cannot be handled via interleaving or lane instructions. 
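E.g. a grouped store of three ints per iteration needs either store-lanes support or a grouped-store permutation for the chosen vector type; if the target provides neither, plain loop vectorization would fail for the same reason, so there is no point re-trying. 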
*/ slp_instance instance; slp_tree node; unsigned i, j; FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance) { stmt_vec_info vinfo; vinfo = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0]); if (! STMT_VINFO_GROUPED_ACCESS (vinfo)) continue; vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo)); unsigned int size = STMT_VINFO_GROUP_SIZE (vinfo); tree vectype = STMT_VINFO_VECTYPE (vinfo); if (! vect_store_lanes_supported (vectype, size, false) && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U) && ! vect_grouped_store_supported (vectype, size)) return false; FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node) { vinfo = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]); vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo)); bool single_element_p = !STMT_VINFO_GROUP_NEXT_ELEMENT (vinfo); size = STMT_VINFO_GROUP_SIZE (vinfo); vectype = STMT_VINFO_VECTYPE (vinfo); if (! vect_load_lanes_supported (vectype, size, false) && ! vect_grouped_load_supported (vectype, single_element_p, size)) return false; } } if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "re-trying with SLP disabled\n"); /* Roll back state appropriately. No SLP this time. */ slp = false; /* Restore vectorization factor as it were without SLP. */ LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor; /* Free the SLP instances. */ FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance) vect_free_slp_instance (instance); LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release (); /* Reset SLP type to loop_vect on all stmts. */ for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i) { basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i]; for (gimple_stmt_iterator si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) { stmt_vec_info stmt_info = vinfo_for_stmt (gsi_stmt (si)); STMT_SLP_TYPE (stmt_info) = loop_vect; } for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) { stmt_vec_info stmt_info = vinfo_for_stmt (gsi_stmt (si)); STMT_SLP_TYPE (stmt_info) = loop_vect; if (STMT_VINFO_IN_PATTERN_P (stmt_info)) { stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info)); STMT_SLP_TYPE (stmt_info) = loop_vect; for (gimple_stmt_iterator pi = gsi_start (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)); !gsi_end_p (pi); gsi_next (&pi)) { gimple *pstmt = gsi_stmt (pi); STMT_SLP_TYPE (vinfo_for_stmt (pstmt)) = loop_vect; } } } } /* Free optimized alias test DDRS. */ LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0); LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release (); LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release (); /* Reset target cost data. */ destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)); LOOP_VINFO_TARGET_COST_DATA (loop_vinfo) = init_cost (LOOP_VINFO_LOOP (loop_vinfo)); /* Reset accumulated rgroup information. */ release_vec_loop_masks (&LOOP_VINFO_MASKS (loop_vinfo)); /* Reset assorted flags. */ LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false; LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false; LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0; LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0; LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = saved_can_fully_mask_p; goto start_over; } /* Function vect_analyze_loop. Apply a set of analyses on LOOP, and create a loop_vec_info struct for it. The different analyses will record information in the loop_vec_info struct. If ORIG_LOOP_VINFO is not NULL epilogue must be vectorized. 
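Analysis is attempted once per vector size the target advertises, starting from the autodetected size and re-trying with the next one after each non-fatal failure. 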
*/ loop_vec_info vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo) { loop_vec_info loop_vinfo; auto_vector_sizes vector_sizes; /* Autodetect first vector size we try. */ current_vector_size = 0; targetm.vectorize.autovectorize_vector_sizes (&vector_sizes); unsigned int next_size = 0; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "===== analyze_loop_nest =====\n"); if (loop_outer (loop) && loop_vec_info_for_loop (loop_outer (loop)) && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop)))) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "outer-loop already vectorized.\n"); return NULL; } poly_uint64 autodetected_vector_size = 0; while (1) { /* Check the CFG characteristics of the loop (nesting, entry/exit). */ loop_vinfo = vect_analyze_loop_form (loop); if (!loop_vinfo) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad loop form.\n"); return NULL; } bool fatal = false; if (orig_loop_vinfo) LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo; if (vect_analyze_loop_2 (loop_vinfo, fatal)) { LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1; return loop_vinfo; } delete loop_vinfo; if (next_size == 0) autodetected_vector_size = current_vector_size; if (next_size < vector_sizes.length () && known_eq (vector_sizes[next_size], autodetected_vector_size)) next_size += 1; if (fatal || next_size == vector_sizes.length () || known_eq (current_vector_size, 0U)) return NULL; /* Try the next biggest vector size. */ current_vector_size = vector_sizes[next_size++]; if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "***** Re-trying analysis with " "vector size "); dump_dec (MSG_NOTE, current_vector_size); dump_printf (MSG_NOTE, "\n"); } } } /* Return true if there is an in-order reduction function for CODE, storing it in *REDUC_FN if so. */ static bool fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn) { switch (code) { case PLUS_EXPR: *reduc_fn = IFN_FOLD_LEFT_PLUS; return true; default: return false; } } /* Function reduction_fn_for_scalar_code Input: CODE - tree_code of a reduction operations. Output: REDUC_FN - the corresponding internal function to be used to reduce the vector of partial results into a single scalar result, or IFN_LAST if the operation is a supported reduction operation, but does not have such an internal function. Return FALSE if CODE currently cannot be vectorized as reduction. */ static bool reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn) { switch (code) { case MAX_EXPR: *reduc_fn = IFN_REDUC_MAX; return true; case MIN_EXPR: *reduc_fn = IFN_REDUC_MIN; return true; case PLUS_EXPR: *reduc_fn = IFN_REDUC_PLUS; return true; case BIT_AND_EXPR: *reduc_fn = IFN_REDUC_AND; return true; case BIT_IOR_EXPR: *reduc_fn = IFN_REDUC_IOR; return true; case BIT_XOR_EXPR: *reduc_fn = IFN_REDUC_XOR; return true; case MULT_EXPR: case MINUS_EXPR: *reduc_fn = IFN_LAST; return true; default: return false; } } /* If there is a neutral value X such that SLP reduction NODE would not be affected by the introduction of additional X elements, return that X, otherwise return null. CODE is the code of the reduction. REDUC_CHAIN is true if the SLP statements perform a single reduction, false if each statement performs an independent reduction. 
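For example, 0 is neutral for PLUS_EXPR and MINUS_EXPR since extra zero elements leave a sum unchanged, 1 is neutral for MULT_EXPR, and an all-ones value is neutral for BIT_AND_EXPR. 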
*/ static tree neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code, bool reduc_chain) { vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node); gimple *stmt = stmts[0]; stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo); tree scalar_type = TREE_TYPE (vector_type); struct loop *loop = gimple_bb (stmt)->loop_father; gcc_assert (loop); switch (code) { case WIDEN_SUM_EXPR: case DOT_PROD_EXPR: case SAD_EXPR: case PLUS_EXPR: case MINUS_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: return build_zero_cst (scalar_type); case MULT_EXPR: return build_one_cst (scalar_type); case BIT_AND_EXPR: return build_all_ones_cst (scalar_type); case MAX_EXPR: case MIN_EXPR: /* For MIN/MAX the initial values are neutral. A reduction chain has only a single initial value, so that value is neutral for all statements. */ if (reduc_chain) return PHI_ARG_DEF_FROM_EDGE (stmt, loop_preheader_edge (loop)); return NULL_TREE; default: return NULL_TREE; } } /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement STMT is printed with a message MSG. */ static void report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg) { dump_printf_loc (msg_type, vect_location, "%s", msg); dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0); } /* Detect SLP reduction of the form: #a1 = phi <a5, a0> a2 = operation (a1) a3 = operation (a2) a4 = operation (a3) a5 = operation (a4) #a = phi <a5> PHI is the reduction phi node (#a1 = phi <a5, a0> above). FIRST_STMT is the first reduction stmt in the chain (a2 = operation (a1)). Return TRUE if a reduction chain was detected. */ static bool vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi, gimple *first_stmt) { struct loop *loop = (gimple_bb (phi))->loop_father; struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info); enum tree_code code; gimple *current_stmt = NULL, *loop_use_stmt = NULL, *first, *next_stmt; stmt_vec_info use_stmt_info, current_stmt_info; tree lhs; imm_use_iterator imm_iter; use_operand_p use_p; int nloop_uses, size = 0, n_out_of_loop_uses; bool found = false; if (loop != vect_loop) return false; lhs = PHI_RESULT (phi); code = gimple_assign_rhs_code (first_stmt); while (1) { nloop_uses = 0; n_out_of_loop_uses = 0; FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs) { gimple *use_stmt = USE_STMT (use_p); if (is_gimple_debug (use_stmt)) continue; /* Check if we got back to the reduction phi. */ if (use_stmt == phi) { loop_use_stmt = use_stmt; found = true; break; } if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))) { loop_use_stmt = use_stmt; nloop_uses++; } else n_out_of_loop_uses++; /* There can be either a single use in the loop or two uses in phi nodes. */ if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses)) return false; } if (found) break; /* We reached a statement with no loop uses. */ if (nloop_uses == 0) return false; /* This is a loop exit phi, and we haven't reached the reduction phi. */ if (gimple_code (loop_use_stmt) == GIMPLE_PHI) return false; if (!is_gimple_assign (loop_use_stmt) || code != gimple_assign_rhs_code (loop_use_stmt) || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt))) return false; /* Insert USE_STMT into reduction chain. 
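The chain is linked through the GROUP_FIRST_ELEMENT/GROUP_NEXT_ELEMENT fields also used for interleaved accesses, so SLP discovery can later treat it as a group. 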
*/ use_stmt_info = vinfo_for_stmt (loop_use_stmt); if (current_stmt) { current_stmt_info = vinfo_for_stmt (current_stmt); GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt; GROUP_FIRST_ELEMENT (use_stmt_info) = GROUP_FIRST_ELEMENT (current_stmt_info); } else GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt; lhs = gimple_assign_lhs (loop_use_stmt); current_stmt = loop_use_stmt; size++; } if (!found || loop_use_stmt != phi || size < 2) return false; /* Swap the operands, if needed, to make the reduction operand be the second operand. */ lhs = PHI_RESULT (phi); next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt)); while (next_stmt) { if (gimple_assign_rhs2 (next_stmt) == lhs) { tree op = gimple_assign_rhs1 (next_stmt); gimple *def_stmt = NULL; if (TREE_CODE (op) == SSA_NAME) def_stmt = SSA_NAME_DEF_STMT (op); /* Check that the other def is either defined in the loop ("vect_internal_def"), or it's an induction (defined by a loop-header phi-node). */ if (def_stmt && gimple_bb (def_stmt) && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)) && (is_gimple_assign (def_stmt) || is_gimple_call (def_stmt) || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)) == vect_induction_def || (gimple_code (def_stmt) == GIMPLE_PHI && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)) == vect_internal_def && !is_loop_header_bb_p (gimple_bb (def_stmt))))) { lhs = gimple_assign_lhs (next_stmt); next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); continue; } return false; } else { tree op = gimple_assign_rhs2 (next_stmt); gimple *def_stmt = NULL; if (TREE_CODE (op) == SSA_NAME) def_stmt = SSA_NAME_DEF_STMT (op); /* Check that the other def is either defined in the loop ("vect_internal_def"), or it's an induction (defined by a loop-header phi-node). */ if (def_stmt && gimple_bb (def_stmt) && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)) && (is_gimple_assign (def_stmt) || is_gimple_call (def_stmt) || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)) == vect_induction_def || (gimple_code (def_stmt) == GIMPLE_PHI && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)) == vect_internal_def && !is_loop_header_bb_p (gimple_bb (def_stmt))))) { if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0); } swap_ssa_operands (next_stmt, gimple_assign_rhs1_ptr (next_stmt), gimple_assign_rhs2_ptr (next_stmt)); update_stmt (next_stmt); if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt))) LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true; } else return false; } lhs = gimple_assign_lhs (next_stmt); next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); } /* Save the chain for further analysis in SLP detection. */ first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt)); LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first); GROUP_SIZE (vinfo_for_stmt (first)) = size; return true; } /* Return true if we need an in-order reduction for operation CODE on type TYPE. NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer overflow must wrap. */ static bool needs_fold_left_reduction_p (tree type, tree_code code, bool need_wrapping_integral_overflow) { /* CHECKME: check for !flag_finite_math_only too? 
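The underlying issue: for a float sum like s += a[i], (s + a[0]) + a[1] need not equal s + (a[0] + a[1]), so without -fassociative-math the reduction must be computed strictly in order (FOLD_LEFT_REDUCTION); MIN and MAX are the exception since they stay associative for floats. 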
*/ if (SCALAR_FLOAT_TYPE_P (type)) switch (code) { case MIN_EXPR: case MAX_EXPR: return false; default: return !flag_associative_math; } if (INTEGRAL_TYPE_P (type)) { if (!operation_no_trapping_overflow (type, code)) return true; if (need_wrapping_integral_overflow && !TYPE_OVERFLOW_WRAPS (type) && operation_can_overflow (code)) return true; return false; } if (SAT_FIXED_POINT_TYPE_P (type)) return true; return false; } /* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG and reduction operation CODE has a handled computation expression. */ bool check_reduction_path (location_t loc, loop_p loop, gphi *phi, tree loop_arg, enum tree_code code) { auto_vec<std::pair<ssa_op_iter, use_operand_p> > path; auto_bitmap visited; tree lookfor = PHI_RESULT (phi); ssa_op_iter curri; use_operand_p curr = op_iter_init_phiuse (&curri, phi, SSA_OP_USE); while (USE_FROM_PTR (curr) != loop_arg) curr = op_iter_next_use (&curri); curri.i = curri.numops; do { path.safe_push (std::make_pair (curri, curr)); tree use = USE_FROM_PTR (curr); if (use == lookfor) break; gimple *def = SSA_NAME_DEF_STMT (use); if (gimple_nop_p (def) || ! flow_bb_inside_loop_p (loop, gimple_bb (def))) { pop: do { std::pair<ssa_op_iter, use_operand_p> x = path.pop (); curri = x.first; curr = x.second; do curr = op_iter_next_use (&curri); /* Skip already visited or non-SSA operands (from iterating over PHI args). */ while (curr != NULL_USE_OPERAND_P && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME || ! bitmap_set_bit (visited, SSA_NAME_VERSION (USE_FROM_PTR (curr))))); } while (curr == NULL_USE_OPERAND_P && ! path.is_empty ()); if (curr == NULL_USE_OPERAND_P) break; } else { if (gimple_code (def) == GIMPLE_PHI) curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE); else curr = op_iter_init_use (&curri, def, SSA_OP_USE); while (curr != NULL_USE_OPERAND_P && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME || ! bitmap_set_bit (visited, SSA_NAME_VERSION (USE_FROM_PTR (curr))))) curr = op_iter_next_use (&curri); if (curr == NULL_USE_OPERAND_P) goto pop; } } while (1); if (dump_file && (dump_flags & TDF_DETAILS)) { dump_printf_loc (MSG_NOTE, loc, "reduction path: "); unsigned i; std::pair<ssa_op_iter, use_operand_p> *x; FOR_EACH_VEC_ELT (path, i, x) { dump_generic_expr (MSG_NOTE, TDF_SLIM, USE_FROM_PTR (x->second)); dump_printf (MSG_NOTE, " "); } dump_printf (MSG_NOTE, "\n"); } /* Check whether the reduction path detected is valid. */ bool fail = path.length () == 0; bool neg = false; for (unsigned i = 1; i < path.length (); ++i) { gimple *use_stmt = USE_STMT (path[i].second); tree op = USE_FROM_PTR (path[i].second); if (! has_single_use (op) || ! is_gimple_assign (use_stmt)) { fail = true; break; } if (gimple_assign_rhs_code (use_stmt) != code) { if (code == PLUS_EXPR && gimple_assign_rhs_code (use_stmt) == MINUS_EXPR) { /* Track whether we negate the reduction value each iteration. */ if (gimple_assign_rhs2 (use_stmt) == op) neg = ! neg; } else { fail = true; break; } } } return ! fail && ! neg; } /* Function vect_is_simple_reduction (1) Detect a cross-iteration def-use cycle that represents a simple reduction computation. We look for the following pattern: loop_header: a1 = phi < a0, a2 > a3 = ... a2 = operation (a3, a1) or a3 = ... loop_header: a1 = phi < a0, a2 > a2 = operation (a3, a1) such that: 1. operation is commutative and associative and it is safe to change the order of the computation 2. no uses for a2 in the loop (a2 is used out of the loop) 3. no uses of a1 in the loop besides the reduction operation 4. 
no uses of a1 outside the loop. Conditions 1,4 are tested here. Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized. (2) Detect a cross-iteration def-use cycle in nested loops, i.e., nested cycles. (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double reductions: a1 = phi < a0, a2 > inner loop (def of a3) a2 = phi < a3 > (4) Detect condition expressions, ie: for (int i = 0; i < N; i++) if (a[i] < val) ret_val = a[i]; */ static gimple * vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi, bool *double_reduc, bool need_wrapping_integral_overflow, enum vect_reduction_type *v_reduc_type) { struct loop *loop = (gimple_bb (phi))->loop_father; struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info); gimple *def_stmt, *def1 = NULL, *def2 = NULL, *phi_use_stmt = NULL; enum tree_code orig_code, code; tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE; tree type; int nloop_uses; tree name; imm_use_iterator imm_iter; use_operand_p use_p; bool phi_def; *double_reduc = false; *v_reduc_type = TREE_CODE_REDUCTION; tree phi_name = PHI_RESULT (phi); /* ??? If there are no uses of the PHI result the inner loop reduction won't be detected as possibly double-reduction by vectorizable_reduction because that tries to walk the PHI arg from the preheader edge which can be constant. See PR60382. */ if (has_zero_uses (phi_name)) return NULL; nloop_uses = 0; FOR_EACH_IMM_USE_FAST (use_p, imm_iter, phi_name) { gimple *use_stmt = USE_STMT (use_p); if (is_gimple_debug (use_stmt)) continue; if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "intermediate value used outside loop.\n"); return NULL; } nloop_uses++; if (nloop_uses > 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduction value used in loop.\n"); return NULL; } phi_use_stmt = use_stmt; } edge latch_e = loop_latch_edge (loop); tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e); if (TREE_CODE (loop_arg) != SSA_NAME) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduction: not ssa_name: "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return NULL; } def_stmt = SSA_NAME_DEF_STMT (loop_arg); if (is_gimple_assign (def_stmt)) { name = gimple_assign_lhs (def_stmt); phi_def = false; } else if (gimple_code (def_stmt) == GIMPLE_PHI) { name = PHI_RESULT (def_stmt); phi_def = true; } else { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduction: unhandled reduction operation: "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def_stmt, 0); } return NULL; } if (! flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))) return NULL; nloop_uses = 0; auto_vec<gphi *, 3> lcphis; FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name) { gimple *use_stmt = USE_STMT (use_p); if (is_gimple_debug (use_stmt)) continue; if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))) nloop_uses++; else /* We can have more than one loop-closed PHI. */ lcphis.safe_push (as_a <gphi *> (use_stmt)); if (nloop_uses > 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduction used in loop.\n"); return NULL; } } /* If DEF_STMT is a phi node itself, we expect it to have a single argument defined in the inner loop. 
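This is the shape of a double reduction: the statement feeding the outer-loop phi from the latch is itself the inner loop's exit phi. 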
*/ if (phi_def) { op1 = PHI_ARG_DEF (def_stmt, 0); if (gimple_phi_num_args (def_stmt) != 1 || TREE_CODE (op1) != SSA_NAME) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unsupported phi node definition.\n"); return NULL; } def1 = SSA_NAME_DEF_STMT (op1); if (gimple_bb (def1) && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)) && loop->inner && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1)) && is_gimple_assign (def1) && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt))) { if (dump_enabled_p ()) report_vect_op (MSG_NOTE, def_stmt, "detected double reduction: "); *double_reduc = true; return def_stmt; } return NULL; } /* If we are vectorizing an inner reduction we are executing that in the original order only in case we are not dealing with a double reduction. */ bool check_reduction = true; if (flow_loop_nested_p (vect_loop, loop)) { gphi *lcphi; unsigned i; check_reduction = false; FOR_EACH_VEC_ELT (lcphis, i, lcphi) FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_phi_result (lcphi)) { gimple *use_stmt = USE_STMT (use_p); if (is_gimple_debug (use_stmt)) continue; if (! flow_bb_inside_loop_p (vect_loop, gimple_bb (use_stmt))) check_reduction = true; } } bool nested_in_vect_loop = flow_loop_nested_p (vect_loop, loop); code = orig_code = gimple_assign_rhs_code (def_stmt); /* We can handle "res -= x[i]", which is non-associative by simply rewriting this into "res += -x[i]". Avoid changing gimple instruction for the first simple tests and only do this if we're allowed to change code at all. */ if (code == MINUS_EXPR && gimple_assign_rhs2 (def_stmt) != phi_name) code = PLUS_EXPR; if (code == COND_EXPR) { if (! nested_in_vect_loop) *v_reduc_type = COND_REDUCTION; op3 = gimple_assign_rhs1 (def_stmt); if (COMPARISON_CLASS_P (op3)) { op4 = TREE_OPERAND (op3, 1); op3 = TREE_OPERAND (op3, 0); } if (op3 == phi_name || op4 == phi_name) { if (dump_enabled_p ()) report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, "reduction: condition depends on previous" " iteration: "); return NULL; } op1 = gimple_assign_rhs2 (def_stmt); op2 = gimple_assign_rhs3 (def_stmt); } else if (!commutative_tree_code (code) || !associative_tree_code (code)) { if (dump_enabled_p ()) report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, "reduction: not commutative/associative: "); return NULL; } else if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS) { op1 = gimple_assign_rhs1 (def_stmt); op2 = gimple_assign_rhs2 (def_stmt); } else { if (dump_enabled_p ()) report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, "reduction: not handled operation: "); return NULL; } if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME) { if (dump_enabled_p ()) report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, "reduction: both uses not ssa_names: "); return NULL; } type = TREE_TYPE (gimple_assign_lhs (def_stmt)); if ((TREE_CODE (op1) == SSA_NAME && !types_compatible_p (type,TREE_TYPE (op1))) || (TREE_CODE (op2) == SSA_NAME && !types_compatible_p (type, TREE_TYPE (op2))) || (op3 && TREE_CODE (op3) == SSA_NAME && !types_compatible_p (type, TREE_TYPE (op3))) || (op4 && TREE_CODE (op4) == SSA_NAME && !types_compatible_p (type, TREE_TYPE (op4)))) { if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "reduction: multiple types: operation type: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, type); dump_printf (MSG_NOTE, ", operands types: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, TREE_TYPE (op1)); dump_printf (MSG_NOTE, ","); dump_generic_expr (MSG_NOTE, TDF_SLIM, TREE_TYPE (op2)); if 
          if (op3)
            {
              dump_printf (MSG_NOTE, ",");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, TREE_TYPE (op3));
            }

          if (op4)
            {
              dump_printf (MSG_NOTE, ",");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, TREE_TYPE (op4));
            }
          dump_printf (MSG_NOTE, "\n");
        }

      return NULL;
    }

  /* Check whether it's ok to change the order of the computation.
     Generally, when vectorizing a reduction we change the order of the
     computation.  This may change the behavior of the program in some
     cases, so we need to check that this is ok.  One exception is when
     vectorizing an outer-loop: the inner-loop is executed sequentially,
     and therefore vectorizing reductions in the inner-loop during
     outer-loop vectorization is safe.  */
  if (check_reduction
      && *v_reduc_type == TREE_CODE_REDUCTION
      && needs_fold_left_reduction_p (type, code,
                                      need_wrapping_integral_overflow))
    *v_reduc_type = FOLD_LEFT_REDUCTION;
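
  /* Illustrative note (added commentary, not from the original source):
     a floating-point add reduction compiled without reassociation
     (no -fassociative-math / -ffast-math) typically lands in this
     FOLD_LEFT_REDUCTION path, because reordering FP additions can change
     the rounded result; with reassociation enabled it can remain a
     TREE_CODE_REDUCTION and be reordered freely.  */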

  /* Reduction is safe.  We're dealing with one of the following:
     1) integer arithmetic and no trapv
     2) floating point arithmetic, and special flags permit this optimization
     3) nested cycle (i.e., outer loop vectorization).  */
  if (TREE_CODE (op1) == SSA_NAME)
    def1 = SSA_NAME_DEF_STMT (op1);

  if (TREE_CODE (op2) == SSA_NAME)
    def2 = SSA_NAME_DEF_STMT (op2);

  if (code != COND_EXPR
      && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
    {
      if (dump_enabled_p ())
        report_vect_op (MSG_NOTE, def_stmt,
                        "reduction: no defs for operands: ");
      return NULL;
    }

  /* Check that one def is the reduction def, defined by PHI,
     the other def is either defined in the loop ("vect_internal_def"),
     or it's an induction (defined by a loop-header phi-node).  */

  if (def2 && def2 == phi
      && (code == COND_EXPR
          || !def1 || gimple_nop_p (def1)
          || !flow_bb_inside_loop_p (loop, gimple_bb (def1))
          || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
              && (is_gimple_assign (def1)
                  || is_gimple_call (def1)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
                     == vect_induction_def
                  || (gimple_code (def1) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
                         == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def1)))))))
    {
      if (dump_enabled_p ())
        report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
      return def_stmt;
    }

  if (def1 && def1 == phi
      && (code == COND_EXPR
          || !def2 || gimple_nop_p (def2)
          || !flow_bb_inside_loop_p (loop, gimple_bb (def2))
          || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
              && (is_gimple_assign (def2)
                  || is_gimple_call (def2)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                     == vect_induction_def
                  || (gimple_code (def2) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                         == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def2)))))))
    {
      if (! nested_in_vect_loop && orig_code != MINUS_EXPR)
        {
          /* Check if we can swap operands (just for simplicity - so that
             the rest of the code can assume that the reduction variable
             is always the last (second) argument).  */
          if (code == COND_EXPR)
            {
              /* Swap cond_expr by inverting the condition.  */
              tree cond_expr = gimple_assign_rhs1 (def_stmt);
              enum tree_code invert_code = ERROR_MARK;
              enum tree_code cond_code = TREE_CODE (cond_expr);

              if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
                {
                  bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0));
                  invert_code = invert_tree_comparison (cond_code, honor_nans);
                }
              if (invert_code != ERROR_MARK)
                {
                  TREE_SET_CODE (cond_expr, invert_code);
                  swap_ssa_operands (def_stmt,
                                     gimple_assign_rhs2_ptr (def_stmt),
                                     gimple_assign_rhs3_ptr (def_stmt));
                }
              else
                {
                  if (dump_enabled_p ())
                    report_vect_op (MSG_NOTE, def_stmt,
                                    "detected reduction: cannot swap operands "
                                    "for cond_expr");
                  return NULL;
                }
            }
          else
            swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
                               gimple_assign_rhs2_ptr (def_stmt));

          if (dump_enabled_p ())
            report_vect_op (MSG_NOTE, def_stmt,
                            "detected reduction: need to swap operands: ");

          if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
            LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
        }
      else
        {
          if (dump_enabled_p ())
            report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
        }

      return def_stmt;
    }

  /* Try to find SLP reduction chain.  */
  if (! nested_in_vect_loop
      && code != COND_EXPR
      && orig_code != MINUS_EXPR
      && vect_is_slp_reduction (loop_info, phi, def_stmt))
    {
      if (dump_enabled_p ())
        report_vect_op (MSG_NOTE, def_stmt,
                        "reduction: detected reduction chain: ");

      return def_stmt;
    }

  /* Dissolve group eventually half-built by vect_is_slp_reduction.  */
  gimple *first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (def_stmt));
  while (first)
    {
      gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
      GROUP_FIRST_ELEMENT (vinfo_for_stmt (first)) = NULL;
      GROUP_NEXT_ELEMENT (vinfo_for_stmt (first)) = NULL;
      first = next;
    }

  /* Look for the expression computing loop_arg from loop PHI result.  */
  if (check_reduction_path (vect_location, loop, as_a <gphi *> (phi),
                            loop_arg, code))
    return def_stmt;

  if (dump_enabled_p ())
    {
      report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                      "reduction: unknown pattern: ");
    }

  return NULL;
}

/* Wrapper around vect_is_simple_reduction, which will modify code
   in-place if it enables detection of more reductions.  Arguments
   as there.  */

gimple *
vect_force_simple_reduction (loop_vec_info loop_info, gimple *phi,
                             bool *double_reduc,
                             bool need_wrapping_integral_overflow)
{
  enum vect_reduction_type v_reduc_type;
  gimple *def = vect_is_simple_reduction (loop_info, phi, double_reduc,
                                          need_wrapping_integral_overflow,
                                          &v_reduc_type);
  if (def)
    {
      stmt_vec_info reduc_def_info = vinfo_for_stmt (phi);
      STMT_VINFO_REDUC_TYPE (reduc_def_info) = v_reduc_type;
      STMT_VINFO_REDUC_DEF (reduc_def_info) = def;
      reduc_def_info = vinfo_for_stmt (def);
      STMT_VINFO_REDUC_TYPE (reduc_def_info) = v_reduc_type;
      STMT_VINFO_REDUC_DEF (reduc_def_info) = phi;
    }
  return def;
}
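
/* Illustrative example (added commentary, not from the original source):
   a minimal loop whose PHI cycle the detection above classifies as a
   simple reduction, assuming SUM is only used after the loop:

     int sum = 0;                  // a0
     for (int i = 0; i < n; i++)
       sum += a[i];                // a1 = phi <a0, a2>;  a2 = a1 + a[i]

   vect_force_simple_reduction records the PHI/def pair and the detected
   vect_reduction_type on both statements' stmt_vec_infos.  */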

/* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times.  */
int
vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
                             int *peel_iters_epilogue,
                             stmt_vector_for_cost *scalar_cost_vec,
                             stmt_vector_for_cost *prologue_cost_vec,
                             stmt_vector_for_cost *epilogue_cost_vec)
{
  int retval = 0;
  int assumed_vf = vect_vf_for_cost (loop_vinfo);

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      *peel_iters_epilogue = assumed_vf / 2;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "cost model: epilogue peel iters set to vf/2 "
                         "because loop iterations are unknown .\n");

      /* If peeled iterations are known but number of scalar loop
         iterations are unknown, count a taken branch per peeled loop.  */
      retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
                                 NULL, 0, vect_prologue);
      retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
                                 NULL, 0, vect_epilogue);
    }
  else
    {
      int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
      peel_iters_prologue = niters < peel_iters_prologue
                            ? niters : peel_iters_prologue;
      *peel_iters_epilogue = (niters - peel_iters_prologue) % assumed_vf;
      /* If we need to peel for gaps, but no peeling is required, we have to
         peel VF iterations.  */
      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
        *peel_iters_epilogue = assumed_vf;
    }

  stmt_info_for_cost *si;
  int j;
  if (peel_iters_prologue)
    FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
      {
        stmt_vec_info stmt_info
          = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
        retval += record_stmt_cost (prologue_cost_vec,
                                    si->count * peel_iters_prologue,
                                    si->kind, stmt_info, si->misalign,
                                    vect_prologue);
      }
  if (*peel_iters_epilogue)
    FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
      {
        stmt_vec_info stmt_info
          = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
        retval += record_stmt_cost (epilogue_cost_vec,
                                    si->count * *peel_iters_epilogue,
                                    si->kind, stmt_info, si->misalign,
                                    vect_epilogue);
      }

  return retval;
}
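
/* Worked example (added commentary, not from the original source): with
   known NITERS = 100, assumed VF = 8 and PEEL_ITERS_PROLOGUE = 3, the
   code above computes *PEEL_ITERS_EPILOGUE = (100 - 3) % 8 = 1; each
   scalar statement cost is then charged 3 times to the prologue cost
   vector and once to the epilogue cost vector.  */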

/* Function vect_estimate_min_profitable_iters

   Return the number of iterations required for the vector version of the
   loop to be profitable relative to the cost of the scalar version of the
   loop.

   *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
   of iterations for vectorization.  -1 value means loop vectorization
   is not profitable.  This returned value may be used for dynamic
   profitability check.

   *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
   for static check against estimated number of iterations.  */

static void
vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
                                    int *ret_min_profitable_niters,
                                    int *ret_min_profitable_estimate)
{
  int min_profitable_iters;
  int min_profitable_estimate;
  int peel_iters_prologue;
  int peel_iters_epilogue;
  unsigned vec_inside_cost = 0;
  int vec_outside_cost = 0;
  unsigned vec_prologue_cost = 0;
  unsigned vec_epilogue_cost = 0;
  int scalar_single_iter_cost = 0;
  int scalar_outside_cost = 0;
  int assumed_vf = vect_vf_for_cost (loop_vinfo);
  int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
  void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);

  /* Cost model disabled.  */
  if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
    {
      dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
      *ret_min_profitable_niters = 0;
      *ret_min_profitable_estimate = 0;
      return;
    }

  /* Requires loop versioning tests to handle misalignment.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
      (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
                            vect_prologue);
      dump_printf (MSG_NOTE,
                   "cost model: Adding cost of checks for loop "
                   "versioning to treat misalignment.\n");
    }

  /* Requires loop versioning with alias checks.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
      (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
                            vect_prologue);
      len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length ();
      if (len)
        /* Count LEN - 1 ANDs and LEN comparisons.  */
        (void) add_stmt_cost (target_cost_data, len * 2 - 1, scalar_stmt,
                              NULL, 0, vect_prologue);
      len = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).length ();
      if (len)
        {
          /* Count LEN - 1 ANDs and LEN comparisons.  */
          unsigned int nstmts = len * 2 - 1;
          /* +1 for each bias that needs adding.  */
          for (unsigned int i = 0; i < len; ++i)
            if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo)[i].unsigned_p)
              nstmts += 1;
          (void) add_stmt_cost (target_cost_data, nstmts, scalar_stmt,
                                NULL, 0, vect_prologue);
        }
      dump_printf (MSG_NOTE,
                   "cost model: Adding cost of checks for loop "
                   "versioning aliasing.\n");
    }

  /* Requires loop versioning with niter checks.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0,
                            vect_prologue);
      dump_printf (MSG_NOTE,
                   "cost model: Adding cost of checks for loop "
                   "versioning niters.\n");
    }

  if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
    (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
                          vect_prologue);

  /* Count statements in scalar loop.  Using this as scalar cost for a single
     iteration for now.

     TODO: Add outer loop support.

     TODO: Consider assigning different costs to different scalar
     statements.  */

  scalar_single_iter_cost
    = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo);

  /* Add additional cost for the peeled instructions in prologue and epilogue
     loop.  (For fully-masked loops there will be no peeling.)

     FORNOW: If we don't know the value of peel_iters for prologue or epilogue
     at compile-time - we assume it's vf/2 (the worst would be vf-1).

     TODO: Build an expression that represents peel_iters for prologue and
     epilogue to be used in a run-time test.  */

  if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      peel_iters_prologue = 0;
      peel_iters_epilogue = 0;

      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
        {
          /* We need to peel exactly one iteration.  */
          peel_iters_epilogue += 1;
          stmt_info_for_cost *si;
          int j;
          FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
                            j, si)
            {
              struct _stmt_vec_info *stmt_info
                = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
              (void) add_stmt_cost (target_cost_data, si->count,
                                    si->kind, stmt_info, si->misalign,
                                    vect_epilogue);
            }
        }
    }
  else if (npeel < 0)
    {
      peel_iters_prologue = assumed_vf / 2;
      dump_printf (MSG_NOTE, "cost model: "
                   "prologue peel iters set to vf/2.\n");

      /* If peeling for alignment is unknown, loop bound of main loop becomes
         unknown.  */
      peel_iters_epilogue = assumed_vf / 2;
      dump_printf (MSG_NOTE, "cost model: "
                   "epilogue peel iters set to vf/2 because "
                   "peeling for alignment is unknown.\n");

      /* If peeled iterations are unknown, count a taken branch and a not taken
         branch per peeled loop.  Even if scalar loop iterations are known,
         vector iterations are not known since peeled prologue iterations are
         not known.  Hence guards remain the same.  */
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
                            NULL, 0, vect_prologue);
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
                            NULL, 0, vect_prologue);
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
                            NULL, 0, vect_epilogue);
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
                            NULL, 0, vect_epilogue);
      stmt_info_for_cost *si;
      int j;
      FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
        {
          struct _stmt_vec_info *stmt_info
            = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
          (void) add_stmt_cost (target_cost_data,
                                si->count * peel_iters_prologue,
                                si->kind, stmt_info, si->misalign,
                                vect_prologue);
          (void) add_stmt_cost (target_cost_data,
                                si->count * peel_iters_epilogue,
                                si->kind, stmt_info, si->misalign,
                                vect_epilogue);
        }
    }
  else
    {
      stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
      stmt_info_for_cost *si;
      int j;
      void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);

      prologue_cost_vec.create (2);
      epilogue_cost_vec.create (2);
      peel_iters_prologue = npeel;

      (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
                                          &peel_iters_epilogue,
                                          &LOOP_VINFO_SCALAR_ITERATION_COST
                                            (loop_vinfo),
                                          &prologue_cost_vec,
                                          &epilogue_cost_vec);

      FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
        {
          struct _stmt_vec_info *stmt_info
            = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
          (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                                si->misalign, vect_prologue);
        }

      FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
        {
          struct _stmt_vec_info *stmt_info
            = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
          (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                                si->misalign, vect_epilogue);
        }

      prologue_cost_vec.release ();
      epilogue_cost_vec.release ();
    }

  /* FORNOW: The scalar outside cost is incremented in one of the
     following ways:

     1. The vectorizer checks for alignment and aliasing and generates
     a condition that allows dynamic vectorization.  A cost model
     check is ANDED with the versioning condition.  Hence scalar code
     path now has the added cost of the versioning check.

       if (cost > th & versioning_check)
         jmp to vector code

     Hence run-time scalar is incremented by not-taken branch cost.

     2. The vectorizer then checks if a prologue is required.  If the
     cost model check was not done before during versioning, it has to
     be done before the prologue check.

       if (cost <= th)
         prologue = scalar_iters
       if (prologue == 0)
         jmp to vector code
       else
         execute prologue
       if (prologue == num_iters)
         go to exit

     Hence the run-time scalar cost is incremented by a taken branch,
     plus a not-taken branch, plus a taken branch cost.

     3. The vectorizer then checks if an epilogue is required.  If the
     cost model check was not done before during prologue check, it
     has to be done with the epilogue check.

       if (prologue == 0)
         jmp to vector code
       else
         execute prologue
       if (prologue == num_iters)
         go to exit
       vector code:
         if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
           jmp to epilogue

     Hence the run-time scalar cost should be incremented by 2 taken
     branches.

     TODO: The back end may reorder the BBS's differently and reverse
     conditions/branch directions.  Change the estimates below to
     something more reasonable.  */

  /* If the number of iterations is known and we do not do versioning, we can
     decide whether to vectorize at compile time.  Hence the scalar version
     does not carry cost model guard costs.  */
  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING (loop_vinfo))
    {
      /* Cost model check occurs at versioning.  */
      if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
        scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
      else
        {
          /* Cost model check occurs at prologue generation.  */
          if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
            scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
              + vect_get_stmt_cost (cond_branch_not_taken);
          /* Cost model check occurs at epilogue generation.  */
          else
            scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
        }
    }

  /* Complete the target-specific cost calculations.  */
  finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
               &vec_inside_cost, &vec_epilogue_cost);

  vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
      dump_printf (MSG_NOTE, "  Vector inside of loop cost: %d\n",
                   vec_inside_cost);
      dump_printf (MSG_NOTE, "  Vector prologue cost: %d\n",
                   vec_prologue_cost);
      dump_printf (MSG_NOTE, "  Vector epilogue cost: %d\n",
                   vec_epilogue_cost);
      dump_printf (MSG_NOTE, "  Scalar iteration cost: %d\n",
                   scalar_single_iter_cost);
      dump_printf (MSG_NOTE, "  Scalar outside cost: %d\n",
                   scalar_outside_cost);
      dump_printf (MSG_NOTE, "  Vector outside cost: %d\n",
                   vec_outside_cost);
      dump_printf (MSG_NOTE, "  prologue iterations: %d\n",
                   peel_iters_prologue);
      dump_printf (MSG_NOTE, "  epilogue iterations: %d\n",
                   peel_iters_epilogue);
    }

  /* Calculate number of iterations required to make the vector version
     profitable, relative to the loop bodies only.  The following condition
     must hold true:
     SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
     where
     SIC = scalar iteration cost, VIC = vector iteration cost,
     VOC = vector outside cost, VF = vectorization factor,
     PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
     SOC = scalar outside cost for run time cost model check.  */

  if ((scalar_single_iter_cost * assumed_vf) > (int) vec_inside_cost)
    {
      min_profitable_iters = ((vec_outside_cost - scalar_outside_cost)
                              * assumed_vf
                              - vec_inside_cost * peel_iters_prologue
                              - vec_inside_cost * peel_iters_epilogue);
      if (min_profitable_iters <= 0)
        min_profitable_iters = 0;
      else
        {
          min_profitable_iters /= ((scalar_single_iter_cost * assumed_vf)
                                   - vec_inside_cost);

          if ((scalar_single_iter_cost * assumed_vf * min_profitable_iters)
              <= (((int) vec_inside_cost * min_profitable_iters)
                  + (((int) vec_outside_cost - scalar_outside_cost)
                     * assumed_vf)))
            min_profitable_iters++;
        }
    }
  /* vector version will never be profitable.  */
  else
    {
      if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
        warning_at (vect_location, OPT_Wopenmp_simd, "vectorization "
                    "did not happen for a simd loop");

      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "cost model: the vector iteration cost = %d "
                         "divided by the scalar iteration cost = %d "
                         "is greater or equal to the vectorization factor = %d"
                         ".\n",
                         vec_inside_cost, scalar_single_iter_cost, assumed_vf);
      *ret_min_profitable_niters = -1;
      *ret_min_profitable_estimate = -1;
      return;
    }

  dump_printf (MSG_NOTE,
               "  Calculated minimum iters for profitability: %d\n",
               min_profitable_iters);

  if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
      && min_profitable_iters < (assumed_vf + peel_iters_prologue))
    /* We want the vectorized loop to execute at least once.  */
    min_profitable_iters = assumed_vf + peel_iters_prologue;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "  Runtime profitability threshold = %d\n",
                     min_profitable_iters);

  *ret_min_profitable_niters = min_profitable_iters;
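
  /* Worked example (added commentary, not from the original source):
     with SIC = 4, VIC = 8, VF = 4, VOC = 20, SOC = 0 and no peeling,
     the computation above yields 20 * 4 / (4 * 4 - 8) = 10, and since
     4 * 4 * 10 <= 8 * 10 + 20 * 4 the threshold is bumped to 11 -- the
     smallest niters for which 4 * niters > 8 * (niters / 4) + 20.  */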

  /* Calculate number of iterations required to make the vector version
     profitable, relative to the loop bodies only.

     Non-vectorized variant is SIC * niters and it must win over vector
     variant on the expected loop trip count.  The following condition
     must hold true:
     SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC  */

  if (vec_outside_cost <= 0)
    min_profitable_estimate = 0;
  else
    {
      min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost)
                                 * assumed_vf
                                 - vec_inside_cost * peel_iters_prologue
                                 - vec_inside_cost * peel_iters_epilogue)
                                / ((scalar_single_iter_cost * assumed_vf)
                                   - vec_inside_cost);
    }
  min_profitable_estimate = MAX (min_profitable_estimate,
                                 min_profitable_iters);
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "  Static estimate profitability threshold = %d\n",
                     min_profitable_estimate);

  *ret_min_profitable_estimate = min_profitable_estimate;
}

/* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
   vector elements (not bits) for a vector with NELT elements.  */
static void
calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt,
                              vec_perm_builder *sel)
{
  /* The encoding is a single stepped pattern.  Any wrap-around is handled
     by vec_perm_indices.  */
  sel->new_vector (nelt, 1, 3);
  for (unsigned int i = 0; i < 3; i++)
    sel->quick_push (i + offset);
}

/* Checks whether the target supports whole-vector shifts for vectors of mode
   MODE.  This is the case if _either_ the platform handles vec_shr_optab,
   _or_ it supports vec_perm_const with masks for all necessary shift
   amounts.  */
static bool
have_whole_vector_shift (machine_mode mode)
{
  if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
    return true;

  /* Variable-length vectors should be handled via the optab.  */
  unsigned int nelt;
  if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
    return false;

  vec_perm_builder sel;
  vec_perm_indices indices;
  for (unsigned int i = nelt / 2; i >= 1; i /= 2)
    {
      calc_vec_perm_mask_for_shift (i, nelt, &sel);
      indices.new_vector (sel, 2, nelt);
      if (!can_vec_perm_const_p (mode, indices, false))
        return false;
    }
  return true;
}
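
/* Illustrative note (added commentary, not from the original source): for
   NELT = 4 and OFFSET = 2 the mask above encodes the series {2, 3, 4, 5},
   i.e. result element I takes input element I + 2, with indexes >= 4
   selecting from the second permute operand.  The loop in
   have_whole_vector_shift probes offsets 2 and 1 -- the log2(NELT)
   sequence of halving shifts a reduction epilogue uses.  */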

/* TODO: Close dependency between vect_model_*_cost and vectorizable_*
   functions.  Design better to avoid maintenance issues.  */

/* Function vect_model_reduction_cost.

   Models cost for a reduction operation, including the vector ops
   generated within the strip-mine loop, the initial definition before
   the loop, and the epilogue code that must be generated.  */

static void
vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
                           int ncopies)
{
  int prologue_cost = 0, epilogue_cost = 0, inside_cost;
  enum tree_code code;
  optab optab;
  tree vectype;
  gimple *orig_stmt;
  machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  void *target_cost_data;

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
    }
  else
    target_cost_data
      = BB_VINFO_TARGET_COST_DATA (STMT_VINFO_BB_VINFO (stmt_info));

  /* Condition reductions generate two reductions in the loop.  */
  vect_reduction_type reduction_type
    = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
  if (reduction_type == COND_REDUCTION)
    ncopies *= 2;

  vectype = STMT_VINFO_VECTYPE (stmt_info);
  mode = TYPE_MODE (vectype);
  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

  if (!orig_stmt)
    orig_stmt = STMT_VINFO_STMT (stmt_info);

  code = gimple_assign_rhs_code (orig_stmt);

  if (reduction_type == EXTRACT_LAST_REDUCTION
      || reduction_type == FOLD_LEFT_REDUCTION)
    {
      /* No extra instructions needed in the prologue.  */
      prologue_cost = 0;

      if (reduction_type == EXTRACT_LAST_REDUCTION || reduc_fn != IFN_LAST)
        /* Count one reduction-like operation per vector.  */
        inside_cost = add_stmt_cost (target_cost_data, ncopies, vec_to_scalar,
                                     stmt_info, 0, vect_body);
      else
        {
          /* Use NELEMENTS extracts and NELEMENTS scalar ops.  */
          unsigned int nelements = ncopies * vect_nunits_for_cost (vectype);
          inside_cost = add_stmt_cost (target_cost_data, nelements,
                                       vec_to_scalar, stmt_info, 0,
                                       vect_body);
          inside_cost += add_stmt_cost (target_cost_data, nelements,
                                        scalar_stmt, stmt_info, 0,
                                        vect_body);
        }
    }
  else
    {
      /* Add in cost for initial definition.
         For cond reduction we have four vectors: initial index, step,
         initial result of the data reduction, initial value of the index
         reduction.  */
      int prologue_stmts = reduction_type == COND_REDUCTION ? 4 : 1;
      prologue_cost += add_stmt_cost (target_cost_data, prologue_stmts,
                                      scalar_to_vec, stmt_info, 0,
                                      vect_prologue);

      /* Cost of reduction op inside loop.  */
      inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
                                   stmt_info, 0, vect_body);
    }

  /* Determine cost of epilogue code.

     We have a reduction operator that will reduce the vector in one statement.
     Also requires scalar extract.  */

  if (!loop || !nested_in_vect_loop_p (loop, orig_stmt))
    {
      if (reduc_fn != IFN_LAST)
        {
          if (reduction_type == COND_REDUCTION)
            {
              /* An EQ stmt and a COND_EXPR stmt.  */
              epilogue_cost += add_stmt_cost (target_cost_data, 2,
                                              vector_stmt, stmt_info, 0,
                                              vect_epilogue);
              /* Reduction of the max index and a reduction of the found
                 values.  */
              epilogue_cost += add_stmt_cost (target_cost_data, 2,
                                              vec_to_scalar, stmt_info, 0,
                                              vect_epilogue);
              /* A broadcast of the max value.  */
              epilogue_cost += add_stmt_cost (target_cost_data, 1,
                                              scalar_to_vec, stmt_info, 0,
                                              vect_epilogue);
            }
          else
            {
              epilogue_cost += add_stmt_cost (target_cost_data, 1,
                                              vector_stmt, stmt_info, 0,
                                              vect_epilogue);
              epilogue_cost += add_stmt_cost (target_cost_data, 1,
                                              vec_to_scalar, stmt_info, 0,
                                              vect_epilogue);
            }
        }
      else if (reduction_type == COND_REDUCTION)
        {
          unsigned estimated_nunits = vect_nunits_for_cost (vectype);
          /* Extraction of scalar elements.  */
          epilogue_cost += add_stmt_cost (target_cost_data,
                                          2 * estimated_nunits,
                                          vec_to_scalar, stmt_info, 0,
                                          vect_epilogue);
          /* Scalar max reductions via COND_EXPR / MAX_EXPR.  */
          epilogue_cost += add_stmt_cost (target_cost_data,
                                          2 * estimated_nunits - 3,
                                          scalar_stmt, stmt_info, 0,
                                          vect_epilogue);
        }
      else if (reduction_type == EXTRACT_LAST_REDUCTION
               || reduction_type == FOLD_LEFT_REDUCTION)
        /* No extra instructions needed in the epilogue.  */
        ;
      else
        {
          int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
          tree bitsize
            = TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
          int element_bitsize = tree_to_uhwi (bitsize);
          int nelements = vec_size_in_bits / element_bitsize;

          if (code == COND_EXPR)
            code = MAX_EXPR;

          optab = optab_for_tree_code (code, vectype, optab_default);

          /* We have a whole vector shift available.  */
          if (optab != unknown_optab
              && VECTOR_MODE_P (mode)
              && optab_handler (optab, mode) != CODE_FOR_nothing
              && have_whole_vector_shift (mode))
            {
              /* Final reduction via vector shifts and the reduction operator.
                 Also requires scalar extract.  */
              epilogue_cost += add_stmt_cost (target_cost_data,
                                              exact_log2 (nelements) * 2,
                                              vector_stmt, stmt_info, 0,
                                              vect_epilogue);
              epilogue_cost += add_stmt_cost (target_cost_data, 1,
                                              vec_to_scalar, stmt_info, 0,
                                              vect_epilogue);
            }
          else
            /* Use extracts and reduction op for final reduction.  For N
               elements, we have N extracts and N-1 reduction ops.  */
            epilogue_cost += add_stmt_cost (target_cost_data,
                                            nelements + nelements - 1,
                                            vector_stmt, stmt_info, 0,
                                            vect_epilogue);
        }
    }

  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
                 "vect_model_reduction_cost: inside_cost = %d, "
                 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
                 prologue_cost, epilogue_cost);
}

/* Function vect_model_induction_cost.

   Models cost for induction operations.  */

static void
vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
{
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  unsigned inside_cost, prologue_cost;

  if (PURE_SLP_STMT (stmt_info))
    return;

  /* loop cost for vec_loop.  */
  inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
                               stmt_info, 0, vect_body);

  /* prologue cost for vec_init and vec_step.  */
  prologue_cost = add_stmt_cost (target_cost_data, 2, scalar_to_vec,
                                 stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_induction_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Function get_initial_def_for_reduction

   Input:
   STMT - a stmt that performs a reduction operation in the loop.
   INIT_VAL - the initial value of the reduction variable

   Output:
   ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
        of the reduction (used for adjusting the epilog - see below).
   Return a vector variable, initialized according to the operation that STMT
        performs.  This vector will be used as the initial value of the
        vector of partial results.

   Option1 (adjust in epilog): Initialize the vector as follows:
     add/bit or/xor:    [0,0,...,0,0]
     mult/bit and:      [1,1,...,1,1]
     min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
   and when necessary (e.g. add/mult case) let the caller know
   that it needs to adjust the result by init_val.

   Option2: Initialize the vector as follows:
     add/bit or/xor:    [init_val,0,0,...,0]
     mult/bit and:      [init_val,1,1,...,1]
     min/max/cond_expr: [init_val,init_val,...,init_val]
   and no adjustments are needed.

   For example, for the following code:

   s = init_val;
   for (i=0;i<n;i++)
     s = s + a[i];

   STMT is 's = s + a[i]', and the reduction variable is 's'.
   For a vector of 4 units, we want to return either [0,0,0,init_val],
   or [0,0,0,0] and let the caller know that it needs to adjust
   the result at the end by 'init_val'.

   FORNOW, we are using the 'adjust in epilog' scheme, because this way the
   initialization vector is simpler (same element in all entries), if
   ADJUSTMENT_DEF is not NULL, and Option2 otherwise.

   A cost model should help decide between these two schemes.  */
tree
get_initial_def_for_reduction (gimple *stmt, tree init_val,
                               tree *adjustment_def)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree scalar_type = TREE_TYPE (init_val);
  tree vectype = get_vectype_for_scalar_type (scalar_type);
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree def_for_init;
  tree init_def;
  bool nested_in_vect_loop = false;
  REAL_VALUE_TYPE real_init_val = dconst0;
  int int_init_val = 0;
  gimple *def_stmt = NULL;
  gimple_seq stmts = NULL;

  gcc_assert (vectype);

  gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
              || SCALAR_FLOAT_TYPE_P (scalar_type));

  if (nested_in_vect_loop_p (loop, stmt))
    nested_in_vect_loop = true;
  else
    gcc_assert (loop == (gimple_bb (stmt))->loop_father);

  /* In case of double reduction we only create a vector variable to be put
     in the reduction phi node.  The actual statement creation is done in
     vect_create_epilog_for_reduction.  */
  if (adjustment_def && nested_in_vect_loop
      && TREE_CODE (init_val) == SSA_NAME
      && (def_stmt = SSA_NAME_DEF_STMT (init_val))
      && gimple_code (def_stmt) == GIMPLE_PHI
      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
      && vinfo_for_stmt (def_stmt)
      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
         == vect_double_reduction_def)
    {
      *adjustment_def = NULL;
      return vect_create_destination_var (init_val, vectype);
    }

  vect_reduction_type reduction_type
    = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo);

  /* In case of a nested reduction do not use an adjustment def as
     that case is not supported by the epilogue generation correctly
     if ncopies is not one.  */
  if (adjustment_def && nested_in_vect_loop)
    {
      *adjustment_def = NULL;
      return vect_get_vec_def_for_operand (init_val, stmt);
    }

  switch (code)
    {
    case WIDEN_SUM_EXPR:
    case DOT_PROD_EXPR:
    case SAD_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case MULT_EXPR:
    case BIT_AND_EXPR:
      {
        /* ADJUSTMENT_DEF is NULL when called from
           vect_create_epilog_for_reduction to vectorize double reduction.  */
        if (adjustment_def)
          *adjustment_def = init_val;

        if (code == MULT_EXPR)
          {
            real_init_val = dconst1;
            int_init_val = 1;
          }

        if (code == BIT_AND_EXPR)
          int_init_val = -1;

        if (SCALAR_FLOAT_TYPE_P (scalar_type))
          def_for_init = build_real (scalar_type, real_init_val);
        else
          def_for_init = build_int_cst (scalar_type, int_init_val);

        if (adjustment_def)
          /* Option1: the first element is '0' or '1' as well.  */
          init_def = gimple_build_vector_from_val (&stmts, vectype,
                                                   def_for_init);
        else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ())
          {
            /* Option2 (variable length): the first element is INIT_VAL.  */
            init_def = build_vector_from_val (vectype, def_for_init);
            gcall *call = gimple_build_call_internal (IFN_VEC_SHL_INSERT,
                                                      2, init_def, init_val);
            init_def = make_ssa_name (vectype);
            gimple_call_set_lhs (call, init_def);
            gimple_seq_add_stmt (&stmts, call);
          }
        else
          {
            /* Option2: the first element is INIT_VAL.  */
            tree_vector_builder elts (vectype, 1, 2);
            elts.quick_push (init_val);
            elts.quick_push (def_for_init);
            init_def = gimple_build_vector (&stmts, &elts);
          }
      }
      break;

    case MIN_EXPR:
    case MAX_EXPR:
    case COND_EXPR:
      {
        if (adjustment_def)
          {
            *adjustment_def = NULL_TREE;
            if (reduction_type != COND_REDUCTION
                && reduction_type != EXTRACT_LAST_REDUCTION)
              {
                init_def = vect_get_vec_def_for_operand (init_val, stmt);
                break;
              }
          }
        init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
        init_def = gimple_build_vector_from_val (&stmts, vectype, init_val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
  return init_def;
}

/* Get at the initial defs for the reduction PHIs in SLP_NODE.
   NUMBER_OF_VECTORS is the number of vector defs to create.
   If NEUTRAL_OP is nonnull, introducing extra elements of that
   value will not change the result.  */

static void
get_initial_defs_for_reduction (slp_tree slp_node,
                                vec<tree> *vec_oprnds,
                                unsigned int number_of_vectors,
                                bool reduc_chain, tree neutral_op)
{
  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  gimple *stmt = stmts[0];
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned HOST_WIDE_INT nunits;
  unsigned j, number_of_places_left_in_vector;
  tree vector_type;
  tree vop;
  int group_size = stmts.length ();
  unsigned int vec_num, i;
  unsigned number_of_copies = 1;
  vec<tree> voprnds;
  voprnds.create (number_of_vectors);
  struct loop *loop;
  auto_vec<tree, 16> permute_results;

  vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);

  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);

  loop = (gimple_bb (stmt))->loop_father;
  gcc_assert (loop);
  edge pe = loop_preheader_edge (loop);

  gcc_assert (!reduc_chain || neutral_op);

  /* NUMBER_OF_COPIES is the number of times we need to use the same values in
     created vectors.  It is greater than 1 if unrolling is performed.

     For example, we have two scalar operands, s1 and s2 (e.g., group of
     strided accesses of size two), while NUNITS is four (i.e., four scalars
     of this type can be packed in a vector).  The output vector will contain
     two copies of each scalar operand: {s1, s2, s1, s2}.  (NUMBER_OF_COPIES
     will be 2).

     If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
     containing the operands.

     For example, NUNITS is four as before, and the group size is 8
     (s1, s2, ..., s8).  We will create two vectors {s1, s2, s3, s4} and
     {s5, s6, s7, s8}.  */

  if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits))
    nunits = group_size;

  number_of_copies = nunits * number_of_vectors / group_size;

  number_of_places_left_in_vector = nunits;
  bool constant_p = true;
  tree_vector_builder elts (vector_type, nunits, 1);
  elts.quick_grow (nunits);
  for (j = 0; j < number_of_copies; j++)
    {
      for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
        {
          tree op;
          /* Get the def before the loop.  In reduction chain we have only
             one initial value.  */
          if ((j != (number_of_copies - 1)
               || (reduc_chain && i != 0))
              && neutral_op)
            op = neutral_op;
          else
            op = PHI_ARG_DEF_FROM_EDGE (stmt, pe);

          /* Create 'vect_ = {op0,op1,...,opn}'.  */
          number_of_places_left_in_vector--;
          elts[number_of_places_left_in_vector] = op;
          if (!CONSTANT_CLASS_P (op))
            constant_p = false;

          if (number_of_places_left_in_vector == 0)
            {
              gimple_seq ctor_seq = NULL;
              tree init;
              if (constant_p && !neutral_op
                  ? multiple_p (TYPE_VECTOR_SUBPARTS (vector_type), nunits)
                  : known_eq (TYPE_VECTOR_SUBPARTS (vector_type), nunits))
                /* Build the vector directly from ELTS.  */
                init = gimple_build_vector (&ctor_seq, &elts);
              else if (neutral_op)
                {
                  /* Build a vector of the neutral value and shift the
                     other elements into place.  */
                  init = gimple_build_vector_from_val (&ctor_seq, vector_type,
                                                       neutral_op);
                  int k = nunits;
                  while (k > 0 && elts[k - 1] == neutral_op)
                    k -= 1;
                  while (k > 0)
                    {
                      k -= 1;
                      gcall *call = gimple_build_call_internal
                        (IFN_VEC_SHL_INSERT, 2, init, elts[k]);
                      init = make_ssa_name (vector_type);
                      gimple_call_set_lhs (call, init);
                      gimple_seq_add_stmt (&ctor_seq, call);
                    }
                }
              else
                {
                  /* First time round, duplicate ELTS to fill the
                     required number of vectors, then cherry pick the
                     appropriate result for each iteration.  */
                  if (vec_oprnds->is_empty ())
                    duplicate_and_interleave (&ctor_seq, vector_type, elts,
                                              number_of_vectors,
                                              permute_results);
                  init = permute_results[number_of_vectors - j - 1];
                }
              if (ctor_seq != NULL)
                gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
              voprnds.quick_push (init);

              number_of_places_left_in_vector = nunits;
              elts.new_vector (vector_type, nunits, 1);
              elts.quick_grow (nunits);
              constant_p = true;
            }
        }
    }

  /* Since the vectors are created in the reverse order, we should invert
     them.  */
  vec_num = voprnds.length ();
  for (j = vec_num; j != 0; j--)
    {
      vop = voprnds[j - 1];
      vec_oprnds->quick_push (vop);
    }

  voprnds.release ();

  /* In case that VF is greater than the unrolling factor needed for the SLP
     group of stmts, NUMBER_OF_VECTORS to be created is greater than
     NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
     to replicate the vectors.  */
  tree neutral_vec = NULL;
  while (number_of_vectors > vec_oprnds->length ())
    {
      if (neutral_op)
        {
          if (!neutral_vec)
            {
              gimple_seq ctor_seq = NULL;
              neutral_vec = gimple_build_vector_from_val
                (&ctor_seq, vector_type, neutral_op);
              if (ctor_seq != NULL)
                gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
            }
          vec_oprnds->quick_push (neutral_vec);
        }
      else
        {
          for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
            vec_oprnds->quick_push (vop);
        }
    }
}

/* Function vect_create_epilog_for_reduction

   Create code at the loop-epilog to finalize the result of a reduction
   computation.

   VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
     reduction statements.
   STMT is the scalar reduction stmt that is being vectorized.
   NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
     number of elements that we can fit in a vectype (nunits).  In this case
     we have to generate more than one vector stmt - i.e - we need to "unroll"
     the vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.
   REDUC_FN is the internal function for the epilog reduction.
   REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
     computation.
   REDUC_INDEX is the index of the operand in the right hand side of the
     statement that is defined by REDUCTION_PHI.
   DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
   SLP_NODE is an SLP node containing a group of reduction statements.  The
     first one in this group is STMT.
   INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case
     when the COND_EXPR is never true in the loop.  For MAX_EXPR, it needs to
     be smaller than any value of the IV in the loop, for MIN_EXPR larger than
     any value of the IV in the loop.
   INDUC_CODE is the code for epilog reduction if INTEGER_INDUC_COND_REDUCTION.
   NEUTRAL_OP is the value given by neutral_op_for_slp_reduction; it is
     null if this is not an SLP reduction

   This function:
   1. Creates the reduction def-use cycles: sets the arguments for
      REDUCTION_PHIS:
      The loop-entry argument is the vectorized initial-value of the reduction.
      The loop-latch argument is taken from VECT_DEFS - the vector of partial
      sums.
   2. "Reduces" each vector of partial results VECT_DEFS into a single result,
      by calling the function specified by REDUC_FN if available, or by
      other means (whole-vector shifts or a scalar loop).
      The function also creates a new phi node at the loop exit to preserve
      loop-closed form, as illustrated below.

     The flow at the entry to this function:

        loop:
          vec_def = phi <null, null>            # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          s_loop = scalar_stmt                  # (scalar) STMT
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          use <s_out0>
          use <s_out0>

     The above is transformed by this function into:

        loop:
          vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          s_loop = scalar_stmt                  # (scalar) STMT
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out4>
          use <s_out4>
*/

static void
vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
                                  gimple *reduc_def_stmt,
                                  int ncopies, internal_fn reduc_fn,
                                  vec<gimple *> reduction_phis,
                                  bool double_reduc,
                                  slp_tree slp_node,
                                  slp_instance slp_node_instance,
                                  tree induc_val, enum tree_code induc_code,
                                  tree neutral_op)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_phi_info;
  tree vectype;
  machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
  basic_block exit_bb;
  tree scalar_dest;
  tree scalar_type;
  gimple *new_phi = NULL, *phi;
  gimple_stmt_iterator exit_gsi;
  tree vec_dest;
  tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
  gimple *epilog_stmt = NULL;
  enum tree_code code = gimple_assign_rhs_code (stmt);
  gimple *exit_phi;
  tree bitsize;
  tree adjustment_def = NULL;
  tree vec_initial_def = NULL;
  tree expr, def, initial_def = NULL;
  tree orig_name, scalar_result;
  imm_use_iterator imm_iter, phi_imm_iter;
  use_operand_p use_p, phi_use_p;
  gimple *use_stmt, *orig_stmt, *reduction_phi = NULL;
  bool nested_in_vect_loop = false;
  auto_vec<gimple *> new_phis;
  auto_vec<gimple *> inner_phis;
  enum vect_def_type dt = vect_unknown_def_type;
  int j, i;
  auto_vec<tree> scalar_results;
  unsigned int group_size = 1, k, ratio;
  auto_vec<tree> vec_initial_defs;
  auto_vec<gimple *> phis;
  bool slp_reduc = false;
  bool direct_slp_reduc;
  tree new_phi_result;
  gimple *inner_phi = NULL;
  tree induction_index = NULL_TREE;

  if (slp_node)
    group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();

  if (nested_in_vect_loop_p (loop, stmt))
    {
      outer_loop = loop;
      loop = loop->inner;
      nested_in_vect_loop = true;
      gcc_assert (!slp_node);
    }

  vectype = STMT_VINFO_VECTYPE (stmt_info);
  gcc_assert (vectype);
  mode = TYPE_MODE (vectype);

  /* 1. Create the reduction def-use cycle:
     Set the arguments of REDUCTION_PHIS, i.e., transform

        loop:
          vec_def = phi <null, null>            # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          ...

     into:

        loop:
          vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          ...

     (in case of SLP, do it for all the phis).  */

  /* Get the loop-entry arguments.  */
  enum vect_def_type initial_def_dt = vect_unknown_def_type;
  if (slp_node)
    {
      unsigned vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
      vec_initial_defs.reserve (vec_num);
      get_initial_defs_for_reduction (slp_node_instance->reduc_phis,
                                      &vec_initial_defs, vec_num,
                                      GROUP_FIRST_ELEMENT (stmt_info),
                                      neutral_op);
    }
  else
    {
      /* Get at the scalar def before the loop, that defines the initial value
         of the reduction variable.  */
      gimple *def_stmt;
      initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
                                           loop_preheader_edge (loop));
      /* Optimize: if initial_def is for REDUC_MAX smaller than the base
         and we can't use zero for induc_val, use initial_def.  Similarly
         for REDUC_MIN and initial_def larger than the base.  */
      if (TREE_CODE (initial_def) == INTEGER_CST
          && (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
              == INTEGER_INDUC_COND_REDUCTION)
          && !integer_zerop (induc_val)
          && ((induc_code == MAX_EXPR
               && tree_int_cst_lt (initial_def, induc_val))
              || (induc_code == MIN_EXPR
                  && tree_int_cst_lt (induc_val, initial_def))))
        induc_val = initial_def;
      vect_is_simple_use (initial_def, loop_vinfo, &def_stmt,
                          &initial_def_dt);
      vec_initial_def = get_initial_def_for_reduction (stmt, initial_def,
                                                       &adjustment_def);
      vec_initial_defs.create (1);
      vec_initial_defs.quick_push (vec_initial_def);
    }

  /* Set phi nodes arguments.  */
  FOR_EACH_VEC_ELT (reduction_phis, i, phi)
    {
      tree vec_init_def = vec_initial_defs[i];
      tree def = vect_defs[i];
      for (j = 0; j < ncopies; j++)
        {
          if (j != 0)
            {
              phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
              if (nested_in_vect_loop)
                vec_init_def
                  = vect_get_vec_def_for_stmt_copy (initial_def_dt,
                                                    vec_init_def);
            }

          /* Set the loop-entry arg of the reduction-phi.  */

          if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
              == INTEGER_INDUC_COND_REDUCTION)
            {
              /* Initialise the reduction phi to zero.  This prevents
                 non-zero initial values from interfering with the
                 reduction op.  */
              gcc_assert (ncopies == 1);
              gcc_assert (i == 0);

              tree vec_init_def_type = TREE_TYPE (vec_init_def);
              tree induc_val_vec
                = build_vector_from_val (vec_init_def_type, induc_val);

              add_phi_arg (as_a <gphi *> (phi), induc_val_vec,
                           loop_preheader_edge (loop), UNKNOWN_LOCATION);
            }
          else
            add_phi_arg (as_a <gphi *> (phi), vec_init_def,
                         loop_preheader_edge (loop), UNKNOWN_LOCATION);

          /* Set the loop-latch arg for the reduction-phi.  */
          if (j > 0)
            def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);

          add_phi_arg (as_a <gphi *> (phi), def, loop_latch_edge (loop),
                       UNKNOWN_LOCATION);

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "transform reduction: created def-use cycle: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
                                SSA_NAME_DEF_STMT (def), 0);
            }
        }
    }

  /* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
     which is updated with the current index of the loop for every match of
     the original loop's cond_expr (VEC_STMT).  This results in a vector
     containing the last time the condition passed for that vector lane.
     The first match will be a 1 to allow 0 to be used for non-matching
     indexes.  If there are no matches at all then the vector will be all
     zeroes.  */
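
  /* Illustrative trace (added commentary, not from the original source):
     with four lanes and a condition that matches only lane 2 during the
     first vector iteration, the index vector becomes {0, 0, 3, 0} -- the
     recorded values come from the {1,2,3,...} series built below, so 0 is
     free to mean "this lane never matched".  */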
  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
    {
      tree indx_before_incr, indx_after_incr;
      poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype);

      gimple *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);

      int scalar_precision
        = GET_MODE_PRECISION (SCALAR_TYPE_MODE (TREE_TYPE (vectype)));
      tree cr_index_scalar_type = make_unsigned_type (scalar_precision);
      tree cr_index_vector_type = build_vector_type
        (cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype));

      /* First we create a simple vector induction variable which starts
         with the values {1,2,3,...} (SERIES_VECT) and increments by the
         vector size (STEP).  */

      /* Create a {1,2,3,...} vector.  */
      tree series_vect = build_index_vector (cr_index_vector_type, 1, 1);

      /* Create a vector of the step value.  */
      tree step = build_int_cst (cr_index_scalar_type, nunits_out);
      tree vec_step = build_vector_from_val (cr_index_vector_type, step);

      /* Create an induction variable.  */
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      standard_iv_increment_position (loop, &incr_gsi, &insert_after);
      create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
                 insert_after, &indx_before_incr, &indx_after_incr);

      /* Next create a new phi node vector (NEW_PHI_TREE) which starts
         filled with zeros (VEC_ZERO).  */

      /* Create a vector of 0s.  */
      tree zero = build_zero_cst (cr_index_scalar_type);
      tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);

      /* Create a vector phi node.  */
      tree new_phi_tree = make_ssa_name (cr_index_vector_type);
      new_phi = create_phi_node (new_phi_tree, loop->header);
      set_vinfo_for_stmt (new_phi,
                          new_stmt_vec_info (new_phi, loop_vinfo));
      add_phi_arg (as_a <gphi *> (new_phi), vec_zero,
                   loop_preheader_edge (loop), UNKNOWN_LOCATION);

      /* Now take the condition from the loop's original cond_expr
         (VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for
         every match uses values from the induction variable
         (INDEX_BEFORE_INCR) otherwise uses values from the phi node
         (NEW_PHI_TREE).
         Finally, we update the phi (NEW_PHI_TREE) to take the value of
         the new cond_expr (INDEX_COND_EXPR).  */

      /* Duplicate the condition from vec_stmt.  */
      tree ccompare = unshare_expr (gimple_assign_rhs1 (vec_stmt));

      /* Create a conditional, where the condition is taken from vec_stmt
         (CCOMPARE), then is the induction index (INDEX_BEFORE_INCR) and else
         is the phi (NEW_PHI_TREE).  */
      tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type,
                                     ccompare, indx_before_incr,
                                     new_phi_tree);
      induction_index = make_ssa_name (cr_index_vector_type);
      gimple *index_condition = gimple_build_assign (induction_index,
                                                     index_cond_expr);
      gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT);
      stmt_vec_info index_vec_info = new_stmt_vec_info (index_condition,
                                                        loop_vinfo);
      STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;
      set_vinfo_for_stmt (index_condition, index_vec_info);

      /* Update the phi with the vec cond.  */
      add_phi_arg (as_a <gphi *> (new_phi), induction_index,
                   loop_latch_edge (loop), UNKNOWN_LOCATION);
    }

  /* 2. Create epilog code.
        The reduction epilog code operates across the elements of the vector
        of partial results computed by the vectorized loop.
        The reduction epilog code consists of:

        step 1: compute the scalar result in a vector (v_out2)
        step 2: extract the scalar result (s_out3) from the vector (v_out2)
        step 3: adjust the scalar result (s_out3) if needed.

        Step 1 can be accomplished using one of the following three schemes:
          (scheme 1) using reduc_fn, if available.
          (scheme 2) using whole-vector shifts, if available.
          (scheme 3) using a scalar loop.  In this case steps 1+2 above are
                     combined.

        The overall epilog code looks like this:

          s_out0 = phi <s_loop>                 # original EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>              # step 1
          s_out3 = extract_field <v_out2, 0>    # step 2
          s_out4 = adjust_result <s_out3>       # step 3

          (step 3 is optional, and steps 1 and 2 may be combined).
          Lastly, the uses of s_out0 are replaced by s_out4.  */


  /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
         v_out1 = phi <VECT_DEF>
         Store them in NEW_PHIS.  */

  exit_bb = single_exit (loop)->dest;
  prev_phi_info = NULL;
  new_phis.create (vect_defs.length ());
  FOR_EACH_VEC_ELT (vect_defs, i, def)
    {
      for (j = 0; j < ncopies; j++)
        {
          tree new_def = copy_ssa_name (def);
          phi = create_phi_node (new_def, exit_bb);
          set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo));
          if (j == 0)
            new_phis.quick_push (phi);
          else
            {
              def = vect_get_vec_def_for_stmt_copy (dt, def);
              STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
            }

          SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
          prev_phi_info = vinfo_for_stmt (phi);
        }
    }

  /* The epilogue is created for the outer-loop, i.e., for the loop being
     vectorized.  Create exit phis for the outer loop.  */
  if (double_reduc)
    {
      loop = outer_loop;
      exit_bb = single_exit (loop)->dest;
      inner_phis.create (vect_defs.length ());
      FOR_EACH_VEC_ELT (new_phis, i, phi)
        {
          tree new_result = copy_ssa_name (PHI_RESULT (phi));
          gphi *outer_phi = create_phi_node (new_result, exit_bb);
          SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
                           PHI_RESULT (phi));
          set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
                                                            loop_vinfo));
          inner_phis.quick_push (phi);
          new_phis[i] = outer_phi;
          prev_phi_info = vinfo_for_stmt (outer_phi);
          while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
            {
              phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
              new_result = copy_ssa_name (PHI_RESULT (phi));
              outer_phi = create_phi_node (new_result, exit_bb);
              SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
                               PHI_RESULT (phi));
              set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
                                                                loop_vinfo));
              STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi;
              prev_phi_info = vinfo_for_stmt (outer_phi);
            }
        }
    }

  exit_gsi = gsi_after_labels (exit_bb);

  /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
         (i.e. when reduc_fn is not available) and in the final adjustment
         code (if needed).  Also get the original scalar reduction variable as
         defined in the loop.  In case STMT is a "pattern-stmt" (i.e. - it
         represents a reduction pattern), the tree-code and scalar-def are
         taken from the original stmt that the pattern-stmt (STMT) replaces.
         Otherwise (it is a regular reduction) - the tree-code and scalar-def
         are taken from STMT.  */

  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!orig_stmt)
    {
      /* Regular reduction  */
      orig_stmt = stmt;
    }
  else
    {
      /* Reduction pattern  */
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
      gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
    }

  code = gimple_assign_rhs_code (orig_stmt);
  /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
     partial results are added and not subtracted.  */
  if (code == MINUS_EXPR)
    code = PLUS_EXPR;

  scalar_dest = gimple_assign_lhs (orig_stmt);
  scalar_type = TREE_TYPE (scalar_dest);
  scalar_results.create (group_size);
  new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
  bitsize = TYPE_SIZE (scalar_type);

  /* In case this is a reduction in an inner-loop while vectorizing an outer
     loop - we don't need to extract a single scalar result at the end of the
     inner-loop (unless it is double reduction, i.e., the use of reduction is
     outside the outer-loop).  The final vector of partial results will be
     used in the vectorized outer-loop, or reduced to a scalar result at the
     end of the outer-loop.  */
  if (nested_in_vect_loop && !double_reduc)
    goto vect_finalize_reduction;

  /* SLP reduction without reduction chain, e.g.,
     # a1 = phi <a2, a0>
     # b1 = phi <b2, b0>
     a2 = operation (a1)
     b2 = operation (b1)  */
  slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));

  /* True if we should implement SLP_REDUC using native reduction operations
     instead of scalar operations.  */
  direct_slp_reduc = (reduc_fn != IFN_LAST
                      && slp_reduc
                      && !TYPE_VECTOR_SUBPARTS (vectype).is_constant ());

  /* In case of reduction chain, e.g.,
     # a1 = phi <a3, a0>
     a2 = operation (a1)
     a3 = operation (a2),

     we may end up with more than one vector result.  Here we reduce them to
     one vector.  */
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) || direct_slp_reduc)
    {
      tree first_vect = PHI_RESULT (new_phis[0]);
      gassign *new_vec_stmt = NULL;
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      for (k = 1; k < new_phis.length (); k++)
        {
          gimple *next_phi = new_phis[k];
          tree second_vect = PHI_RESULT (next_phi);
          tree tem = make_ssa_name (vec_dest, new_vec_stmt);
          new_vec_stmt = gimple_build_assign (tem, code,
                                              first_vect, second_vect);
          gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
          first_vect = tem;
        }

      new_phi_result = first_vect;
      if (new_vec_stmt)
        {
          new_phis.truncate (0);
          new_phis.safe_push (new_vec_stmt);
        }
    }
  /* Likewise if we couldn't use a single def-use cycle.  */
  else if (ncopies > 1)
    {
      gcc_assert (new_phis.length () == 1);
      tree first_vect = PHI_RESULT (new_phis[0]);
      gassign *new_vec_stmt = NULL;
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      gimple *next_phi = new_phis[0];
      for (int k = 1; k < ncopies; ++k)
        {
          next_phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next_phi));
          tree second_vect = PHI_RESULT (next_phi);
          tree tem = make_ssa_name (vec_dest, new_vec_stmt);
          new_vec_stmt = gimple_build_assign (tem, code,
                                              first_vect, second_vect);
          gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
          first_vect = tem;
        }
      new_phi_result = first_vect;
      new_phis.truncate (0);
      new_phis.safe_push (new_vec_stmt);
    }
  else
    new_phi_result = PHI_RESULT (new_phis[0]);

  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
      && reduc_fn != IFN_LAST)
    {
      /* For condition reductions, we have a vector (NEW_PHI_RESULT)
         containing various data values where the condition matched and
         another vector (INDUCTION_INDEX) containing all the indexes of
         those matches.  We need to extract the last matching index (which
         will be the index with highest value) and use this to index into
         the data vector.
         For the case where there were no matches, the data vector will
         contain all default values and the index vector will be all
         zeros.  */

      /* Get various versions of the type of the vector of indexes.  */
      tree index_vec_type = TREE_TYPE (induction_index);
      gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
      tree index_scalar_type = TREE_TYPE (index_vec_type);
      tree index_vec_cmp_type = build_same_sized_truth_vector_type
        (index_vec_type);

      /* Get an unsigned integer version of the type of the data vector.  */
      int scalar_precision
        = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
      tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
      tree vectype_unsigned = build_vector_type
        (scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));

      /* First we need to create a vector (ZERO_VEC) of zeros and another
         vector (MAX_INDEX_VEC) filled with the last matching index, which we
         can create using a MAX reduction and then expanding.
         In the case where the loop never made any matches, the max index will
         be zero.  */

      /* Vector of {0, 0, 0,...}.  */
      tree zero_vec = make_ssa_name (vectype);
      tree zero_vec_rhs = build_zero_cst (vectype);
      gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs);
      gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT);

      /* Find maximum value from the vector of found indexes.  */
      tree max_index = make_ssa_name (index_scalar_type);
      gcall *max_index_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
                                                          1, induction_index);
      gimple_call_set_lhs (max_index_stmt, max_index);
      gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);

      /* Vector of {max_index, max_index, max_index,...}.  */
      tree max_index_vec = make_ssa_name (index_vec_type);
      tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
                                                      max_index);
      gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
                                                        max_index_vec_rhs);
      gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);

      /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
         with the vector (INDUCTION_INDEX) of found indexes, choosing values
         from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
         otherwise.  Only one value should match, resulting in a vector
         (VEC_COND) with one data value and the rest zeros.
         In the case where the loop never made any matches, every index will
         match, resulting in a vector with all data values (which will all be
         the default value).  */

      /* Compare the max index vector to the vector of found indexes to find
         the position of the max value.  */
      tree vec_compare = make_ssa_name (index_vec_cmp_type);
      gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
                                                      induction_index,
                                                      max_index_vec);
      gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);

      /* Use the compare to choose either values from the data vector or
         zero.  */
      tree vec_cond = make_ssa_name (vectype);
      gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
                                                   vec_compare,
                                                   new_phi_result,
                                                   zero_vec);
      gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);

      /* Finally we need to extract the data value from the vector (VEC_COND)
         into a scalar (MATCHED_DATA_REDUC).  Logically we want to do an OR
         reduction, but because this doesn't exist, we can use a MAX reduction
         instead.  The data value might be signed or a float so we need to
         cast it first.
         In the case where the loop never made any matches, the data values
         are all identical, and so will reduce down correctly.  */

      /* Make the matched data values unsigned.  */
*/ tree vec_cond_cast = make_ssa_name (vectype_unsigned); tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned, vec_cond); gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast, VIEW_CONVERT_EXPR, vec_cond_cast_rhs); gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT); /* Reduce down to a scalar value. */ tree data_reduc = make_ssa_name (scalar_type_unsigned); gcall *data_reduc_stmt = gimple_build_call_internal (IFN_REDUC_MAX, 1, vec_cond_cast); gimple_call_set_lhs (data_reduc_stmt, data_reduc); gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT); /* Convert the reduced value back to the result type and set as the result. */ gimple_seq stmts = NULL; new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type, data_reduc); gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT); scalar_results.safe_push (new_temp); } else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION && reduc_fn == IFN_LAST) { /* Condition reduction without supported IFN_REDUC_MAX. Generate idx = 0; idx_val = induction_index[0]; val = data_reduc[0]; for (idx = 0, val = init, i = 0; i < nelts; ++i) if (induction_index[i] > idx_val) val = data_reduc[i], idx_val = induction_index[i]; return val; */ tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result)); tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index)); unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype)); poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index)); /* Enforced by vectorizable_reduction, which ensures we have target support before allowing a conditional reduction on variable-length vectors. */ unsigned HOST_WIDE_INT v_size = el_size * nunits.to_constant (); tree idx_val = NULL_TREE, val = NULL_TREE; for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size) { tree old_idx_val = idx_val; tree old_val = val; idx_val = make_ssa_name (idx_eltype); epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF, build3 (BIT_FIELD_REF, idx_eltype, induction_index, bitsize_int (el_size), bitsize_int (off))); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); val = make_ssa_name (data_eltype); epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF, build3 (BIT_FIELD_REF, data_eltype, new_phi_result, bitsize_int (el_size), bitsize_int (off))); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); if (off != 0) { tree new_idx_val = idx_val; tree new_val = val; if (off != v_size - el_size) { new_idx_val = make_ssa_name (idx_eltype); epilog_stmt = gimple_build_assign (new_idx_val, MAX_EXPR, idx_val, old_idx_val); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); } new_val = make_ssa_name (data_eltype); epilog_stmt = gimple_build_assign (new_val, COND_EXPR, build2 (GT_EXPR, boolean_type_node, idx_val, old_idx_val), val, old_val); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); idx_val = new_idx_val; val = new_val; } } /* Convert the reduced value back to the result type and set as the result. */ gimple_seq stmts = NULL; val = gimple_convert (&stmts, scalar_type, val); gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT); scalar_results.safe_push (val); } /* 2.3 Create the reduction code, using one of the three schemes described above. In SLP we simply need to extract all the elements from the vector (without reducing them), so we use scalar shifts. 
  */
  else if (reduc_fn != IFN_LAST && !slp_reduc)
    {
      tree tmp;
      tree vec_elem_type;

      /* Case 1:  Create:
         v_out2 = reduc_expr <v_out1>  */
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Reduce using direct vector reduction.\n");

      vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
      if (!useless_type_conversion_p (scalar_type, vec_elem_type))
        {
          tree tmp_dest
            = vect_create_destination_var (scalar_dest, vec_elem_type);
          epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
                                                    new_phi_result);
          gimple_set_lhs (epilog_stmt, tmp_dest);
          new_temp = make_ssa_name (tmp_dest, epilog_stmt);
          gimple_set_lhs (epilog_stmt, new_temp);
          gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

          epilog_stmt = gimple_build_assign (new_scalar_dest, NOP_EXPR,
                                             new_temp);
        }
      else
        {
          epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
                                                    new_phi_result);
          gimple_set_lhs (epilog_stmt, new_scalar_dest);
        }

      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
      gimple_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

      if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
           == INTEGER_INDUC_COND_REDUCTION)
          && !operand_equal_p (initial_def, induc_val, 0))
        {
          /* Earlier we set the initial value to be a vector of induc_val
             values.  Check the result and if it is induc_val then replace
             with the original initial value, unless induc_val is the same
             as initial_def already.  */
          tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
                                  induc_val);

          tmp = make_ssa_name (new_scalar_dest);
          epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
                                             initial_def, new_temp);
          gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
          new_temp = tmp;
        }

      scalar_results.safe_push (new_temp);
    }
  else if (direct_slp_reduc)
    {
      /* Here we create one vector for each of the GROUP_SIZE results,
         with the elements for other SLP statements replaced with the
         neutral value.  We can then do a normal reduction on each vector.  */

      /* Enforced by vectorizable_reduction.  */
      gcc_assert (new_phis.length () == 1);
      gcc_assert (pow2p_hwi (group_size));

      slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis;
      vec<gimple *> orig_phis = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
      gimple_seq seq = NULL;

      /* Build a vector {0, 1, 2, ...}, with the same number of elements
         and the same element size as VECTYPE.  */
      tree index = build_index_vector (vectype, 0, 1);
      tree index_type = TREE_TYPE (index);
      tree index_elt_type = TREE_TYPE (index_type);
      tree mask_type = build_same_sized_truth_vector_type (index_type);

      /* Create a vector that, for each element, identifies which of
         the GROUP_SIZE results should use it.  */
      tree index_mask = build_int_cst (index_elt_type, group_size - 1);
      index = gimple_build (&seq, BIT_AND_EXPR, index_type, index,
                            build_vector_from_val (index_type, index_mask));

      /* Get a neutral vector value.  This is simply a splat of the neutral
         scalar value if we have one, otherwise the initial scalar value
         is itself a neutral value.  */
      tree vector_identity = NULL_TREE;
      if (neutral_op)
        vector_identity = gimple_build_vector_from_val (&seq, vectype,
                                                        neutral_op);
      for (unsigned int i = 0; i < group_size; ++i)
        {
          /* If there's no universal neutral value, we can use the
             initial scalar value from the original PHI.  This is used
             for MIN and MAX reduction, for example.
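             (For instance, PLUS_EXPR and BIT_IOR_EXPR have the neutral
             value 0, MULT_EXPR has 1 and BIT_AND_EXPR has all-ones, so
             NEUTRAL_OP is used for those; MIN_EXPR and MAX_EXPR have no
             value that is neutral for every input, so the scalar initial
             value from the PHI is used instead.)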
*/ if (!neutral_op) { tree scalar_value = PHI_ARG_DEF_FROM_EDGE (orig_phis[i], loop_preheader_edge (loop)); vector_identity = gimple_build_vector_from_val (&seq, vectype, scalar_value); } /* Calculate the equivalent of: sel[j] = (index[j] == i); which selects the elements of NEW_PHI_RESULT that should be included in the result. */ tree compare_val = build_int_cst (index_elt_type, i); compare_val = build_vector_from_val (index_type, compare_val); tree sel = gimple_build (&seq, EQ_EXPR, mask_type, index, compare_val); /* Calculate the equivalent of: vec = seq ? new_phi_result : vector_identity; VEC is now suitable for a full vector reduction. */ tree vec = gimple_build (&seq, VEC_COND_EXPR, vectype, sel, new_phi_result, vector_identity); /* Do the reduction and convert it to the appropriate type. */ gcall *call = gimple_build_call_internal (reduc_fn, 1, vec); tree scalar = make_ssa_name (TREE_TYPE (vectype)); gimple_call_set_lhs (call, scalar); gimple_seq_add_stmt (&seq, call); scalar = gimple_convert (&seq, scalar_type, scalar); scalar_results.safe_push (scalar); } gsi_insert_seq_before (&exit_gsi, seq, GSI_SAME_STMT); } else { bool reduce_with_shift; tree vec_temp; /* COND reductions all do the final reduction with MAX_EXPR or MIN_EXPR. */ if (code == COND_EXPR) { if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == INTEGER_INDUC_COND_REDUCTION) code = induc_code; else code = MAX_EXPR; } /* See if the target wants to do the final (shift) reduction in a vector mode of smaller size and first reduce upper/lower halves against each other. */ enum machine_mode mode1 = mode; tree vectype1 = vectype; unsigned sz = tree_to_uhwi (TYPE_SIZE_UNIT (vectype)); unsigned sz1 = sz; if (!slp_reduc && (mode1 = targetm.vectorize.split_reduction (mode)) != mode) sz1 = GET_MODE_SIZE (mode1).to_constant (); vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz1); reduce_with_shift = have_whole_vector_shift (mode1); if (!VECTOR_MODE_P (mode1)) reduce_with_shift = false; else { optab optab = optab_for_tree_code (code, vectype1, optab_default); if (optab_handler (optab, mode1) == CODE_FOR_nothing) reduce_with_shift = false; } /* First reduce the vector to the desired vector size we should do shift reduction on by combining upper and lower halves. */ new_temp = new_phi_result; while (sz > sz1) { gcc_assert (!slp_reduc); sz /= 2; vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz); /* The target has to make sure we support lowpart/highpart extraction, either via direct vector extract or through an integer mode punning. */ tree dst1, dst2; if (convert_optab_handler (vec_extract_optab, TYPE_MODE (TREE_TYPE (new_temp)), TYPE_MODE (vectype1)) != CODE_FOR_nothing) { /* Extract sub-vectors directly once vec_extract becomes a conversion optab. */ dst1 = make_ssa_name (vectype1); epilog_stmt = gimple_build_assign (dst1, BIT_FIELD_REF, build3 (BIT_FIELD_REF, vectype1, new_temp, TYPE_SIZE (vectype1), bitsize_int (0))); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); dst2 = make_ssa_name (vectype1); epilog_stmt = gimple_build_assign (dst2, BIT_FIELD_REF, build3 (BIT_FIELD_REF, vectype1, new_temp, TYPE_SIZE (vectype1), bitsize_int (sz * BITS_PER_UNIT))); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); } else { /* Extract via punning to appropriately sized integer mode vector. 
*/ tree eltype = build_nonstandard_integer_type (sz * BITS_PER_UNIT, 1); tree etype = build_vector_type (eltype, 2); gcc_assert (convert_optab_handler (vec_extract_optab, TYPE_MODE (etype), TYPE_MODE (eltype)) != CODE_FOR_nothing); tree tem = make_ssa_name (etype); epilog_stmt = gimple_build_assign (tem, VIEW_CONVERT_EXPR, build1 (VIEW_CONVERT_EXPR, etype, new_temp)); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); new_temp = tem; tem = make_ssa_name (eltype); epilog_stmt = gimple_build_assign (tem, BIT_FIELD_REF, build3 (BIT_FIELD_REF, eltype, new_temp, TYPE_SIZE (eltype), bitsize_int (0))); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); dst1 = make_ssa_name (vectype1); epilog_stmt = gimple_build_assign (dst1, VIEW_CONVERT_EXPR, build1 (VIEW_CONVERT_EXPR, vectype1, tem)); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); tem = make_ssa_name (eltype); epilog_stmt = gimple_build_assign (tem, BIT_FIELD_REF, build3 (BIT_FIELD_REF, eltype, new_temp, TYPE_SIZE (eltype), bitsize_int (sz * BITS_PER_UNIT))); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); dst2 = make_ssa_name (vectype1); epilog_stmt = gimple_build_assign (dst2, VIEW_CONVERT_EXPR, build1 (VIEW_CONVERT_EXPR, vectype1, tem)); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); } new_temp = make_ssa_name (vectype1); epilog_stmt = gimple_build_assign (new_temp, code, dst1, dst2); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); } if (reduce_with_shift && !slp_reduc) { int element_bitsize = tree_to_uhwi (bitsize); /* Enforced by vectorizable_reduction, which disallows SLP reductions for variable-length vectors and also requires direct target support for loop reductions. */ int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1)); int nelements = vec_size_in_bits / element_bitsize; vec_perm_builder sel; vec_perm_indices indices; int elt_offset; tree zero_vec = build_zero_cst (vectype1); /* Case 2: Create: for (offset = nelements/2; offset >= 1; offset/=2) { Create: va' = vec_shift <va, offset> Create: va = vop <va, va'> } */ tree rhs; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Reduce using vector shifts\n"); mode1 = TYPE_MODE (vectype1); vec_dest = vect_create_destination_var (scalar_dest, vectype1); for (elt_offset = nelements / 2; elt_offset >= 1; elt_offset /= 2) { calc_vec_perm_mask_for_shift (elt_offset, nelements, &sel); indices.new_vector (sel, 2, nelements); tree mask = vect_gen_perm_mask_any (vectype1, indices); epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR, new_temp, zero_vec, mask); new_name = make_ssa_name (vec_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_name); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); epilog_stmt = gimple_build_assign (vec_dest, code, new_name, new_temp); new_temp = make_ssa_name (vec_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); } /* 2.4 Extract the final scalar result. 
             Create:  s_out3 = extract_field <v_out2, bitpos>  */
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "extract scalar result\n");

          rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
                        bitsize, bitsize_zero_node);
          epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
          new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
          gimple_assign_set_lhs (epilog_stmt, new_temp);
          gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
          scalar_results.safe_push (new_temp);
        }
      else
        {
          /* Case 3: Create:
             s = extract_field <v_out2, 0>
             for (offset = element_size;
                  offset < vector_size;
                  offset += element_size)
               {
                 Create:  s' = extract_field <v_out2, offset>
                 Create:  s = op <s, s'>  // For non SLP cases
               }  */
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "Reduce using scalar code.\n");

          int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
          int element_bitsize = tree_to_uhwi (bitsize);
          FOR_EACH_VEC_ELT (new_phis, i, new_phi)
            {
              int bit_offset;
              if (gimple_code (new_phi) == GIMPLE_PHI)
                vec_temp = PHI_RESULT (new_phi);
              else
                vec_temp = gimple_assign_lhs (new_phi);
              tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
                                 bitsize, bitsize_zero_node);
              epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
              new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_temp);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

              /* In SLP we don't need to apply reduction operation, so we
                 just collect s' values in SCALAR_RESULTS.  */
              if (slp_reduc)
                scalar_results.safe_push (new_temp);

              for (bit_offset = element_bitsize;
                   bit_offset < vec_size_in_bits;
                   bit_offset += element_bitsize)
                {
                  tree bitpos = bitsize_int (bit_offset);
                  tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
                                     bitsize, bitpos);

                  epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
                  new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
                  gimple_assign_set_lhs (epilog_stmt, new_name);
                  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

                  if (slp_reduc)
                    {
                      /* In SLP we don't need to apply reduction operation,
                         so we just collect s' values in SCALAR_RESULTS.  */
                      new_temp = new_name;
                      scalar_results.safe_push (new_name);
                    }
                  else
                    {
                      epilog_stmt = gimple_build_assign (new_scalar_dest,
                                                         code, new_name,
                                                         new_temp);
                      new_temp = make_ssa_name (new_scalar_dest,
                                                epilog_stmt);
                      gimple_assign_set_lhs (epilog_stmt, new_temp);
                      gsi_insert_before (&exit_gsi, epilog_stmt,
                                         GSI_SAME_STMT);
                    }
                }
            }

          /* The only case where we need to reduce scalar results in SLP,
             is unrolling.  If the size of SCALAR_RESULTS is greater than
             GROUP_SIZE, we reduce them combining elements modulo
             GROUP_SIZE.  */
          if (slp_reduc)
            {
              tree res, first_res, new_res;
              gimple *new_stmt;

              /* Reduce multiple scalar results in case of SLP unrolling.  */
              for (j = group_size; scalar_results.iterate (j, &res); j++)
                {
                  first_res = scalar_results[j % group_size];
                  new_stmt = gimple_build_assign (new_scalar_dest, code,
                                                  first_res, res);
                  new_res = make_ssa_name (new_scalar_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_res);
                  gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
                  scalar_results[j % group_size] = new_res;
                }
            }
          else
            /* Not SLP - we have one scalar to keep in SCALAR_RESULTS.  */
            scalar_results.safe_push (new_temp);
        }

      if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
           == INTEGER_INDUC_COND_REDUCTION)
          && !operand_equal_p (initial_def, induc_val, 0))
        {
          /* Earlier we set the initial value to be a vector of induc_val
             values.
Check the result and if it is induc_val then replace with the original initial value, unless induc_val is the same as initial_def already. */ tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp, induc_val); tree tmp = make_ssa_name (new_scalar_dest); epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare, initial_def, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); scalar_results[0] = tmp; } } vect_finalize_reduction: if (double_reduc) loop = loop->inner; /* 2.5 Adjust the final result by the initial value of the reduction variable. (When such adjustment is not needed, then 'adjustment_def' is zero). For example, if code is PLUS we create: new_temp = loop_exit_def + adjustment_def */ if (adjustment_def) { gcc_assert (!slp_reduc); if (nested_in_vect_loop) { new_phi = new_phis[0]; gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE); expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def); new_dest = vect_create_destination_var (scalar_dest, vectype); } else { new_temp = scalar_results[0]; gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE); expr = build2 (code, scalar_type, new_temp, adjustment_def); new_dest = vect_create_destination_var (scalar_dest, scalar_type); } epilog_stmt = gimple_build_assign (new_dest, expr); new_temp = make_ssa_name (new_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); if (nested_in_vect_loop) { set_vinfo_for_stmt (epilog_stmt, new_stmt_vec_info (epilog_stmt, loop_vinfo)); STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi)); if (!double_reduc) scalar_results.quick_push (new_temp); else scalar_results[0] = new_temp; } else scalar_results[0] = new_temp; new_phis[0] = epilog_stmt; } /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit phis with new adjusted scalar results, i.e., replace use <s_out0> with use <s_out4>. Transform: loop_exit: s_out0 = phi <s_loop> # (scalar) EXIT_PHI v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI v_out2 = reduce <v_out1> s_out3 = extract_field <v_out2, 0> s_out4 = adjust_result <s_out3> use <s_out0> use <s_out0> into: loop_exit: s_out0 = phi <s_loop> # (scalar) EXIT_PHI v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI v_out2 = reduce <v_out1> s_out3 = extract_field <v_out2, 0> s_out4 = adjust_result <s_out3> use <s_out4> use <s_out4> */ /* In SLP reduction chain we reduce vector results into one vector if necessary, hence we set here GROUP_SIZE to 1. SCALAR_DEST is the LHS of the last stmt in the reduction chain, since we are looking for the loop exit phi node. */ if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))) { gimple *dest_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]; /* Handle reduction patterns. */ if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt))) dest_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt)); scalar_dest = gimple_assign_lhs (dest_stmt); group_size = 1; } /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in case that GROUP_SIZE is greater than vectorization factor). Therefore, we need to match SCALAR_RESULTS with corresponding statements. The first (GROUP_SIZE / number of new vector stmts) scalar results correspond to the first vector stmt, etc. (RATIO is equal to (GROUP_SIZE / number of new vector stmts)). 
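   For example, with GROUP_SIZE == 4 and two new vector stmts RATIO is 2:
   scalar results 0 and 1 correspond to the first vector stmt and scalar
   results 2 and 3 to the second.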
*/ if (group_size > new_phis.length ()) { ratio = group_size / new_phis.length (); gcc_assert (!(group_size % new_phis.length ())); } else ratio = 1; for (k = 0; k < group_size; k++) { if (k % ratio == 0) { epilog_stmt = new_phis[k / ratio]; reduction_phi = reduction_phis[k / ratio]; if (double_reduc) inner_phi = inner_phis[k / ratio]; } if (slp_reduc) { gimple *current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k]; orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt)); /* SLP statements can't participate in patterns. */ gcc_assert (!orig_stmt); scalar_dest = gimple_assign_lhs (current_stmt); } phis.create (3); /* Find the loop-closed-use at the loop exit of the original scalar result. (The reduction result is expected to have two immediate uses - one at the latch block, and one at the loop exit). */ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest) if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))) && !is_gimple_debug (USE_STMT (use_p))) phis.safe_push (USE_STMT (use_p)); /* While we expect to have found an exit_phi because of loop-closed-ssa form we can end up without one if the scalar cycle is dead. */ FOR_EACH_VEC_ELT (phis, i, exit_phi) { if (outer_loop) { stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi); gphi *vect_phi; /* FORNOW. Currently not supporting the case that an inner-loop reduction is not used in the outer-loop (but only outside the outer-loop), unless it is double reduction. */ gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo) && !STMT_VINFO_LIVE_P (exit_phi_vinfo)) || double_reduc); if (double_reduc) STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi; else STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt; if (!double_reduc || STMT_VINFO_DEF_TYPE (exit_phi_vinfo) != vect_double_reduction_def) continue; /* Handle double reduction: stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop) stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop) stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop) stmt4: s2 = phi <s4> - double reduction stmt (outer loop) At that point the regular reduction (stmt2 and stmt3) is already vectorized, as well as the exit phi node, stmt4. Here we vectorize the phi node of double reduction, stmt1, and update all relevant statements. */ /* Go through all the uses of s2 to find double reduction phi node, i.e., stmt1 above. */ orig_name = PHI_RESULT (exit_phi); FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name) { stmt_vec_info use_stmt_vinfo; stmt_vec_info new_phi_vinfo; tree vect_phi_init, preheader_arg, vect_phi_res; basic_block bb = gimple_bb (use_stmt); gimple *use; /* Check that USE_STMT is really double reduction phi node. */ if (gimple_code (use_stmt) != GIMPLE_PHI || gimple_phi_num_args (use_stmt) != 2 || bb->loop_father != outer_loop) continue; use_stmt_vinfo = vinfo_for_stmt (use_stmt); if (!use_stmt_vinfo || STMT_VINFO_DEF_TYPE (use_stmt_vinfo) != vect_double_reduction_def) continue; /* Create vector phi node for double reduction: vs1 = phi <vs0, vs2> vs1 was created previously in this function by a call to vect_get_vec_def_for_operand and is stored in vec_initial_def; vs2 is defined by INNER_PHI, the vectorized EXIT_PHI; vs0 is created here. */ /* Create vector phi node. */ vect_phi = create_phi_node (vec_initial_def, bb); new_phi_vinfo = new_stmt_vec_info (vect_phi, loop_vec_info_for_loop (outer_loop)); set_vinfo_for_stmt (vect_phi, new_phi_vinfo); /* Create vs0 - initial def of the double reduction phi. 
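                 (To relate this to stmt1 above, as an illustrative
                 mapping: vs1 = phi <vs0, vs2> is the vector counterpart
                 of s1 = phi <s0, s2>; vs0 is built below from the value
                 of s1 on the outer-loop preheader edge, and vs2 is
                 PHI_RESULT of INNER_PHI, added on the latch edge.)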
*/ preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt, loop_preheader_edge (outer_loop)); vect_phi_init = get_initial_def_for_reduction (stmt, preheader_arg, NULL); /* Update phi node arguments with vs0 and vs2. */ add_phi_arg (vect_phi, vect_phi_init, loop_preheader_edge (outer_loop), UNKNOWN_LOCATION); add_phi_arg (vect_phi, PHI_RESULT (inner_phi), loop_latch_edge (outer_loop), UNKNOWN_LOCATION); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "created double reduction phi node: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0); } vect_phi_res = PHI_RESULT (vect_phi); /* Replace the use, i.e., set the correct vs1 in the regular reduction phi node. FORNOW, NCOPIES is always 1, so the loop is redundant. */ use = reduction_phi; for (j = 0; j < ncopies; j++) { edge pr_edge = loop_preheader_edge (loop); SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res); use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use)); } } } } phis.release (); if (nested_in_vect_loop) { if (double_reduc) loop = outer_loop; else continue; } phis.create (3); /* Find the loop-closed-use at the loop exit of the original scalar result. (The reduction result is expected to have two immediate uses, one at the latch block, and one at the loop exit). For double reductions we are looking for exit phis of the outer loop. */ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest) { if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))) { if (!is_gimple_debug (USE_STMT (use_p))) phis.safe_push (USE_STMT (use_p)); } else { if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI) { tree phi_res = PHI_RESULT (USE_STMT (use_p)); FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res) { if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (phi_use_p))) && !is_gimple_debug (USE_STMT (phi_use_p))) phis.safe_push (USE_STMT (phi_use_p)); } } } } FOR_EACH_VEC_ELT (phis, i, exit_phi) { /* Replace the uses: */ orig_name = PHI_RESULT (exit_phi); scalar_result = scalar_results[k]; FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name) FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter) SET_USE (use_p, scalar_result); } phis.release (); } } /* Return a vector of type VECTYPE that is equal to the vector select operation "MASK ? VEC : IDENTITY". Insert the select statements before GSI. */ static tree merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype, tree vec, tree identity) { tree cond = make_temp_ssa_name (vectype, NULL, "cond"); gimple *new_stmt = gimple_build_assign (cond, VEC_COND_EXPR, mask, vec, identity); gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT); return cond; } /* Successively apply CODE to each element of VECTOR_RHS, in left-to-right order, starting with LHS. Insert the extraction statements before GSI and associate the new scalar SSA names with variable SCALAR_DEST. Return the SSA name for the result. 
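   For example (an illustrative sketch), with CODE == PLUS_EXPR, LHS == l
   and a four-element VECTOR_RHS v, the emitted statements compute the
   equivalent of:

     t0 = l + v[0];
     t1 = t0 + v[1];
     t2 = t1 + v[2];
     t3 = t2 + v[3];

   and t3 is returned, preserving the left-to-right (in-order)
   association of the original scalar loop.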
*/ static tree vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest, tree_code code, tree lhs, tree vector_rhs) { tree vectype = TREE_TYPE (vector_rhs); tree scalar_type = TREE_TYPE (vectype); tree bitsize = TYPE_SIZE (scalar_type); unsigned HOST_WIDE_INT vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype)); unsigned HOST_WIDE_INT element_bitsize = tree_to_uhwi (bitsize); for (unsigned HOST_WIDE_INT bit_offset = 0; bit_offset < vec_size_in_bits; bit_offset += element_bitsize) { tree bitpos = bitsize_int (bit_offset); tree rhs = build3 (BIT_FIELD_REF, scalar_type, vector_rhs, bitsize, bitpos); gassign *stmt = gimple_build_assign (scalar_dest, rhs); rhs = make_ssa_name (scalar_dest, stmt); gimple_assign_set_lhs (stmt, rhs); gsi_insert_before (gsi, stmt, GSI_SAME_STMT); stmt = gimple_build_assign (scalar_dest, code, lhs, rhs); tree new_name = make_ssa_name (scalar_dest, stmt); gimple_assign_set_lhs (stmt, new_name); gsi_insert_before (gsi, stmt, GSI_SAME_STMT); lhs = new_name; } return lhs; } /* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT is the statement that sets the live-out value. REDUC_DEF_STMT is the phi statement. CODE is the operation performed by STMT and OPS are its scalar operands. REDUC_INDEX is the index of the operand in OPS that is set by REDUC_DEF_STMT. REDUC_FN is the function that implements in-order reduction, or IFN_LAST if we should open-code it. VECTYPE_IN is the type of the vector input. MASKS specifies the masks that should be used to control the operation in a fully-masked loop. */ static bool vectorize_fold_left_reduction (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt, slp_tree slp_node, gimple *reduc_def_stmt, tree_code code, internal_fn reduc_fn, tree ops[3], tree vectype_in, int reduc_index, vec_loop_masks *masks) { stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); tree vectype_out = STMT_VINFO_VECTYPE (stmt_info); gimple *new_stmt = NULL; int ncopies; if (slp_node) ncopies = 1; else ncopies = vect_get_num_copies (loop_vinfo, vectype_in); gcc_assert (!nested_in_vect_loop_p (loop, stmt)); gcc_assert (ncopies == 1); gcc_assert (TREE_CODE_LENGTH (code) == binary_op); gcc_assert (reduc_index == (code == MINUS_EXPR ? 
0 : 1)); gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == FOLD_LEFT_REDUCTION); if (slp_node) gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype_out), TYPE_VECTOR_SUBPARTS (vectype_in))); tree op0 = ops[1 - reduc_index]; int group_size = 1; gimple *scalar_dest_def; auto_vec<tree> vec_oprnds0; if (slp_node) { vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, slp_node); group_size = SLP_TREE_SCALAR_STMTS (slp_node).length (); scalar_dest_def = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]; } else { tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt); vec_oprnds0.create (1); vec_oprnds0.quick_push (loop_vec_def0); scalar_dest_def = stmt; } tree scalar_dest = gimple_assign_lhs (scalar_dest_def); tree scalar_type = TREE_TYPE (scalar_dest); tree reduc_var = gimple_phi_result (reduc_def_stmt); int vec_num = vec_oprnds0.length (); gcc_assert (vec_num == 1 || slp_node); tree vec_elem_type = TREE_TYPE (vectype_out); gcc_checking_assert (useless_type_conversion_p (scalar_type, vec_elem_type)); tree vector_identity = NULL_TREE; if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) vector_identity = build_zero_cst (vectype_out); tree scalar_dest_var = vect_create_destination_var (scalar_dest, NULL); int i; tree def0; FOR_EACH_VEC_ELT (vec_oprnds0, i, def0) { tree mask = NULL_TREE; if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) mask = vect_get_loop_mask (gsi, masks, vec_num, vectype_in, i); /* Handle MINUS by adding the negative. */ if (reduc_fn != IFN_LAST && code == MINUS_EXPR) { tree negated = make_ssa_name (vectype_out); new_stmt = gimple_build_assign (negated, NEGATE_EXPR, def0); gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT); def0 = negated; } if (mask) def0 = merge_with_identity (gsi, mask, vectype_out, def0, vector_identity); /* On the first iteration the input is simply the scalar phi result, and for subsequent iterations it is the output of the preceding operation. */ if (reduc_fn != IFN_LAST) { new_stmt = gimple_build_call_internal (reduc_fn, 2, reduc_var, def0); /* For chained SLP reductions the output of the previous reduction operation serves as the input of the next. For the final statement the output cannot be a temporary - we reuse the original scalar destination of the last statement. */ if (i != vec_num - 1) { gimple_set_lhs (new_stmt, scalar_dest_var); reduc_var = make_ssa_name (scalar_dest_var, new_stmt); gimple_set_lhs (new_stmt, reduc_var); } } else { reduc_var = vect_expand_fold_left (gsi, scalar_dest_var, code, reduc_var, def0); new_stmt = SSA_NAME_DEF_STMT (reduc_var); /* Remove the statement, so that we can use the same code paths as for statements that we've just created. */ gimple_stmt_iterator tmp_gsi = gsi_for_stmt (new_stmt); gsi_remove (&tmp_gsi, false); } if (i == vec_num - 1) { gimple_set_lhs (new_stmt, scalar_dest); vect_finish_replace_stmt (scalar_dest_def, new_stmt); } else vect_finish_stmt_generation (scalar_dest_def, new_stmt, gsi); if (slp_node) SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); } if (!slp_node) STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; return true; } /* Function is_nonwrapping_integer_induction. Check if STMT (which is part of loop LOOP) both increments and does not cause overflow. 
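   (Illustrative reasoning, matching the checks below: for a phi with
   constant BASE and STEP in a loop that executes at most NI times, the
   largest value reached is BASE + STEP * NI; the induction is considered
   nonwrapping if that value fits in the precision of the phi's type, or
   trivially if overflow is undefined for that type.)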
  */

static bool
is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
  tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
  tree lhs_type = TREE_TYPE (gimple_phi_result (stmt));
  widest_int ni, max_loop_value, lhs_max;
  bool overflow = false;

  /* Make sure the loop is integer based.  */
  if (TREE_CODE (base) != INTEGER_CST
      || TREE_CODE (step) != INTEGER_CST)
    return false;

  /* Check that the max size of the loop will not wrap.  */

  if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
    return true;

  if (! max_stmt_executions (loop, &ni))
    return false;

  max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
                            &overflow);
  if (overflow)
    return false;

  max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
                            TYPE_SIGN (lhs_type), &overflow);
  if (overflow)
    return false;

  return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
          <= TYPE_PRECISION (lhs_type));
}

/* Function vectorizable_reduction.

   Check if STMT performs a reduction operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.

   This function also handles reduction idioms (patterns) that have been
   recognized in advance during vect_pattern_recog.  In this case, STMT
   may be of this form:
     X = pattern_expr (arg0, arg1, ..., X)
   and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
   sequence that had been detected and replaced by the pattern-stmt (STMT).

   This function also handles reduction of condition expressions, for
   example:
     for (int i = 0; i < N; i++)
       if (a[i] < value)
         last = a[i];
   This is handled by vectorising the loop and creating an additional
   vector containing the loop indexes for which "a[i] < value" was true.
   In the function epilogue this is reduced to a single max value and then
   used to index into the vector of results.

   In some cases of reduction patterns, the type of the reduction variable
   X is different than the type of the other arguments of STMT.
   In such cases, the vectype that is used when transforming STMT into a
   vector stmt is different than the vectype that is used to determine the
   vectorization factor, because it consists of a different number of
   elements than the actual number of elements that are being operated
   upon in parallel.

   For example, consider an accumulation of shorts into an int accumulator.
   On some targets it's possible to vectorize this pattern operating on 8
   shorts at a time (hence, the vectype for purposes of determining the
   vectorization factor should be V8HI); on the other hand, the vectype
   that is used to create the vector form is actually V4SI (the type of the
   result).

   Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
   indicates what is the actual level of parallelism (V8HI in the example),
   so that the right vectorization factor would be derived.  This vectype
   corresponds to the type of arguments to the reduction stmt, and should
   *NOT* be used to create the vectorized stmt.  The right vectype for the
   vectorized stmt is obtained from the type of the result X:
      get_vectype_for_scalar_type (TREE_TYPE (X))

   This means that, contrary to "regular" reductions (or "regular" stmts
   in general), the following equation:
      STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
   does *NOT* necessarily hold for reduction patterns.
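   (In the widening-sum example above, for instance, STMT_VINFO_VECTYPE
   would be V8HI while get_vectype_for_scalar_type (TREE_TYPE (X)) would
   be V4SI.)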
*/ bool vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt, slp_tree slp_node, slp_instance slp_node_instance) { tree vec_dest; tree scalar_dest; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree vectype_out = STMT_VINFO_VECTYPE (stmt_info); tree vectype_in = NULL_TREE; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); enum tree_code code, orig_code; internal_fn reduc_fn; machine_mode vec_mode; int op_type; optab optab; tree new_temp = NULL_TREE; gimple *def_stmt; enum vect_def_type dt, cond_reduc_dt = vect_unknown_def_type; gimple *cond_reduc_def_stmt = NULL; enum tree_code cond_reduc_op_code = ERROR_MARK; tree scalar_type; bool is_simple_use; gimple *orig_stmt; stmt_vec_info orig_stmt_info = NULL; int i; int ncopies; int epilog_copies; stmt_vec_info prev_stmt_info, prev_phi_info; bool single_defuse_cycle = false; gimple *new_stmt = NULL; int j; tree ops[3]; enum vect_def_type dts[3]; bool nested_cycle = false, found_nested_cycle_def = false; bool double_reduc = false; basic_block def_bb; struct loop * def_stmt_loop, *outer_loop = NULL; tree def_arg; gimple *def_arg_stmt; auto_vec<tree> vec_oprnds0; auto_vec<tree> vec_oprnds1; auto_vec<tree> vec_oprnds2; auto_vec<tree> vect_defs; auto_vec<gimple *> phis; int vec_num; tree def0, tem; bool first_p = true; tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE; tree cond_reduc_val = NULL_TREE; /* Make sure it was already recognized as a reduction computation. */ if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_reduction_def && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_nested_cycle) return false; if (nested_in_vect_loop_p (loop, stmt)) { outer_loop = loop; loop = loop->inner; nested_cycle = true; } /* In case of reduction chain we switch to the first stmt in the chain, but we don't update STMT_INFO, since only the last stmt is marked as reduction and has reduction properties. */ if (GROUP_FIRST_ELEMENT (stmt_info) && GROUP_FIRST_ELEMENT (stmt_info) != stmt) { stmt = GROUP_FIRST_ELEMENT (stmt_info); first_p = false; } if (gimple_code (stmt) == GIMPLE_PHI) { /* Analysis is fully done on the reduction stmt invocation. */ if (! vec_stmt) { if (slp_node) slp_node_instance->reduc_phis = slp_node; STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type; return true; } if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION) /* Leave the scalar phi in place. Note that checking STMT_VINFO_VEC_REDUCTION_TYPE (as below) only works for reductions involving a single statement. */ return true; gimple *reduc_stmt = STMT_VINFO_REDUC_DEF (stmt_info); if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (reduc_stmt))) reduc_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (reduc_stmt)); if (STMT_VINFO_VEC_REDUCTION_TYPE (vinfo_for_stmt (reduc_stmt)) == EXTRACT_LAST_REDUCTION) /* Leave the scalar phi in place. 
*/ return true; gcc_assert (is_gimple_assign (reduc_stmt)); for (unsigned k = 1; k < gimple_num_ops (reduc_stmt); ++k) { tree op = gimple_op (reduc_stmt, k); if (op == gimple_phi_result (stmt)) continue; if (k == 1 && gimple_assign_rhs_code (reduc_stmt) == COND_EXPR) continue; if (!vectype_in || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in))) < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (op))))) vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op)); break; } gcc_assert (vectype_in); if (slp_node) ncopies = 1; else ncopies = vect_get_num_copies (loop_vinfo, vectype_in); use_operand_p use_p; gimple *use_stmt; if (ncopies > 1 && (STMT_VINFO_RELEVANT (vinfo_for_stmt (reduc_stmt)) <= vect_used_only_live) && single_imm_use (gimple_phi_result (stmt), &use_p, &use_stmt) && (use_stmt == reduc_stmt || (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use_stmt)) == reduc_stmt))) single_defuse_cycle = true; /* Create the destination vector */ scalar_dest = gimple_assign_lhs (reduc_stmt); vec_dest = vect_create_destination_var (scalar_dest, vectype_out); if (slp_node) /* The size vect_schedule_slp_instance computes is off for us. */ vec_num = vect_get_num_vectors (LOOP_VINFO_VECT_FACTOR (loop_vinfo) * SLP_TREE_SCALAR_STMTS (slp_node).length (), vectype_in); else vec_num = 1; /* Generate the reduction PHIs upfront. */ prev_phi_info = NULL; for (j = 0; j < ncopies; j++) { if (j == 0 || !single_defuse_cycle) { for (i = 0; i < vec_num; i++) { /* Create the reduction-phi that defines the reduction operand. */ gimple *new_phi = create_phi_node (vec_dest, loop->header); set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo)); if (slp_node) SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi); else { if (j == 0) STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_phi; else STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi; prev_phi_info = vinfo_for_stmt (new_phi); } } } } return true; } /* 1. Is vectorizable reduction? */ /* Not supportable if the reduction variable is used in the loop, unless it's a reduction chain. */ if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer && !GROUP_FIRST_ELEMENT (stmt_info)) return false; /* Reductions that are not used even in an enclosing outer-loop, are expected to be "live" (used out of the loop). */ if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope && !STMT_VINFO_LIVE_P (stmt_info)) return false; /* 2. Has this been recognized as a reduction pattern? Check if STMT represents a pattern that has been recognized in earlier analysis stages. For stmts that represent a pattern, the STMT_VINFO_RELATED_STMT field records the last stmt in the original sequence that constitutes the pattern. */ orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)); if (orig_stmt) { orig_stmt_info = vinfo_for_stmt (orig_stmt); gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info)); gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info)); } /* 3. Check the operands of the operation. The first operands are defined inside the loop body. The last operand is the reduction variable, which is defined by the loop-header-phi. */ gcc_assert (is_gimple_assign (stmt)); /* Flatten RHS. 
  */
  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
    case GIMPLE_BINARY_RHS:
      code = gimple_assign_rhs_code (stmt);
      op_type = TREE_CODE_LENGTH (code);
      gcc_assert (op_type == binary_op);
      ops[0] = gimple_assign_rhs1 (stmt);
      ops[1] = gimple_assign_rhs2 (stmt);
      break;

    case GIMPLE_TERNARY_RHS:
      code = gimple_assign_rhs_code (stmt);
      op_type = TREE_CODE_LENGTH (code);
      gcc_assert (op_type == ternary_op);
      ops[0] = gimple_assign_rhs1 (stmt);
      ops[1] = gimple_assign_rhs2 (stmt);
      ops[2] = gimple_assign_rhs3 (stmt);
      break;

    case GIMPLE_UNARY_RHS:
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == COND_EXPR && slp_node)
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  scalar_type = TREE_TYPE (scalar_dest);
  if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
      && !SCALAR_FLOAT_TYPE_P (scalar_type))
    return false;

  /* Do not try to vectorize bit-precision reductions.  */
  if (!type_has_mode_precision_p (scalar_type))
    return false;

  /* All uses but the last are expected to be defined in the loop.
     The last use is the reduction variable.  In case of nested cycle this
     assumption is not true: we use reduc_index to record the index of the
     reduction variable.  */
  gimple *reduc_def_stmt = NULL;
  int reduc_index = -1;
  for (i = 0; i < op_type; i++)
    {
      /* The condition of COND_EXPR is checked in
         vectorizable_condition().  */
      if (i == 0 && code == COND_EXPR)
        continue;

      is_simple_use = vect_is_simple_use (ops[i], loop_vinfo,
                                          &def_stmt, &dts[i], &tem);
      dt = dts[i];
      gcc_assert (is_simple_use);
      if (dt == vect_reduction_def)
        {
          reduc_def_stmt = def_stmt;
          reduc_index = i;
          continue;
        }
      else if (tem)
        {
          /* To properly compute ncopies we are interested in the widest
             input type in case we're looking at a widening accumulation.  */
          if (!vectype_in
              || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
                  < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (tem)))))
            vectype_in = tem;
        }

      if (dt != vect_internal_def
          && dt != vect_external_def
          && dt != vect_constant_def
          && dt != vect_induction_def
          && !(dt == vect_nested_cycle && nested_cycle))
        return false;

      if (dt == vect_nested_cycle)
        {
          found_nested_cycle_def = true;
          reduc_def_stmt = def_stmt;
          reduc_index = i;
        }

      if (i == 1 && code == COND_EXPR)
        {
          /* Record how the value of COND_EXPR is defined.  */
          if (dt == vect_constant_def)
            {
              cond_reduc_dt = dt;
              cond_reduc_val = ops[i];
            }
          if (dt == vect_induction_def
              && def_stmt != NULL
              && is_nonwrapping_integer_induction (def_stmt, loop))
            {
              cond_reduc_dt = dt;
              cond_reduc_def_stmt = def_stmt;
            }
        }
    }

  if (!vectype_in)
    vectype_in = vectype_out;

  /* When vectorizing a reduction chain w/o SLP the reduction PHI is not
     directly used in stmt.  */
  if (reduc_index == -1)
    {
      if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "in-order reduction chain without SLP.\n");
          return false;
        }

      if (orig_stmt)
        reduc_def_stmt = STMT_VINFO_REDUC_DEF (orig_stmt_info);
      else
        reduc_def_stmt = STMT_VINFO_REDUC_DEF (stmt_info);
    }

  if (!
reduc_def_stmt || gimple_code (reduc_def_stmt) != GIMPLE_PHI) return false; if (!(reduc_index == -1 || dts[reduc_index] == vect_reduction_def || dts[reduc_index] == vect_nested_cycle || ((dts[reduc_index] == vect_internal_def || dts[reduc_index] == vect_external_def || dts[reduc_index] == vect_constant_def || dts[reduc_index] == vect_induction_def) && nested_cycle && found_nested_cycle_def))) { /* For pattern recognized stmts, orig_stmt might be a reduction, but some helper statements for the pattern might not, or might be COND_EXPRs with reduction uses in the condition. */ gcc_assert (orig_stmt); return false; } stmt_vec_info reduc_def_info = vinfo_for_stmt (reduc_def_stmt); enum vect_reduction_type v_reduc_type = STMT_VINFO_REDUC_TYPE (reduc_def_info); gimple *tmp = STMT_VINFO_REDUC_DEF (reduc_def_info); STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = v_reduc_type; /* If we have a condition reduction, see if we can simplify it further. */ if (v_reduc_type == COND_REDUCTION) { /* TODO: We can't yet handle reduction chains, since we need to treat each COND_EXPR in the chain specially, not just the last one. E.g. for: x_1 = PHI <x_3, ...> x_2 = a_2 ? ... : x_1; x_3 = a_3 ? ... : x_2; we're interested in the last element in x_3 for which a_2 || a_3 is true, whereas the current reduction chain handling would vectorize x_2 as a normal VEC_COND_EXPR and only treat x_3 as a reduction operation. */ if (reduc_index == -1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "conditional reduction chains not supported\n"); return false; } /* vect_is_simple_reduction ensured that operand 2 is the loop-carried operand. */ gcc_assert (reduc_index == 2); /* Loop peeling modifies initial value of reduction PHI, which makes the reduction stmt to be transformed different to the original stmt analyzed. We need to record reduction code for CONST_COND_REDUCTION type reduction at analyzing stage, thus it can be used directly at transform stage. */ if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MAX_EXPR || STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MIN_EXPR) { /* Also set the reduction type to CONST_COND_REDUCTION. */ gcc_assert (cond_reduc_dt == vect_constant_def); STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION; } else if (direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST, vectype_in, OPTIMIZE_FOR_SPEED)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "optimizing condition reduction with" " FOLD_EXTRACT_LAST.\n"); STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = EXTRACT_LAST_REDUCTION; } else if (cond_reduc_dt == vect_induction_def) { stmt_vec_info cond_stmt_vinfo = vinfo_for_stmt (cond_reduc_def_stmt); tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo); tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (cond_stmt_vinfo); gcc_assert (TREE_CODE (base) == INTEGER_CST && TREE_CODE (step) == INTEGER_CST); cond_reduc_val = NULL_TREE; /* Find a suitable value, for MAX_EXPR below base, for MIN_EXPR above base; punt if base is the minimum value of the type for MAX_EXPR or maximum value of the type for MIN_EXPR for now. 
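         (Worked example of the choices made below: with STEP == -1 and
         BASE == 5 we use MIN_EXPR with the value 6, i.e. BASE + 1, which
         is above every value the induction takes; with STEP == 1 and
         BASE == 5 we use MAX_EXPR with the value 0, which is below BASE.)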
*/ if (tree_int_cst_sgn (step) == -1) { cond_reduc_op_code = MIN_EXPR; if (tree_int_cst_sgn (base) == -1) cond_reduc_val = build_int_cst (TREE_TYPE (base), 0); else if (tree_int_cst_lt (base, TYPE_MAX_VALUE (TREE_TYPE (base)))) cond_reduc_val = int_const_binop (PLUS_EXPR, base, integer_one_node); } else { cond_reduc_op_code = MAX_EXPR; if (tree_int_cst_sgn (base) == 1) cond_reduc_val = build_int_cst (TREE_TYPE (base), 0); else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base)), base)) cond_reduc_val = int_const_binop (MINUS_EXPR, base, integer_one_node); } if (cond_reduc_val) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "condition expression based on " "integer induction.\n"); STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = INTEGER_INDUC_COND_REDUCTION; } } else if (cond_reduc_dt == vect_constant_def) { enum vect_def_type cond_initial_dt; gimple *def_stmt = SSA_NAME_DEF_STMT (ops[reduc_index]); tree cond_initial_val = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop)); gcc_assert (cond_reduc_val != NULL_TREE); vect_is_simple_use (cond_initial_val, loop_vinfo, &def_stmt, &cond_initial_dt); if (cond_initial_dt == vect_constant_def && types_compatible_p (TREE_TYPE (cond_initial_val), TREE_TYPE (cond_reduc_val))) { tree e = fold_binary (LE_EXPR, boolean_type_node, cond_initial_val, cond_reduc_val); if (e && (integer_onep (e) || integer_zerop (e))) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "condition expression based on " "compile time constant.\n"); /* Record reduction code at analysis stage. */ STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) = integer_onep (e) ? MAX_EXPR : MIN_EXPR; STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION; } } } } if (orig_stmt) gcc_assert (tmp == orig_stmt || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == orig_stmt); else /* We changed STMT to be the first stmt in reduction chain, hence we check that in this case the first element in the chain is STMT. */ gcc_assert (stmt == tmp || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt); if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt))) return false; if (slp_node) ncopies = 1; else ncopies = vect_get_num_copies (loop_vinfo, vectype_in); gcc_assert (ncopies >= 1); vec_mode = TYPE_MODE (vectype_in); poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); if (code == COND_EXPR) { /* Only call during the analysis stage, otherwise we'll lose STMT_VINFO_TYPE. */ if (!vec_stmt && !vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0, NULL)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unsupported condition in reduction\n"); return false; } } else { /* 4. Supportable by target? */ if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR || code == RROTATE_EXPR) { /* Shifts and rotates are only supported by vectorizable_shifts, not vectorizable_reduction. */ if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unsupported shift or rotation.\n"); return false; } /* 4.1. 
check support for the operation in the loop */ optab = optab_for_tree_code (code, vectype_in, optab_default); if (!optab) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "no optab.\n"); return false; } if (optab_handler (optab, vec_mode) == CODE_FOR_nothing) { if (dump_enabled_p ()) dump_printf (MSG_NOTE, "op not supported by target.\n"); if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD) || !vect_worthwhile_without_simd_p (loop_vinfo, code)) return false; if (dump_enabled_p ()) dump_printf (MSG_NOTE, "proceeding using word mode.\n"); } /* Worthwhile without SIMD support? */ if (!VECTOR_MODE_P (TYPE_MODE (vectype_in)) && !vect_worthwhile_without_simd_p (loop_vinfo, code)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not worthwhile without SIMD support.\n"); return false; } } /* 4.2. Check support for the epilog operation. If STMT represents a reduction pattern, then the type of the reduction variable may be different than the type of the rest of the arguments. For example, consider the case of accumulation of shorts into an int accumulator; The original code: S1: int_a = (int) short_a; orig_stmt-> S2: int_acc = plus <int_a ,int_acc>; was replaced with: STMT: int_acc = widen_sum <short_a, int_acc> This means that: 1. The tree-code that is used to create the vector operation in the epilog code (that reduces the partial results) is not the tree-code of STMT, but is rather the tree-code of the original stmt from the pattern that STMT is replacing. I.e, in the example above we want to use 'widen_sum' in the loop, but 'plus' in the epilog. 2. The type (mode) we use to check available target support for the vector operation to be created in the *epilog*, is determined by the type of the reduction variable (in the example above we'd check this: optab_handler (plus_optab, vect_int_mode])). However the type (mode) we use to check available target support for the vector operation to be created *inside the loop*, is determined by the type of the other arguments to STMT (in the example we'd check this: optab_handler (widen_sum_optab, vect_short_mode)). This is contrary to "regular" reductions, in which the types of all the arguments are the same as the type of the reduction variable. For "regular" reductions we can therefore use the same vector type (and also the same tree-code) when generating the epilog code and when generating the code inside the loop. */ vect_reduction_type reduction_type = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info); if (orig_stmt && (reduction_type == TREE_CODE_REDUCTION || reduction_type == FOLD_LEFT_REDUCTION)) { /* This is a reduction pattern: get the vectype from the type of the reduction variable, and get the tree-code from orig_stmt. */ orig_code = gimple_assign_rhs_code (orig_stmt); gcc_assert (vectype_out); vec_mode = TYPE_MODE (vectype_out); } else { /* Regular reduction: use the same vectype and tree-code as used for the vector code inside the loop can be used for the epilog code. */ orig_code = code; if (code == MINUS_EXPR) orig_code = PLUS_EXPR; /* For simple condition reductions, replace with the actual expression we want to base our reduction around. 
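     (E.g. a CONST_COND_REDUCTION whose analysis recorded MAX_EXPR reduces
     the partial result vectors with MAX in the epilogue rather than
     re-evaluating the original COND_EXPR.)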
*/ if (reduction_type == CONST_COND_REDUCTION) { orig_code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info); gcc_assert (orig_code == MAX_EXPR || orig_code == MIN_EXPR); } else if (reduction_type == INTEGER_INDUC_COND_REDUCTION) orig_code = cond_reduc_op_code; } if (nested_cycle) { def_bb = gimple_bb (reduc_def_stmt); def_stmt_loop = def_bb->loop_father; def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt, loop_preheader_edge (def_stmt_loop)); if (TREE_CODE (def_arg) == SSA_NAME && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg)) && gimple_code (def_arg_stmt) == GIMPLE_PHI && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt)) && vinfo_for_stmt (def_arg_stmt) && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt)) == vect_double_reduction_def) double_reduc = true; } reduc_fn = IFN_LAST; if (reduction_type == TREE_CODE_REDUCTION || reduction_type == FOLD_LEFT_REDUCTION || reduction_type == INTEGER_INDUC_COND_REDUCTION || reduction_type == CONST_COND_REDUCTION) { if (reduction_type == FOLD_LEFT_REDUCTION ? fold_left_reduction_fn (orig_code, &reduc_fn) : reduction_fn_for_scalar_code (orig_code, &reduc_fn)) { if (reduc_fn != IFN_LAST && !direct_internal_fn_supported_p (reduc_fn, vectype_out, OPTIMIZE_FOR_SPEED)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduc op not supported by target.\n"); reduc_fn = IFN_LAST; } } else { if (!nested_cycle || double_reduc) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "no reduc code for scalar code.\n"); return false; } } } else if (reduction_type == COND_REDUCTION) { int scalar_precision = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type)); cr_index_scalar_type = make_unsigned_type (scalar_precision); cr_index_vector_type = build_vector_type (cr_index_scalar_type, nunits_out); if (direct_internal_fn_supported_p (IFN_REDUC_MAX, cr_index_vector_type, OPTIMIZE_FOR_SPEED)) reduc_fn = IFN_REDUC_MAX; } if (reduction_type != EXTRACT_LAST_REDUCTION && reduc_fn == IFN_LAST && !nunits_out.is_constant ()) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "missing target support for reduction on" " variable-length vectors.\n"); return false; } if ((double_reduc || reduction_type != TREE_CODE_REDUCTION) && ncopies > 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "multiple types in double reduction or condition " "reduction.\n"); return false; } /* For SLP reductions, see if there is a neutral value we can use. */ tree neutral_op = NULL_TREE; if (slp_node) neutral_op = neutral_op_for_slp_reduction (slp_node_instance->reduc_phis, code, GROUP_FIRST_ELEMENT (stmt_info) != NULL); if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION) { /* We can't support in-order reductions of code such as this: for (int i = 0; i < n1; ++i) for (int j = 0; j < n2; ++j) l += a[j]; since GCC effectively transforms the loop when vectorizing: for (int i = 0; i < n1 / VF; ++i) for (int j = 0; j < n2; ++j) for (int k = 0; k < VF; ++k) l += a[j]; which is a reassociation of the original operation. */ if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "in-order double reduction not supported.\n"); return false; } if (reduction_type == FOLD_LEFT_REDUCTION && slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))) { /* We cannot use in-order reductions in this case because there is an implicit reassociation of the operations involved. 
*/ if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "in-order unchained SLP reductions not supported.\n"); return false; } /* For double reductions, and for SLP reductions with a neutral value, we construct a variable-length initial vector by loading a vector full of the neutral value and then shift-and-inserting the start values into the low-numbered elements. */ if ((double_reduc || neutral_op) && !nunits_out.is_constant () && !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT, vectype_out, OPTIMIZE_FOR_SPEED)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduction on variable-length vectors requires" " target support for a vector-shift-and-insert" " operation.\n"); return false; } /* Check extra constraints for variable-length unchained SLP reductions. */ if (STMT_SLP_TYPE (stmt_info) && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) && !nunits_out.is_constant ()) { /* We checked above that we could build the initial vector when there's a neutral element value. Check here for the case in which each SLP statement has its own initial value and in which that value needs to be repeated for every instance of the statement within the initial vector. */ unsigned int group_size = SLP_TREE_SCALAR_STMTS (slp_node).length (); scalar_mode elt_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype_out)); if (!neutral_op && !can_duplicate_and_interleave_p (group_size, elt_mode)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unsupported form of SLP reduction for" " variable-length vectors: cannot build" " initial vector.\n"); return false; } /* The epilogue code relies on the number of elements being a multiple of the group size. The duplicate-and-interleave approach to setting up the the initial vector does too. */ if (!multiple_p (nunits_out, group_size)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unsupported form of SLP reduction for" " variable-length vectors: the vector size" " is not a multiple of the number of results.\n"); return false; } } /* In case of widenning multiplication by a constant, we update the type of the constant to be the type of the other operand. We check that the constant fits the type in the pattern recognition pass. */ if (code == DOT_PROD_EXPR && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1]))) { if (TREE_CODE (ops[0]) == INTEGER_CST) ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]); else if (TREE_CODE (ops[1]) == INTEGER_CST) ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]); else { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "invalid types in dot-prod\n"); return false; } } if (reduction_type == COND_REDUCTION) { widest_int ni; if (! max_loop_iterations (loop, &ni)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "loop count not known, cannot create cond " "reduction.\n"); return false; } /* Convert backedges to iterations. */ ni += 1; /* The additional index will be the same type as the condition. Check that the loop can fit into this less one (because we'll use up the zero slot for when there are no matches). 
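	 E.g. with an 8-bit unsigned index type TYPE_MAX_VALUE is 255; index 0
	 is reserved for 'no match', so at most 254 iterations can be numbered
	 and the check below rejects anything larger.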
*/ tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type); if (wi::geu_p (ni, wi::to_widest (max_index))) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "loop size is greater than data size.\n"); return false; } } /* In case the vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits), we have to generate more than one vector stmt - i.e - we need to "unroll" the vector stmt by a factor VF/nunits. For more details see documentation in vectorizable_operation. */ /* If the reduction is used in an outer loop we need to generate VF intermediate results, like so (e.g. for ncopies=2): r0 = phi (init, r0) r1 = phi (init, r1) r0 = x0 + r0; r1 = x1 + r1; (i.e. we generate VF results in 2 registers). In this case we have a separate def-use cycle for each copy, and therefore for each copy we get the vector def for the reduction variable from the respective phi node created for this copy. Otherwise (the reduction is unused in the loop nest), we can combine together intermediate results, like so (e.g. for ncopies=2): r = phi (init, r) r = x0 + r; r = x1 + r; (i.e. we generate VF/2 results in a single register). In this case for each copy we get the vector def for the reduction variable from the vectorized reduction operation generated in the previous iteration. This only works when we see both the reduction PHI and its only consumer in vectorizable_reduction and there are no intermediate stmts participating. */ use_operand_p use_p; gimple *use_stmt; if (ncopies > 1 && (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live) && single_imm_use (gimple_phi_result (reduc_def_stmt), &use_p, &use_stmt) && (use_stmt == stmt || STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use_stmt)) == stmt)) { single_defuse_cycle = true; epilog_copies = 1; } else epilog_copies = ncopies; /* If the reduction stmt is one of the patterns that have lane reduction embedded we cannot handle the case of ! single_defuse_cycle. */ if ((ncopies > 1 && ! single_defuse_cycle) && (code == DOT_PROD_EXPR || code == WIDEN_SUM_EXPR || code == SAD_EXPR)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "multi def-use cycle not possible for lane-reducing " "reduction operation\n"); return false; } if (slp_node) vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); else vec_num = 1; internal_fn cond_fn = get_conditional_internal_fn (code); vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo); if (!vec_stmt) /* transformation not required. 
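     In that case we only record the reduction cost, decide whether the loop
     can stay fully masked (outside of fold-left reductions this needs a
     conditional internal function for CODE, and chained reductions cannot be
     masked at all), and tag the stmt for the transform phase.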
*/ { if (first_p) vect_model_reduction_cost (stmt_info, reduc_fn, ncopies); if (loop_vinfo && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)) { if (reduction_type != FOLD_LEFT_REDUCTION && (cond_fn == IFN_LAST || !direct_internal_fn_supported_p (cond_fn, vectype_in, OPTIMIZE_FOR_SPEED))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "can't use a fully-masked loop because no" " conditional operation is available.\n"); LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false; } else if (reduc_index == -1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "can't use a fully-masked loop for chained" " reductions.\n"); LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false; } else vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num, vectype_in); } if (dump_enabled_p () && reduction_type == FOLD_LEFT_REDUCTION) dump_printf_loc (MSG_NOTE, vect_location, "using an in-order (fold-left) reduction.\n"); STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type; return true; } /* Transform. */ if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n"); /* FORNOW: Multiple types are not supported for condition. */ if (code == COND_EXPR) gcc_assert (ncopies == 1); bool masked_loop_p = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo); if (reduction_type == FOLD_LEFT_REDUCTION) return vectorize_fold_left_reduction (stmt, gsi, vec_stmt, slp_node, reduc_def_stmt, code, reduc_fn, ops, vectype_in, reduc_index, masks); if (reduction_type == EXTRACT_LAST_REDUCTION) { gcc_assert (!slp_node); return vectorizable_condition (stmt, gsi, vec_stmt, NULL, reduc_index, NULL); } /* Create the destination vector */ vec_dest = vect_create_destination_var (scalar_dest, vectype_out); prev_stmt_info = NULL; prev_phi_info = NULL; if (!slp_node) { vec_oprnds0.create (1); vec_oprnds1.create (1); if (op_type == ternary_op) vec_oprnds2.create (1); } phis.create (vec_num); vect_defs.create (vec_num); if (!slp_node) vect_defs.quick_push (NULL_TREE); if (slp_node) phis.splice (SLP_TREE_VEC_STMTS (slp_node_instance->reduc_phis)); else phis.quick_push (STMT_VINFO_VEC_STMT (vinfo_for_stmt (reduc_def_stmt))); for (j = 0; j < ncopies; j++) { if (code == COND_EXPR) { gcc_assert (!slp_node); vectorizable_condition (stmt, gsi, vec_stmt, PHI_RESULT (phis[0]), reduc_index, NULL); /* Multiple types are not supported for condition. */ break; } /* Handle uses. */ if (j == 0) { if (slp_node) { /* Get vec defs for all the operands except the reduction index, ensuring the ordering of the ops in the vector is kept. */ auto_vec<tree, 3> slp_ops; auto_vec<vec<tree>, 3> vec_defs; slp_ops.quick_push (ops[0]); slp_ops.quick_push (ops[1]); if (op_type == ternary_op) slp_ops.quick_push (ops[2]); vect_get_slp_defs (slp_ops, slp_node, &vec_defs); vec_oprnds0.safe_splice (vec_defs[0]); vec_defs[0].release (); vec_oprnds1.safe_splice (vec_defs[1]); vec_defs[1].release (); if (op_type == ternary_op) { vec_oprnds2.safe_splice (vec_defs[2]); vec_defs[2].release (); } } else { vec_oprnds0.quick_push (vect_get_vec_def_for_operand (ops[0], stmt)); vec_oprnds1.quick_push (vect_get_vec_def_for_operand (ops[1], stmt)); if (op_type == ternary_op) vec_oprnds2.quick_push (vect_get_vec_def_for_operand (ops[2], stmt)); } } else { if (!slp_node) { gcc_assert (reduc_index != -1 || ! 
single_defuse_cycle); if (single_defuse_cycle && reduc_index == 0) vec_oprnds0[0] = gimple_get_lhs (new_stmt); else vec_oprnds0[0] = vect_get_vec_def_for_stmt_copy (dts[0], vec_oprnds0[0]); if (single_defuse_cycle && reduc_index == 1) vec_oprnds1[0] = gimple_get_lhs (new_stmt); else vec_oprnds1[0] = vect_get_vec_def_for_stmt_copy (dts[1], vec_oprnds1[0]); if (op_type == ternary_op) { if (single_defuse_cycle && reduc_index == 2) vec_oprnds2[0] = gimple_get_lhs (new_stmt); else vec_oprnds2[0] = vect_get_vec_def_for_stmt_copy (dts[2], vec_oprnds2[0]); } } } FOR_EACH_VEC_ELT (vec_oprnds0, i, def0) { tree vop[3] = { def0, vec_oprnds1[i], NULL_TREE }; if (masked_loop_p) { /* Make sure that the reduction accumulator is vop[0]. */ if (reduc_index == 1) { gcc_assert (commutative_tree_code (code)); std::swap (vop[0], vop[1]); } tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies, vectype_in, i * ncopies + j); gcall *call = gimple_build_call_internal (cond_fn, 3, mask, vop[0], vop[1]); new_temp = make_ssa_name (vec_dest, call); gimple_call_set_lhs (call, new_temp); gimple_call_set_nothrow (call, true); new_stmt = call; } else { if (op_type == ternary_op) vop[2] = vec_oprnds2[i]; new_temp = make_ssa_name (vec_dest, new_stmt); new_stmt = gimple_build_assign (new_temp, code, vop[0], vop[1], vop[2]); } vect_finish_stmt_generation (stmt, new_stmt, gsi); if (slp_node) { SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); vect_defs.quick_push (new_temp); } else vect_defs[0] = new_temp; } if (slp_node) continue; if (j == 0) STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; else STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; prev_stmt_info = vinfo_for_stmt (new_stmt); } /* Finalize the reduction-phi (set its arguments) and create the epilog reduction code. */ if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node) vect_defs[0] = gimple_get_lhs (*vec_stmt); vect_create_epilog_for_reduction (vect_defs, stmt, reduc_def_stmt, epilog_copies, reduc_fn, phis, double_reduc, slp_node, slp_node_instance, cond_reduc_val, cond_reduc_op_code, neutral_op); return true; } /* Function vect_min_worthwhile_factor. For a loop where we could vectorize the operation indicated by CODE, return the minimum vectorization factor that makes it worthwhile to use generic vectors. */ static unsigned int vect_min_worthwhile_factor (enum tree_code code) { switch (code) { case PLUS_EXPR: case MINUS_EXPR: case NEGATE_EXPR: return 4; case BIT_AND_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case BIT_NOT_EXPR: return 2; default: return INT_MAX; } } /* Return true if VINFO indicates we are doing loop vectorization and if it is worth decomposing CODE operations into scalar operations for that loop's vectorization factor. */ bool vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code) { loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); unsigned HOST_WIDE_INT value; return (loop_vinfo && LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&value) && value >= vect_min_worthwhile_factor (code)); } /* Function vectorizable_induction Check if PHI performs an induction computation that can be vectorized. If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized phi to replace it, put it in VEC_STMT, and add it to the same basic block. Return FALSE if not a vectorizable STMT, TRUE otherwise. 
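   A typical candidate is the loop counter PHI in
     for (i = 0; i < n; i++) a[i] = i;
   whose evolution is the linear chrec {0, +, 1}.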
*/ bool vectorizable_induction (gimple *phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED, gimple **vec_stmt, slp_tree slp_node) { stmt_vec_info stmt_info = vinfo_for_stmt (phi); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); unsigned ncopies; bool nested_in_vect_loop = false; struct loop *iv_loop; tree vec_def; edge pe = loop_preheader_edge (loop); basic_block new_bb; tree new_vec, vec_init, vec_step, t; tree new_name; gimple *new_stmt; gphi *induction_phi; tree induc_def, vec_dest; tree init_expr, step_expr; poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); unsigned i; tree expr; gimple_seq stmts; imm_use_iterator imm_iter; use_operand_p use_p; gimple *exit_phi; edge latch_e; tree loop_arg; gimple_stmt_iterator si; basic_block bb = gimple_bb (phi); if (gimple_code (phi) != GIMPLE_PHI) return false; if (!STMT_VINFO_RELEVANT_P (stmt_info)) return false; /* Make sure it was recognized as induction computation. */ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def) return false; tree vectype = STMT_VINFO_VECTYPE (stmt_info); poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); if (slp_node) ncopies = 1; else ncopies = vect_get_num_copies (loop_vinfo, vectype); gcc_assert (ncopies >= 1); /* FORNOW. These restrictions should be relaxed. */ if (nested_in_vect_loop_p (loop, phi)) { imm_use_iterator imm_iter; use_operand_p use_p; gimple *exit_phi; edge latch_e; tree loop_arg; if (ncopies > 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "multiple types in nested loop.\n"); return false; } /* FORNOW: outer loop induction with SLP not supported. */ if (STMT_SLP_TYPE (stmt_info)) return false; exit_phi = NULL; latch_e = loop_latch_edge (loop->inner); loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e); FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg) { gimple *use_stmt = USE_STMT (use_p); if (is_gimple_debug (use_stmt)) continue; if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt))) { exit_phi = use_stmt; break; } } if (exit_phi) { stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi); if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo) && !STMT_VINFO_LIVE_P (exit_phi_vinfo))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "inner-loop induction only used outside " "of the outer vectorized loop.\n"); return false; } } nested_in_vect_loop = true; iv_loop = loop->inner; } else iv_loop = loop; gcc_assert (iv_loop == (gimple_bb (phi))->loop_father); if (slp_node && !nunits.is_constant ()) { /* The current SLP code creates the initial value element-by-element. */ if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "SLP induction not supported for variable-length" " vectors.\n"); return false; } if (!vec_stmt) /* transformation not required. */ { STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_induction ===\n"); vect_model_induction_cost (stmt_info, ncopies); return true; } /* Transform. */ /* Compute a vector variable, initialized with the first VF values of the induction variable. E.g., for an iv with IV_PHI='X' and evolution S, for a vector of 4 units, we want to compute: [X, X + S, X + 2*S, X + 3*S]. 
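     Concretely, for X = 0 and S = 1 with 4 units this initial vector is
     [0, 1, 2, 3]; each vector iteration then advances it by the step vector
     [4*S, 4*S, 4*S, 4*S] built further down.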
*/ if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n"); latch_e = loop_latch_edge (iv_loop); loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e); step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info); gcc_assert (step_expr != NULL_TREE); pe = loop_preheader_edge (iv_loop); init_expr = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (iv_loop)); stmts = NULL; if (!nested_in_vect_loop) { /* Convert the initial value to the desired type. */ tree new_type = TREE_TYPE (vectype); init_expr = gimple_convert (&stmts, new_type, init_expr); /* If we are using the loop mask to "peel" for alignment then we need to adjust the start value here. */ tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo); if (skip_niters != NULL_TREE) { if (FLOAT_TYPE_P (vectype)) skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type, skip_niters); else skip_niters = gimple_convert (&stmts, new_type, skip_niters); tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type, skip_niters, step_expr); init_expr = gimple_build (&stmts, MINUS_EXPR, new_type, init_expr, skip_step); } } /* Convert the step to the desired type. */ step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr); if (stmts) { new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); gcc_assert (!new_bb); } /* Find the first insertion point in the BB. */ si = gsi_after_labels (bb); /* For SLP induction we have to generate several IVs as for example with group size 3 we need [i, i, i, i + S] [i + S, i + S, i + 2*S, i + 2*S] [i + 2*S, i + 3*S, i + 3*S, i + 3*S]. The step is the same uniform [VF*S, VF*S, VF*S, VF*S] for all. */ if (slp_node) { /* Enforced above. */ unsigned int const_nunits = nunits.to_constant (); /* Generate [VF*S, VF*S, ... ]. */ if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))) { expr = build_int_cst (integer_type_node, vf); expr = fold_convert (TREE_TYPE (step_expr), expr); } else expr = build_int_cst (TREE_TYPE (step_expr), vf); new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr), expr, step_expr); if (! CONSTANT_CLASS_P (new_name)) new_name = vect_init_vector (phi, new_name, TREE_TYPE (step_expr), NULL); new_vec = build_vector_from_val (vectype, new_name); vec_step = vect_init_vector (phi, new_vec, vectype, NULL); /* Now generate the IVs. */ unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length (); unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); unsigned elts = const_nunits * nvects; unsigned nivs = least_common_multiple (group_size, const_nunits) / const_nunits; gcc_assert (elts % group_size == 0); tree elt = init_expr; unsigned ivn; for (ivn = 0; ivn < nivs; ++ivn) { tree_vector_builder elts (vectype, const_nunits, 1); stmts = NULL; for (unsigned eltn = 0; eltn < const_nunits; ++eltn) { if (ivn*const_nunits + eltn >= group_size && (ivn * const_nunits + eltn) % group_size == 0) elt = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (elt), elt, step_expr); elts.quick_push (elt); } vec_init = gimple_build_vector (&stmts, &elts); if (stmts) { new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); gcc_assert (!new_bb); } /* Create the induction-phi that defines the induction-operand. 
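	     The cycle created below has the shape
	       vec_iv = PHI <vec_init (preheader), vec_iv + vec_step (latch)>.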
*/ vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_"); induction_phi = create_phi_node (vec_dest, iv_loop->header); set_vinfo_for_stmt (induction_phi, new_stmt_vec_info (induction_phi, loop_vinfo)); induc_def = PHI_RESULT (induction_phi); /* Create the iv update inside the loop */ vec_def = make_ssa_name (vec_dest); new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step); gsi_insert_before (&si, new_stmt, GSI_SAME_STMT); set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo)); /* Set the arguments of the phi node: */ add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION); add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop), UNKNOWN_LOCATION); SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi); } /* Re-use IVs when we can. */ if (ivn < nvects) { unsigned vfp = least_common_multiple (group_size, const_nunits) / group_size; /* Generate [VF'*S, VF'*S, ... ]. */ if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))) { expr = build_int_cst (integer_type_node, vfp); expr = fold_convert (TREE_TYPE (step_expr), expr); } else expr = build_int_cst (TREE_TYPE (step_expr), vfp); new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr), expr, step_expr); if (! CONSTANT_CLASS_P (new_name)) new_name = vect_init_vector (phi, new_name, TREE_TYPE (step_expr), NULL); new_vec = build_vector_from_val (vectype, new_name); vec_step = vect_init_vector (phi, new_vec, vectype, NULL); for (; ivn < nvects; ++ivn) { gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs]; tree def; if (gimple_code (iv) == GIMPLE_PHI) def = gimple_phi_result (iv); else def = gimple_assign_lhs (iv); new_stmt = gimple_build_assign (make_ssa_name (vectype), PLUS_EXPR, def, vec_step); if (gimple_code (iv) == GIMPLE_PHI) gsi_insert_before (&si, new_stmt, GSI_SAME_STMT); else { gimple_stmt_iterator tgsi = gsi_for_stmt (iv); gsi_insert_after (&tgsi, new_stmt, GSI_CONTINUE_LINKING); } set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo)); SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); } } return true; } /* Create the vector that holds the initial_value of the induction. */ if (nested_in_vect_loop) { /* iv_loop is nested in the loop to be vectorized. init_expr had already been created during vectorization of previous stmts. We obtain it from the STMT_VINFO_VEC_STMT of the defining stmt. */ vec_init = vect_get_vec_def_for_operand (init_expr, phi); /* If the initial value is not of proper type, convert it. */ if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init))) { new_stmt = gimple_build_assign (vect_get_new_ssa_name (vectype, vect_simple_var, "vec_iv_"), VIEW_CONVERT_EXPR, build1 (VIEW_CONVERT_EXPR, vectype, vec_init)); vec_init = gimple_assign_lhs (new_stmt); new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop), new_stmt); gcc_assert (!new_bb); set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo)); } } else { /* iv_loop is the loop to be vectorized. 
Create: vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */ stmts = NULL; new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr); unsigned HOST_WIDE_INT const_nunits; if (nunits.is_constant (&const_nunits)) { tree_vector_builder elts (vectype, const_nunits, 1); elts.quick_push (new_name); for (i = 1; i < const_nunits; i++) { /* Create: new_name_i = new_name + step_expr */ new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name), new_name, step_expr); elts.quick_push (new_name); } /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */ vec_init = gimple_build_vector (&stmts, &elts); } else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr))) /* Build the initial value directly from a VEC_SERIES_EXPR. */ vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, vectype, new_name, step_expr); else { /* Build: [base, base, base, ...] + (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */ gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))); gcc_assert (flag_associative_math); tree index = build_index_vector (vectype, 0, 1); tree base_vec = gimple_build_vector_from_val (&stmts, vectype, new_name); tree step_vec = gimple_build_vector_from_val (&stmts, vectype, step_expr); vec_init = gimple_build (&stmts, FLOAT_EXPR, vectype, index); vec_init = gimple_build (&stmts, MULT_EXPR, vectype, vec_init, step_vec); vec_init = gimple_build (&stmts, PLUS_EXPR, vectype, vec_init, base_vec); } if (stmts) { new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); gcc_assert (!new_bb); } } /* Create the vector that holds the step of the induction. */ if (nested_in_vect_loop) /* iv_loop is nested in the loop to be vectorized. Generate: vec_step = [S, S, S, S] */ new_name = step_expr; else { /* iv_loop is the loop to be vectorized. Generate: vec_step = [VF*S, VF*S, VF*S, VF*S] */ gimple_seq seq = NULL; if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))) { expr = build_int_cst (integer_type_node, vf); expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr); } else expr = build_int_cst (TREE_TYPE (step_expr), vf); new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr), expr, step_expr); if (seq) { new_bb = gsi_insert_seq_on_edge_immediate (pe, seq); gcc_assert (!new_bb); } } t = unshare_expr (new_name); gcc_assert (CONSTANT_CLASS_P (new_name) || TREE_CODE (new_name) == SSA_NAME); new_vec = build_vector_from_val (vectype, t); vec_step = vect_init_vector (phi, new_vec, vectype, NULL); /* Create the following def-use cycle: loop prolog: vec_init = ... vec_step = ... loop: vec_iv = PHI <vec_init, vec_loop> ... STMT ... vec_loop = vec_iv + vec_step; */ /* Create the induction-phi that defines the induction-operand. 
*/ vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_"); induction_phi = create_phi_node (vec_dest, iv_loop->header); set_vinfo_for_stmt (induction_phi, new_stmt_vec_info (induction_phi, loop_vinfo)); induc_def = PHI_RESULT (induction_phi); /* Create the iv update inside the loop */ vec_def = make_ssa_name (vec_dest); new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step); gsi_insert_before (&si, new_stmt, GSI_SAME_STMT); set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo)); /* Set the arguments of the phi node: */ add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION); add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop), UNKNOWN_LOCATION); STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi; /* In case that vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits), we have to generate more than one vector stmt - i.e - we need to "unroll" the vector stmt by a factor VF/nunits. For more details see documentation in vectorizable_operation. */ if (ncopies > 1) { gimple_seq seq = NULL; stmt_vec_info prev_stmt_vinfo; /* FORNOW. This restriction should be relaxed. */ gcc_assert (!nested_in_vect_loop); /* Create the vector that holds the step of the induction. */ if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))) { expr = build_int_cst (integer_type_node, nunits); expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr); } else expr = build_int_cst (TREE_TYPE (step_expr), nunits); new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr), expr, step_expr); if (seq) { new_bb = gsi_insert_seq_on_edge_immediate (pe, seq); gcc_assert (!new_bb); } t = unshare_expr (new_name); gcc_assert (CONSTANT_CLASS_P (new_name) || TREE_CODE (new_name) == SSA_NAME); new_vec = build_vector_from_val (vectype, t); vec_step = vect_init_vector (phi, new_vec, vectype, NULL); vec_def = induc_def; prev_stmt_vinfo = vinfo_for_stmt (induction_phi); for (i = 1; i < ncopies; i++) { /* vec_i = vec_prev + vec_step */ new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR, vec_def, vec_step); vec_def = make_ssa_name (vec_dest, new_stmt); gimple_assign_set_lhs (new_stmt, vec_def); gsi_insert_before (&si, new_stmt, GSI_SAME_STMT); set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo)); STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt; prev_stmt_vinfo = vinfo_for_stmt (new_stmt); } } if (nested_in_vect_loop) { /* Find the loop-closed exit-phi of the induction, and record the final vector of induction results: */ exit_phi = NULL; FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg) { gimple *use_stmt = USE_STMT (use_p); if (is_gimple_debug (use_stmt)) continue; if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt))) { exit_phi = use_stmt; break; } } if (exit_phi) { stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi); /* FORNOW. Currently not supporting the case that an inner-loop induction is not used in the outer-loop (i.e. only outside the outer-loop). 
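	     In other words the loop-closed value of the inner-loop induction
	     must be consumed inside the outer loop body itself; exit phis that
	     are only live past the outer loop were already rejected during
	     analysis above.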
*/ gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo) && !STMT_VINFO_LIVE_P (stmt_vinfo)); STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt; if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "vector of inductions after inner-loop:"); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0); } } } if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "transform induction: created def-use cycle: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (vec_def), 0); } return true; } /* Function vectorizable_live_operation. STMT computes a value that is used outside the loop. Check if it can be supported. */ bool vectorizable_live_operation (gimple *stmt, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED, slp_tree slp_node, int slp_index, gimple **vec_stmt) { stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); imm_use_iterator imm_iter; tree lhs, lhs_type, bitsize, vec_bitsize; tree vectype = STMT_VINFO_VECTYPE (stmt_info); poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); int ncopies; gimple *use_stmt; auto_vec<tree> vec_oprnds; int vec_entry = 0; poly_uint64 vec_index = 0; gcc_assert (STMT_VINFO_LIVE_P (stmt_info)); if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def) return false; /* FORNOW. CHECKME. */ if (nested_in_vect_loop_p (loop, stmt)) return false; /* If STMT is not relevant and it is a simple assignment and its inputs are invariant then it can remain in place, unvectorized. The original last scalar value that it computes will be used. */ if (!STMT_VINFO_RELEVANT_P (stmt_info)) { gcc_assert (is_simple_and_all_uses_invariant (stmt, loop_vinfo)); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "statement is simple and uses invariant. Leaving in " "place.\n"); return true; } if (slp_node) ncopies = 1; else ncopies = vect_get_num_copies (loop_vinfo, vectype); if (slp_node) { gcc_assert (slp_index >= 0); int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length (); int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); /* Get the last occurrence of the scalar index from the concatenation of all the slp vectors. Calculate which slp vector it is and the index within. */ poly_uint64 pos = (num_vec * nunits) - num_scalar + slp_index; /* Calculate which vector contains the result, and which lane of that vector we need. */ if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "Cannot determine which vector holds the" " final result.\n"); return false; } } if (!vec_stmt) { /* No transformation required. 
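	 Analysis only decides whether the loop can stay fully masked:
	 extracting the live value then requires target support for
	 IFN_EXTRACT_LAST and works for a single copy without SLP, in which
	 case one loop mask is recorded.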
*/ if (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)) { if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype, OPTIMIZE_FOR_SPEED)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "can't use a fully-masked loop because " "the target doesn't support extract last " "reduction.\n"); LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false; } else if (slp_node) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "can't use a fully-masked loop because an " "SLP statement is live after the loop.\n"); LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false; } else if (ncopies > 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "can't use a fully-masked loop because" " ncopies is greater than 1.\n"); LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false; } else { gcc_assert (ncopies == 1 && !slp_node); vect_record_loop_mask (loop_vinfo, &LOOP_VINFO_MASKS (loop_vinfo), 1, vectype); } } return true; } /* If stmt has a related stmt, then use that for getting the lhs. */ if (is_pattern_stmt_p (stmt_info)) stmt = STMT_VINFO_RELATED_STMT (stmt_info); lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt) : gimple_get_lhs (stmt); lhs_type = TREE_TYPE (lhs); bitsize = (VECTOR_BOOLEAN_TYPE_P (vectype) ? bitsize_int (TYPE_PRECISION (TREE_TYPE (vectype))) : TYPE_SIZE (TREE_TYPE (vectype))); vec_bitsize = TYPE_SIZE (vectype); /* Get the vectorized lhs of STMT and the lane to use (counted in bits). */ tree vec_lhs, bitstart; if (slp_node) { gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)); /* Get the correct slp vectorized stmt. */ gimple *vec_stmt = SLP_TREE_VEC_STMTS (slp_node)[vec_entry]; if (gphi *phi = dyn_cast <gphi *> (vec_stmt)) vec_lhs = gimple_phi_result (phi); else vec_lhs = gimple_get_lhs (vec_stmt); /* Get entry to use. */ bitstart = bitsize_int (vec_index); bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart); } else { enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info); vec_lhs = vect_get_vec_def_for_operand_1 (stmt, dt); gcc_checking_assert (ncopies == 1 || !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)); /* For multiple copies, get the last copy. */ for (int i = 1; i < ncopies; ++i) vec_lhs = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, vec_lhs); /* Get the last lane in the vector. */ bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize); } gimple_seq stmts = NULL; tree new_tree; if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) { /* Emit: SCALAR_RES = EXTRACT_LAST <VEC_LHS, MASK> where VEC_LHS is the vectorized live-out result and MASK is the loop mask for the final iteration. */ gcc_assert (ncopies == 1 && !slp_node); tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info)); tree scalar_res = make_ssa_name (scalar_type); tree mask = vect_get_loop_mask (gsi, &LOOP_VINFO_MASKS (loop_vinfo), 1, vectype, 0); gcall *new_stmt = gimple_build_call_internal (IFN_EXTRACT_LAST, 2, mask, vec_lhs); gimple_call_set_lhs (new_stmt, scalar_res); gimple_seq_add_stmt (&stmts, new_stmt); /* Convert the extracted vector element to the required scalar type. 
*/ new_tree = gimple_convert (&stmts, lhs_type, scalar_res); } else { tree bftype = TREE_TYPE (vectype); if (VECTOR_BOOLEAN_TYPE_P (vectype)) bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1); new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs, bitsize, bitstart); new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree), &stmts, true, NULL_TREE); } if (stmts) gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts); /* Replace use of lhs with newly computed result. If the use stmt is a single arg PHI, just replace all uses of PHI result. It's necessary because lcssa PHI defining lhs may be before newly inserted stmt. */ use_operand_p use_p; FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs) if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)) && !is_gimple_debug (use_stmt)) { if (gimple_code (use_stmt) == GIMPLE_PHI && gimple_phi_num_args (use_stmt) == 1) { replace_uses_by (gimple_phi_result (use_stmt), new_tree); } else { FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter) SET_USE (use_p, new_tree); } update_stmt (use_stmt); } return true; } /* Kill any debug uses outside LOOP of SSA names defined in STMT. */ static void vect_loop_kill_debug_uses (struct loop *loop, gimple *stmt) { ssa_op_iter op_iter; imm_use_iterator imm_iter; def_operand_p def_p; gimple *ustmt; FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF) { FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p)) { basic_block bb; if (!is_gimple_debug (ustmt)) continue; bb = gimple_bb (ustmt); if (!flow_bb_inside_loop_p (loop, bb)) { if (gimple_debug_bind_p (ustmt)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "killing debug use\n"); gimple_debug_bind_reset_value (ustmt); update_stmt (ustmt); } else gcc_unreachable (); } } } } /* Given loop represented by LOOP_VINFO, return true if computation of LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false otherwise. */ static bool loop_niters_no_overflow (loop_vec_info loop_vinfo) { /* Constant case. */ if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) { tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo); tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo); gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST); gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST); if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters)) return true; } widest_int max; struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); /* Check the upper bound of loop niters. */ if (get_max_loop_iterations (loop, &max)) { tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)); signop sgn = TYPE_SIGN (type); widest_int type_max = widest_int::from (wi::max_value (type), sgn); if (max < type_max) return true; } return false; } /* Return a mask type with half the number of elements as TYPE. */ tree vect_halve_mask_nunits (tree type) { poly_uint64 nunits = exact_div (TYPE_VECTOR_SUBPARTS (type), 2); return build_truth_vector_type (nunits, current_vector_size); } /* Return a mask type with twice as many elements as TYPE. */ tree vect_double_mask_nunits (tree type) { poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type) * 2; return build_truth_vector_type (nunits, current_vector_size); } /* Record that a fully-masked version of LOOP_VINFO would need MASKS to contain a sequence of NVECTORS masks that each control a vector of type VECTYPE. 
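   For example, with a vectorization factor of 16 and an 8-element VECTYPE,
   NVECTORS is 2 and nscalars_per_iter = 2 * 8 / 16 = 1, i.e. each mask
   element controls one scalar iteration.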
*/ void vect_record_loop_mask (loop_vec_info loop_vinfo, vec_loop_masks *masks, unsigned int nvectors, tree vectype) { gcc_assert (nvectors != 0); if (masks->length () < nvectors) masks->safe_grow_cleared (nvectors); rgroup_masks *rgm = &(*masks)[nvectors - 1]; /* The number of scalars per iteration and the number of vectors are both compile-time constants. */ unsigned int nscalars_per_iter = exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype), LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant (); if (rgm->max_nscalars_per_iter < nscalars_per_iter) { rgm->max_nscalars_per_iter = nscalars_per_iter; rgm->mask_type = build_same_sized_truth_vector_type (vectype); } } /* Given a complete set of masks MASKS, extract mask number INDEX for an rgroup that operates on NVECTORS vectors of type VECTYPE, where 0 <= INDEX < NVECTORS. Insert any set-up statements before GSI. See the comment above vec_loop_masks for more details about the mask arrangement. */ tree vect_get_loop_mask (gimple_stmt_iterator *gsi, vec_loop_masks *masks, unsigned int nvectors, tree vectype, unsigned int index) { rgroup_masks *rgm = &(*masks)[nvectors - 1]; tree mask_type = rgm->mask_type; /* Populate the rgroup's mask array, if this is the first time we've used it. */ if (rgm->masks.is_empty ()) { rgm->masks.safe_grow_cleared (nvectors); for (unsigned int i = 0; i < nvectors; ++i) { tree mask = make_temp_ssa_name (mask_type, NULL, "loop_mask"); /* Provide a dummy definition until the real one is available. */ SSA_NAME_DEF_STMT (mask) = gimple_build_nop (); rgm->masks[i] = mask; } } tree mask = rgm->masks[index]; if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type), TYPE_VECTOR_SUBPARTS (vectype))) { /* A loop mask for data type X can be reused for data type Y if X has N times more elements than Y and if Y's elements are N times bigger than X's. In this case each sequence of N elements in the loop mask will be all-zero or all-one. We can then view-convert the mask so that each sequence of N elements is replaced by a single element. */ gcc_assert (multiple_p (TYPE_VECTOR_SUBPARTS (mask_type), TYPE_VECTOR_SUBPARTS (vectype))); gimple_seq seq = NULL; mask_type = build_same_sized_truth_vector_type (vectype); mask = gimple_build (&seq, VIEW_CONVERT_EXPR, mask_type, mask); if (seq) gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT); } return mask; } /* Scale profiling counters by estimation for LOOP which is vectorized by factor VF. */ static void scale_profile_for_vect_loop (struct loop *loop, unsigned vf) { edge preheader = loop_preheader_edge (loop); /* Reduce loop iterations by the vectorization factor. */ gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf); profile_count freq_h = loop->header->count, freq_e = preheader->count (); if (freq_h.nonzero_p ()) { profile_probability p; /* Avoid dropping loop body profile counter to 0 because of zero count in loop's preheader. */ if (!(freq_e == profile_count::zero ())) freq_e = freq_e.force_nonzero (); p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h); scale_loop_frequencies (loop, p); } edge exit_e = single_exit (loop); exit_e->probability = profile_probability::always () .apply_scale (1, new_est_niter + 1); edge exit_l = single_pred_edge (loop->latch); profile_probability prob = exit_l->probability; exit_l->probability = exit_e->probability.invert (); if (prob.initialized_p () && exit_l->probability.initialized_p ()) scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob); } /* Function vect_transform_loop. 
The analysis phase has determined that the loop is vectorizable. Vectorize the loop - created vectorized stmts to replace the scalar stmts in the loop, and update the loop exit condition. Returns scalar epilogue loop if any. */ struct loop * vect_transform_loop (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); struct loop *epilogue = NULL; basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); int nbbs = loop->num_nodes; int i; tree niters_vector = NULL_TREE; tree step_vector = NULL_TREE; tree niters_vector_mult_vf = NULL_TREE; poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); unsigned int lowest_vf = constant_lower_bound (vf); bool grouped_store; bool slp_scheduled = false; gimple *stmt, *pattern_stmt; gimple_seq pattern_def_seq = NULL; gimple_stmt_iterator pattern_def_si = gsi_none (); bool transform_pattern_stmt = false; bool check_profitability = false; unsigned int th; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n"); /* Use the more conservative vectorization threshold. If the number of iterations is constant assume the cost check has been performed by our caller. If the threshold makes all loops profitable that run at least the (estimated) vectorization factor number of times checking is pointless, too. */ th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo); if (th >= vect_vf_for_cost (loop_vinfo) && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Profitability threshold is %d loop iterations.\n", th); check_profitability = true; } /* Make sure there exists a single-predecessor exit bb. Do this before versioning. */ edge e = single_exit (loop); if (! single_pred_p (e->dest)) { split_loop_exit_edge (e); if (dump_enabled_p ()) dump_printf (MSG_NOTE, "split exit edge\n"); } /* Version the loop first, if required, so the profitability check comes first. */ if (LOOP_REQUIRES_VERSIONING (loop_vinfo)) { poly_uint64 versioning_threshold = LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo); if (check_profitability && ordered_p (poly_uint64 (th), versioning_threshold)) { versioning_threshold = ordered_max (poly_uint64 (th), versioning_threshold); check_profitability = false; } vect_loop_versioning (loop_vinfo, th, check_profitability, versioning_threshold); check_profitability = false; } /* Make sure there exists a single-predecessor exit bb also on the scalar loop copy. Do this after versioning but before peeling so CFG structure is fine for both scalar and if-converted loop to make slpeel_duplicate_current_defs_from_edges face matched loop closed PHI nodes on the exit. */ if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo)) { e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo)); if (! 
single_pred_p (e->dest)) { split_loop_exit_edge (e); if (dump_enabled_p ()) dump_printf (MSG_NOTE, "split exit edge of scalar loop\n"); } } tree niters = vect_build_loop_niters (loop_vinfo); LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters; tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo)); bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo); epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector, &step_vector, &niters_vector_mult_vf, th, check_profitability, niters_no_overflow); if (niters_vector == NULL_TREE) { if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo) && known_eq (lowest_vf, vf)) { niters_vector = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)), LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf); step_vector = build_one_cst (TREE_TYPE (niters)); } else vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector, &step_vector, niters_no_overflow); } /* 1) Make sure the loop header has exactly two entries 2) Make sure we have a preheader basic block. */ gcc_assert (EDGE_COUNT (loop->header->preds) == 2); split_edge (loop_preheader_edge (loop)); if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo) && vect_use_loop_mask_for_alignment_p (loop_vinfo)) /* This will deal with any possible peeling. */ vect_prepare_for_masked_peels (loop_vinfo); /* FORNOW: the vectorizer supports only loops which body consist of one basic block (header + empty latch). When the vectorizer will support more involved loop forms, the order by which the BBs are traversed need to be reconsidered. */ for (i = 0; i < nbbs; i++) { basic_block bb = bbs[i]; stmt_vec_info stmt_info; for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) { gphi *phi = si.phi (); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "------>vectorizing phi: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); } stmt_info = vinfo_for_stmt (phi); if (!stmt_info) continue; if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info)) vect_loop_kill_debug_uses (loop, phi); if (!STMT_VINFO_RELEVANT_P (stmt_info) && !STMT_VINFO_LIVE_P (stmt_info)) continue; if (STMT_VINFO_VECTYPE (stmt_info) && (maybe_ne (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf)) && dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n"); if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle) && ! PURE_SLP_STMT (stmt_info)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n"); vect_transform_stmt (phi, NULL, NULL, NULL, NULL); } } pattern_stmt = NULL; for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si) || transform_pattern_stmt;) { bool is_store; if (transform_pattern_stmt) stmt = pattern_stmt; else { stmt = gsi_stmt (si); /* During vectorization remove existing clobber stmts. */ if (gimple_clobber_p (stmt)) { unlink_stmt_vdef (stmt); gsi_remove (&si, true); release_defs (stmt); continue; } } if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "------>vectorizing statement: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); } stmt_info = vinfo_for_stmt (stmt); /* vector stmts created in the outer-loop during vectorization of stmts in an inner-loop may not have a stmt_info, and do not need to be vectorized. 
*/ if (!stmt_info) { gsi_next (&si); continue; } if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info)) vect_loop_kill_debug_uses (loop, stmt); if (!STMT_VINFO_RELEVANT_P (stmt_info) && !STMT_VINFO_LIVE_P (stmt_info)) { if (STMT_VINFO_IN_PATTERN_P (stmt_info) && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info)) && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) { stmt = pattern_stmt; stmt_info = vinfo_for_stmt (stmt); } else { gsi_next (&si); continue; } } else if (STMT_VINFO_IN_PATTERN_P (stmt_info) && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info)) && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) transform_pattern_stmt = true; /* If pattern statement has def stmts, vectorize them too. */ if (is_pattern_stmt_p (stmt_info)) { if (pattern_def_seq == NULL) { pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info); pattern_def_si = gsi_start (pattern_def_seq); } else if (!gsi_end_p (pattern_def_si)) gsi_next (&pattern_def_si); if (pattern_def_seq != NULL) { gimple *pattern_def_stmt = NULL; stmt_vec_info pattern_def_stmt_info = NULL; while (!gsi_end_p (pattern_def_si)) { pattern_def_stmt = gsi_stmt (pattern_def_si); pattern_def_stmt_info = vinfo_for_stmt (pattern_def_stmt); if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info) || STMT_VINFO_LIVE_P (pattern_def_stmt_info)) break; gsi_next (&pattern_def_si); } if (!gsi_end_p (pattern_def_si)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "==> vectorizing pattern def " "stmt: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0); } stmt = pattern_def_stmt; stmt_info = pattern_def_stmt_info; } else { pattern_def_si = gsi_none (); transform_pattern_stmt = false; } } else transform_pattern_stmt = false; } if (STMT_VINFO_VECTYPE (stmt_info)) { poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)); if (!STMT_SLP_TYPE (stmt_info) && maybe_ne (nunits, vf) && dump_enabled_p ()) /* For SLP VF is set according to unrolling factor, and not to vector size, hence for SLP this print is not valid. */ dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n"); } /* SLP. Schedule all the SLP instances when the first SLP stmt is reached. */ if (STMT_SLP_TYPE (stmt_info)) { if (!slp_scheduled) { slp_scheduled = true; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== scheduling SLP instances ===\n"); vect_schedule_slp (loop_vinfo); } /* Hybrid SLP stmts must be vectorized in addition to SLP. */ if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info)) { if (!transform_pattern_stmt && gsi_end_p (pattern_def_si)) { pattern_def_seq = NULL; gsi_next (&si); } continue; } } /* -------- vectorize statement ------------ */ if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n"); grouped_store = false; is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL); if (is_store) { if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) { /* Interleaving. If IS_STORE is TRUE, the vectorization of the interleaving chain was completed - free all the stores in the chain. */ gsi_next (&si); vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info)); } else { /* Free the attached stmt_vec_info and remove the stmt. */ gimple *store = gsi_stmt (si); free_stmt_vec_info (store); unlink_stmt_vdef (store); gsi_remove (&si, true); release_defs (store); } /* Stores can only appear at the end of pattern statements. 
*/ gcc_assert (!transform_pattern_stmt); pattern_def_seq = NULL; } else if (!transform_pattern_stmt && gsi_end_p (pattern_def_si)) { pattern_def_seq = NULL; gsi_next (&si); } } /* stmts in BB */ /* Stub out scalar statements that must not survive vectorization. Doing this here helps with grouped statements, or statements that are involved in patterns. */ for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi)); if (call && gimple_call_internal_p (call, IFN_MASK_LOAD)) { tree lhs = gimple_get_lhs (call); if (!VECTOR_TYPE_P (TREE_TYPE (lhs))) { tree zero = build_zero_cst (TREE_TYPE (lhs)); gimple *new_stmt = gimple_build_assign (lhs, zero); gsi_replace (&gsi, new_stmt, true); } } } } /* BBs in loop */ /* The vectorization factor is always > 1, so if we use an IV increment of 1. a zero NITERS becomes a nonzero NITERS_VECTOR. */ if (integer_onep (step_vector)) niters_no_overflow = true; vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector, niters_vector_mult_vf, !niters_no_overflow); unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo); scale_profile_for_vect_loop (loop, assumed_vf); /* True if the final iteration might not handle a full vector's worth of scalar iterations. */ bool final_iter_may_be_partial = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo); /* The minimum number of iterations performed by the epilogue. This is 1 when peeling for gaps because we always need a final scalar iteration. */ int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0; /* +1 to convert latch counts to loop iteration counts, -min_epilogue_iters to remove iterations that cannot be performed by the vector code. */ int bias_for_lowest = 1 - min_epilogue_iters; int bias_for_assumed = bias_for_lowest; int alignment_npeels = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo); if (alignment_npeels && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) { /* When the amount of peeling is known at compile time, the first iteration will have exactly alignment_npeels active elements. In the worst case it will have at least one. */ int min_first_active = (alignment_npeels > 0 ? alignment_npeels : 1); bias_for_lowest += lowest_vf - min_first_active; bias_for_assumed += assumed_vf - min_first_active; } /* In these calculations the "- 1" converts loop iteration counts back to latch counts. */ if (loop->any_upper_bound) loop->nb_iterations_upper_bound = (final_iter_may_be_partial ? wi::udiv_ceil (loop->nb_iterations_upper_bound + bias_for_lowest, lowest_vf) - 1 : wi::udiv_floor (loop->nb_iterations_upper_bound + bias_for_lowest, lowest_vf) - 1); if (loop->any_likely_upper_bound) loop->nb_iterations_likely_upper_bound = (final_iter_may_be_partial ? wi::udiv_ceil (loop->nb_iterations_likely_upper_bound + bias_for_lowest, lowest_vf) - 1 : wi::udiv_floor (loop->nb_iterations_likely_upper_bound + bias_for_lowest, lowest_vf) - 1); if (loop->any_estimate) loop->nb_iterations_estimate = (final_iter_may_be_partial ? 
wi::udiv_ceil (loop->nb_iterations_estimate + bias_for_assumed, assumed_vf) - 1 : wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed, assumed_vf) - 1); if (dump_enabled_p ()) { if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo)) { dump_printf_loc (MSG_NOTE, vect_location, "LOOP VECTORIZED\n"); if (loop->inner) dump_printf_loc (MSG_NOTE, vect_location, "OUTER LOOP VECTORIZED\n"); dump_printf (MSG_NOTE, "\n"); } else { dump_printf_loc (MSG_NOTE, vect_location, "LOOP EPILOGUE VECTORIZED (VS="); dump_dec (MSG_NOTE, current_vector_size); dump_printf (MSG_NOTE, ")\n"); } } /* Free SLP instances here because otherwise stmt reference counting won't work. */ slp_instance instance; FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance) vect_free_slp_instance (instance); LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release (); /* Clear-up safelen field since its value is invalid after vectorization since vectorized loop can have loop-carried dependencies. */ loop->safelen = 0; /* Don't vectorize epilogue for epilogue. */ if (LOOP_VINFO_EPILOGUE_P (loop_vinfo)) epilogue = NULL; if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK)) epilogue = NULL; if (epilogue) { auto_vector_sizes vector_sizes; targetm.vectorize.autovectorize_vector_sizes (&vector_sizes); unsigned int next_size = 0; if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0 && known_eq (vf, lowest_vf)) { unsigned int eiters = (LOOP_VINFO_INT_NITERS (loop_vinfo) - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)); eiters = eiters % lowest_vf; epilogue->nb_iterations_upper_bound = eiters - 1; unsigned int ratio; while (next_size < vector_sizes.length () && !(constant_multiple_p (current_vector_size, vector_sizes[next_size], &ratio) && eiters >= lowest_vf / ratio)) next_size += 1; } else while (next_size < vector_sizes.length () && maybe_lt (current_vector_size, vector_sizes[next_size])) next_size += 1; if (next_size == vector_sizes.length ()) epilogue = NULL; } if (epilogue) { epilogue->force_vectorize = loop->force_vectorize; epilogue->safelen = loop->safelen; epilogue->dont_vectorize = false; /* We may need to if-convert epilogue to vectorize it. */ if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo)) tree_if_conversion (epilogue); } return epilogue; } /* The code below is trying to perform simple optimization - revert if-conversion for masked stores, i.e. if the mask of a store is zero do not perform it and all stored value producers also if possible. For example, for (i=0; i<n; i++) if (c[i]) { p1[i] += 1; p2[i] = p3[i] +2; } this transformation will produce the following semi-hammock: if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 }) { vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165); vect__12.22_172 = vect__11.19_170 + vect_cst__171; MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172); vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165); vect__19.28_184 = vect__18.25_182 + vect_cst__183; MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184); } */ void optimize_mask_stores (struct loop *loop) { basic_block *bbs = get_loop_body (loop); unsigned nbbs = loop->num_nodes; unsigned i; basic_block bb; struct loop *bb_loop; gimple_stmt_iterator gsi; gimple *stmt; auto_vec<gimple *> worklist; vect_location = find_loop_location (loop); /* Pick up all masked stores in loop if any. 
*/ for (i = 0; i < nbbs; i++) { bb = bbs[i]; for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { stmt = gsi_stmt (gsi); if (gimple_call_internal_p (stmt, IFN_MASK_STORE)) worklist.safe_push (stmt); } } free (bbs); if (worklist.is_empty ()) return; /* Loop has masked stores. */ while (!worklist.is_empty ()) { gimple *last, *last_store; edge e, efalse; tree mask; basic_block store_bb, join_bb; gimple_stmt_iterator gsi_to; tree vdef, new_vdef; gphi *phi; tree vectype; tree zero; last = worklist.pop (); mask = gimple_call_arg (last, 2); bb = gimple_bb (last); /* Create then_bb and if-then structure in CFG, then_bb belongs to the same loop as if_bb. It could be different to LOOP when two level loop-nest is vectorized and mask_store belongs to the inner one. */ e = split_block (bb, last); bb_loop = bb->loop_father; gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop)); join_bb = e->dest; store_bb = create_empty_bb (bb); add_bb_to_loop (store_bb, bb_loop); e->flags = EDGE_TRUE_VALUE; efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE); /* Put STORE_BB to likely part. */ efalse->probability = profile_probability::unlikely (); store_bb->count = efalse->count (); make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU); if (dom_info_available_p (CDI_DOMINATORS)) set_immediate_dominator (CDI_DOMINATORS, store_bb, bb); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Create new block %d to sink mask stores.", store_bb->index); /* Create vector comparison with boolean result. */ vectype = TREE_TYPE (mask); zero = build_zero_cst (vectype); stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE); gsi = gsi_last_bb (bb); gsi_insert_after (&gsi, stmt, GSI_SAME_STMT); /* Create new PHI node for vdef of the last masked store: .MEM_2 = VDEF <.MEM_1> will be converted to .MEM.3 = VDEF <.MEM_1> and new PHI node will be created in join bb .MEM_2 = PHI <.MEM_1, .MEM_3> */ vdef = gimple_vdef (last); new_vdef = make_ssa_name (gimple_vop (cfun), last); gimple_set_vdef (last, new_vdef); phi = create_phi_node (vdef, join_bb); add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION); /* Put all masked stores with the same mask to STORE_BB if possible. */ while (true) { gimple_stmt_iterator gsi_from; gimple *stmt1 = NULL; /* Move masked store to STORE_BB. */ last_store = last; gsi = gsi_for_stmt (last); gsi_from = gsi; /* Shift GSI to the previous stmt for further traversal. */ gsi_prev (&gsi); gsi_to = gsi_start_bb (store_bb); gsi_move_before (&gsi_from, &gsi_to); /* Setup GSI_TO to the non-empty block start. */ gsi_to = gsi_start_bb (store_bb); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Move stmt to created bb\n"); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0); } /* Move all stored value producers if possible. */ while (!gsi_end_p (gsi)) { tree lhs; imm_use_iterator imm_iter; use_operand_p use_p; bool res; /* Skip debug statements. */ if (is_gimple_debug (gsi_stmt (gsi))) { gsi_prev (&gsi); continue; } stmt1 = gsi_stmt (gsi); /* Do not consider statements writing to memory or having volatile operand. */ if (gimple_vdef (stmt1) || gimple_has_volatile_ops (stmt1)) break; gsi_from = gsi; gsi_prev (&gsi); lhs = gimple_get_lhs (stmt1); if (!lhs) break; /* LHS of vectorized stmt must be SSA_NAME. */ if (TREE_CODE (lhs) != SSA_NAME) break; if (!VECTOR_TYPE_P (TREE_TYPE (lhs))) { /* Remove dead scalar statement. 
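		     A producer whose non-vector lhs was left without uses by
		     vectorization is deleted on the spot rather than moved
		     into STORE_BB.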
*/ if (has_zero_uses (lhs)) { gsi_remove (&gsi_from, true); continue; } } /* Check that LHS does not have uses outside of STORE_BB. */ res = true; FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs) { gimple *use_stmt; use_stmt = USE_STMT (use_p); if (is_gimple_debug (use_stmt)) continue; if (gimple_bb (use_stmt) != store_bb) { res = false; break; } } if (!res) break; if (gimple_vuse (stmt1) && gimple_vuse (stmt1) != gimple_vuse (last_store)) break; /* Can move STMT1 to STORE_BB. */ if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Move stmt to created bb\n"); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0); } gsi_move_before (&gsi_from, &gsi_to); /* Shift GSI_TO for further insertion. */ gsi_prev (&gsi_to); } /* Put other masked stores with the same mask to STORE_BB. */ if (worklist.is_empty () || gimple_call_arg (worklist.last (), 2) != mask || worklist.last () != stmt1) break; last = worklist.pop (); } add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION); } }
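/* A minimal stand-alone sketch (not GCC internals) of the latch-count
   arithmetic at the top of this section: the vector loop's iteration
   estimate is the (biased) scalar estimate divided by the assumed
   vectorization factor -- rounded up when the final vector iteration may
   be partial, down otherwise -- minus one to convert an iteration count
   into latch executions, mirroring wi::udiv_ceil/wi::udiv_floor (...) - 1.  */

static unsigned long
vect_latch_count_sketch (unsigned long scalar_niters, unsigned long bias,
			 unsigned long assumed_vf,
			 int final_iter_may_be_partial)
{
  unsigned long biased = scalar_niters + bias;
  unsigned long q = final_iter_may_be_partial
		    ? (biased + assumed_vf - 1) / assumed_vf	/* udiv_ceil */
		    : biased / assumed_vf;			/* udiv_floor */
  return q - 1;
}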
pdzamax.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> s d c * **/ #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_types.h" #include "plasma_workspace.h" #include <plasma_core_blas.h> #define A(m, n) (plasma_complex64_t*)plasma_tile_addr(A, m, n) /******************************************************************************/ void plasma_pdzamax(plasma_enum_t colrow, plasma_desc_t A, double *work, double *values, plasma_sequence_t *sequence, plasma_request_t *request) { // Return if failed sequence. if (sequence->status != PlasmaSuccess) return; switch (colrow) { //=================== // PlasmaColumnwise //=================== case PlasmaColumnwise: for (int m = 0; m < A.mt; m++) { int mvam = plasma_tile_mview(A, m); int ldam = plasma_tile_mmain(A, m); for (int n = 0; n < A.nt; n++) { int nvan = plasma_tile_nview(A, n); plasma_core_omp_dzamax(PlasmaColumnwise, mvam, nvan, A(m, n), ldam, &work[A.n*m+n*A.nb], sequence, request); } } #pragma omp taskwait plasma_core_omp_damax(PlasmaRowwise, A.n, A.mt, work, A.n, values, sequence, request); break; //================ // PlasmaRowwise //================ case PlasmaRowwise: for (int m = 0; m < A.mt; m++) { int mvam = plasma_tile_mview(A, m); int ldam = plasma_tile_mmain(A, m); for (int n = 0; n < A.nt; n++) { int nvan = plasma_tile_nview(A, n); plasma_core_omp_dzamax(PlasmaRowwise, mvam, nvan, A(m, n), ldam, &work[A.m*n+m*A.mb], sequence, request); } } #pragma omp taskwait plasma_core_omp_damax(PlasmaRowwise, A.m, A.nt, work, A.m, values, sequence, request); } }
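/* The routine above is a two-phase reduction: phase 1 (dzamax per tile)
   writes one vector of per-tile absolute-value maxima into work for every
   tile row/column, and phase 2 (damax, after the taskwait) reduces those
   partial vectors into values. A serial sketch of the same pattern, with a
   hypothetical mt-by-n partials array standing in for work: */
#include <math.h>
#include <stddef.h>
static void amax_two_phase_sketch(int mt, int n,
                                  const double *partials, // mt x n, phase 1 output
                                  double *values)         // n, final column maxima
{
    for (int j = 0; j < n; j++) {
        double v = 0.0;
        // phase 2: reduce the partial maxima over all tile rows
        for (int m = 0; m < mt; m++)
            v = fmax(v, partials[(size_t)m * n + j]);
        values[j] = v;
    }
}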
rnn-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file rnn-inl.h * \brief * \author Sebastian Bodenstein */ #ifndef MXNET_OPERATOR_RNN_INL_H_ #define MXNET_OPERATOR_RNN_INL_H_ #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <algorithm> #include <map> #include <vector> #include <string> #include <utility> #include "./math.h" #include "./math_functions-inl.h" #include "./operator_common.h" #include "./mshadow_op.h" #include "./linalg.h" namespace mxnet { namespace op { namespace rnn_enum { enum RNNOpInputs {kData, kParams, kState, kStateCell}; enum RNNOpOutputs {kOut, kStateOut, kStateCellOut}; enum RNNModeType {kRnnRelu, kRnnTanh, kLstm, kGru}; enum RNNOpResource {kTempSpace}; } // A utility function to calculate input size inline int rnn_single_param_size(int inputSize, int hiddenSize, int mode) { int size = hiddenSize * (hiddenSize + inputSize + 2); // Different RNN's have different num weights switch (mode) { case rnn_enum::kRnnRelu: size *= 1; break; case rnn_enum::kRnnTanh: size *= 1; break; case rnn_enum::kLstm: size *= 4; break; case rnn_enum::kGru: size *= 3; break; } return size; } inline int rnn_param_size(int layerNum, int inputSize, int hiddenSize, bool bidirectional, int mode) { // get size of first layer int size = rnn_single_param_size(inputSize, hiddenSize, mode); // get size of remaining layers if (bidirectional) { size += (layerNum - 1) * rnn_single_param_size(2 * hiddenSize, hiddenSize, mode); size *= 2; } else { size += (layerNum - 1) * rnn_single_param_size(hiddenSize, hiddenSize, mode); } return size; } struct RNNParam : public dmlc::Parameter<RNNParam> { uint32_t state_size; uint32_t num_layers; bool bidirectional, state_outputs; int mode; float p, pkeep_; int seq_length_, batch_size_, input_size_; bool lstm_q_; // whether type is lstm DMLC_DECLARE_PARAMETER(RNNParam) { DMLC_DECLARE_FIELD(state_size) .describe("size of the state for each layer"); DMLC_DECLARE_FIELD(num_layers) .describe("number of stacked layers"); DMLC_DECLARE_FIELD(bidirectional).set_default(false) .describe("whether to use bidirectional recurrent layers"); DMLC_DECLARE_FIELD(mode) .add_enum("rnn_relu", rnn_enum::kRnnRelu) .add_enum("rnn_tanh", rnn_enum::kRnnTanh) .add_enum("lstm", rnn_enum::kLstm) .add_enum("gru", rnn_enum::kGru) .describe("the type of RNN to compute"); DMLC_DECLARE_FIELD(p).set_default(0.) 
.set_range(0, 1) .describe("Dropout probability, fraction of the input that gets dropped out at training time"); DMLC_DECLARE_FIELD(state_outputs).set_default(false) .describe("Whether to have the states as symbol outputs."); } }; template<typename xpu, typename DType> class RNNOp : public Operator { public: explicit RNNOp(RNNParam p) { } virtual void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data, const std::vector<TBlob> &aux_args) { using namespace mshadow; using namespace mshadow::expr; // TODO(sbodenstein): add MShadow implementation } virtual void Backward(const OpContext &ctx, const std::vector<TBlob> &out_grad, const std::vector<TBlob> &in_data, const std::vector<TBlob> &out_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &in_grad, const std::vector<TBlob> &aux_args) { using namespace mshadow; using namespace mshadow::expr; // TODO(sbodenstein): add MShadow implementation } private: RNNParam param_; }; // class RNNOp template<typename DType> class RNNOp<cpu, DType> : public Operator { public: explicit RNNOp(RNNParam param) { this->param_ = param; // RNN Mode param_.lstm_q_ = false; switch (param_.mode) { case rnn_enum::kLstm: param_.lstm_q_ = true; break; default: LOG(FATAL) << "only LSTM is implemented on CPU"; } } virtual void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data, const std::vector<TBlob> &aux_args) { // Layout TNC CHECK(!ctx.is_train) << "only inference mode is available " "for cpu at the moment."; size_t in_expected = param_.lstm_q_ ? 4 : 3; size_t out_expected = param_.lstm_q_ ? 3 : 2; if (!param_.state_outputs) LOG(FATAL) << "state_outputs=false is currently not supported for cpu."; CHECK_EQ(req[rnn_enum::kOut], kWriteTo); CHECK_EQ(in_data.size(), in_expected); CHECK_EQ(out_data.size(), out_expected); mshadow::Stream<cpu> *s = ctx.get_stream<cpu>(); // get input + output tensors // w layout i2h_w, h2h_w, i2h_b, h2h_b Tensor<cpu, 3, DType> x = in_data[rnn_enum::kData].get<cpu, 3, DType>(s); // TNC Tensor<cpu, 1, DType> w = in_data[rnn_enum::kParams].get<cpu, 1, DType>(s); Tensor<cpu, 3, DType> hx = in_data[rnn_enum::kState].get<cpu, 3, DType>(s); // LNC Tensor<cpu, 3, DType> y = out_data[rnn_enum::kOut].get<cpu, 3, DType>(s); // TNC int64_t seq_len = x.shape_[0]; int64_t num_layers = hx.shape_[0]; int64_t batch_size = x.shape_[1]; int64_t h_channel = hx.shape_[2]; int64_t in_channel = x.shape_[2]; Tensor<cpu, 2, DType> x_flatten = in_data[rnn_enum::kData] .get_with_shape<cpu, 2, DType>( mshadow::Shape2(seq_len * batch_size, in_channel), s); // (T*N)C Tensor<cpu, 2, DType> y_flatten = out_data[rnn_enum::kOut] .get_with_shape<cpu, 2, DType>( mshadow::Shape2( y.shape_[0] * y.shape_[1], y.shape_[2]), s); // (T*N)C CHECK(x.CheckContiguous()); CHECK(w.CheckContiguous()); CHECK(hx.CheckContiguous()); CHECK(y.CheckContiguous()); if (param_.lstm_q_) { const size_t kNumMat = 4; int64_t fused_h_ch = kNumMat * h_channel; int64_t h_size = batch_size * fused_h_ch; int64_t num_dir = 1 + param_.bidirectional; int64_t h2h_w_size = h_channel * fused_h_ch; Tensor<cpu, 3, DType> cx = in_data[rnn_enum::kStateCell].get<cpu, 3, DType>(s); CHECK(cx.CheckContiguous()); Tensor<cpu, 3, DType> cy = out_data[rnn_enum::kStateCellOut].get<cpu, 3, DType>(s); Tensor<cpu, 3, DType> hy = out_data[rnn_enum::kStateOut].get<cpu, 3, DType>(s); CHECK(cy.CheckContiguous()); CHECK(hy.CheckContiguous()); DType*
workspace_addr = static_cast<DType *>(ctx.requested[rnn_enum::kTempSpace] .get_host_space_internal(sizeof(DType) * (seq_len * h_size + h_size + y.shape_[0] * y.shape_[1] * y.shape_[2]))); Tensor<cpu, 3, DType> i2h_y( workspace_addr, mshadow::Shape3(seq_len, batch_size, fused_h_ch)); Tensor<cpu, 2, DType> i2h_y_flatten( workspace_addr, mshadow::Shape2(seq_len * batch_size, fused_h_ch)); Tensor<cpu, 2, DType> h2h_y(workspace_addr + seq_len * h_size, mshadow::Shape2(batch_size, fused_h_ch)); Tensor<cpu, 3, DType> y_tmp(workspace_addr + (seq_len + 1) * h_size, y.shape_); Tensor<cpu, 2, DType> y_flatten_tmp(workspace_addr + (seq_len + 1) * h_size, y_flatten.shape_); CHECK(i2h_y.CheckContiguous()); CHECK(h2h_y.CheckContiguous()); CHECK(y_tmp.CheckContiguous()); for (int64_t layer = 0; layer < num_layers; layer++) { int reverse_dir = 0; int out_tmp = 0; if (param_.bidirectional && layer % 2) reverse_dir = 1; if (layer / num_dir % 2 == 0) out_tmp = 1; mshadow::Shape<2> i2h_w_shape = mshadow::Shape2(fused_h_ch, (layer < num_dir) ? in_channel : num_dir * h_channel); mshadow::Shape<2> h2h_w_shape = mshadow::Shape2(fused_h_ch, h_channel); int64_t start = layer < num_dir ? (layer * (in_channel * fused_h_ch + h2h_w_size)) : // input layer (num_dir * (in_channel * fused_h_ch + h2h_w_size) + (layer - num_dir) * (h2h_w_size * num_dir + h2h_w_size)); Tensor<cpu, 2, DType> i2h_w(w.dptr_ + start, i2h_w_shape); start += layer < num_dir ? in_channel * fused_h_ch : h2h_w_size * num_dir; Tensor<cpu, 2, DType> h2h_w(w.dptr_ + start, h2h_w_shape); start = num_dir * (in_channel * fused_h_ch + h2h_w_size) + (num_layers - num_dir) * (h2h_w_size * (num_dir + 1)) + layer * fused_h_ch * 2; Tensor<cpu, 1, DType> i2h_b = w.Slice(start, start + fused_h_ch); start += fused_h_ch; Tensor<cpu, 1, DType> h2h_b = w.Slice(start, start + fused_h_ch); if (out_tmp) { linalg_gemm(layer < num_dir ? x_flatten:y_flatten, i2h_w, i2h_y_flatten, false, true, s); } else { linalg_gemm(layer < num_dir ? x_flatten:y_flatten_tmp, i2h_w, i2h_y_flatten, false, true, s); } i2h_y_flatten += repmat(i2h_b, seq_len * batch_size); for (int64_t t = 0; t < seq_len; t++) { int64_t timestep = t; if (reverse_dir) timestep = seq_len - 1 - t; linalg_gemm(t == 0 ? hx[layer]:hy[layer], h2h_w, h2h_y, false, true, s); h2h_y += repmat(h2h_b, batch_size); // fused element-wise ops LSTMFusedElementWiseCPUOps(i2h_y[timestep], cx[layer], h2h_y, y[timestep], out_tmp ? 
y_tmp[timestep]: y[timestep], hy[layer], cy[layer], batch_size, h_channel, t, reverse_dir, out_tmp && (layer == num_layers - 1)); } } } else { LOG(FATAL) << "only LSTM is available for cpu at the moment."; } } virtual void Backward(const OpContext &ctx, const std::vector<TBlob> &out_grad, const std::vector<TBlob> &in_data, const std::vector<TBlob> &out_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &in_grad, const std::vector<TBlob> &aux_args) { LOG(FATAL) << "LSTM backward is not available for cpu at the moment."; } private: RNNParam param_; void LSTMFusedElementWiseCPUOps(const Tensor<cpu, 2, DType> &i2h_y, const Tensor<cpu, 2, DType> &cx, const Tensor<cpu, 2, DType> &h2h_y, const Tensor<cpu, 2, DType> &y, // holding intermediate layer output const Tensor<cpu, 2, DType> &tmp, const Tensor<cpu, 2, DType> &hy, const Tensor<cpu, 2, DType> &cy, const int64_t batch_size, const int64_t h_channel, const int64_t t, const int reverse_dir, const int copy_tmp2y) { int64_t length = batch_size * h_channel; #pragma omp parallel for for (int64_t ji = 0; ji < length; ++ji) { int64_t j = ji / h_channel; // batch dim int64_t i = ji % h_channel; int64_t f = i + h_channel; int64_t c = i + h_channel * 2; int64_t o = i + h_channel * 3; int64_t j_pos = j * h_channel * 4; h2h_y.dptr_[j_pos + i] += i2h_y.dptr_[j_pos + i]; h2h_y.dptr_[j_pos + f] += i2h_y.dptr_[j_pos + f]; h2h_y.dptr_[j_pos + o] += i2h_y.dptr_[j_pos + o]; h2h_y.dptr_[j_pos + c] += i2h_y.dptr_[j_pos + c]; h2h_y.dptr_[j_pos + i] = 1.0f / (1.0f + math::exp(-h2h_y.dptr_[j_pos + i])); h2h_y.dptr_[j_pos + f] = 1.0f / (1.0f + math::exp(-h2h_y.dptr_[j_pos + f])); h2h_y.dptr_[j_pos + o] = 1.0f / (1.0f + math::exp(-h2h_y.dptr_[j_pos + o])); h2h_y.dptr_[j_pos + c] = tanh(h2h_y.dptr_[j_pos + c]); cy[j][i] = h2h_y.dptr_[j_pos + f] * (t == 0 ? cx[j][i]:cy[j][i]) + h2h_y.dptr_[j_pos + i] * h2h_y.dptr_[j_pos + c]; hy[j][i] = h2h_y.dptr_[j_pos + o] * tanh(cy[j][i]); tmp[j][i + h_channel * reverse_dir] = hy[j][i]; if (copy_tmp2y) { y[j][i] = tmp[j][i]; if (reverse_dir) y[j][i + h_channel] = tmp[j][i + h_channel]; } } } }; // class RNNOp template<typename xpu> Operator* CreateOp(RNNParam param, int dtype); #if DMLC_USE_CXX11 class RNNProp : public OperatorProperty { public: std::vector<std::string> ListArguments() const override { if (param_.mode == rnn_enum::kLstm) { return {"data", "parameters", "state", "state_cell"}; } else { return {"data", "parameters", "state"}; } } std::vector<std::string> ListOutputs() const override { std::vector<std::string> outputs = {"output"}; if (!param_.state_outputs) return outputs; else outputs.push_back("state"); if (param_.mode == rnn_enum::kLstm) outputs.push_back("state_cell"); return outputs; } int NumOutputs() const override { int mode_num = (param_.mode == rnn_enum::kLstm) ? 2 : 1; int num_outputs = param_.state_outputs ? 
(mode_num + 1) : 1; return num_outputs; } void Init(const std::vector<std::pair<std::string, std::string> >& kwargs) override { param_.Init(kwargs); } std::map<std::string, std::string> GetParams() const override { return param_.__DICT__(); } bool InferShape(std::vector<TShape> *in_shape, std::vector<TShape> *out_shape, std::vector<TShape> *aux_shape) const override { using namespace mshadow; if (param_.mode == rnn_enum::kLstm) { CHECK_EQ(in_shape->size(), 4U) << "Input:[data, parameters, state, cell_state]"; } else { CHECK_EQ(in_shape->size(), 3U) << "Input:[data, parameters, state]"; } const TShape &dshape = (*in_shape)[rnn_enum::kData]; if (dshape.ndim() == 0) return false; CHECK_EQ(dshape.ndim(), 3U) \ << "Input data should be rank-3 tensor of dim [sequence length, batch size, input size]"; // data: [sequence len, batch, input dimension] int batch_size = dshape[1]; int input_size = dshape[2]; int numDirections = param_.bidirectional ? 2 : 1; int total_layers = numDirections * param_.num_layers; // double for bidirectional SHAPE_ASSIGN_CHECK(*in_shape, rnn_enum::kState, Shape3(total_layers, batch_size, param_.state_size)); if (param_.mode == rnn_enum::kLstm) SHAPE_ASSIGN_CHECK(*in_shape, rnn_enum::kStateCell, Shape3(total_layers, batch_size, param_.state_size)); // calculate parameter vector length int param_size = rnn_param_size(param_.num_layers, input_size, param_.state_size, param_.bidirectional, param_.mode); SHAPE_ASSIGN_CHECK(*in_shape, rnn_enum::kParams, Shape1(param_size)); out_shape->clear(); // output: [sequence len, batch, output size] TShape oshape = dshape; oshape[2] = numDirections * param_.state_size; out_shape->push_back(oshape); if (!param_.state_outputs) { return true; } else { // outStateShape: [layer_num, batch, state size] TShape outStateShape = dshape; outStateShape[0] = total_layers; outStateShape[1] = batch_size; outStateShape[2] = param_.state_size; out_shape->push_back(outStateShape); // Deal with lstm cell state if (param_.mode == rnn_enum::kLstm) out_shape->push_back(outStateShape); return true; } } bool InferType(std::vector<int> *in_type, std::vector<int> *out_type, std::vector<int> *aux_type) const override { CHECK_GE(in_type->size(), 1U); int dtype = (*in_type)[0]; CHECK_NE(dtype, -1) << "First input must have specified type"; for (index_t i = 0; i < in_type->size(); ++i) { if ((*in_type)[i] == -1) { (*in_type)[i] = dtype; } else { UNIFORM_TYPE_CHECK((*in_type)[i], dtype, ListArguments()[i]); } } out_type->clear(); out_type->push_back(dtype); if (!param_.state_outputs) { return true; } else { out_type->push_back(dtype); // Deal with lstm cell state if (param_.mode == rnn_enum::kLstm) out_type->push_back(dtype); return true; } } OperatorProperty* Copy() const override { auto ptr = new RNNProp(); ptr->param_ = param_; return ptr; } std::string TypeString() const override { return "RNN"; } std::vector<int> DeclareBackwardDependency( const std::vector<int> &out_grad, const std::vector<int> &in_data, const std::vector<int> &out_data) const override { std::vector<int> dep = {in_data[rnn_enum::kData], in_data[rnn_enum::kParams], in_data[rnn_enum::kState], out_data[rnn_enum::kOut], out_grad[rnn_enum::kOut]}; if (param_.state_outputs) { dep.push_back(out_data[rnn_enum::kStateOut]); dep.push_back(out_grad[rnn_enum::kStateOut]); } if (param_.mode == rnn_enum::kLstm) { dep.push_back(in_data[rnn_enum::kStateCell]); if (param_.state_outputs) { dep.push_back(out_data[rnn_enum::kStateCellOut]); dep.push_back(out_grad[rnn_enum::kStateCellOut]); } } return dep; } 
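  // Note on the list above: when state_outputs is set, backward also needs
  // the forward state outputs (hy, and cy for LSTM) and their gradients, so
  // they are added as explicit dependencies; arrays not listed here may be
  // freed by the executor before the backward pass runs.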
std::vector<ResourceRequest> ForwardResource( const std::vector<TShape> &in_shape) const override { return {ResourceRequest::kTempSpace}; } std::vector<ResourceRequest> BackwardResource( const std::vector<TShape> &in_shape) const override { return {ResourceRequest::kTempSpace}; } Operator* CreateOperator(Context ctx) const override { LOG(FATAL) << "Not Implemented"; return NULL; } Operator* CreateOperatorEx(Context ctx, std::vector<TShape> *in_shape, std::vector<int> *in_type) const override; private: RNNParam param_; }; // class RNNProp #endif // DMLC_USE_CXX11 } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_RNN_INL_H_
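// Worked example of the parameter-count formulas above (an illustration,
// not library API): for an LSTM (4 gate matrices) with inputSize = 10 and
// hiddenSize = 20,
//   layer 0:           4 * 20 * (20 + 10 + 2) = 2560
//   each later layer:  4 * 20 * (20 + 20 + 2) = 3360  (its input is h)
// so rnn_param_size(2, 10, 20, /*bidirectional=*/false, kLstm) == 5920.
#include <assert.h>
static inline void rnn_param_size_example() {
  const int in = 10, h = 20;
  assert(4 * h * (h + in + 2) == 2560);   // first layer
  assert(4 * h * (h + h + 2) == 3360);    // each subsequent layer
  assert(2560 + 3360 == 5920);            // two-layer unidirectional total
}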
GB_binop__isgt_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isgt_int16 // A.*B function (eWiseMult): GB_AemultB__isgt_int16 // A*D function (colscale): GB_AxD__isgt_int16 // D*A function (rowscale): GB_DxB__isgt_int16 // C+=B function (dense accum): GB_Cdense_accumB__isgt_int16 // C+=b function (dense accum): GB_Cdense_accumb__isgt_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isgt_int16 // C=scalar+B GB_bind1st__isgt_int16 // C=scalar+B' GB_bind1st_tran__isgt_int16 // C=A+scalar GB_bind2nd__isgt_int16 // C=A'+scalar GB_bind2nd_tran__isgt_int16 // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x > y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGT || GxB_NO_INT16 || GxB_NO_ISGT_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isgt_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isgt_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isgt_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isgt_int16 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isgt_int16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isgt_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
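// For reference, every kernel in this file expands GB_BINOP to the same
// scalar operation; for ISGT_INT16 that is simply (a sketch -- the real
// loops come from the included templates):
static inline int16_t GB_isgt_int16_scalar_sketch (int16_t x, int16_t y)
{
    return ((int16_t) (x > y)) ;    // "is greater than": 1 or 0, typed int16_t
}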
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isgt_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isgt_int16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isgt_int16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB_bind1st_tran__isgt_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB_bind2nd_tran__isgt_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
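#ifndef GBCOMPACT
// Hypothetical usage of the bind1st kernel above (illustrative only, and
// assuming the operator is not compiled out via GB_DISABLE): compute
// Cx [p] = (3 > Bx [p]) for a small dense array.
static void GB_bind1st_usage_sketch (void)
{
    int16_t Bx [4] = { 1, 3, 5, -2 } ;
    int16_t Cx [4] ;
    int16_t x = 3 ;
    GB_bind1st__isgt_int16 ((GB_void *) Cx, (const GB_void *) &x,
        (const GB_void *) Bx, 4, 1) ;
    // Cx is now { 1, 0, 0, 1 }
}
#endif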
dragonfly3_fmt_plug.c
/* * This file is part of John the Ripper password cracker, * based on rawSHA256_fmt.c code * * This software is Copyright (c) 2012 magnum, and it is hereby released to the * general public under the following terms: Redistribution and use in source * and binary forms, with or without modification, are permitted. * * The DragonFly BSD 2.10.1-REL crypt-sha2 hashes are seriously broken. See * http://www.openwall.com/lists/john-dev/2012/01/16/1 * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_dragonfly3_32; extern struct fmt_main fmt_dragonfly3_64; #elif FMT_REGISTERS_H john_register_one(&fmt_dragonfly3_32); john_register_one(&fmt_dragonfly3_64); #else #include "sha2.h" #include <string.h> #include "arch.h" #include "params.h" #include "common.h" #include "formats.h" #ifdef _OPENMP #define OMP_SCALE 256 #include <omp.h> #endif #include "memdbg.h" #define FORMAT_LABEL_32 "dragonfly3-32" #define FORMAT_LABEL_64 "dragonfly3-64" #define FORMAT_NAME_32 "DragonFly BSD $3$ w/ bug, 32-bit" #define FORMAT_NAME_64 "DragonFly BSD $3$ w/ bug, 64-bit" #define ALGORITHM_NAME "SHA256 32/" ARCH_BITS_STR " " SHA2_LIB #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 125 #define CIPHERTEXT_LENGTH 44 #define BINARY_SIZE 32 #define BINARY_ALIGN 4 #define SALT_SIZE_32 (1+4+8) // 1st char is length #define SALT_SIZE_64 (1+8+8) #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests tests_32[] = { {"$3$z$EBG66iBCGfUfENOfqLUH/r9xQxI1cG373/hRop6j.oWs", "magnum"}, {"$3$f6daU5$Xf/u8pKp.sb4VCLKz7tTZMUKJ3J4oOfZgUSHYOFL.M0n", ""}, {"$3$PNPA2tJ$ppD4bXqPMYFVdYVYrxXGMWeYB6Xv8e6jmXbvrB5V.okl", "password"}, {"$3$jWhDSrS$bad..Dy7UAyabPyfrEi3fgQ2qtT.5fE7C5EMNo/n.Qk5", "John the Ripper"}, {"$3$SSYEHO$hkuDmUQHT2Tr0.ai.lUVyb9bCC875Up.CZVa6UJZ.Muv", "DragonFly BSD"}, {NULL} }; static struct fmt_tests tests_64[] = { {"$3$z$sNV7KLtLxvJRsj2MfBtGZFuzXP3CECITaFq/rvsy.Y.Q", "magnum"}, {"$3$f6daU5$eV2SX9vUHTMsoy3Ic7cWiQ4mOxyuyenGjYQWkJmy.AF3", ""}, {"$3$PNPA2tJ$GvXjg6zSge3YDh5I35JlYZHoQS2r0/.vn36fQzSY.A0d", "password"}, {"$3$jWhDSrS$5yBH7KFPmsg.PhPeDMj1MY4fv9061zdbYumPe2Ve.Y5J", "John the Ripper"}, {"$3$SSYEHO$AMYLyanRYs8F2U07FsBrSFuOIygJ4kgqvpBB17BI.61N", "DragonFly BSD"}, {NULL} }; static int (*saved_key_length); static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out) [(BINARY_SIZE + sizeof(ARCH_WORD_32) - 1) / sizeof(ARCH_WORD_32)]; static char *cur_salt; static int salt_len; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt = omp_t * MIN_KEYS_PER_CRYPT; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt = omp_t * MAX_KEYS_PER_CRYPT; #endif saved_key_length = mem_calloc_tiny(sizeof(*saved_key_length) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static int valid(char *ciphertext, struct fmt_main *self) { char *pos, *start; if (strncmp(ciphertext, "$3$", 3)) return 0; ciphertext += 3; for (pos = ciphertext; *pos && *pos != '$'; pos++); if (!*pos || pos < ciphertext || pos > &ciphertext[8]) return 0; start = ++pos; while (atoi64[ARCH_INDEX(*pos)] != 0x7F) pos++; if (*pos || pos - start != CIPHERTEXT_LENGTH) return 0; return 1; } #define TO_BINARY(b1, b2, b3) \ value = (ARCH_WORD_32)atoi64[ARCH_INDEX(pos[0])] | \ ((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[1])] 
<< 6) | \ ((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[2])] << 12) | \ ((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[3])] << 18); \ pos += 4; \ out[b1] = value >> 16; \ out[b2] = value >> 8; \ out[b3] = value; static void *get_binary(char *ciphertext) { static ARCH_WORD_32 outbuf[BINARY_SIZE/4]; ARCH_WORD_32 value; char *pos; unsigned char *out = (unsigned char*)outbuf; int i; pos = strrchr(ciphertext, '$') + 1; for (i = 0; i < 10; i++) { TO_BINARY(i, i + 11, i + 21); } value = (ARCH_WORD_32)atoi64[ARCH_INDEX(pos[0])] | ((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[1])] << 6) | ((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[2])] << 12) | ((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[3])] << 18); out[10] = value >> 16; out[31] = value >> 8; return (void *)out; } static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; } static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; } static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; } static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; } static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; } static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; } static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; } static void set_key(char *key, int index) { int len = strlen(key); saved_key_length[index] = len; if (len > PLAINTEXT_LENGTH) len = saved_key_length[index] = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, len); } static char *get_key(int index) { saved_key[index][saved_key_length[index]] = 0; return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { SHA256_CTX ctx; SHA256_Init(&ctx); /* First the password */ SHA256_Update(&ctx, saved_key[index], saved_key_length[index]); /* Then the salt, including the $3$ magic */ SHA256_Update(&ctx, cur_salt, salt_len); SHA256_Final((unsigned char*)crypt_out[index], &ctx); } return count; } static void set_salt(void *salt) { salt_len = (int)*(char*)salt; cur_salt = (char*)salt + 1; } // For 32-bit version of the bug, our magic is "$3$\0" len 4 static void *get_salt_32(char *ciphertext) { static char *out; int len; if (!out) out = mem_alloc_tiny(SALT_SIZE_32, MEM_ALIGN_WORD); memset(out, 0, SALT_SIZE_32); ciphertext += 3; strcpy(&out[1], "$3$"); for (len = 0; ciphertext[len] != '$'; len++); memcpy(&out[5], ciphertext, len); out[0] = len + 4; return out; } // For 64-bit version of the bug, our magic is "$3$\0sha5" len 8 static void *get_salt_64(char *ciphertext) { static char *out; int len; if (!out) out = mem_alloc_tiny(SALT_SIZE_64, MEM_ALIGN_WORD); memset(out, 0, SALT_SIZE_64); ciphertext += 3; memcpy(&out[1], "$3$\0sha5", 8); for (len = 0; ciphertext[len] != '$'; len++); memcpy(&out[9], ciphertext, len); out[0] = len + 8; return out; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if (!memcmp(binary, crypt_out[index], BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } // Public domain hash function by DJ Bernstein static int salt_hash(void *salt) { unsigned char *s = (unsigned char*)salt + 1; unsigned int hash = 5381; unsigned int i; for (i = 0; i < *(unsigned char*)salt; i++) hash = ((hash << 5) + hash) ^ s[i]; return hash & (SALT_HASH_SIZE - 1); } struct fmt_main 
fmt_dragonfly3_32 = { { FORMAT_LABEL_32, FORMAT_NAME_32, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE_32, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { NULL }, #endif tests_32 }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt_32, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_dragonfly3_64 = { { FORMAT_LABEL_64, FORMAT_NAME_64, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE_64, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { NULL }, #endif tests_64 }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt_64, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
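/*
 * Summary of what the two salt builders above feed to SHA-256 (this is the
 * upstream DragonFly bug the header links to -- the stored magic includes
 * the string terminator):
 *     32-bit:  SHA256(password . "$3$\0"     . salt)
 *     64-bit:  SHA256(password . "$3$\0sha5" . salt)
 * A stand-alone sketch of crypt_all()'s per-candidate work; it assumes the
 * same sha2.h API used above, so it belongs inside the #else branch where
 * those headers are in scope:
 */
static void dragonfly3_one_hash_sketch(const char *password,
                                       const char *salt_blob, int salt_blob_len,
                                       unsigned char binary_out[32])
{
	SHA256_CTX ctx;

	SHA256_Init(&ctx);
	SHA256_Update(&ctx, password, strlen(password)); /* password first */
	SHA256_Update(&ctx, salt_blob, salt_blob_len);   /* magic (with NUL) + salt */
	SHA256_Final(binary_out, &ctx);
}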
data.c
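/* data.c -- image/label loading and augmentation pipeline for training:
   path sampling (random, or sequential for tracking), box-label parsing and
   validation, ground-truth filling, and the OpenCV / non-OpenCV detection
   data loaders. */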
#include "data.h" #include "utils.h" #include "image.h" #include "dark_cuda.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #define NUMCHARS 37 pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; list *get_paths(char *filename) { char *path; FILE *file = fopen(filename, "r"); if(!file) file_error(filename); list *lines = make_list(); while((path=fgetl(file))){ list_insert(lines, path); } fclose(file); return lines; } /* char **get_random_paths_indexes(char **paths, int n, int m, int *indexes) { char **random_paths = calloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); for(i = 0; i < n; ++i){ int index = random_gen()%m; indexes[i] = index; random_paths[i] = paths[index]; if(i == 0) printf("%s\n", paths[index]); } pthread_mutex_unlock(&mutex); return random_paths; } */ char **get_sequential_paths(char **paths, int n, int m, int mini_batch, int augment_speed) { int speed = rand_int(1, augment_speed); if (speed < 1) speed = 1; char** sequentia_paths = (char**)calloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); //printf("n = %d, mini_batch = %d \n", n, mini_batch); unsigned int *start_time_indexes = (unsigned int *)calloc(mini_batch, sizeof(unsigned int)); for (i = 0; i < mini_batch; ++i) { start_time_indexes[i] = random_gen() % m; //printf(" start_time_indexes[i] = %u, ", start_time_indexes[i]); } for (i = 0; i < n; ++i) { do { int time_line_index = i % mini_batch; unsigned int index = start_time_indexes[time_line_index] % m; start_time_indexes[time_line_index] += speed; //int index = random_gen() % m; sequentia_paths[i] = paths[index]; //if(i == 0) printf("%s\n", paths[index]); //printf(" index = %u - grp: %s \n", index, paths[index]); if (strlen(sequentia_paths[i]) <= 4) printf(" Very small path to the image: %s \n", sequentia_paths[i]); } while (strlen(sequentia_paths[i]) == 0); } free(start_time_indexes); pthread_mutex_unlock(&mutex); return sequentia_paths; } char **get_random_paths(char **paths, int n, int m) { char** random_paths = (char**)calloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); //printf("n = %d \n", n); for(i = 0; i < n; ++i){ do { int index = random_gen() % m; random_paths[i] = paths[index]; //if(i == 0) printf("%s\n", paths[index]); //printf("grp: %s\n", paths[index]); if (strlen(random_paths[i]) <= 4) printf(" Very small path to the image: %s \n", random_paths[i]); } while (strlen(random_paths[i]) == 0); } pthread_mutex_unlock(&mutex); return random_paths; } char **find_replace_paths(char **paths, int n, char *find, char *replace) { char** replace_paths = (char**)calloc(n, sizeof(char*)); int i; for(i = 0; i < n; ++i){ char replaced[4096]; find_replace(paths[i], find, replace, replaced); replace_paths[i] = copy_string(replaced); } return replace_paths; } matrix load_image_paths_gray(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)calloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image(paths[i], w, h, 3); image gray = grayscale_image(im); free_image(im); im = gray; X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_paths(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)calloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], w, h); X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_augment_paths(char **paths, int n, int use_flip, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { 
int i; matrix X; X.rows = n; X.vals = (float**)calloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], 0, 0); image crop = random_augment_image(im, angle, aspect, min, max, size); int flip = use_flip ? random_gen() % 2 : 0; if (flip) flip_image(crop); random_distort_image(crop, hue, saturation, exposure); /* show_image(im, "orig"); show_image(crop, "crop"); cvWaitKey(0); */ free_image(im); X.vals[i] = crop.data; X.cols = crop.h*crop.w*crop.c; } return X; } extern int check_mistakes; box_label *read_boxes(char *filename, int *n) { box_label* boxes = (box_label*)calloc(1, sizeof(box_label)); FILE *file = fopen(filename, "r"); if (!file) { printf("Can't open label file. (This can be normal only if you use MSCOCO): %s \n", filename); //file_error(filename); FILE* fw = fopen("bad.list", "a"); fwrite(filename, sizeof(char), strlen(filename), fw); char *new_line = "\n"; fwrite(new_line, sizeof(char), strlen(new_line), fw); fclose(fw); if (check_mistakes) getchar(); *n = 0; return boxes; } float x, y, h, w; int id; int count = 0; while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){ boxes = (box_label*)realloc(boxes, (count + 1) * sizeof(box_label)); boxes[count].id = id; boxes[count].x = x; boxes[count].y = y; boxes[count].h = h; boxes[count].w = w; boxes[count].left = x - w/2; boxes[count].right = x + w/2; boxes[count].top = y - h/2; boxes[count].bottom = y + h/2; ++count; } fclose(file); *n = count; return boxes; } void randomize_boxes(box_label *b, int n) { int i; for(i = 0; i < n; ++i){ box_label swap = b[i]; int index = random_gen()%n; b[i] = b[index]; b[index] = swap; } } void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip) { int i; for(i = 0; i < n; ++i){ if(boxes[i].x == 0 && boxes[i].y == 0) { boxes[i].x = 999999; boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } if ((boxes[i].x + boxes[i].w / 2) < 0 || (boxes[i].y + boxes[i].h / 2) < 0 || (boxes[i].x - boxes[i].w / 2) > 1 || (boxes[i].y - boxes[i].h / 2) > 1) { boxes[i].x = 999999; boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } boxes[i].left = boxes[i].left * sx - dx; boxes[i].right = boxes[i].right * sx - dx; boxes[i].top = boxes[i].top * sy - dy; boxes[i].bottom = boxes[i].bottom* sy - dy; if(flip){ float swap = boxes[i].left; boxes[i].left = 1. - boxes[i].right; boxes[i].right = 1. 
- swap; } boxes[i].left = constrain(0, 1, boxes[i].left); boxes[i].right = constrain(0, 1, boxes[i].right); boxes[i].top = constrain(0, 1, boxes[i].top); boxes[i].bottom = constrain(0, 1, boxes[i].bottom); boxes[i].x = (boxes[i].left+boxes[i].right)/2; boxes[i].y = (boxes[i].top+boxes[i].bottom)/2; boxes[i].w = (boxes[i].right - boxes[i].left); boxes[i].h = (boxes[i].bottom - boxes[i].top); boxes[i].w = constrain(0, 1, boxes[i].w); boxes[i].h = constrain(0, 1, boxes[i].h); } } void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count && i < 30; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .0 || h < .0) continue; int index = (4+classes) * i; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; if (id < classes) truth[index+id] = 1; } free(boxes); } void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .001 || h < .001) continue; int col = (int)(x*num_boxes); int row = (int)(y*num_boxes); x = x*num_boxes - col; y = y*num_boxes - row; int index = (col+row*num_boxes)*(5+classes); if (truth[index]) continue; truth[index++] = 1; if (id < classes) truth[index+id] = 1; index += classes; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; } free(boxes); } int fill_truth_detection(const char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy, int net_w, int net_h) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; int i; box_label *boxes = read_boxes(labelpath, &count); int min_w_h = 0; float lowest_w = 1.F / net_w; float lowest_h = 1.F / net_h; randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); if (count > num_boxes) count = num_boxes; float x, y, w, h; int id; int sub = 0; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; // not detect small objects //if ((w < 0.001F || h < 0.001F)) continue; // if truth (box for object) is smaller than 1x1 pix char buff[256]; if (id >= classes) { printf("\n Wrong annotation: class_id = %d. But class_id should be [from 0 to %d] \n", id, (classes-1)); sprintf(buff, "echo %s \"Wrong annotation: class_id = %d. 
But class_id should be [from 0 to %d]\" >> bad_label.list", labelpath, id, (classes-1)); system(buff); getchar(); ++sub; continue; } if ((w < lowest_w || h < lowest_h)) { //sprintf(buff, "echo %s \"Very small object: w < lowest_w OR h < lowest_h\" >> bad_label.list", labelpath); //system(buff); ++sub; continue; } if (x == 999999 || y == 999999) { printf("\n Wrong annotation: x = 0, y = 0, < 0 or > 1 \n"); sprintf(buff, "echo %s \"Wrong annotation: x = 0 or y = 0\" >> bad_label.list", labelpath); system(buff); ++sub; if (check_mistakes) getchar(); continue; } if (x <= 0 || x > 1 || y <= 0 || y > 1) { printf("\n Wrong annotation: x = %f, y = %f \n", x, y); sprintf(buff, "echo %s \"Wrong annotation: x = %f, y = %f\" >> bad_label.list", labelpath, x, y); system(buff); ++sub; if (check_mistakes) getchar(); continue; } if (w > 1) { printf("\n Wrong annotation: w = %f \n", w); sprintf(buff, "echo %s \"Wrong annotation: w = %f\" >> bad_label.list", labelpath, w); system(buff); w = 1; if (check_mistakes) getchar(); } if (h > 1) { printf("\n Wrong annotation: h = %f \n", h); sprintf(buff, "echo %s \"Wrong annotation: h = %f\" >> bad_label.list", labelpath, h); system(buff); h = 1; if (check_mistakes) getchar(); } if (x == 0) x += lowest_w; if (y == 0) y += lowest_h; truth[(i-sub)*5+0] = x; truth[(i-sub)*5+1] = y; truth[(i-sub)*5+2] = w; truth[(i-sub)*5+3] = h; truth[(i-sub)*5+4] = id; if (min_w_h == 0) min_w_h = w*net_w; if (min_w_h > w*net_w) min_w_h = w*net_w; if (min_w_h > h*net_h) min_w_h = h*net_h; } free(boxes); return min_w_h; } void print_letters(float *pred, int n) { int i; for(i = 0; i < n; ++i){ int index = max_index(pred+i*NUMCHARS, NUMCHARS); printf("%c", int_to_alphanum(index)); } printf("\n"); } void fill_truth_captcha(char *path, int n, float *truth) { char *begin = strrchr(path, '/'); ++begin; int i; for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){ int index = alphanum_to_int(begin[i]); if(index > 35) printf("Bad %c\n", begin[i]); truth[i*NUMCHARS+index] = 1; } for(;i < n; ++i){ truth[i*NUMCHARS + NUMCHARS-1] = 1; } } data load_data_captcha(char **paths, int n, int m, int k, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = make_matrix(n, k*NUMCHARS); int i; for(i = 0; i < n; ++i){ fill_truth_captcha(paths[i], k, d.y.vals[i]); } if(m) free(paths); return d; } data load_data_captcha_encode(char **paths, int n, int m, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.X.cols = 17100; d.y = d.X; if(m) free(paths); return d; } void fill_truth(char *path, char **labels, int k, float *truth) { int i; memset(truth, 0, k*sizeof(float)); int count = 0; for(i = 0; i < k; ++i){ if(strstr(path, labels[i])){ truth[i] = 1; ++count; } } if(count != 1) printf("Too many or too few labels: %d, %s\n", count, path); } void fill_hierarchy(float *truth, int k, tree *hierarchy) { int j; for(j = 0; j < k; ++j){ if(truth[j]){ int parent = hierarchy->parent[j]; while(parent >= 0){ truth[parent] = 1; parent = hierarchy->parent[parent]; } } } int i; int count = 0; for(j = 0; j < hierarchy->groups; ++j){ //printf("%d\n", count); int mask = 1; for(i = 0; i < hierarchy->group_size[j]; ++i){ if(truth[count + i]){ mask = 0; break; } } if (mask) { for(i = 0; i < hierarchy->group_size[j]; ++i){ truth[count + i] = SECRET_NUM; } } count += hierarchy->group_size[j]; } } matrix load_labels_paths(char **paths, int n, char **labels, int k, tree 
*hierarchy) { matrix y = make_matrix(n, k); int i; for(i = 0; i < n && labels; ++i){ fill_truth(paths[i], labels, k, y.vals[i]); if(hierarchy){ fill_hierarchy(y.vals[i], k, hierarchy); } } return y; } matrix load_tags_paths(char **paths, int n, int k) { matrix y = make_matrix(n, k); int i; int count = 0; for(i = 0; i < n; ++i){ char label[4096]; find_replace(paths[i], "imgs", "labels", label); find_replace(label, "_iconl.jpeg", ".txt", label); FILE *file = fopen(label, "r"); if(!file){ find_replace(label, "labels", "labels2", label); file = fopen(label, "r"); if(!file) continue; } ++count; int tag; while(fscanf(file, "%d", &tag) == 1){ if(tag < k){ y.vals[i][tag] = 1; } } fclose(file); } printf("%d/%d\n", count, n); return y; } char **get_labels_custom(char *filename, int *size) { list *plist = get_paths(filename); if(size) *size = plist->size; char **labels = (char **)list_to_array(plist); free_list(plist); return labels; } char **get_labels(char *filename) { return get_labels_custom(filename, NULL); } void free_data(data d) { if(!d.shallow){ free_matrix(d.X); free_matrix(d.y); }else{ free(d.X.vals); free(d.y.vals); } } data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; int k = size*size*(5+classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 0); int oh = orig.h; int ow = orig.w; int dw = (ow*jitter); int dh = (oh*jitter); int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; int flip = random_gen()%2; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft/ow)/sx; float dy = ((float)ptop /oh)/sy; image sized = resize_image(cropped, w, h); if(flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); } free(random_paths); return d; } data load_data_compare(int n, char **paths, int m, int classes, int w, int h) { if(m) paths = get_random_paths(paths, 2*n, m); int i,j; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*6; int k = 2*(classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image im1 = load_image_color(paths[i*2], w, h); image im2 = load_image_color(paths[i*2+1], w, h); d.X.vals[i] = (float*)calloc(d.X.cols, sizeof(float)); memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float)); memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float)); int id; float iou; char imlabel1[4096]; char imlabel2[4096]; find_replace(paths[i*2], "imgs", "labels", imlabel1); find_replace(imlabel1, "jpg", "txt", imlabel1); FILE *fp1 = fopen(imlabel1, "r"); while(fscanf(fp1, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou; } find_replace(paths[i*2+1], "imgs", "labels", imlabel2); find_replace(imlabel2, "jpg", "txt", imlabel2); FILE *fp2 = fopen(imlabel2, "r"); while(fscanf(fp2, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou; } for (j 
= 0; j < classes; ++j){ if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){ d.y.vals[i][2*j] = 1; d.y.vals[i][2*j+1] = 0; } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){ d.y.vals[i][2*j] = 0; d.y.vals[i][2*j+1] = 1; } else { d.y.vals[i][2*j] = SECRET_NUM; d.y.vals[i][2*j+1] = SECRET_NUM; } } fclose(fp1); fclose(fp2); free_image(im1); free_image(im2); } if(m) free(paths); return d; } data load_data_swag(char **paths, int n, int classes, float jitter) { int index = random_gen()%n; char *random_path = paths[index]; image orig = load_image_color(random_path, 0, 0); int h = orig.h; int w = orig.w; data d = {0}; d.shallow = 0; d.w = w; d.h = h; d.X.rows = 1; d.X.vals = (float**)calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; int k = (4+classes)*30; d.y = make_matrix(1, k); int dw = w*jitter; int dh = h*jitter; int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = w - pleft - pright; int sheight = h - ptop - pbot; float sx = (float)swidth / w; float sy = (float)sheight / h; int flip = random_gen()%2; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft/w)/sx; float dy = ((float)ptop /h)/sy; image sized = resize_image(cropped, w, h); if(flip) flip_image(sized); d.X.vals[0] = sized.data; fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); return d; } static box float_to_box_stride(float *f, int stride) { box b = { 0 }; b.x = f[0]; b.y = f[1 * stride]; b.w = f[2 * stride]; b.h = f[3 * stride]; return b; } void blend_truth(float *new_truth, int boxes, float *old_truth) { const int t_size = 4 + 1; int count_new_truth = 0; int t; for (t = 0; t < boxes; ++t) { float x = new_truth[t*(4 + 1)]; if (!x) break; count_new_truth++; } for (t = count_new_truth; t < boxes; ++t) { float *new_truth_ptr = new_truth + t*t_size; float *old_truth_ptr = old_truth + (t - count_new_truth)*t_size; float x = old_truth_ptr[0]; if (!x) break; new_truth_ptr[0] = old_truth_ptr[0]; new_truth_ptr[1] = old_truth_ptr[1]; new_truth_ptr[2] = old_truth_ptr[2]; new_truth_ptr[3] = old_truth_ptr[3]; new_truth_ptr[4] = old_truth_ptr[4]; } //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t); } #ifdef OPENCV #include "http_stream.h" data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup, float jitter, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs) { const int random_index = random_gen(); c = c ? c : 3; char **random_paths; char **mixup_random_paths = NULL; if (track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed); else random_paths = get_random_paths(paths, n, m); int mixup = use_mixup ? 
random_gen() % 2 : 0; //printf("\n mixup = %d \n", mixup); if (mixup) { if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed); else mixup_random_paths = get_random_paths(paths, n, m); } int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*c; float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0; float dhue = 0, dsat = 0, dexp = 0, flip = 0, blur = 0; int augmentation_calculated = 0; d.y = make_matrix(n, 5*boxes); int i_mixup = 0; for (i_mixup = 0; i_mixup <= mixup; i_mixup++) { if (i_mixup) augmentation_calculated = 0; for (i = 0; i < n; ++i) { float *truth = (float*)calloc(5 * boxes, sizeof(float)); const char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i]; int flag = (c >= 3); mat_cv *src; src = load_image_mat_cv(filename, flag); if (src == NULL) { if (check_mistakes) getchar(); continue; } int oh = get_height_mat(src); int ow = get_width_mat(src); int dw = (ow*jitter); int dh = (oh*jitter); if (!augmentation_calculated || !track) { augmentation_calculated = 1; r1 = random_float(); r2 = random_float(); r3 = random_float(); r4 = random_float(); r_scale = random_float(); dhue = rand_uniform_strong(-hue, hue); dsat = rand_scale(saturation); dexp = rand_scale(exposure); flip = use_flip ? random_gen() % 2 : 0; blur = rand_int(0, 1) ? (use_blur) : 0; } int pleft = rand_precalc_random(-dw, dw, r1); int pright = rand_precalc_random(-dw, dw, r2); int ptop = rand_precalc_random(-dh, dh, r3); int pbot = rand_precalc_random(-dh, dh, r4); //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh); float scale = rand_precalc_random(.25, 2, r_scale); // unused currently if (letter_box) { float img_ar = (float)ow / (float)oh; float net_ar = (float)w / (float)h; float result_ar = img_ar / net_ar; //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar); if (result_ar > 1) // sheight - should be increased { float oh_tmp = ow / net_ar; float delta_h = (oh_tmp - oh)/2; ptop = ptop - delta_h; pbot = pbot - delta_h; //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot); } else // swidth - should be increased { float ow_tmp = oh * net_ar; float delta_w = (ow_tmp - ow)/2; pleft = pleft - delta_w; pright = pright - delta_w; //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright); } } int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; float dx = ((float)pleft / ow) / sx; float dy = ((float)ptop / oh) / sy; int min_w_h = fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. 
/ sy, w, h); if (min_w_h < blur*4) blur = 0; // disable blur if one of the objects is too small image ai = image_data_augmentation(src, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp, blur, boxes, d.y.vals[i]); if (i_mixup) { image old_img = ai; old_img.data = d.X.vals[i]; //show_image(ai, "new"); //show_image(old_img, "old"); //wait_until_press_key_cv(); blend_images_cv(ai, 0.5, old_img, 0.5); blend_truth(truth, boxes, d.y.vals[i]); free_image(old_img); } d.X.vals[i] = ai.data; memcpy(d.y.vals[i], truth, 5*boxes * sizeof(float)); if (show_imgs)// && i_mixup) // delete i_mixup { image tmp_ai = copy_image(ai); char buff[1000]; sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen()); int t; for (t = 0; t < boxes; ++t) { box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1); if (!b.x) break; int left = (b.x - b.w / 2.)*ai.w; int right = (b.x + b.w / 2.)*ai.w; int top = (b.y - b.h / 2.)*ai.h; int bot = (b.y + b.h / 2.)*ai.h; draw_box_width(tmp_ai, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB } save_image(tmp_ai, buff); if (show_imgs == 1) { //char buff_src[1000]; //sprintf(buff_src, "src_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen()); //show_image_mat(src, buff_src); show_image(tmp_ai, buff); wait_until_press_key_cv(); } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n"); free_image(tmp_ai); } release_mat(&src); free(truth); } } free(random_paths); if(mixup_random_paths) free(mixup_random_paths); return d; } #else // OPENCV void blend_images(image new_img, float alpha, image old_img, float beta) { int i; int data_size = new_img.w * new_img.h * new_img.c; #pragma omp parallel for for (i = 0; i < data_size; ++i) new_img.data[i] = new_img.data[i] * alpha + old_img.data[i] * beta; } data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup, float jitter, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs) { const int random_index = random_gen(); c = c ? c : 3; char **random_paths; char **mixup_random_paths = NULL; if(track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed); else random_paths = get_random_paths(paths, n, m); int mixup = use_mixup ? random_gen() % 2 : 0; //printf("\n mixup = %d \n", mixup); if (mixup) { if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed); else mixup_random_paths = get_random_paths(paths, n, m); } int i; data d = { 0 }; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*c; float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale; float dhue = 0, dsat = 0, dexp = 0, flip = 0; int augmentation_calculated = 0; d.y = make_matrix(n, 5 * boxes); int i_mixup = 0; for (i_mixup = 0; i_mixup <= mixup; i_mixup++) { if (i_mixup) augmentation_calculated = 0; for (i = 0; i < n; ++i) { float *truth = (float*)calloc(5 * boxes, sizeof(float)); char *filename = (i_mixup) ? 
mixup_random_paths[i] : random_paths[i]; image orig = load_image(filename, 0, 0, c); int oh = orig.h; int ow = orig.w; int dw = (ow*jitter); int dh = (oh*jitter); if (!augmentation_calculated || !track) { augmentation_calculated = 1; r1 = random_float(); r2 = random_float(); r3 = random_float(); r4 = random_float(); r_scale = random_float(); dhue = rand_uniform_strong(-hue, hue); dsat = rand_scale(saturation); dexp = rand_scale(exposure); flip = use_flip ? random_gen() % 2 : 0; } int pleft = rand_precalc_random(-dw, dw, r1); int pright = rand_precalc_random(-dw, dw, r2); int ptop = rand_precalc_random(-dh, dh, r3); int pbot = rand_precalc_random(-dh, dh, r4); float scale = rand_precalc_random(.25, 2, r_scale); // unused currently if (letter_box) { float img_ar = (float)ow / (float)oh; float net_ar = (float)w / (float)h; float result_ar = img_ar / net_ar; //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar); if (result_ar > 1) // sheight - should be increased { float oh_tmp = ow / net_ar; float delta_h = (oh_tmp - oh) / 2; ptop = ptop - delta_h; pbot = pbot - delta_h; //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot); } else // swidth - should be increased { float ow_tmp = oh * net_ar; float delta_w = (ow_tmp - ow) / 2; pleft = pleft - delta_w; pright = pright - delta_w; //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright); } } int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft / ow) / sx; float dy = ((float)ptop / oh) / sy; image sized = resize_image(cropped, w, h); if (flip) flip_image(sized); distort_image(sized, dhue, dsat, dexp); //random_distort_image(sized, hue, saturation, exposure); fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h); if (i_mixup) { image old_img = sized; old_img.data = d.X.vals[i]; //show_image(sized, "new"); //show_image(old_img, "old"); //wait_until_press_key_cv(); blend_images(sized, 0.5, old_img, 0.5); blend_truth(truth, boxes, d.y.vals[i]); free_image(old_img); } d.X.vals[i] = sized.data; memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float)); if (show_imgs)// && i_mixup) { char buff[1000]; sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg(filename), random_gen()); int t; for (t = 0; t < boxes; ++t) { box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1); if (!b.x) break; int left = (b.x - b.w / 2.)*sized.w; int right = (b.x + b.w / 2.)*sized.w; int top = (b.y - b.h / 2.)*sized.h; int bot = (b.y + b.h / 2.)*sized.h; draw_box_width(sized, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB } save_image(sized, buff); if (show_imgs == 1) { show_image(sized, buff); wait_until_press_key_cv(); } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. 
Press Enter: \n"); //getchar(); } free_image(orig); free_image(cropped); free(truth); } } free(random_paths); if (mixup_random_paths) free(mixup_random_paths); return d; } #endif // OPENCV void *load_thread(void *ptr) { //srand(time(0)); //printf("Loading data: %d\n", random_gen()); load_args a = *(struct load_args*)ptr; if(a.exposure == 0) a.exposure = 1; if(a.saturation == 0) a.saturation = 1; if(a.aspect == 0) a.aspect = 1; if (a.type == OLD_CLASSIFICATION_DATA){ *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h); } else if (a.type == CLASSIFICATION_DATA){ *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.flip, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } else if (a.type == SUPER_DATA){ *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale); } else if (a.type == WRITING_DATA){ *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h); } else if (a.type == REGION_DATA){ *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure); } else if (a.type == DETECTION_DATA){ *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.c, a.num_boxes, a.classes, a.flip, a.blur, a.mixup, a.jitter, a.hue, a.saturation, a.exposure, a.mini_batch, a.track, a.augment_speed, a.letter_box, a.show_imgs); } else if (a.type == SWAG_DATA){ *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter); } else if (a.type == COMPARE_DATA){ *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h); } else if (a.type == IMAGE_DATA){ *(a.im) = load_image(a.path, 0, 0, a.c); *(a.resized) = resize_image(*(a.im), a.w, a.h); }else if (a.type == LETTERBOX_DATA) { *(a.im) = load_image(a.path, 0, 0, a.c); *(a.resized) = letterbox_image(*(a.im), a.w, a.h); } else if (a.type == TAG_DATA){ *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.flip, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } free(ptr); return 0; } pthread_t load_data_in_thread(load_args args) { pthread_t thread; struct load_args* ptr = (load_args*)calloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed"); return thread; } void *load_threads(void *ptr) { //srand(time(0)); int i; load_args args = *(load_args *)ptr; if (args.threads == 0) args.threads = 1; data *out = args.d; int total = args.n; free(ptr); data* buffers = (data*)calloc(args.threads, sizeof(data)); pthread_t* threads = (pthread_t*)calloc(args.threads, sizeof(pthread_t)); for(i = 0; i < args.threads; ++i){ args.d = buffers + i; args.n = (i+1) * total/args.threads - i * total/args.threads; threads[i] = load_data_in_thread(args); } for(i = 0; i < args.threads; ++i){ pthread_join(threads[i], 0); } *out = concat_datas(buffers, args.threads); out->shallow = 0; for(i = 0; i < args.threads; ++i){ buffers[i].shallow = 1; free_data(buffers[i]); } free(buffers); free(threads); return 0; } pthread_t load_data(load_args args) { pthread_t thread; struct load_args* ptr = (load_args*)calloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed"); return thread; } data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h) { if(m) paths = get_random_paths(paths, n, m); char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png"); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = load_image_paths_gray(replace_paths, n, 
out_w, out_h); if(m) free(paths); int i; for(i = 0; i < n; ++i) free(replace_paths[i]); free(replace_paths); return d; } data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = load_labels_paths(paths, n, labels, k, 0); if(m) free(paths); return d; } /* data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { data d = {0}; d.indexes = calloc(n, sizeof(int)); if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes); d.shallow = 0; d.X = load_image_augment_paths(paths, n, flip, min, max, size, angle, aspect, hue, saturation, exposure); d.y = load_labels_paths(paths, n, labels, k); if(m) free(paths); return d; } */ data load_data_super(char **paths, int n, int m, int w, int h, int scale) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; int i; d.X.rows = n; d.X.vals = (float**)calloc(n, sizeof(float*)); d.X.cols = w*h*3; d.y.rows = n; d.y.vals = (float**)calloc(n, sizeof(float*)); d.y.cols = w*scale * h*scale * 3; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], 0, 0); image crop = random_crop_image(im, w*scale, h*scale); int flip = random_gen()%2; if (flip) flip_image(crop); image resize = resize_image(crop, w, h); d.X.vals[i] = resize.data; d.y.vals[i] = crop.data; free_image(im); } if(m) free(paths); return d; } data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int use_flip, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_augment_paths(paths, n, use_flip, min, max, size, angle, aspect, hue, saturation, exposure); d.y = load_labels_paths(paths, n, labels, k, hierarchy); if(m) free(paths); return d; } data load_data_tag(char **paths, int n, int m, int k, int use_flip, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.w = size; d.h = size; d.shallow = 0; d.X = load_image_augment_paths(paths, n, use_flip, min, max, size, angle, aspect, hue, saturation, exposure); d.y = load_tags_paths(paths, n, k); if(m) free(paths); return d; } matrix concat_matrix(matrix m1, matrix m2) { int i, count = 0; matrix m; m.cols = m1.cols; m.rows = m1.rows+m2.rows; m.vals = (float**)calloc(m1.rows + m2.rows, sizeof(float*)); for(i = 0; i < m1.rows; ++i){ m.vals[count++] = m1.vals[i]; } for(i = 0; i < m2.rows; ++i){ m.vals[count++] = m2.vals[i]; } return m; } data concat_data(data d1, data d2) { data d = {0}; d.shallow = 1; d.X = concat_matrix(d1.X, d2.X); d.y = concat_matrix(d1.y, d2.y); return d; } data concat_datas(data *d, int n) { int i; data out = {0}; for(i = 0; i < n; ++i){ data newdata = concat_data(d[i], out); free_data(out); out = newdata; } return out; } data load_categorical_data_csv(char *filename, int target, int k) { data d = {0}; d.shallow = 0; matrix X = csv_to_matrix(filename); float *truth_1d = pop_column(&X, target); float **truth = one_hot_encode(truth_1d, X.rows, k); matrix y; y.rows = X.rows; y.cols = k; y.vals = truth; d.X = X; d.y = y; free(truth_1d); return d; } data load_cifar10_data(char *filename) { data d = {0}; d.shallow = 0; long i,j; matrix X = make_matrix(10000, 3072); matrix y = 
make_matrix(10000, 10); d.X = X; d.y = y; FILE *fp = fopen(filename, "rb"); if(!fp) file_error(filename); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i][j] = (double)bytes[j+1]; } } //translate_data_rows(d, -128); scale_data_rows(d, 1./255); //normalize_data_rows(d); fclose(fp); return d; } void get_random_batch(data d, int n, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = random_gen()%d.X.rows; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void get_next_batch(data d, int n, int offset, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = offset + j; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void smooth_data(data d) { int i, j; float scale = 1. / d.y.cols; float eps = .1; for(i = 0; i < d.y.rows; ++i){ for(j = 0; j < d.y.cols; ++j){ d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j]; } } } data load_all_cifar10() { data d = {0}; d.shallow = 0; int i,j,b; matrix X = make_matrix(50000, 3072); matrix y = make_matrix(50000, 10); d.X = X; d.y = y; for(b = 0; b < 5; ++b){ char buff[256]; sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1); FILE *fp = fopen(buff, "rb"); if(!fp) file_error(buff); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i+b*10000][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i+b*10000][j] = (double)bytes[j+1]; } } fclose(fp); } //normalize_data_rows(d); //translate_data_rows(d, -128); scale_data_rows(d, 1./255); smooth_data(d); return d; } data load_go(char *filename) { FILE *fp = fopen(filename, "rb"); matrix X = make_matrix(3363059, 361); matrix y = make_matrix(3363059, 361); int row, col; if(!fp) file_error(filename); char *label; int count = 0; while((label = fgetl(fp))){ int i; if(count == X.rows){ X = resize_matrix(X, count*2); y = resize_matrix(y, count*2); } sscanf(label, "%d %d", &row, &col); char *board = fgetl(fp); int index = row*19 + col; y.vals[count][index] = 1; for(i = 0; i < 19*19; ++i){ float val = 0; if(board[i] == '1') val = 1; else if(board[i] == '2') val = -1; X.vals[count][i] = val; } ++count; free(label); free(board); } X = resize_matrix(X, count); y = resize_matrix(y, count); data d = {0}; d.shallow = 0; d.X = X; d.y = y; fclose(fp); return d; } void randomize_data(data d) { int i; for(i = d.X.rows-1; i > 0; --i){ int index = random_gen()%i; float *swap = d.X.vals[index]; d.X.vals[index] = d.X.vals[i]; d.X.vals[i] = swap; swap = d.y.vals[index]; d.y.vals[index] = d.y.vals[i]; d.y.vals[i] = swap; } } void scale_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ scale_array(d.X.vals[i], d.X.cols, s); } } void translate_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ translate_array(d.X.vals[i], d.X.cols, s); } } void normalize_data_rows(data d) { int i; for(i = 0; i < d.X.rows; ++i){ normalize_array(d.X.vals[i], d.X.cols); } } data get_data_part(data d, int part, int total) { data p = {0}; p.shallow = 1; p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total; p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total; p.X.cols = d.X.cols; p.y.cols = d.y.cols; p.X.vals = d.X.vals + d.X.rows * part / total; p.y.vals = d.y.vals + d.y.rows * part / total; return p; } data 
get_random_data(data d, int num) { data r = {0}; r.shallow = 1; r.X.rows = num; r.y.rows = num; r.X.cols = d.X.cols; r.y.cols = d.y.cols; r.X.vals = (float**)calloc(num, sizeof(float*)); r.y.vals = (float**)calloc(num, sizeof(float*)); int i; for(i = 0; i < num; ++i){ int index = random_gen()%d.X.rows; r.X.vals[i] = d.X.vals[index]; r.y.vals[i] = d.y.vals[index]; } return r; } data *split_data(data d, int part, int total) { data* split = (data*)calloc(2, sizeof(data)); int i; int start = part*d.X.rows/total; int end = (part+1)*d.X.rows/total; data train; data test; train.shallow = test.shallow = 1; test.X.rows = test.y.rows = end-start; train.X.rows = train.y.rows = d.X.rows - (end-start); train.X.cols = test.X.cols = d.X.cols; train.y.cols = test.y.cols = d.y.cols; train.X.vals = (float**)calloc(train.X.rows, sizeof(float*)); test.X.vals = (float**)calloc(test.X.rows, sizeof(float*)); train.y.vals = (float**)calloc(train.y.rows, sizeof(float*)); test.y.vals = (float**)calloc(test.y.rows, sizeof(float*)); for(i = 0; i < start; ++i){ train.X.vals[i] = d.X.vals[i]; train.y.vals[i] = d.y.vals[i]; } for(i = start; i < end; ++i){ test.X.vals[i-start] = d.X.vals[i]; test.y.vals[i-start] = d.y.vals[i]; } for(i = end; i < d.X.rows; ++i){ train.X.vals[i-(end-start)] = d.X.vals[i]; train.y.vals[i-(end-start)] = d.y.vals[i]; } split[0] = train; split[1] = test; return split; }
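/*
 * Hedged usage sketch (illustration only, not part of data.c): the functions
 * above are normally driven as a double-buffered pipeline -- load_data()
 * spawns load_threads() to fill one `data` buffer in the background while the
 * caller consumes the previous batch, then the buffers are swapped. The
 * load_args fields used here are the ones read by load_thread() above; the
 * consumer step is elided, and the batch/image sizes are made-up values.
 */
#if 0
void example_training_pipeline(char **paths, int npaths, char **labels, int classes)
{
    data buffer;                        /* filled in the background */
    load_args args = {0};
    args.paths = paths;
    args.m = npaths;                    /* size of the path pool to sample from */
    args.n = 64;                        /* examples loaded per call */
    args.labels = labels;
    args.classes = classes;
    args.w = 256; args.h = 256;
    args.threads = 8;                   /* load_threads() splits args.n across these */
    args.type = OLD_CLASSIFICATION_DATA;
    args.d = &buffer;

    pthread_t loader = load_data(args); /* starts load_threads() asynchronously */
    int iter;
    for (iter = 0; iter < 1000; ++iter) {
        pthread_join(loader, 0);        /* wait until `buffer` is ready */
        data train = buffer;            /* shallow struct copy takes the batch */
        loader = load_data(args);       /* immediately start loading the next one */
        /* ... consume `train` here, e.g. via get_next_batch() ... */
        free_data(train);
    }
    pthread_join(loader, 0);
    free_data(buffer);
}
#endif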
GB_unaryop__lnot_uint64_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint64_int16 // op(A') function: GB_tran__lnot_uint64_int16 // C type: uint64_t // A type: int16_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int16_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint64_int16 ( uint64_t *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint64_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
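/*
 * Hedged sketch: what GB_unop__lnot_uint64_int16 above reduces to once
 * GB_GETA / GB_CASTING / GB_OP expand -- cast the int16_t entry to uint64_t,
 * then apply logical-not. Standalone form for illustration only.
 */
#if 0
#include <stdint.h>
static void lnot_uint64_int16_expanded
(
    uint64_t *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint64_t x = (uint64_t) Ax [p] ;    /* GB_GETA + GB_CASTING */
        Cx [p] = !(x != 0) ;                /* GB_OP: z = !(x != 0) */
    }
}
#endif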
GB_binop__minus_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_int64) // A.*B function (eWiseMult): GB (_AemultB_01__minus_int64) // A.*B function (eWiseMult): GB (_AemultB_02__minus_int64) // A.*B function (eWiseMult): GB (_AemultB_03__minus_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_int64) // A*D function (colscale): GB (_AxD__minus_int64) // D*A function (rowscale): GB (_DxB__minus_int64) // C+=B function (dense accum): GB (_Cdense_accumB__minus_int64) // C+=b function (dense accum): GB (_Cdense_accumb__minus_int64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_int64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_int64) // C=scalar+B GB (_bind1st__minus_int64) // C=scalar+B' GB (_bind1st_tran__minus_int64) // C=A+scalar GB (_bind2nd__minus_int64) // C=A'+scalar GB (_bind2nd_tran__minus_int64) // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x - y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_INT64 || GxB_NO_MINUS_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
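// MINUS is in the list above, so the fused dense-accum kernel below is
// compiled in for this operator; for operators outside that list the
// generator emits a "GB ((none))" stub under "#if 0" instead (compare the
// ISLE and LE files later in this corpus).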
void GB (_Cdense_ewise3_accum__minus_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__minus_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_int64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__minus_int64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__minus_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK 
; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__minus_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__minus_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__minus_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__minus_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = (x - bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__minus_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij - y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x - aij) ; \ } GrB_Info GB (_bind1st_tran__minus_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int 
nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB (_bind2nd_tran__minus_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
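/*
 * Hedged sketch: the bind1st/bind2nd kernels above with the GBB/GBX macros
 * expanded for the non-iso case. GBB(Bb,p) tests whether entry p is present
 * in a bitmap matrix (always true when the bitmap array is NULL, i.e. the
 * matrix is full); that behavior is modeled here with a plain NULL check.
 */
#if 0
#include <stdint.h>
/* bind1st: Cx [p] = x - Bx [p] */
static void bind1st_minus_int64_expanded
(
    int64_t *restrict Cx, int64_t x,
    const int64_t *restrict Bx,
    const int8_t *restrict Bb,      /* bitmap; Bb [p] != 0 means present */
    int64_t bnz
)
{
    for (int64_t p = 0 ; p < bnz ; p++)
    {
        if (Bb != NULL && !Bb [p]) continue ;   /* skip absent entries */
        Cx [p] = x - Bx [p] ;
    }
}
/* bind2nd: Cx [p] = Ax [p] - y */
static void bind2nd_minus_int64_expanded
(
    int64_t *restrict Cx,
    const int64_t *restrict Ax,
    const int8_t *restrict Ab, int64_t y,
    int64_t anz
)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;
        Cx [p] = Ax [p] - y ;
    }
}
#endif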
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 24; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,12);t1++) { lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24)); ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-1,2)),ceild(24*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(12*t1+Ny+21,24)),floord(24*t2+Ny+20,24)),floord(24*t1-24*t2+Nz+Ny+19,24));t3++) { for (t4=max(max(max(0,ceild(3*t1-127,128)),ceild(24*t2-Nz-508,512)),ceild(24*t3-Ny-508,512));t4<=min(min(min(min(floord(Nt+Nx-4,512),floord(12*t1+Nx+21,512)),floord(24*t2+Nx+20,512)),floord(24*t3+Nx+20,512)),floord(24*t1-24*t2+Nz+Nx+19,512));t4++) { for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),24*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),24*t3+22),512*t4+510),24*t1-24*t2+Nz+21);t5++) { for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) { lbv=max(512*t4,t5+1); ubv=min(512*t4+511,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
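/*
 * Hedged sketch: the reference (untiled) computation that the CLooG-generated
 * tiled loop nest above performs. Each time step writes the other half of the
 * double buffer A[2][Nz][Ny][Nx]; alpha and beta are the constants from main.
 */
#if 0
void reference_3d7pt(double ****A, int Nt, int Nz, int Ny, int Nx,
                     double alpha, double beta)
{
    int t, i, j, k;
    for (t = 0; t < Nt - 1; t++)
        for (i = 1; i < Nz - 1; i++)
            for (j = 1; j < Ny - 1; j++)
                for (k = 1; k < Nx - 1; k++)
                    /* order-1, 7-point stencil: center plus 6 face neighbors */
                    A[(t + 1) % 2][i][j][k] =
                        alpha * A[t % 2][i][j][k]
                        + beta * ( A[t % 2][i - 1][j][k] + A[t % 2][i + 1][j][k]
                                 + A[t % 2][i][j - 1][k] + A[t % 2][i][j + 1][k]
                                 + A[t % 2][i][j][k - 1] + A[t % 2][i][j][k + 1] );
}
#endif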
GB_binop__isle_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_int16) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__isle_int16) // A.*B function (eWiseMult): GB (_AemultB_03__isle_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int16) // A*D function (colscale): GB (_AxD__isle_int16) // D*A function (rowscale): GB (_DxB__isle_int16) // C+=B function (dense accum): GB (_Cdense_accumB__isle_int16) // C+=b function (dense accum): GB (_Cdense_accumb__isle_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int16) // C=scalar+B GB (_bind1st__isle_int16) // C=scalar+B' GB (_bind1st_tran__isle_int16) // C=A+scalar GB (_bind2nd__isle_int16) // C=A'+scalar GB (_bind2nd_tran__isle_int16) // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_INT16 || GxB_NO_ISLE_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isle_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_int16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_int16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } 
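// GB_BINOP_FLIP is 0 above even though x <= y is not commutative: as the
// comment inside _AemultB_02 below notes, a flipped application z = isle(y,x)
// is handled upstream -- presumably by substituting the mirrored operator
// (z = isge(x,y)) -- so no flipped variant needs to be generated here.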
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isle_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isle_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = Bx [p] ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = Ax [p] ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
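/*
 * Hedged note: ISLE ("is less-or-equal") above returns the comparison result
 * in the operand type -- GB_CTYPE is int16_t, so cij is 0 or 1 stored as an
 * int16_t -- whereas the LE variant in the next file returns a genuine bool
 * (its GB_CTYPE is bool). In plain C terms:
 */
#if 0
#include <stdint.h>
#include <stdbool.h>
static int16_t isle_int16 (int16_t  x, int16_t  y) { return (x <= y) ; } /* ISLE: C type == A type */
static bool    le_uint16  (uint16_t x, uint16_t y) { return (x <= y) ; } /* LE:   C type is bool   */
#endif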
GB_binop__le_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__le_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__le_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__le_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__le_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__le_uint16) // A*D function (colscale): GB (_AxD__le_uint16) // D*A function (rowscale): GB (_DxB__le_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__le_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__le_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_uint16) // C=scalar+B GB (_bind1st__le_uint16) // C=scalar+B' GB (_bind1st_tran__le_uint16) // C=A+scalar GB (_bind2nd__le_uint16) // C=A'+scalar GB (_bind2nd_tran__le_uint16) // C type: bool // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 0 // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LE || GxB_NO_UINT16 || GxB_NO_LE_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__le_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__le_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__le_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__le_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__le_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__le_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) 
; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__le_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__le_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__le_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__le_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__le_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__le_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__le_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) 
{ // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__le_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
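Throughout the le_uint16 file, entries are read through GBX (Ax, pA, A_iso): when a matrix is iso-valued, every entry shares the single value stored at position 0. A sketch of what such an accessor has to do (illustrative macro name; the library's actual definition may differ):

// Iso-aware read: an iso matrix stores one value for all entries.
#define GBX_SKETCH(Ax,p,iso) ((iso) ? (Ax) [0] : (Ax) [p])

With this, GB_GETA above expands to an ordinary indexed load for a non-iso matrix, and to a broadcast of Ax [0] for an iso one.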
trapezoide.c
#include <stdio.h>
#include <omp.h>

int main(){
    int threads = 16;
    // Integration limits
    double izquierda;
    double derecha;
    printf("left limit: ");
    scanf("%lf", &izquierda);
    printf("right limit: ");
    scanf("%lf", &derecha);
    int step = 10000000;
    double delta = (derecha - izquierda)/step;
    // Trapezoidal rule for f(x) = 10/x: the two endpoints carry weight 1/2
    double integracion = (10/izquierda + 10/derecha)/2;
    // Use OpenMP to parallelize the interior-point sum; the reduction
    // clause gives each thread a private partial sum, so there is no
    // data race on the shared accumulator
    #pragma omp parallel for num_threads(threads) reduction(+: integracion)
    for(int i = 1; i < step; i++) {
        // Interior points carry full weight, 10/x, consistent with the
        // endpoint terms above; only the endpoints are halved
        integracion += 10/(izquierda + i*delta);
    }
    // Scale by the step width to finish the approximation
    integracion = integracion * delta;
    printf("Integration result: %.2f \n", integracion);
    return 0;
}
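To try it (standard OpenMP build flags; the limits below are an example where the exact answer is known):

gcc -O2 -fopenmp trapezoide.c -o trapezoide
echo "1 10" | ./trapezoide
# integrates f(x) = 10/x over [1, 10]; exact value is 10*ln(10) ≈ 23.03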
pi.c
#include<stdio.h>
#include<omp.h>

int main(int argc, char *argv[]){
    int nThreads = 4;
    omp_set_num_threads(nThreads);
    unsigned long long n = 100000000;
    //scanf("%llu", &n);
    double sum = 0, step = 1.0/((double)n);
    /*
     * FOR
     * schedule(type, [chunk])
     * shared(list)
     * private(list)
     * firstprivate(list)
     * lastprivate(list)
     * reduction(operator: list)
     * nowait
     * ordered
     */
    #pragma omp parallel for reduction(+: sum)
    for(unsigned long long i = 0; i < n; i++){
        sum += 4.0/(1.0 + ((i + 0.5) * step)*((i + 0.5) * step));
    }
    printf("Pi = %.20lf\n", step*sum);
    return 0;
}
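The reduction(+: sum) clause listed in the cheat-sheet above is, conceptually, a per-thread partial sum combined once at the end. A hand-written equivalent for the same pi estimate (illustrative only; the clause is both shorter and typically faster):

#include <stdio.h>
#include <omp.h>

int main(void){
    unsigned long long n = 100000000;
    double step = 1.0/((double)n), sum = 0;
    #pragma omp parallel
    {
        double local = 0;                       // per-thread accumulator
        #pragma omp for
        for (unsigned long long i = 0; i < n; i++){
            double x = (i + 0.5) * step;
            local += 4.0/(1.0 + x*x);
        }
        #pragma omp critical                    // combine once per thread
        sum += local;
    }
    printf("Pi = %.20lf\n", step*sum);
    return 0;
}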
GB_unaryop__abs_int64_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int64_fp32 // op(A') function: GB_tran__abs_int64_fp32 // C type: int64_t // A type: float // cast: int64_t cij ; GB_CAST_SIGNED(cij,aij,64) // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ float #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int64_t z ; GB_CAST_SIGNED(z,x,64) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int64_fp32 ( int64_t *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int64_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
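Without the macro layer, the per-element work of GB_unop__abs_int64_fp32 is: cast the float to int64_t (GB_CAST_SIGNED), then take the integer absolute value (GB_IABS). A rough standalone sketch follows; note the library's real GB_CAST_SIGNED also guards against NaN and out-of-range values, which is omitted here:

#include <stdint.h>

// Cx [p] = |(int64_t) Ax [p]| : typecast first, then apply the unary op,
// mirroring GB_CASTING followed by GB_OP in the kernel above.
static void abs_int64_fp32_sketch (int64_t *Cx, const float *Ax, int64_t anz)
{
    #pragma omp parallel for schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        int64_t z = (int64_t) Ax [p] ;   // NaN/overflow handling omitted
        Cx [p] = (z < 0) ? (-z) : z ;
    }
}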
stream.c
/*-----------------------------------------------------------------------*/ /* Program: STREAM */ /* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2013: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear, and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ # include <stdio.h> # include <unistd.h> # include <math.h> # include <float.h> # include <limits.h> # include <sys/time.h> /*----------------------------------------------------------------------- * INSTRUCTIONS: * * 1) STREAM requires different amounts of memory to run on different * systems, depending on both the system cache size(s) and the * granularity of the system timer. * You should adjust the value of 'STREAM_ARRAY_SIZE' (below) * to meet *both* of the following criteria: * (a) Each array must be at least 4 times the size of the * available cache memory. I don't worry about the difference * between 10^6 and 2^20, so in practice the minimum array size * is about 3.8 times the cache size. * Example 1: One Xeon E3 with 8 MB L3 cache * STREAM_ARRAY_SIZE should be >= 4 million, giving * an array size of 30.5 MB and a total memory requirement * of 91.5 MB. * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) * STREAM_ARRAY_SIZE should be >= 20 million, giving * an array size of 153 MB and a total memory requirement * of 458 MB. * (b) The size should be large enough so that the 'timing calibration' * output by the program is at least 20 clock-ticks. * Example: most versions of Windows have a 10 millisecond timer * granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds. * If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. 
* This means the each array must be at least 1 GB, or 128M elements. * * Version 5.10 increases the default array size from 2 million * elements to 10 million elements in response to the increasing * size of L3 caches. The new default size is large enough for caches * up to 20 MB. * Version 5.10 changes the loop index variables from "register int" * to "ssize_t", which allows array indices >2^32 (4 billion) * on properly configured 64-bit systems. Additional compiler options * (such as "-mcmodel=medium") may be required for large memory runs. * * Array size can be set at compile time without modifying the source * code for the (many) compilers that support preprocessor definitions * on the compile line. E.g., * gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M * will override the default size of 10M with a new size of 100M elements * per array. */ #ifndef STREAM_ARRAY_SIZE # define STREAM_ARRAY_SIZE 10000000 #endif /* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result * for any iteration after the first, therefore the minimum value * for NTIMES is 2. * There are no rules on maximum allowable values for NTIMES, but * values larger than the default are unlikely to noticeably * increase the reported performance. * NTIMES can also be set on the compile line without changing the source * code using, for example, "-DNTIMES=7". */ #ifdef NTIMES #if NTIMES<=1 # define NTIMES 10 #endif #endif #ifndef NTIMES # define NTIMES 10 #endif /* Users are allowed to modify the "OFFSET" variable, which *may* change the * relative alignment of the arrays (though compilers may change the * effective offset by making the arrays non-contiguous on some systems). * Use of non-zero values for OFFSET can be especially helpful if the * STREAM_ARRAY_SIZE is set to a value close to a large power of 2. * OFFSET can also be set on the compile line without changing the source * code using, for example, "-DOFFSET=56". */ #ifndef OFFSET # define OFFSET 0 #endif /* * 3) Compile the code with optimization. Many compilers generate * unreasonably bad code before the optimizer tightens things up. * If the results are unreasonably good, on the other hand, the * optimizer might be too smart for me! * * For a simple single-core version, try compiling with: * cc -O stream.c -o stream * This is known to work on many, many systems.... * * To use multiple cores, you need to tell the compiler to obey the OpenMP * directives in the code. This varies by compiler, but a common example is * gcc -O -fopenmp stream.c -o stream_omp * The environment variable OMP_NUM_THREADS allows runtime control of the * number of threads/cores used when the resulting "stream_omp" program * is executed. * * To run with single-precision variables and arithmetic, simply add * -DSTREAM_TYPE=float * to the compile line. * Note that this changes the minimum array sizes required --- see (1) above. * * The preprocessor directive "TUNED" does not do much -- it simply causes the * code to call separate functions to execute each kernel. Trivial versions * of these functions are provided, but they are *not* tuned -- they just * provide predefined interfaces to be replaced with tuned code. * * * 4) Optional: Mail the results to mccalpin@cs.virginia.edu * Be sure to include info that will help me understand: * a) the computer hardware configuration (e.g., processor model, memory type) * b) the compiler name/version and compilation flags * c) any run-time information (such as OMP_NUM_THREADS) * d) all of the output from the test case. 
* * Thanks! * *-----------------------------------------------------------------------*/ # define HLINE "-------------------------------------------------------------\n" # ifndef MIN # define MIN(x,y) ((x)<(y)?(x):(y)) # endif # ifndef MAX # define MAX(x,y) ((x)>(y)?(x):(y)) # endif #ifndef STREAM_TYPE #define STREAM_TYPE double #endif static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET], b[STREAM_ARRAY_SIZE+OFFSET], c[STREAM_ARRAY_SIZE+OFFSET]; static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX}; static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "}; static double bytes[4] = { 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE }; int checktick(void); double mysecond(void); extern void checkSTREAMresults(void); #ifdef TUNED extern void tuned_STREAM_Copy(); extern void tuned_STREAM_Scale(STREAM_TYPE scalar); extern void tuned_STREAM_Add(); extern void tuned_STREAM_Triad(STREAM_TYPE scalar); #endif #ifdef _OPENMP extern int omp_get_num_threads(); #endif int main() { int quantum, checktick(); int BytesPerWord; int k; ssize_t j; STREAM_TYPE scalar; double t, times[4][NTIMES]; /* --- SETUP --- determine precision and check timing --- */ printf(HLINE); printf("STREAM version $Revision: 5.10 $\n"); printf(HLINE); BytesPerWord = sizeof(STREAM_TYPE); printf("This system uses %d bytes per array element.\n", BytesPerWord); printf(HLINE); #ifdef N printf("***** WARNING: ******\n"); printf(" It appears that you set the preprocessor variable N when compiling this code.\n"); printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n"); printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE); printf("***** WARNING: ******\n"); #endif printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET); printf("Memory per array = %.1f MiB (= %.1f GiB).\n", BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0), BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0)); printf("Total memory required = %.1f MiB (= %.1f GiB).\n", (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.), (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.)); printf("Each kernel will be executed %d times.\n", NTIMES); printf(" The *best* time for each kernel (excluding the first iteration)\n"); printf(" will be used to compute the reported bandwidth.\n"); #ifdef _OPENMP printf(HLINE); #pragma omp parallel { #pragma omp master { k = omp_get_num_threads(); printf ("Number of Threads requested = %i\n",k); } } #endif #ifdef _OPENMP k = 0; #pragma omp parallel #pragma omp atomic k++; printf ("Number of Threads counted = %i\n",k); #endif /* Get initial value for system clock. 
*/ #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<STREAM_ARRAY_SIZE; j++) { a[j] = 1.0; b[j] = 2.0; c[j] = 0.0; } printf(HLINE); if ( (quantum = checktick()) >= 1) printf("Your clock granularity/precision appears to be " "%d microseconds.\n", quantum); else { printf("Your clock granularity appears to be " "less than one microsecond.\n"); quantum = 1; } t = mysecond(); #ifdef _OPENMP #pragma omp parallel for #endif for (j = 0; j < STREAM_ARRAY_SIZE; j++) a[j] = 2.0E0 * a[j]; t = 1.0E6 * (mysecond() - t); printf("Each test below will take on the order" " of %d microseconds.\n", (int) t ); printf(" (= %d clock ticks)\n", (int) (t/quantum) ); printf("Increase the size of the arrays if this shows that\n"); printf("you are not getting at least 20 clock ticks per test.\n"); printf(HLINE); printf("WARNING -- The above is only a rough guideline.\n"); printf("For best results, please be sure you know the\n"); printf("precision of your system timer.\n"); printf(HLINE); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ scalar = 3.0; for (k=0; k<NTIMES; k++) { times[0][k] = mysecond(); #ifdef TUNED tuned_STREAM_Copy(); #else #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]; #endif times[0][k] = mysecond() - times[0][k]; times[1][k] = mysecond(); #ifdef TUNED tuned_STREAM_Scale(scalar); #else #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<STREAM_ARRAY_SIZE; j++) b[j] = scalar*c[j]; #endif times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #ifdef TUNED tuned_STREAM_Add(); #else #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]+b[j]; #endif times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef TUNED tuned_STREAM_Triad(scalar); #else #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<STREAM_ARRAY_SIZE; j++) a[j] = b[j]+scalar*c[j]; #endif times[3][k] = mysecond() - times[3][k]; } /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } } printf("Function Best Rate MB/s Avg time Min time Max time\n"); for (j=0; j<4; j++) { avgtime[j] = avgtime[j]/(double)(NTIMES-1); printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j], 1.0E-06 * bytes[j]/mintime[j], avgtime[j], mintime[j], maxtime[j]); } printf(HLINE); /* --- Check Results --- */ checkSTREAMresults(); printf(HLINE); return 0; } # define M 20 int checktick(void) { int i, minDelta, Delta; double t1, t2, timesfound[M]; /* Collect a sequence of M unique time values from the system. */ for (i = 0; i < M; i++) { t1 = mysecond(); while( ((t2=mysecond()) - t1) < 1.0E-6 ) ; timesfound[i] = t1 = t2; } /* * Determine the minimum difference between these M values. * This result will be our estimate (in microseconds) for the * clock granularity. */ minDelta = 1000000; for (i = 1; i < M; i++) { Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1])); minDelta = MIN(minDelta, MAX(Delta,0)); } return(minDelta); } /* A gettimeofday routine to give access to the wall clock timer on most UNIX-like systems. */ #include <sys/time.h> double mysecond(void) { struct timeval tp; struct timezone tzp; int i; i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } #ifndef abs #define abs(a) ((a) >= 0 ? 
(a) : -(a)) #endif void checkSTREAMresults (void) { STREAM_TYPE aj,bj,cj,scalar; STREAM_TYPE aSumErr,bSumErr,cSumErr; STREAM_TYPE aAvgErr,bAvgErr,cAvgErr; double epsilon; ssize_t j; int k,ierr,err; /* reproduce initialization */ aj = 1.0; bj = 2.0; cj = 0.0; /* a[] is modified during timing check */ aj = 2.0E0 * aj; /* now execute timing loop */ scalar = 3.0; for (k=0; k<NTIMES; k++) { cj = aj; bj = scalar*cj; cj = aj+bj; aj = bj+scalar*cj; } /* accumulate deltas between observed and expected results */ aSumErr = 0.0; bSumErr = 0.0; cSumErr = 0.0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { aSumErr += abs(a[j] - aj); bSumErr += abs(b[j] - bj); cSumErr += abs(c[j] - cj); // if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN } aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; if (sizeof(STREAM_TYPE) == 4) { epsilon = 1.e-6; } else if (sizeof(STREAM_TYPE) == 8) { epsilon = 1.e-13; } else { printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE)); epsilon = 1.e-6; } err = 0; if (abs(aAvgErr/aj) > epsilon) { err++; printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(a[j]/aj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,aj,a[j],abs((aj-a[j])/aAvgErr)); } #endif } } printf(" For array a[], %d errors were found.\n",ierr); } if (abs(bAvgErr/bj) > epsilon) { err++; printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj); printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(b[j]/bj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,bj,b[j],abs((bj-b[j])/bAvgErr)); } #endif } } printf(" For array b[], %d errors were found.\n",ierr); } if (abs(cAvgErr/cj) > epsilon) { err++; printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj); printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(c[j]/cj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,cj,c[j],abs((cj-c[j])/cAvgErr)); } #endif } } printf(" For array c[], %d errors were found.\n",ierr); } if (err == 0) { printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon); } #ifdef VERBOSE printf ("Results Validation Verbose Results: \n"); printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj); printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]); printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj)); #endif } #ifdef TUNED /* stubs for "tuned" versions of the kernels */ void tuned_STREAM_Copy() { ssize_t j; #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]; } void tuned_STREAM_Scale(STREAM_TYPE scalar) { ssize_t j; #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) b[j] = scalar*c[j]; } void tuned_STREAM_Add() { ssize_t j; 
#pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]+b[j]; } void tuned_STREAM_Triad(STREAM_TYPE scalar) { ssize_t j; #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) a[j] = b[j]+scalar*c[j]; } /* end of stubs for the "tuned" versions of the kernels */ #endif
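A worked instance of the summary-loop formula above: with the default STREAM_ARRAY_SIZE of 10 million doubles, the Copy kernel moves bytes[0] = 2 * 8 * 1e7 = 160 MB per pass (one array read plus one array write), so a best (minimum) time of 0.020 s is reported as 1.0E-06 * 160e6 / 0.020 = 8000 MB/s.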
pngquant.c
/* pngquant.c - quantize the colors in an alphamap down to a specified number ** ** Copyright (C) 1989, 1991 by Jef Poskanzer. ** ** Permission to use, copy, modify, and distribute this software and its ** documentation for any purpose and without fee is hereby granted, provided ** that the above copyright notice appear in all copies and that both that ** copyright notice and this permission notice appear in supporting ** documentation. This software is provided "as is" without express or ** implied warranty. ** ** - - - - ** ** © 1997-2002 by Greg Roelofs; based on an idea by Stefan Schneider. ** © 2009-2015 by Kornel Lesiński. ** ** All rights reserved. ** ** Redistribution and use in source and binary forms, with or without modification, ** are permitted provided that the following conditions are met: ** ** 1. Redistributions of source code must retain the above copyright notice, ** this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright notice, ** this list of conditions and the following disclaimer in the documentation ** and/or other materials provided with the distribution. ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ** AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ** IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ** DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE ** FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL ** DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ** SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ** CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, ** OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ** OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <stdbool.h> #include <getopt.h> #include <unistd.h> #include "pngquant.h" extern char *optarg; extern int optind, opterr; #if defined(WIN32) || defined(__WIN32__) # include <fcntl.h> /* O_BINARY */ # include <io.h> /* setmode() */ #endif #ifdef _OPENMP #include <omp.h> #else #define omp_get_max_threads() 1 #define omp_get_thread_num() 0 #endif static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, png8_image *output_image); static void set_palette(liq_result *result, png8_image *output_image); static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool verbose); static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options); static char *add_filename_extension(const char *filename, const char *newext); static bool file_exists(const char *outname); static void verbose_printf(struct pngquant_options *context, const char *fmt, ...) 
{ if (context->log_callback) { va_list va; va_start(va, fmt); int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0 va_end(va); char buf[required_space]; va_start(va, fmt); vsnprintf(buf, required_space, fmt, va); va_end(va); context->log_callback(context->liq, buf, context->log_callback_user_info); } } static void log_callback(const liq_attr *attr, const char *msg, void* user_info) { fprintf(stderr, "%s\n", msg); } #ifdef _OPENMP #define LOG_BUFFER_SIZE 1300 struct buffered_log { int buf_used; char buf[LOG_BUFFER_SIZE]; }; static void log_callback_buferred_flush(const liq_attr *attr, void *context) { struct buffered_log *log = context; if (log->buf_used) { fwrite(log->buf, 1, log->buf_used, stderr); fflush(stderr); log->buf_used = 0; } } static void log_callback_buferred(const liq_attr *attr, const char *msg, void* context) { struct buffered_log *log = context; int len = strlen(msg); if (len > LOG_BUFFER_SIZE-2) len = LOG_BUFFER_SIZE-2; if (len > LOG_BUFFER_SIZE - log->buf_used - 2) log_callback_buferred_flush(attr, log); memcpy(&log->buf[log->buf_used], msg, len); log->buf_used += len+1; log->buf[log->buf_used-1] = '\n'; log->buf[log->buf_used] = '\0'; } #endif enum {arg_floyd=1, arg_ordered, arg_ext, arg_no_force, arg_iebug, arg_transbug, arg_map, arg_posterize, arg_skip_larger}; static const struct option long_options[] = { {"verbose", no_argument, NULL, 'v'}, {"quiet", no_argument, NULL, 'q'}, {"force", no_argument, NULL, 'f'}, {"no-force", no_argument, NULL, arg_no_force}, {"floyd", optional_argument, NULL, arg_floyd}, {"ordered", no_argument, NULL, arg_ordered}, {"nofs", no_argument, NULL, arg_ordered}, {"iebug", no_argument, NULL, arg_iebug}, {"transbug", no_argument, NULL, arg_transbug}, {"ext", required_argument, NULL, arg_ext}, {"skip-if-larger", no_argument, NULL, arg_skip_larger}, {"output", required_argument, NULL, 'o'}, {"speed", required_argument, NULL, 's'}, {"quality", required_argument, NULL, 'Q'}, {"posterize", required_argument, NULL, arg_posterize}, {"map", required_argument, NULL, arg_map}, {"version", no_argument, NULL, 'V'}, {"help", no_argument, NULL, 'h'}, {NULL, 0, NULL, 0}, }; pngquant_error pngquant_file(const char *filename, const char *outname, struct pngquant_options *options) { pngquant_error retval = SUCCESS; verbose_printf(options, "%s:", filename); liq_image *input_image = NULL; png24_image input_image_rwpng = {}; bool keep_input_pixels = options->skip_if_larger || (options->using_stdout && options->min_quality_limit); // original may need to be output to stdout if (SUCCESS == retval) { retval = read_image(options->liq, filename, options->using_stdin, &input_image_rwpng, &input_image, keep_input_pixels, options->verbose); } int quality_percent = 90; // quality on 0-100 scale, updated upon successful remap png8_image output_image = {}; if (SUCCESS == retval) { verbose_printf(options, " read %luKB file", (input_image_rwpng.file_size+1023UL)/1024UL); #if USE_LCMS if (input_image_rwpng.lcms_status == ICCP) { verbose_printf(options, " used embedded ICC profile to transform image to sRGB colorspace"); } else if (input_image_rwpng.lcms_status == GAMA_CHRM) { verbose_printf(options, " used gAMA and cHRM chunks to transform image to sRGB colorspace"); } else if (input_image_rwpng.lcms_status == ICCP_WARN_GRAY) { verbose_printf(options, " warning: ignored ICC profile in GRAY colorspace"); } #endif if (input_image_rwpng.gamma != 0.45455) { verbose_printf(options, " corrected image from gamma %2.1f to sRGB gamma", 1.0/input_image_rwpng.gamma); } // when using 
image as source of a fixed palette the palette is extracted using regular quantization liq_result *remap; liq_error remap_error = liq_image_quantize(options->fixed_palette_image ? options->fixed_palette_image : input_image, options->liq, &remap); if (LIQ_OK == remap_error) { liq_set_output_gamma(remap, 0.45455); // fixed gamma ~2.2 for the web. PNG can't store exact 1/2.2 liq_set_dithering_level(remap, options->floyd); retval = prepare_output_image(remap, input_image, &output_image); if (SUCCESS == retval) { if (LIQ_OK != liq_write_remapped_image_rows(remap, input_image, output_image.row_pointers)) { retval = OUT_OF_MEMORY_ERROR; } set_palette(remap, &output_image); double palette_error = liq_get_quantization_error(remap); if (palette_error >= 0) { quality_percent = liq_get_quantization_quality(remap); verbose_printf(options, " mapped image to new colors...MSE=%.3f (Q=%d)", palette_error, quality_percent); } } liq_result_destroy(remap); } else if (LIQ_QUALITY_TOO_LOW == remap_error) { retval = TOO_LOW_QUALITY; } else { retval = INVALID_ARGUMENT; // dunno } } if (SUCCESS == retval) { if (options->skip_if_larger) { // this is very rough approximation, but generally avoid losing more quality than is gained in file size. // Quality is squared, because even greater savings are needed to justify big quality loss. double quality = quality_percent/100.0; output_image.maximum_file_size = (input_image_rwpng.file_size-1) * quality*quality; } output_image.fast_compression = options->fast_compression; output_image.chunks = input_image_rwpng.chunks; input_image_rwpng.chunks = NULL; retval = write_image(&output_image, NULL, outname, options); if (TOO_LARGE_FILE == retval) { verbose_printf(options, " file exceeded expected size of %luKB", (unsigned long)output_image.maximum_file_size/1024UL); } } if (options->using_stdout && keep_input_pixels && (TOO_LARGE_FILE == retval || TOO_LOW_QUALITY == retval)) { // when outputting to stdout it'd be nasty to create 0-byte file // so if quality is too low, output 24-bit original pngquant_error write_retval = write_image(NULL, &input_image_rwpng, outname, options); if (write_retval) { retval = write_retval; } } if (input_image) liq_image_destroy(input_image); rwpng_free_image24(&input_image_rwpng); rwpng_free_image8(&output_image); return retval; } static void set_palette(liq_result *result, png8_image *output_image) { const liq_palette *palette = liq_get_palette(result); output_image->num_palette = palette->count; for(unsigned int i=0; i < palette->count; i++) { liq_color px = palette->entries[i]; output_image->palette[i] = (rwpng_rgba){.r=px.r, .g=px.g, .b=px.b, .a=px.a}; } } static bool file_exists(const char *outname) { FILE *outfile = fopen(outname, "rb"); if ((outfile ) != NULL) { fclose(outfile); return true; } return false; } /* build the output filename from the input name by inserting "-fs8" or * "-or8" before the ".png" extension (or by appending that plus ".png" if * there isn't any extension), then make sure it doesn't exist already */ static char *add_filename_extension(const char *filename, const char *newext) { size_t x = strlen(filename); char* outname = malloc(x+4+strlen(newext)+1); if (!outname) return NULL; strncpy(outname, filename, x); if (strncmp(outname+x-4, ".png", 4) == 0 || strncmp(outname+x-4, ".PNG", 4) == 0) { strcpy(outname+x-4, newext); } else { strcpy(outname+x, newext); } return outname; } static char *temp_filename(const char *basename) { size_t x = strlen(basename); char *outname = malloc(x+1+4); if (!outname) return NULL; 
strcpy(outname, basename); strcpy(outname+x, ".tmp"); return outname; } static void set_binary_mode(FILE *fp) { #if defined(WIN32) || defined(__WIN32__) setmode(fp == stdout ? 1 : 0, O_BINARY); #endif } static const char *filename_part(const char *path) { const char *outfilename = strrchr(path, '/'); if (outfilename) { return outfilename+1; } else { return path; } } static bool replace_file(const char *from, const char *to, const bool force) { #if defined(WIN32) || defined(__WIN32__) if (force) { // On Windows rename doesn't replace unlink(to); } #endif return (0 == rename(from, to)); } static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options) { FILE *outfile; char *tempname = NULL; if (options->using_stdout) { set_binary_mode(stdout); outfile = stdout; if (output_image) { verbose_printf(options, " writing %d-color image to stdout", output_image->num_palette); } else { verbose_printf(options, " writing truecolor image to stdout"); } } else { tempname = temp_filename(outname); if (!tempname) return OUT_OF_MEMORY_ERROR; if ((outfile = fopen(tempname, "wb")) == NULL) { fprintf(stderr, " error: cannot open '%s' for writing\n", tempname); free(tempname); return CANT_WRITE_ERROR; } if (output_image) { verbose_printf(options, " writing %d-color image as %s", output_image->num_palette, filename_part(outname)); } else { verbose_printf(options, " writing truecolor image as %s", filename_part(outname)); } } pngquant_error retval; #pragma omp critical (libpng) { if (output_image) { retval = rwpng_write_image8(outfile, output_image); } else { retval = rwpng_write_image24(outfile, output_image24); } } if (!options->using_stdout) { fclose(outfile); if (SUCCESS == retval) { // Image has been written to a temporary file and then moved over destination. // This makes replacement atomic and avoids damaging destination file on write error. if (!replace_file(tempname, outname, options->force)) { retval = CANT_WRITE_ERROR; } } if (retval) { unlink(tempname); } } free(tempname); if (retval && retval != TOO_LARGE_FILE) { fprintf(stderr, " error: failed writing image to %s\n", outname); } return retval; } static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool verbose) { FILE *infile; if (using_stdin) { set_binary_mode(stdin); infile = stdin; } else if ((infile = fopen(filename, "rb")) == NULL) { fprintf(stderr, " error: cannot open %s for reading\n", filename); return READ_ERROR; } pngquant_error retval; #pragma omp critical (libpng) { retval = rwpng_read_image24(infile, input_image_p, false, verbose); } if (!using_stdin) { fclose(infile); } if (retval) { fprintf(stderr, " error: cannot decode image %s\n", using_stdin ? 
"from stdin" : filename_part(filename)); return retval; } *liq_image_p = liq_image_create_rgba_rows(options, (void**)input_image_p->row_pointers, input_image_p->width, input_image_p->height, input_image_p->gamma); if (!*liq_image_p) { return OUT_OF_MEMORY_ERROR; } if (!keep_input_pixels) { if (LIQ_OK != liq_image_set_memory_ownership(*liq_image_p, LIQ_OWN_ROWS | LIQ_OWN_PIXELS)) { return OUT_OF_MEMORY_ERROR; } input_image_p->row_pointers = NULL; input_image_p->rgba_data = NULL; } return SUCCESS; } static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, png8_image *output_image) { output_image->width = liq_image_get_width(input_image); output_image->height = liq_image_get_height(input_image); output_image->gamma = liq_get_output_gamma(result); /* ** Step 3.7 [GRR]: allocate memory for the entire indexed image */ output_image->indexed_data = malloc(output_image->height * output_image->width); output_image->row_pointers = malloc(output_image->height * sizeof(output_image->row_pointers[0])); if (!output_image->indexed_data || !output_image->row_pointers) { return OUT_OF_MEMORY_ERROR; } for(size_t row = 0; row < output_image->height; row++) { output_image->row_pointers[row] = output_image->indexed_data + row * output_image->width; } const liq_palette *palette = liq_get_palette(result); // tRNS, etc. output_image->num_palette = palette->count; return SUCCESS; }
GB_ijsort.c
//------------------------------------------------------------------------------ // GB_ijsort: sort an index array I and remove duplicates //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // Sort an index array and remove duplicates. In MATLAB notation: /* [I1 I1k] = sort (I) ; Iduplicate = [(I1 (1:end-1) == I1 (2:end)), false] ; I2 = I1 (~Iduplicate) ; I2k = I1k (~Iduplicate) ; */ #include "GB_ij.h" #include "GB_sort.h" #define GB_FREE_WORK \ { \ GB_FREE_MEMORY (Count, ntasks+1, sizeof (int64_t)) ; \ GB_FREE_MEMORY (W0, ni, sizeof (GrB_Index)) ; \ GB_FREE_MEMORY (W1, ni, sizeof (GrB_Index)) ; \ GB_FREE_MEMORY (I1, ni, sizeof (GrB_Index)) ; \ GB_FREE_MEMORY (I1k, ni, sizeof (GrB_Index)) ; \ } GrB_Info GB_ijsort ( const GrB_Index *GB_RESTRICT I, // size ni, where ni > 1 always holds int64_t *GB_RESTRICT p_ni, // : size of I, output: # of indices in I2 GrB_Index *GB_RESTRICT *p_I2, // size ni2, where I2 [0..ni2-1] // contains the sorted indices with duplicates removed. GrB_Index *GB_RESTRICT *p_I2k, // output array of size ni2 GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (I != NULL) ; ASSERT (p_ni != NULL) ; ASSERT (p_I2 != NULL) ; ASSERT (p_I2k != NULL) ; //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- GrB_Index *GB_RESTRICT I1 = NULL ; GrB_Index *GB_RESTRICT I1k = NULL ; GrB_Index *GB_RESTRICT I2 = NULL ; GrB_Index *GB_RESTRICT I2k = NULL ; int64_t *GB_RESTRICT W0 = NULL ; int64_t *GB_RESTRICT W1 = NULL ; int64_t ni = *p_ni ; ASSERT (ni > 1) ; int64_t *GB_RESTRICT Count = NULL ; // size ntasks+1 int ntasks = 0 ; //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (ni, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- GB_MALLOC_MEMORY (I1, ni, sizeof (GrB_Index)) ; GB_MALLOC_MEMORY (I1k, ni, sizeof (GrB_Index)) ; if (I1 == NULL || I1k == NULL) { // out of memory GB_FREE_WORK ; return (GB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // copy I into I1 and construct I1k //-------------------------------------------------------------------------- GB_memcpy (I1, I, ni * sizeof (GrB_Index), nthreads) ; int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (k = 0 ; k < ni ; k++) { // the key is selected so that the last duplicate entry comes first in // the sorted result. It must be adjusted later, so that the kth entry // has a key equal to k. 
I1k [k] = (ni-k) ; } //-------------------------------------------------------------------------- // sort [I1 I1k] //-------------------------------------------------------------------------- // determine # of threads to use in the parallel mergesort int nth = GB_MSORT_NTHREADS (nthreads) ; if (nth > 1) { GB_MALLOC_MEMORY (W0, ni, sizeof (int64_t)) ; GB_MALLOC_MEMORY (W1, ni, sizeof (int64_t)) ; if (W0 == NULL || W1 == NULL) { // out of memory GB_FREE_WORK ; return (GB_OUT_OF_MEMORY) ; } } GB_msort_2 ((int64_t *) I1, (int64_t *) I1k, W0, W1, ni, nth) ; GB_FREE_MEMORY (W0, ni, sizeof (int64_t)) ; GB_FREE_MEMORY (W1, ni, sizeof (int64_t)) ; //-------------------------------------------------------------------------- // determine number of tasks to create //-------------------------------------------------------------------------- ntasks = (nthreads == 1) ? 1 : (32 * nthreads) ; ntasks = GB_IMIN (ntasks, ni) ; ntasks = GB_IMAX (ntasks, 1) ; //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- GB_MALLOC_MEMORY (Count, ntasks+1, sizeof (int64_t)) ; if (Count == NULL) { // out of memory GB_FREE_WORK ; return (GB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // count unique entries in I1 //-------------------------------------------------------------------------- int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { int64_t kfirst, klast, my_count = (tid == 0) ? 1 : 0 ; GB_PARTITION (kfirst, klast, ni, tid, ntasks) ; for (int64_t k = GB_IMAX (kfirst,1) ; k < klast ; k++) { if (I1 [k-1] != I1 [k]) { my_count++ ; } } Count [tid] = my_count ; } GB_cumsum (Count, ntasks, NULL, 1) ; int64_t ni2 = Count [ntasks] ; //-------------------------------------------------------------------------- // allocate the result I2 //-------------------------------------------------------------------------- GB_MALLOC_MEMORY (I2 , ni2, sizeof (GrB_Index)) ; GB_MALLOC_MEMORY (I2k, ni2, sizeof (GrB_Index)) ; if (I2 == NULL || I2k == NULL) { // out of memory GB_FREE_WORK ; GB_FREE_MEMORY (I2 , ni2, sizeof (GrB_Index)) ; GB_FREE_MEMORY (I2k, ni2, sizeof (GrB_Index)) ; return (GB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // construct the new list I2 from I1, removing duplicates //-------------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { int64_t kfirst, klast, k2 = Count [tid] ; GB_PARTITION (kfirst, klast, ni, tid, ntasks) ; if (tid == 0) { // the first entry in I1 is never a duplicate I2 [k2] = I1 [0] ; I2k [k2] = (ni - I1k [0]) ; k2++ ; } for (int64_t k = GB_IMAX (kfirst,1) ; k < klast ; k++) { if (I1 [k-1] != I1 [k]) { I2 [k2] = I1 [k] ; I2k [k2] = ni - I1k [k] ; k2++ ; } } } //-------------------------------------------------------------------------- // check result: compare with single-pass, single-threaded algorithm //-------------------------------------------------------------------------- #ifdef GB_DEBUG { int64_t ni1 = 1 ; I1k [0] = ni - I1k [0] ; for (int64_t k = 1 ; k < ni ; k++) { if (I1 [ni1-1] != I1 [k]) { I1 [ni1] = I1 [k] ; I1k [ni1] = ni - I1k [k] ; ni1++ ; } } ASSERT (ni1 == ni2) ; for (int64_t k = 0 ; k < ni1 ; k++) { ASSERT (I1 [k] == I2 [k]) ; ASSERT (I1k [k] == I2k [k]) ; } } #endif 
//-------------------------------------------------------------------------- // free workspace and return the new sorted list //-------------------------------------------------------------------------- GB_FREE_WORK ; *(p_I2 ) = (GrB_Index *) I2 ; *(p_I2k) = (GrB_Index *) I2k ; *(p_ni ) = (int64_t ) ni2 ; return (GrB_SUCCESS) ; }
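The MATLAB-notation comment at the top of GB_ijsort specifies the semantics: sort, drop duplicates, and remember where each surviving index came from. Ignoring the parallel mergesort, the key trick that keeps the last duplicate, and the I2k output, a single-threaded sketch of the core sort-and-compact step (essentially what the GB_DEBUG block re-checks) is:

#include <stdint.h>
#include <stdlib.h>

static int cmp_u64 (const void *a, const void *b)
{
    uint64_t x = *(const uint64_t *) a, y = *(const uint64_t *) b ;
    return ((x < y) ? -1 : (x > y)) ;
}

// Sort I of size ni (ni >= 1) in place and return the count of unique
// entries, compacted to the front of I.
static int64_t sort_unique (uint64_t *I, int64_t ni)
{
    qsort (I, (size_t) ni, sizeof (uint64_t), cmp_u64) ;
    int64_t ni2 = 1 ;
    for (int64_t k = 1 ; k < ni ; k++)
    {
        if (I [k] != I [ni2-1]) I [ni2++] = I [k] ;
    }
    return (ni2) ;
}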
omp3-2-1.c
#include<stdio.h>

#ifndef N
#define N 5000
#endif
#define M 1000000000

int a[N][N], b[N][N];

int main() {
    int i, j, sum;
    #pragma omp parallel sections private(i, j)
    {
        #pragma omp section
        {
            for (i = 0; i < N; i++)
                for (j = 0; j < N; j++)
                    a[i][j] = i + j;
        }
        #pragma omp section
        {
            for (i = 0; i < N; i++)
                for (j = 0; j < N; j++)
                    b[i][j] = i - j;
        }
    }
    sum = 0;
    for (i = 0; i < N; i++)
        for (j = 0; j < N; j++) {
            sum += a[i][j];
            sum %= M;
        }
    printf("%d\n", sum);
    sum = 0;
    for (i = 0; i < N; i++)
        for (j = 0; j < N; j++) {
            sum += b[i][j];
            sum %= M;
        }
    printf("%d\n", sum);
    return 0;
}
test-double-libmvec-sincos-main.c
/* Test for vector sincos ABI.
   Copyright (C) 2016-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <math.h>

#define N 1000
double x[N], s[N], c[N];
double* s_ptrs[N];
double* c_ptrs[N];

int
test_sincos_abi (void)
{
  int i;

  for(i = 0; i < N; i++)
    {
      x[i] = i / 3;
      s_ptrs[i] = &s[i];
      c_ptrs[i] = &c[i];
    }

#pragma omp simd
  for(i = 0; i < N; i++)
    sincos (x[i], s_ptrs[i], c_ptrs[i]);

  return 0;
}
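The `#pragma omp simd` loop above can only vectorize the sincos calls because glibc's math.h advertises a SIMD variant of sincos on libmvec targets. The same mechanism is available to user code through `declare simd`; a sketch with an assumed helper name my_sincos (the linear(s, c) clause promises lane i receives s+i and c+i, which matches the consecutive-element pointers the test sets up):

#include <math.h>

/* the compiler may emit a vector variant of this function and call it
   lane-wise from any enclosing `omp simd` loop */
#pragma omp declare simd linear(s, c) notinbranch
void my_sincos(double v, double *s, double *c)
{
    *s = sin(v);
    *c = cos(v);
}

void run(int n, const double *x, double *s, double *c)
{
    #pragma omp simd
    for (int i = 0; i < n; i++)
        my_sincos(x[i], &s[i], &c[i]);
}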
DRB064-outeronly2-orig-no.c
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.

This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the disclaimer below.

* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the disclaimer (as noted below)
  in the documentation and/or other materials provided with the
  distribution.

* Neither the name of the LLNS/LLNL nor the names of its contributors
  may be used to endorse or promote products derived from this
  software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/* Only the outermost loop can be parallelized. The inner loop has a
   loop-carried true data dependence. However, the inner loop is not
   parallelized, so there is no race condition. */

int n=100, m=100;
double b[100][100];

void foo()
{
  int i,j;
#pragma omp parallel for private(j)
  for (i=0;i<n;i++)
    for (j=1;j<m;j++) // Be careful about bounds of j
      b[i][j]=b[i][j-1];
}

int main()
{
  foo();
  return 0;
}
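For contrast, a sketch of the incorrect parallelization this benchmark guards against: moving the pragma to the inner loop races, because iteration j reads b[i][j-1] while the thread running iteration j-1 may still be writing it (this reuses the globals n, m, and b declared above; foo_racy is my name):

/* WRONG on purpose: the inner loop carries a true dependence across j */
void foo_racy()
{
  int i,j;
  for (i=0;i<n;i++)
  {
    #pragma omp parallel for
    for (j=1;j<m;j++)
      b[i][j]=b[i][j-1];   // data race: reads a value another thread writes
  }
}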
chan_demo2.c
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define data_t int64_t
#define prefix int64
#include <chan.h>
#undef prefix
#undef data_t

void produce(chan_int64_t *ch, int64_t n) {
    for (int64_t i = 1; i <= n; i++) {
        chan_int64_send(ch, i);
    }
    chan_int64_close(ch);
}

void square_em(chan_int64_t *in, chan_int64_t *out) {
    int64_t x;
    while (chan_int64_recv(in, &x) == CHAN_SUCCESS) {
        chan_int64_send(out, x * x);
    }
    chan_int64_close(out);
}

void sum_em(chan_int64_t *in, chan_int64_t *out) {
    int64_t total = 0;
    int64_t x;
    while (chan_int64_recv(in, &x) == CHAN_SUCCESS) {
        total += x;
    }
    chan_int64_send(out, total);
    chan_int64_close(out);
}

int main(void) {
    chan_int64_t *ch1 = chan_int64_init(10);
    chan_int64_t *ch2 = chan_int64_init(10);
    chan_int64_t *ch3 = chan_int64_init(10);

    #pragma omp parallel
    {
        #pragma omp sections
        {
            #pragma omp section
            produce(ch1, 1000);
            #pragma omp section
            square_em(ch1, ch2);
            #pragma omp section
            sum_em(ch2, ch3);
        }
    }

    int64_t total;
    chan_int64_recv(ch3, &total);
    /* PRId64 rather than %ld: int64_t is not `long` on every platform */
    printf("%" PRId64 "\n", total);

    chan_int64_destroy(&ch1);
    chan_int64_destroy(&ch2);
    chan_int64_destroy(&ch3);
    return 0;
}
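One caveat worth flagging: with capacity-10 channels and 1000 items, the three stages must run concurrently, so a team of fewer than three threads deadlocks (produce blocks on a full ch1 before square_em ever starts). A defensive guard, as a sketch (omp_get_max_threads is standard OpenMP; pipeline_threads_ok is my name):

#include <omp.h>
#include <stdio.h>

/* call at the top of main(), before entering the parallel sections */
static int pipeline_threads_ok(void)
{
    if (omp_get_max_threads() < 3) {
        fprintf(stderr, "chan_demo2: need at least 3 OpenMP threads, "
                        "otherwise the capacity-10 channels deadlock\n");
        return 0;
    }
    return 1;
}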
conv_kernel_x86.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: quanwang@openailab.com */ #include "conv_kernel_x86.h" #include "wino_conv_kernel_x86.h" #include "graph/tensor.h" #include "graph/node.h" #include "graph/graph.h" #include "utility/sys_port.h" #include "utility/float.h" #include "utility/log.h" #include "device/cpu/cpu_node.h" #include "device/cpu/cpu_graph.h" #include "device/cpu/cpu_module.h" #include <stdint.h> #include <stdlib.h> #include <string.h> #include <math.h> #if __AVX__ #include <immintrin.h> #endif #ifndef _MSC_VER #include <sys/time.h> #define max(a, b) ((a) > (b) ? (a) : (b)) #define min(a, b) ((a) < (b) ? (a) : (b)) #endif static int get_private_mem_size(struct tensor* filter) { if (filter->data_type == TENGINE_DT_UINT8) // simulator uint8 inference with fp32 return filter->elem_num * filter->elem_size * 4; else return filter->elem_num * filter->elem_size; // caution } static void interleave(struct tensor* filter, struct conv_priv_info* priv_info) { /* simply copy the data */ memcpy(priv_info->interleave_buffer, filter->data, filter->elem_num * filter->elem_size); } static void interleave_uint8(struct tensor* filter, struct conv_priv_info* priv_info) { /* dequant uint8 weight to fp32 for simulator */ float* weight_fp32 = (float* )priv_info->interleave_buffer; uint8_t* weight_uint8 = (uint8_t*)filter->data; float scale = filter->scale; int zero_point = filter->zero_point; for (int i = 0; i < filter->elem_num; i++) { weight_fp32[i] = ((float)weight_uint8[i] - (float)zero_point) * scale; } } void im2col_fp32(float* data_img, float* data_col, int inh, int inw, int inc, int outh, int outw, int ksize_h, int ksize_w, int sh, int sw, int ph, int pw, int dh, int dw) { const int channels_col = ksize_h * ksize_w * inc; for (int c = 0; c < channels_col; ++c) { const int kw = c % ksize_w; int c_ = c / ksize_w; const int kh = c_ % ksize_h; c_ = c_ / ksize_h; const int im_col = kw * dw - pw; const int w_low = max(0, -im_col / sw + (-im_col % sw > 0)); const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0)); for (int h = 0; h < outh; ++h) { const int im_row = kh * dh + h * sh - ph; float* out = data_col + (c * outh + h) * outw; const float* end = out + w_high; if (im_row >= 0 && im_row < inh) { float* in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw; memset(out, 0, w_low * sizeof(float)); out += w_low; while (out < end) { in += sw; *(out++) = *in; } memset(out, 0, (outw - w_high) * sizeof(float)); } else { memset(out, 0, outw * sizeof(float)); } } } } void im2col_uint8(uint8_t* data_img, float* data_col, struct tensor* input_tensor, struct tensor* output_tensor, struct conv_param* param) { int ksize_h = param->kernel_h; int ksize_w = param->kernel_w; int inc = param->input_channel / 
param->group; int sh = param->stride_h; int sw = param->stride_w; int ph = param->pad_h0; int pw = param->pad_w0; int dh = param->dilation_h; int dw = param->dilation_w; int inh = input_tensor->dims[2]; int inw = input_tensor->dims[3]; int outh = output_tensor->dims[2]; int outw = output_tensor->dims[3]; float scale = input_tensor->scale; int zero_point = input_tensor->zero_point; const int channels_col = ksize_h * ksize_w * inc; for (int c = 0; c < channels_col; ++c) { const int kw = c % ksize_w; int c_ = c / ksize_w; const int kh = c_ % ksize_h; c_ = c_ / ksize_h; const int im_col = kw * dw - pw; const int w_low = max(0, -im_col / sw + (-im_col % sw > 0)); const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0)); for (int h = 0; h < outh; ++h) { const int im_row = kh * dh + h * sh - ph; float* out = data_col + (c * outh + h) * outw; const float* end = out + w_high; if (im_row >= 0 && im_row < inh) { uint8_t * in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw; memset(out, 0, w_low * sizeof(float)); out += w_low; while (out < end) { in += sw; float in_fp32 = ((float)in[0] - (float)zero_point) * scale; out[0] = in_fp32; out++; } memset(out, 0, (outw - w_high) * sizeof(float)); } else { memset(out, 0, outw * sizeof(float)); } } } } void im2col_int8(int8_t* data_img, int8_t* data_col, struct tensor* input_tensor, struct tensor* output_tensor, struct conv_param* param) { int ksize_h = param->kernel_h; int ksize_w = param->kernel_w; int inc = param->input_channel / param->group; int sh = param->stride_h; int sw = param->stride_w; int ph = param->pad_h0; int pw = param->pad_w0; int dh = param->dilation_h; int dw = param->dilation_w; int inh = input_tensor->dims[2]; int inw = input_tensor->dims[3]; int outh = output_tensor->dims[2]; int outw = output_tensor->dims[3]; const int channels_col = ksize_h * ksize_w * inc; for (int c = 0; c < channels_col; ++c) { const int kw = c % ksize_w; int c_ = c / ksize_w; const int kh = c_ % ksize_h; c_ = c_ / ksize_h; const int im_col = kw * dw - pw; const int w_low = max(0, -im_col / sw + (-im_col % sw > 0)); const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0)); for (int h = 0; h < outh; ++h) { const int im_row = kh * dh + h * sh - ph; int8_t * out = data_col + (c * outh + h) * outw; const int8_t * end = out + w_high; if (im_row >= 0 && im_row < inh) { int8_t * in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw; memset(out, 0, w_low * sizeof(int8_t)); out += w_low; while (out < end) { in += sw; out[0] = in[0]; out++; } memset(out, 0, (outw - w_high) * sizeof(int8_t)); } else { memset(out, 0, outw * sizeof(int8_t)); } } } } static void im2col_ir(struct tensor* input, struct tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group) { int input_chan = param->input_channel / param->group; int image_size = input->dims[1] * input->dims[2] * input->dims[3]; int group_size = input_chan * input->dims[2] * input->dims[3]; void* input_base = (void*)((uint8_t*)input->data + (n * image_size + group * group_size) * input->elem_size); void* im2col_buf = (void*)priv_info->im2col_buffer; if (input->data_type == TENGINE_DT_FP32) { im2col_fp32(input_base, im2col_buf, input->dims[2], input->dims[3], input_chan, output->dims[2], output->dims[3], param->kernel_h, param->kernel_w, param->stride_h, param->stride_w, param->pad_h0, param->pad_w0, param->dilation_h, param->dilation_w); } else if (input->data_type == TENGINE_DT_UINT8) { im2col_uint8(input_base, 
im2col_buf, input, output, param); } else if (input->data_type == TENGINE_DT_INT8) { im2col_int8(input_base, im2col_buf, input, output, param); } else { TLOG_ERR("Input data type %d not to be supported.\n", input->data_type); } } void input_pack4_fp32(int K, int N, float* pB, float* pB_t, int num_thread) { int nn_size = N >> 3; int remian_size_start = nn_size << 3; // [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....] #pragma omp parallel for num_threads(num_thread) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const float* img = pB + i; float* tmp = pB_t + (i / 8) * 8 * K; for (int j = 0; j < K; j++) { #if __AVX__ _mm256_storeu_ps(tmp, _mm256_loadu_ps(img)); #else tmp[0] = img[0]; tmp[1] = img[1]; tmp[2] = img[2]; tmp[3] = img[3]; tmp[4] = img[4]; tmp[5] = img[5]; tmp[6] = img[6]; tmp[7] = img[7]; #endif // __SSE__ tmp += 8; img += N; } } // [ch00, ch01, ch02, ch03 ....] #pragma omp parallel for num_threads(num_thread) for (int i = remian_size_start; i < N; i++) { const float* img = pB + i; float* tmp = pB_t + (i / 8 + i % 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp += 1; img += N; } } } static void sgemm_fp(int M, int N, int K, float* pA_t, float* pB_t, float* pC, int num_thread) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = M >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(num_thread) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 8; float* output0 = pC + ( i )*N; float* output1 = pC + (i + 1) * N; float* output2 = pC + (i + 2) * N; float* output3 = pC + (i + 3) * N; float* output4 = pC + (i + 4) * N; float* output5 = pC + (i + 5) * N; float* output6 = pC + (i + 6) * N; float* output7 = pC + (i + 7) * N; int j = 0; for (; j + 7 < N; j += 8) { float* va = pA_t + (i / 8) * 8 * K; float* vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = _mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); __m256 _sum4 = _mm256_set1_ps(0.0); __m256 _sum5 = _mm256_set1_ps(0.0); __m256 _sum6 = _mm256_set1_ps(0.0); __m256 _sum7 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb0, _va0, _sum4); // sum4 = (a00-a07) * k40 _sum5 = _mm256_fmadd_ps(_vb0, _va1, _sum5); // sum5 = (a00-a07) * k50 _sum6 = _mm256_fmadd_ps(_vb0, _va2, _sum6); // sum6 = (a00-a07) * k60 _sum7 = _mm256_fmadd_ps(_vb0, _va3, _sum7); // sum7 = (a00-a07) * k70 va += 8; // k1 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01 _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11 _sum2 = 
_mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21 _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb1, _va0, _sum4); // sum4 += (a10-a17) * k41 _sum5 = _mm256_fmadd_ps(_vb1, _va1, _sum5); // sum5 += (a10-a17) * k51 _sum6 = _mm256_fmadd_ps(_vb1, _va2, _sum6); // sum6 += (a10-a17) * k61 _sum7 = _mm256_fmadd_ps(_vb1, _va3, _sum7); // sum7 += (a10-a17) * k71 va += 8; // k2 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02 _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12 _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22 _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb2, _va0, _sum4); // sum4 += (a20-a27) * k42 _sum5 = _mm256_fmadd_ps(_vb2, _va1, _sum5); // sum5 += (a20-a27) * k52 _sum6 = _mm256_fmadd_ps(_vb2, _va2, _sum6); // sum6 += (a20-a27) * k62 _sum7 = _mm256_fmadd_ps(_vb2, _va3, _sum7); // sum7 += (a20-a27) * k72 va += 8; // k3 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03 _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13 _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23 _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb3, _va0, _sum4); // sum4 += (a30-a37) * k43 _sum5 = _mm256_fmadd_ps(_vb3, _va1, _sum5); // sum5 += (a30-a37) * k53 _sum6 = _mm256_fmadd_ps(_vb3, _va2, _sum6); // sum6 += (a30-a37) * k63 _sum7 = _mm256_fmadd_ps(_vb3, _va3, _sum7); // sum7 += (a30-a37) * k73 va += 8; vb += 32; } for (; k < K; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _va4 = _mm256_broadcast_ss(va + 4); __m256 _va5 = _mm256_broadcast_ss(va + 5); __m256 _va6 = _mm256_broadcast_ss(va + 6); __m256 _va7 = _mm256_broadcast_ss(va + 7); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 _sum4 = _mm256_fmadd_ps(_vb0, _va4, _sum4); // sum4 = (a00-a07) * k40 _sum5 = _mm256_fmadd_ps(_vb0, _va5, _sum5); // sum5 = (a00-a07) * k50 _sum6 = _mm256_fmadd_ps(_vb0, _va6, _sum6); // sum6 = (a00-a07) * k60 _sum7 = _mm256_fmadd_ps(_vb0, _va7, _sum7); // sum7 = (a00-a07) * k70 va += 8; vb += 8; } _mm256_storeu_ps(output0, _sum0); _mm256_storeu_ps(output1, _sum1); _mm256_storeu_ps(output2, _sum2); _mm256_storeu_ps(output3, _sum3); _mm256_storeu_ps(output4, _sum4); _mm256_storeu_ps(output5, _sum5); _mm256_storeu_ps(output6, _sum6); 
_mm256_storeu_ps(output7, _sum7); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; float sum4[8] = {0}; float sum5[8] = {0}; float sum6[8] = {0}; float sum7[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; output4[n] = sum4[n]; output5[n] = sum5[n]; output6[n] = sum6[n]; output7[n] = sum7[n]; } #endif // __AVX__ output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j < N; j++) { float* va = pA_t + (i / 8) * 8 * K; float* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if __AVX__ __m256 _sum0_7 = _mm256_set1_ps(0.0); __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = _mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256 _vb0 = _mm256_broadcast_ss(vb); __m256 _vb1 = _mm256_broadcast_ss(vb + 1); __m256 _vb2 = _mm256_broadcast_ss(vb + 2); __m256 _vb3 = _mm256_broadcast_ss(vb + 3); __m256 _va0 = _mm256_loadu_ps(va); __m256 _va1 = _mm256_loadu_ps(va + 8); __m256 _va2 = _mm256_loadu_ps(va + 16); __m256 _va3 = _mm256_loadu_ps(va + 24); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k70) * a00 _sum1 = _mm256_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k71) * a10 _sum2 = _mm256_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k72) * a20 _sum3 = _mm256_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k73) * a30 va += 32; vb += 4; } _sum0 = _mm256_add_ps(_sum0, _sum1); _sum2 = _mm256_add_ps(_sum2, _sum3); _sum0_7 = _mm256_add_ps(_sum0_7, _sum0); _sum0_7 = _mm256_add_ps(_sum0_7, _sum2); for (; k < K; k++) { __m256 _vb0 = _mm256_broadcast_ss(vb); __m256 _va = _mm256_loadu_ps(va); _sum0_7 = _mm256_fmadd_ps(_va, _vb0, _sum0_7); // sum0 += (k00-k70) * a00 va += 8; vb += 1; } float output_sum0_7[8] = {0.f}; _mm256_storeu_ps(output_sum0_7, _sum0_7); output0[0] = output_sum0_7[0]; output1[0] = output_sum0_7[1]; output2[0] = output_sum0_7[2]; output3[0] = output_sum0_7[3]; output4[0] = output_sum0_7[4]; output5[0] = output_sum0_7[5]; output6[0] = output_sum0_7[6]; output7[0] = output_sum0_7[7]; #else float sum0 = 0; float sum1 = 0; float sum2 = 0; float sum3 = 0; float sum4 = 0; float sum5 = 0; float sum6 = 0; float sum7 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif // __AVX__ output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int i = remain_outch_start + pp * 4; float* output0 = pC + ( i )*N; float* output1 = pC + (i + 1) * N; float* output2 = pC + (i + 2) * N; float* output3 = pC + (i + 3) * N; int j = 0; for (; j + 7 < N; j += 8) { float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; float* vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = 
_mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 va += 4; // k1 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01 _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11 _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21 _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31 va += 4; // k2 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02 _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12 _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22 _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32 va += 4; // k3 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03 _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13 _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23 _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33 va += 4; vb += 32; } for (; k < K; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 va += 4; vb += 8; } _mm256_storeu_ps(output0, _sum0); _mm256_storeu_ps(output1, _sum1); _mm256_storeu_ps(output2, _sum2); _mm256_storeu_ps(output3, _sum3); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } #endif // __AVX__ output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j < N; j++) { float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; float* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if __AVX__ __m128 _sum0_3 = _mm_set1_ps(0.0); __m128 _sum0 = _mm_set1_ps(0.0); __m128 _sum1 = _mm_set1_ps(0.0); __m128 _sum2 = _mm_set1_ps(0.0); __m128 _sum3 = _mm_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { 
__m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _vb1 = _mm_set1_ps(vb[1]); __m128 _vb2 = _mm_set1_ps(vb[2]); __m128 _vb3 = _mm_set1_ps(vb[3]); __m128 _va0 = _mm_loadu_ps(va); __m128 _va1 = _mm_loadu_ps(va + 4); __m128 _va2 = _mm_loadu_ps(va + 8); __m128 _va3 = _mm_loadu_ps(va + 12); _sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k30) * a00 _sum1 = _mm_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k31) * a10 _sum2 = _mm_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k32) * a20 _sum3 = _mm_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k33) * a30 va += 16; vb += 4; } _sum0 = _mm_add_ps(_sum0, _sum1); _sum2 = _mm_add_ps(_sum2, _sum3); _sum0_3 = _mm_add_ps(_sum0_3, _sum0); _sum0_3 = _mm_add_ps(_sum0_3, _sum2); for (; k < K; k++) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _va = _mm_loadu_ps(va); _sum0_3 = _mm_fmadd_ps(_va, _vb0, _sum0_3); // sum0 += (k00-k30) * a00 va += 4; vb += 1; } float output_sum0_3[4] = {0.f}; _mm_storeu_ps(output_sum0_3, _sum0_3); output0[0] = output_sum0_3[0]; output1[0] = output_sum0_3[1]; output2[0] = output_sum0_3[2]; output3[0] = output_sum0_3[3]; #else float sum0 = 0; float sum1 = 0; float sum2 = 0; float sum3 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif // __AVX__ output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; // output ch0 for (int i = remain_outch_start; i < M; i++) { float* output = pC + i * N; int j = 0; for (; j + 7 < N; j += 8) { float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; float* vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256 _sum0 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum0 = _mm256_fmadd_ps(_vb1, _va1, _sum0); // sum0 += (a10-a17) * k01 _sum0 = _mm256_fmadd_ps(_vb2, _va2, _sum0); // sum0 += (a20-a27) * k02 _sum0 = _mm256_fmadd_ps(_vb3, _va3, _sum0); // sum0 += (a30-a37) * k03 va += 4; vb += 32; } for (; k < K; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 va += 1; vb += 8; } _mm256_storeu_ps(output, _sum0); #else float sum[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n = 0; n < 8; n++) { output[n] = sum[n]; } #endif // __AVX__ output += 8; } for (; j < N; j++) { float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; float* vb = pB_t + (j / 8 + j % 8) * 8 * K; int k = 0; #if __AVX__ __m128 _sum0 = _mm_set1_ps(0.f); for (; k + 3 < K; k += 4) { __m128 _p0 = _mm_loadu_ps(vb); __m128 _k0 = _mm_loadu_ps(va); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0)); va += 4; vb += 4; } #ifdef _WIN32 float sum0 = _sum0.m128_f32[0] + _sum0.m128_f32[1] + _sum0.m128_f32[2] + _sum0.m128_f32[3]; #else float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3]; #endif #else float sum0 = 0.f; #endif // __AVX__ for (; k < K; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } void input_pack4_int8(int K, int N, 
int8_t* pB, int8_t* pB_t, int num_thread) { int nn_size = N >> 3; int remian_size_start = nn_size << 3; // [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....] #pragma omp parallel for num_threads(num_thread) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const int8_t* img = pB + i; int8_t* tmp = pB_t + (i / 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp[1] = img[1]; tmp[2] = img[2]; tmp[3] = img[3]; tmp[4] = img[4]; tmp[5] = img[5]; tmp[6] = img[6]; tmp[7] = img[7]; tmp += 8; img += N; } } // [ch00, ch01, ch02, ch03 ....] #pragma omp parallel for num_threads(num_thread) for (int i = remian_size_start; i < N; i++) { const int8_t* img = pB + i; int8_t* tmp = pB_t + (i / 8 + i % 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp += 1; img += N; } } } static void sgemm_i8(int M, int N, int K, int8_t* pA_t, int8_t* pB_t, int32_t* pC, int num_thread) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = M >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(num_thread) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 8; int32_t* output0 = pC + ( i )*N; int32_t* output1 = pC + (i + 1) * N; int32_t* output2 = pC + (i + 2) * N; int32_t* output3 = pC + (i + 3) * N; int32_t* output4 = pC + (i + 4) * N; int32_t* output5 = pC + (i + 5) * N; int32_t* output6 = pC + (i + 6) * N; int32_t* output7 = pC + (i + 7) * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); __m256i _sum4 = _mm256_set1_epi32(0); __m256i _sum5 = _mm256_set1_epi32(0); __m256i _sum6 = _mm256_set1_epi32(0); __m256i _sum7 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { // k0 __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum7); va += 8; // k1 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 
= _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum7); va += 8; // k2 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum7); va += 8; // k3 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum7); va += 8; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _va4 = _mm256_set1_epi32(*(va + 4)); __m256i _va5 = _mm256_set1_epi32(*(va + 5)); __m256i _va6 = _mm256_set1_epi32(*(va + 6)); __m256i _va7 = _mm256_set1_epi32(*(va + 7)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va4), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va5), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va6), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va7), _sum7); va += 8; vb += 8; } _mm256_storeu_si256((__m256i* )output0, _sum0); _mm256_storeu_si256((__m256i* )output1, _sum1); _mm256_storeu_si256((__m256i* )output2, _sum2); _mm256_storeu_si256((__m256i* )output3, _sum3); _mm256_storeu_si256((__m256i* )output4, _sum4); _mm256_storeu_si256((__m256i* )output5, _sum5); _mm256_storeu_si256((__m256i* )output6, _sum6); _mm256_storeu_si256((__m256i* )output7, _sum7); #else int32_t sum0[8] = {0}; int32_t sum1[8] = {0}; int32_t sum2[8] = {0}; int32_t sum3[8] = {0}; int32_t sum4[8] = {0}; int32_t sum5[8] = {0}; int32_t sum6[8] = {0}; int32_t sum7[8] = 
{0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; output4[n] = sum4[n]; output5[n] = sum5[n]; output6[n] = sum6[n]; output7[n] = sum7[n]; } #endif output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8) * 8 * K; int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0_7 = _mm256_set1_epi32(0); __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _vb1 = _mm256_set1_epi32(*(vb + 1)); __m256i _vb2 = _mm256_set1_epi32(*(vb + 2)); __m256i _vb3 = _mm256_set1_epi32(*(vb + 3)); __m256i _va0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); __m256i _va1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 8))); __m256i _va2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 16))); __m256i _va3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_va0, _vb0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_va1, _vb1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_va2, _vb2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_va3, _vb3), _sum3); va += 32; vb += 4; } _sum0 = _mm256_add_epi32(_sum0, _sum1); _sum2 = _mm256_add_epi32(_sum2, _sum3); _sum0_7 = _mm256_add_epi32(_sum0_7, _sum0); _sum0_7 = _mm256_add_epi32(_sum0_7, _sum2); for (; k < K; k++) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _va = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); _sum0_7 = _mm256_add_epi32(_mm256_mullo_epi32(_va, _vb0), _sum0_7); va += 8; vb += 1; } int32_t output_sum0_7[8] = {0}; _mm256_storeu_si256((__m256i* )output_sum0_7, _sum0_7); output0[0] = output_sum0_7[0]; output1[0] = output_sum0_7[1]; output2[0] = output_sum0_7[2]; output3[0] = output_sum0_7[3]; output4[0] = output_sum0_7[4]; output5[0] = output_sum0_7[5]; output6[0] = output_sum0_7[6]; output7[0] = output_sum0_7[7]; #else int32_t sum0 = 0; int32_t sum1 = 0; int32_t sum2 = 0; int32_t sum3 = 0; int32_t sum4 = 0; int32_t sum5 = 0; int32_t sum6 = 0; int32_t sum7 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int i = remain_outch_start + pp * 4; int32_t* output0 = pC + ( i )*N; int32_t* output1 = pC + (i + 1) * N; int32_t* output2 = pC + (i + 2) * N; int32_t* output3 = pC + (i + 3) * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = 
_mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = K + 4) { // k0 __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); va += 4; // k1 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum3); va += 4; // k2 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum3); va += 4; // k3 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum3); va += 4; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); va += 4; vb += 8; } _mm256_storeu_si256((__m256i* )output0, _sum0); _mm256_storeu_si256((__m256i* )output1, _sum1); _mm256_storeu_si256((__m256i* )output2, _sum2); _mm256_storeu_si256((__m256i* )output3, _sum3); #else int32_t sum0[8] = {0}; int32_t sum1[8] = {0}; int32_t sum2[8] = {0}; int32_t sum3[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } #endif output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0_3 = _mm256_set1_epi32(0); __m256i _sum0 = 
_mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k=0; for (; k + 3 < K; k = k + 4) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _vb1 = _mm256_set1_epi32(*(vb + 1)); __m256i _vb2 = _mm256_set1_epi32(*(vb + 2)); __m256i _vb3 = _mm256_set1_epi32(*(vb + 3)); __m256i _va0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); __m256i _va1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 4))); __m256i _va2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 8))); __m256i _va3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 12))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_va0, _vb0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_va1, _vb1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_va2, _vb2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_va3, _vb3), _sum3); va+=16; vb+=4; } _sum0 = _mm256_add_epi32(_sum0, _sum1); _sum2 = _mm256_add_epi32(_sum2, _sum3); _sum0_3 = _mm256_add_epi32(_sum0_3, _sum0); _sum0_3 = _mm256_add_epi32(_sum0_3, _sum2); for (; k < K; k++) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _va = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); _sum0_3 = _mm256_add_epi32(_mm256_mullo_epi32(_va, _vb0), _sum0_3); va += 4; vb += 1; } //drop last 4 value int32_t output_sum0_3[4] = {0}; _mm256_storeu_si256((__m256i* )output_sum0_3, _sum0_3); output0[0] = output_sum0_3[0]; output1[0] = output_sum0_3[1]; output2[0] = output_sum0_3[2]; output3[0] = output_sum0_3[3]; #else int32_t sum0 = 0; int32_t sum1 = 0; int32_t sum2 = 0; int32_t sum3 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; // output ch0 for (int i = remain_outch_start; i < M; i++) { int32_t* output = pC + i * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum0); va += 4; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); va += 1; vb += 8; } _mm256_storeu_si256((__m256i* )output, _sum0); #else int32_t sum[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n = 0; n < 8; n++) { output[n] = sum[n]; } #endif output += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; int8_t* vb = 
pB_t + (j / 8 + j % 8) * 8 * K; int k = 0; int32_t sum0 = 0.f; for (; k < K; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } static void sgemm_fp32(struct tensor* input, struct tensor* filter, struct tensor* bias, struct tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; float* interleave_fp32 = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4; float* output_fp32 = ( float* )output->data + n * out_image_size + outchan_g * group * out_h * out_w; float* bias_fp32 = NULL; if (bias) bias_fp32 = ( float* )bias->data + outchan_g * group; float* filter_sgemm = interleave_fp32; float* input_sgemm_pack4 = im2col_pack4_fp32; float* output_sgemm = output_fp32; sgemm_fp(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread); // process bias if (bias) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; output_fp32[output_off] += bias_fp32[i]; } } } // process activation relu if (param->activation == 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; } } } // process activation relu6 if (param->activation > 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; if (output_fp32[output_off] > 6) output_fp32[output_off] = 6; } } } } static void sgemm_uint8(struct tensor* input, struct tensor* filter, struct tensor* bias, struct tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; float* interleave_fp32 = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4; uint8_t * output_uint8 = ( uint8_t* )output->data + n * out_image_size + outchan_g * group * out_h * out_w; int* bias_int32 = NULL; float bias_scale = 0.f; if (bias) { bias_int32 = ( int* )bias->data + outchan_g * group; bias_scale = input->scale * filter->scale; } float* filter_sgemm = interleave_fp32; float* input_sgemm_pack4 = im2col_pack4_fp32; float* output_sgemm = (float*)sys_malloc(outchan_g * out_h * out_w * sizeof(float)); sgemm_fp(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread); /* process bias */ if (bias) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; output_sgemm[output_off] += (float )bias_int32[i] * bias_scale; } } } /* process activation relu */ if (param->activation == 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int 
output_off = i * (out_h * out_w) + j;
                if (output_sgemm[output_off] < 0)
                    output_sgemm[output_off] = 0;
            }
        }
    }

    /* process activation relu6 */
    if (param->activation > 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm[output_off] < 0)
                    output_sgemm[output_off] = 0;
                if (output_sgemm[output_off] > 6)
                    output_sgemm[output_off] = 6;
            }
        }
    }

    /* quant from fp32 to uint8 */
    for (int i = 0; i < outchan_g; i++)
    {
        for (int j = 0; j < out_h * out_w; j++)
        {
            int output_off = i * (out_h * out_w) + j;
            int udata = (int)(round(output_sgemm[output_off] / output->scale) + output->zero_point);
            if (udata > 255)
                udata = 255;
            else if (udata < 0)
                udata = 0;
            output_uint8[output_off] = udata;
        }
    }

    sys_free(output_sgemm);
}

static void sgemm_int8(struct tensor* input, struct tensor* filter, struct tensor* bias, struct tensor* output,
                       struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread)
{
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;

    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int out_image_size = output->dims[1] * output->dims[2] * output->dims[3];

    int8_t* interleave_int8 = (int8_t*)priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size;
    int8_t* im2col_pack4_int8 = priv_info->im2col_buffer_pack4;
    int8_t* output_int8 = (int8_t*)output->data + n * out_image_size + outchan_g * group * out_h * out_w;
    int32_t* bias_int32 = NULL;
    if (bias)
        bias_int32 = (int*)bias->data + outchan_g * group;

    float input_scale = input->scale;
    float* kernel_scales = filter->scale_list;
    float output_scale = output->scale;

    int8_t* filter_sgemm = interleave_int8;
    int8_t* input_sgemm_pack4 = im2col_pack4_int8;
    int32_t* output_sgemm_int32 = (int32_t*)sys_malloc(outchan_g * out_h * out_w * sizeof(int32_t));
    float* output_sgemm_fp32 = (float*)sys_malloc(outchan_g * out_h * out_w * sizeof(float));

    sgemm_i8(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm_int32, num_thread);

    /* process bias and dequant output from int32 to fp32 */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outchan_g; i++)
    {
        for (int j = 0; j < out_h * out_w; j++)
        {
            int output_off = i * (out_h * out_w) + j;
            if (bias)
                output_sgemm_fp32[output_off] = (float)(output_sgemm_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i];
            else
                output_sgemm_fp32[output_off] = (float)output_sgemm_int32[output_off] * input_scale * kernel_scales[i];
        }
    }

    /* process activation relu */
    if (param->activation == 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm_fp32[output_off] < 0)
                    output_sgemm_fp32[output_off] = 0;
            }
        }
    }

    /* process activation relu6 */
    if (param->activation > 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm_fp32[output_off] < 0)
                    output_sgemm_fp32[output_off] = 0;
                if (output_sgemm_fp32[output_off] > 6)
                    output_sgemm_fp32[output_off] = 6;
            }
        }
    }

    /* quant from fp32 to int8 */
    for (int i = 0; i < outchan_g; i++)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int j = 0; j < out_h * out_w; j++)
        {
            int output_off = i * (out_h * out_w) + j;
            int32_t data_i32 = (int32_t)(round(output_sgemm_fp32[output_off] / output_scale));
            if (data_i32 > 127)
                data_i32 = 127;
            else if (data_i32 < -127)
                data_i32 = -127;
            output_int8[output_off] = (int8_t)data_i32;
        }
    }

    sys_free(output_sgemm_int32);
    sys_free(output_sgemm_fp32);
}

/* check whether the conv needs to use winograd */
static int winograd_support(struct conv_param* param, int in_h, int in_w)
{
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int input_chan = param->input_channel;
    int output_chan = param->output_channel;
    int group = param->group;

    if (in_h <= 10 && in_w <= 10)
        return 0;

    if (group != 1 || kernel_h != 3 || kernel_w != 3 || stride_h != 1 || stride_w != 1 || dilation_h != 1
        || dilation_w != 1 || input_chan < 16 || output_chan < 16 || output_chan % 16)
        return 0;

    return 1;
}

int conv_hcl_get_shared_mem_size(struct tensor* input, struct tensor* output, struct conv_param* param)
{
    int group = param->group;
    int input_chan = param->input_channel / group;
    int kernel_size = input_chan * param->kernel_h * param->kernel_w;
    int output_xy = output->dims[2] * output->dims[3];
    int elem_size = input->elem_size;

    // simulate uint8 inference with fp32
    if (input->data_type == TENGINE_DT_UINT8)
        elem_size = 4;

    return elem_size * output_xy * kernel_size;
}

int conv_hcl_get_shared_pack4_mem_size(struct tensor* filter, struct tensor* output, struct conv_param* param)
{
    int K = filter->elem_num / filter->dims[0];
    int N = output->dims[2] * output->dims[3];
    int elem_size = filter->elem_size;

    // simulate uint8 inference with fp32
    if (filter->data_type == TENGINE_DT_UINT8)
        elem_size = 4;

    return (8 * K * (N / 8 + N % 8)) * elem_size;
}

int conv_hcl_get_interleave_pack4_size(int M, int K, struct tensor* filter)
{
    int elem_size = filter->elem_size;

    // simulate uint8 inference with fp32
    if (filter->data_type == TENGINE_DT_UINT8)
        elem_size = 4;

    int size = 8 * K * (M / 8 + (M % 8) / 4 + M % 4) * elem_size;
    return size;
}

void conv_hcl_interleave_pack4_fp32(int M, int K, struct conv_priv_info* priv_info)
{
    float* pA = (float*)priv_info->interleave_buffer;
    float* pA_t = (float*)priv_info->interleave_buffer_pack4;

    int nn_outch = M >> 3;
    int remain_outch_start = nn_outch << 3;

    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 8;

        const float* k0 = pA + (p + 0) * K;
        const float* k1 = pA + (p + 1) * K;
        const float* k2 = pA + (p + 2) * K;
        const float* k3 = pA + (p + 3) * K;
        const float* k4 = pA + (p + 4) * K;
        const float* k5 = pA + (p + 5) * K;
        const float* k6 = pA + (p + 6) * K;
        const float* k7 = pA + (p + 7) * K;

        float* ktmp = pA_t + (p / 8) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp[4] = k4[0];
            ktmp[5] = k5[0];
            ktmp[6] = k6[0];
            ktmp[7] = k7[0];
            ktmp += 8;

            k0 += 1; k1 += 1; k2 += 1; k3 += 1;
            k4 += 1; k5 += 1; k6 += 1; k7 += 1;
        }
    }

    nn_outch = (M - remain_outch_start) >> 2;
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;

        const float* k0 = pA + (p + 0) * K;
        const float* k1 = pA + (p + 1) * K;
        const float* k2 = pA + (p + 2) * K;
        const float* k3 = pA + (p + 3) * K;

        float* ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;

            k0 += 1; k1 += 1; k2 += 1; k3 += 1;
        }
    }

    remain_outch_start += nn_outch << 2;
    for (int p = remain_outch_start; p < M; p++)
    {
        const float* k0 = pA + (p + 0) * K;
        float* ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}

void conv_hcl_interleave_pack4_int8(int M, int K, struct conv_priv_info* priv_info)
{
    int8_t* pA = (int8_t*)priv_info->interleave_buffer;
    int8_t* pA_t = (int8_t*)priv_info->interleave_buffer_pack4;

    int nn_outch = M >> 3;
    int remain_outch_start = nn_outch << 3;

    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 8;

        const int8_t* k0 = pA + (p + 0) * K;
        const int8_t* k1 = pA + (p + 1) * K;
        const int8_t* k2 = pA + (p + 2) * K;
        const int8_t* k3 = pA + (p + 3) * K;
        const int8_t* k4 = pA + (p + 4) * K;
        const int8_t* k5 = pA + (p + 5) * K;
        const int8_t* k6 = pA + (p + 6) * K;
        const int8_t* k7 = pA + (p + 7) * K;

        int8_t* ktmp = pA_t + (p / 8) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp[4] = k4[0];
            ktmp[5] = k5[0];
            ktmp[6] = k6[0];
            ktmp[7] = k7[0];
            ktmp += 8;

            k0 += 1; k1 += 1; k2 += 1; k3 += 1;
            k4 += 1; k5 += 1; k6 += 1; k7 += 1;
        }
    }

    nn_outch = (M - remain_outch_start) >> 2;
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;

        const int8_t* k0 = pA + (p + 0) * K;
        const int8_t* k1 = pA + (p + 1) * K;
        const int8_t* k2 = pA + (p + 2) * K;
        const int8_t* k3 = pA + (p + 3) * K;

        int8_t* ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;

            k0 += 1; k1 += 1; k2 += 1; k3 += 1;
        }
    }

    remain_outch_start += nn_outch << 2;
    for (int p = remain_outch_start; p < M; p++)
    {
        const int8_t* k0 = pA + (p + 0) * K;
        int8_t* ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}

int conv_hcl_prerun(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* output_tensor,
                    struct conv_priv_info* priv_info, struct conv_param* param)
{
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];

    /* check winograd implementation, only for conv3x3s1 */
    if (input_tensor->data_type == TENGINE_DT_FP32)
    {
        priv_info->winograd = winograd_support(param, in_h, in_w);
        if (priv_info->winograd)
        {
            return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param);
        }
    }

    if (!priv_info->external_im2col_mem)
    {
        int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->im2col_buffer = mem;
        priv_info->im2col_buffer_size = mem_size;
    }

    if (!priv_info->external_im2col_pack4_mem)
    {
        int mem_size = conv_hcl_get_shared_pack4_mem_size(filter_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->im2col_buffer_pack4 = mem;
        priv_info->im2col_buffer_pack4_size = mem_size;
    }

    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    if (input_tensor->data_type == TENGINE_DT_UINT8)
        interleave_uint8(filter_tensor, priv_info);
    else
        interleave(filter_tensor, priv_info);

    if (priv_info->external_interleave_pack4_mem)
    {
        int M = filter_tensor->dims[0];
        int K = filter_tensor->elem_num / filter_tensor->dims[0];
        int mem_size = conv_hcl_get_interleave_pack4_size(M, K, filter_tensor);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer_pack4 = mem;
        priv_info->interleave_buffer_pack4_size = mem_size;

        if (input_tensor->data_type == TENGINE_DT_FP32 || input_tensor->data_type == TENGINE_DT_UINT8)
            conv_hcl_interleave_pack4_fp32(M, K, priv_info);
        else
            conv_hcl_interleave_pack4_int8(M, K, priv_info);

        if (!priv_info->external_interleave_mem && priv_info->interleave_buffer)
        {
            sys_free(priv_info->interleave_buffer);
            priv_info->interleave_buffer = NULL;
        }
    }
    else
    {
        priv_info->interleave_buffer_pack4 = priv_info->interleave_buffer;
        priv_info->interleave_buffer_pack4_size = priv_info->interleave_buffer_size;
    }

    return 0;
}

int conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    if (priv_info->winograd)
    {
        return wino_conv_hcl_postrun(priv_info);
    }

    if (priv_info->external_interleave_pack4_mem && !priv_info->external_interleave_mem
        && priv_info->interleave_buffer != NULL)
    {
        sys_free(priv_info->interleave_buffer_pack4);
        priv_info->interleave_buffer_pack4 = NULL;
    }

    if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL)
    {
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL;
    }

    if (!priv_info->external_im2col_pack4_mem && priv_info->im2col_buffer_pack4 != NULL)
    {
        sys_free(priv_info->im2col_buffer_pack4);
        priv_info->im2col_buffer_pack4 = NULL;
    }

    if (priv_info->external_interleave_pack4_mem && priv_info->interleave_buffer_pack4 != NULL)
    {
        sys_free(priv_info->interleave_buffer_pack4);
        priv_info->interleave_buffer_pack4 = NULL;
    }

    return 0;
}

int conv_hcl_run(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* bias_tensor,
                 struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                 int num_thread, int cpu_affinity)
{
    int group = param->group;
    int type = input_tensor->data_type;

    if (priv_info->winograd)
    {
        return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param,
                                 num_thread, cpu_affinity);
    }

    for (int i = 0; i < input_tensor->dims[0]; i++)    // batch size
    {
        for (int j = 0; j < group; j++)
        {
            im2col_ir(input_tensor, output_tensor, priv_info, param, i, j);

            int K = filter_tensor->elem_num / filter_tensor->dims[0];
            int N = output_tensor->dims[2] * output_tensor->dims[3];
            void* im2col_buffer = priv_info->im2col_buffer;

            if (priv_info->external_interleave_pack4_mem)
            {
                if (type == TENGINE_DT_FP32 || type == TENGINE_DT_UINT8)
                    input_pack4_fp32(K, N, im2col_buffer, priv_info->im2col_buffer_pack4, num_thread);
                else
                    input_pack4_int8(K, N, im2col_buffer, priv_info->im2col_buffer_pack4, num_thread);
            }
            else
            {
                priv_info->im2col_buffer_pack4 = im2col_buffer;
            }

            if (type == TENGINE_DT_FP32)
                sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread);
            else if (type == TENGINE_DT_UINT8)
                sgemm_uint8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread);
            else if (type == TENGINE_DT_INT8)
                sgemm_int8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread);
            else
            {
                TLOG_ERR("Input data type %d not supported.\n", input_tensor->data_type);
                return -1;
            }
        }
    }

    return 0;
}

int conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->external_im2col_mem = 1;
    priv_info->im2col_buffer = mem;
    priv_info->im2col_buffer_size = mem_size;
    return 0;
}

int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->external_im2col_pack4_mem = 1;
    priv_info->im2col_buffer_pack4 = mem;
    priv_info->im2col_buffer_pack4_size = mem_size;
    return 0;
}
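The two quantized paths above share one requantization recipe: accumulate in int32, dequantize with the input scale and the per-channel kernel scale, then round back to the output's integer domain (asymmetric uint8 with a zero point, symmetric int8 clamped to +/-127). A minimal self-contained sketch of that round-trip, independent of the Tengine tensor structs (the scale values are illustrative, not taken from the code above):

/* requant_sketch.c - editor's illustration of the quantization math above */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Asymmetric uint8 quantization, as in sgemm_uint8. */
static uint8_t quant_u8(float x, float scale, int zero_point)
{
    int q = (int)(round(x / scale) + zero_point);
    if (q > 255) q = 255;
    if (q < 0)   q = 0;
    return (uint8_t)q;
}

/* Symmetric int8 quantization, as in sgemm_int8. */
static int8_t quant_i8(float x, float scale)
{
    int32_t q = (int32_t)round(x / scale);
    if (q > 127)  q = 127;
    if (q < -127) q = -127;
    return (int8_t)q;
}

int main(void)
{
    int32_t acc = 12345;                         /* one GEMM output + bias */
    float input_scale = 0.02f, kernel_scale = 0.005f;
    float fp32 = (float)acc * input_scale * kernel_scale;  /* dequantize */

    printf("fp32=%f u8=%u i8=%d\n", fp32,
           quant_u8(fp32, 0.1f, 128), quant_i8(fp32, 0.1f));
    return 0;
}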
relic_core.c
/*
 * RELIC is an Efficient LIbrary for Cryptography
 * Copyright (C) 2007-2013 RELIC Authors
 *
 * This file is part of RELIC. RELIC is legal property of its developers,
 * whose names are not listed here. Please refer to the COPYRIGHT file
 * for contact information.
 *
 * RELIC is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * RELIC is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with RELIC. If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * @file
 *
 * Implementation of the library basic functions.
 *
 * @version $Id: relic_core.c 1522 2013-08-27 15:55:54Z dfaranha $
 * @ingroup relic
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include "relic_core.h"
#include "relic_rand.h"
#include "relic_types.h"
#include "relic_err.h"
#include "relic_arch.h"
#include "relic_fp.h"
#include "relic_fb.h"
#include "relic_ep.h"
#include "relic_eb.h"
#include "relic_cp.h"
#include "relic_pp.h"

/*============================================================================*/
/* Public definitions                                                         */
/*============================================================================*/

/**
 * Default library context.
 */
ctx_t first_ctx;

/**
 * Active library context.
 */
ctx_t *core_ctx = NULL;

#ifdef MULTI
#pragma omp threadprivate(first_ctx, core_ctx)
#endif

int core_init(void) {
	if (core_ctx == NULL) {
		core_ctx = &(first_ctx);
	}

#if defined(CHECK) || defined(TRACE)
	core_ctx->trace = 0;
#endif

#ifdef CHECK
	core_ctx->reason[ERR_NO_MEMORY] = MSG_NO_MEMORY;
	core_ctx->reason[ERR_NO_PRECI] = MSG_NO_PRECI;
	core_ctx->reason[ERR_NO_FILE] = MSG_NO_FILE;
	core_ctx->reason[ERR_NO_READ] = MSG_NO_READ;
	core_ctx->reason[ERR_NO_VALID] = MSG_NO_VALID;
	core_ctx->reason[ERR_NO_BUFFER] = MSG_NO_BUFFER;
	core_ctx->reason[ERR_NO_FIELD] = MSG_NO_FIELD;
	core_ctx->reason[ERR_NO_CURVE] = MSG_NO_CURVE;
	core_ctx->reason[ERR_NO_CONFIG] = MSG_NO_CONFIG;
	core_ctx->last = NULL;
#endif /* CHECK */

#if ALLOC == STATIC
	core_ctx->next = 0;
#endif

#ifdef OVERH
	core_ctx->over = 0;
#endif

	core_ctx->code = STS_OK;

	TRY {
		arch_init();
		rand_init();
#ifdef WITH_FP
		fp_prime_init();
#endif
#ifdef WITH_FB
		fb_poly_init();
#endif
#ifdef WITH_FT
		ft_poly_init();
#endif
#ifdef WITH_EP
		ep_curve_init();
#endif
#ifdef WITH_EB
		eb_curve_init();
#endif
#ifdef WITH_PP
		pp_map_init();
#endif
#ifdef WITH_PB
		pb_map_init();
#endif
	}
	CATCH_ANY {
		return STS_ERR;
	}

	return STS_OK;
}

int core_clean(void) {
	rand_clean();
#ifdef WITH_FP
	fp_prime_clean();
#endif
#ifdef WITH_FB
	fb_poly_clean();
#endif
#ifdef WITH_FT
	ft_poly_clean();
#endif
#ifdef WITH_EP
	ep_curve_clean();
#endif
#ifdef WITH_EB
	eb_curve_clean();
#endif
#ifdef WITH_PP
	pp_map_clean();
#endif
#ifdef WITH_PB
	pb_map_clean();
#endif
	arch_clean();
	core_ctx = NULL;
	return STS_OK;
}

ctx_t *core_get() {
	return core_ctx;
}

void core_set(ctx_t *ctx) {
	core_ctx = ctx;
}
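The four functions above are the whole context lifecycle: initialize once, use the configured modules, then clean up. A minimal caller sketch (hypothetical usage, assuming the standard RELIC headers and build flags; only functions defined in this file are called):

/* editor's usage sketch, not part of relic_core.c */
#include "relic_core.h"

int main(void) {
	/* Initialize the default context and all configured modules. */
	if (core_init() != STS_OK) {
		core_clean();
		return 1;
	}

	/* ... use field, curve and pairing modules here ... */

	/* The active context can be inspected (or swapped with core_set). */
	ctx_t *ctx = core_get();
	(void)ctx;

	return core_clean() == STS_OK ? 0 : 1;
}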
GraphBLAS.h
/* * GraphBLAS.h * * Created on: May 31, 2016 * Author: Michael Wegner (michael.wegner@student.kit.edu) */ #ifndef NETWORKIT_CPP_ALGEBRAIC_GRAPHBLAS_H_ #define NETWORKIT_CPP_ALGEBRAIC_GRAPHBLAS_H_ #include <limits> #include "Semirings.h" #include "SparseAccumulator.h" #include "AlgebraicGlobals.h" #include "Vector.h" /** * @ingroup algebraic * Implements the GraphBLAS interface. For more information visit https://graphblas.org. */ namespace GraphBLAS { // **************************************************** // Operations // **************************************************** /** * Computes binOp(A(i,j), B(i,j)) for all i,j element-wise. Note that the dimensions of * @a A and @a B must coincide and that the zero must be the same. * @param A * @param B * @param binOp * @return The resulting matrix. */ template<class SemiRing, class Matrix, typename L> Matrix eWiseBinOp(const Matrix& A, const Matrix& B, L binOp) { assert(A.numberOfRows() == B.numberOfRows() && A.numberOfColumns() == B.numberOfColumns()); assert(A.getZero() == B.getZero() && A.getZero() == SemiRing::zero()); std::vector<int64_t> columnPointer(A.numberOfColumns(), -1); std::vector<double> Arow(A.numberOfColumns(), SemiRing::zero()); std::vector<double> Brow(A.numberOfColumns(), SemiRing::zero()); std::vector<NetworKit::Triplet> triplets; for (NetworKit::index i = 0; i < A.numberOfRows(); ++i) { NetworKit::index listHead = 0; NetworKit::count nnz = 0; // search for nonZeros in matrix A A.forNonZeroElementsInRow(i, [&](NetworKit::index j, double value) { Arow[j] = value; columnPointer[j] = listHead; listHead = j; nnz++; }); // search for nonZeros in matrix B B.forNonZeroElementsInRow(i, [&](NetworKit::index j, double value) { Brow[j] = value; if (columnPointer[j] == -1) { // matrix A does not have a nonZero entry in column j columnPointer[j] = listHead; listHead = j; nnz++; } }); // apply operator on the found nonZeros in A and B for (NetworKit::count k = 0; k < nnz; ++k) { double value = binOp(Arow[listHead], Brow[listHead]); if (value != SemiRing::zero()) { triplets.push_back({i,listHead,value}); } NetworKit::index temp = listHead; listHead = columnPointer[listHead]; // reset for next row columnPointer[temp] = -1; Arow[temp] = SemiRing::zero(); Brow[temp] = SemiRing::zero(); } nnz = 0; } return Matrix(A.numberOfRows(), A.numberOfColumns(), triplets, A.getZero()); } /** * Computes the matrix-matrix multiplication of @a A and @a B. Note that * A.numberOfColumns() must be equal to B.numberOfRows() and the zero elements * must be the same. The default Semiring is the ArithmeticSemiring. * @param A * @param B * @return The result of the multiplication A * B. 
*/ template<class SemiRing = ArithmeticSemiring, class Matrix> Matrix MxM(const Matrix& A, const Matrix& B) { assert(A.numberOfColumns() == B.numberOfRows()); assert(A.getZero() == SemiRing::zero() && B.getZero() == SemiRing::zero()); std::vector<NetworKit::Triplet> triplets; NetworKit::SparseAccumulator spa(B.numberOfRows()); for (NetworKit::index i = 0; i < A.numberOfRows(); ++i) { A.forNonZeroElementsInRow(i, [&](NetworKit::index k, double w1) { B.forNonZeroElementsInRow(k, [&](NetworKit::index j, double w2) { spa.scatter(SemiRing::mult(w1,w2), j, *SemiRing::add); }); }); spa.gather([&](NetworKit::index i, NetworKit::index j, double value){ triplets.push_back({i,j,value}); }); spa.increaseRow(); } return Matrix(A.numberOfRows(), B.numberOfColumns(), triplets, A.getZero()); } /** * Computes the matrix-matrix multiplication of @a A and @a B and adds it to @a C where * the add operation is that of the specified Semiring (i.e. C(i,j) = SemiRing::add(C(i,j), (A*B)(i,j))). * The default Semiring is the ArithmeticSemiring. * @param A * @param B * @param C */ template<class SemiRing = ArithmeticSemiring, class Matrix> void MxM(const Matrix& A, const Matrix& B, Matrix& C) { assert(A.numberOfColumns() == B.numberOfRows() && A.numberOfRows() == C.numberOfRows() && B.numberOfColumns() == C.numberOfColumns()); assert(A.getZero() == SemiRing::zero() && B.getZero() == SemiRing::zero() && C.getZero() == SemiRing::zero()); std::vector<NetworKit::Triplet> triplets; NetworKit::SparseAccumulator spa(B.numberOfRows()); for (NetworKit::index i = 0; i < A.numberOfRows(); ++i) { A.forNonZeroElementsInRow(i, [&](NetworKit::index k, double w1) { B.forNonZeroElementsInRow(k, [&](NetworKit::index j, double w2) { spa.scatter(SemiRing::mult(w1,w2), j, *SemiRing::add); }); }); spa.gather([&](NetworKit::index i, NetworKit::index j, double value){ triplets.push_back({i,j,value}); }); spa.increaseRow(); } Matrix temp(A.numberOfRows(), B.numberOfRows(), triplets, A.getZero()); C = eWiseBinOp<SemiRing, Matrix>(C, temp, *SemiRing::add); } /** * Computes the matrix-matrix multiplication of @a A and @a B and adds it to @a C where * the add operation is specified by the binary function @a accum (i.e. C(i,j) = accum(C(i,j), (A*B)(i,j))). * The default Semiring is the ArithmeticSemiring. * @param A * @param B * @param C * @param accum */ template<class SemiRing = ArithmeticSemiring, typename F, class Matrix> void MxM(const Matrix& A, const Matrix& B, Matrix& C, F accum) { assert(A.numberOfColumns() == B.numberOfRows() && A.numberOfRows() == C.numberOfRows() && B.numberOfColumns() == C.numberOfColumns()); assert(A.getZero() == SemiRing::zero() && B.getZero() == SemiRing::zero() && C.getZero() == SemiRing::zero()); std::vector<NetworKit::Triplet> triplets; NetworKit::SparseAccumulator spa(B.numberOfRows()); for (NetworKit::index i = 0; i < A.numberOfRows(); ++i) { A.forNonZeroElementsInRow(i, [&](NetworKit::index k, double w1) { B.forNonZeroElementsInRow(k, [&](NetworKit::index j, double w2) { spa.scatter(SemiRing::mult(w1,w2), j, *SemiRing::add); }); }); spa.gather([&](NetworKit::index i, NetworKit::index j, double value){ triplets.push_back({i,j,value}); }); spa.increaseRow(); } Matrix temp(A.numberOfRows(), B.numberOfRows(), triplets, A.getZero()); C = eWiseBinOp<SemiRing, Matrix>(C, temp, accum); } /** * Computes the matrix-vector product of matrix @a A and Vector @a v. The default Semiring is the ArithmeticSemiring. 
* @param A * @param v */ template<class SemiRing = ArithmeticSemiring, class Matrix> NetworKit::Vector MxV(const Matrix& A, const NetworKit::Vector& v) { assert(!v.isTransposed()); assert(A.numberOfColumns() == v.getDimension()); assert(A.getZero() == SemiRing::zero()); NetworKit::Vector result(A.numberOfRows(), A.getZero()); A.parallelForNonZeroElementsInRowOrder([&](NetworKit::index i, NetworKit::index j, double value) { result[i] = SemiRing::add(result[i], SemiRing::mult(value, v[j])); }); return result; } /** * Computes the matrix-vector product of matrix @a A and Vector @a v and adds it to @a c where the add operation * is that of the specified Semiring (i.e. c[i] = SemiRing::add(c[i], (A*v)[i]). The default Semiring is the * ArithmeticSemiring. * @param A * @param v * @param c */ template<class SemiRing = ArithmeticSemiring, class Matrix> void MxV(const Matrix& A, const NetworKit::Vector& v, NetworKit::Vector& c) { assert(!v.isTransposed()); assert(A.numberOfColumns() == v.getDimension()); assert(A.getZero() == SemiRing::zero()); A.parallelForNonZeroElementsInRowOrder([&](NetworKit::index i, NetworKit::index j, double value) { c[i] = SemiRing::add(c[i], SemiRing::mult(value, v[j])); }); } /** * Computes the matrix-vector product of matrix @a A and Vector @a v and adds it to @a c where the add operation * is that of the specified binary function @a accum (i.e. c[i] = accum(c[i], (A*v)[i]). The default Semiring is the * ArithmeticSemiring. * @param A * @param v * @param c */ template<class SemiRing = ArithmeticSemiring, typename F, class Matrix> void MxV(const Matrix& A, const NetworKit::Vector& v, NetworKit::Vector& c, F accum) { assert(!v.isTransposed()); assert(A.numberOfColumns() == v.getDimension()); assert(A.getZero() == SemiRing::zero()); A.parallelForNonZeroElementsInRowOrder([&](NetworKit::index i, NetworKit::index j, double value) { c[i] = accum(c[i], SemiRing::mult(value, v[j])); }); } /** * Computes SemiRing::add(A(i,j), B(i,j)) for all i,j element-wise and returns the resulting matrix. The default * Semiring is the ArithmeticSemiring. * @param A * @param B */ template<class SemiRing = ArithmeticSemiring, class Matrix> Matrix eWiseAdd(const Matrix& A, const Matrix& B) { return eWiseBinOp<SemiRing, Matrix>(A, B, [](const double a, const double b) {return SemiRing::add(a,b);}); } /** * Computes SemiRing::mult(A(i,j), B(i,j)) for all i,j element-wise and returns the resulting matrix. The default * Semiring is the ArithmeticSemiring. * @param A * @param B * @return */ template<class SemiRing = ArithmeticSemiring, class Matrix> Matrix eWiseMult(const Matrix& A, const Matrix& B) { return eWiseBinOp<SemiRing, Matrix>(A, B, [](const double a, const double b) {return SemiRing::mult(a,b);}); } /** * Computes the row-reduction of the @a matrix and returns the result as a vector. That is, the elements of each row * are summed up to form the respective entry in the result vector. The add operator is that of the specified * Semiring. The default Semiring is the ArithmeticSemiring. 
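 *
 * (Illustrative note, not part of the original header: under the
 * ArithmeticSemiring, rowReduce(A) coincides with MxV(A, ones) for an
 * all-ones vector, since summing a row is multiplying it by ones.)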
 * @param matrix
 */
template<class SemiRing = ArithmeticSemiring, class Matrix>
NetworKit::Vector rowReduce(const Matrix& matrix) {
	assert(matrix.getZero() == SemiRing::zero());
	NetworKit::Vector rowReduction(matrix.numberOfRows(), 0.0);

#pragma omp parallel for
	for (NetworKit::omp_index i = 0; i < static_cast<NetworKit::omp_index>(matrix.numberOfRows()); ++i) {
		matrix.forNonZeroElementsInRow(i, [&](NetworKit::index j, double value) {
			rowReduction[i] = SemiRing::add(rowReduction[i], value);
		});
	}

	return rowReduction;
}

/**
 * Computes the column-reduction of the @a matrix and returns the result as a Vector. That is, the elements of each
 * column are summed up to form the respective entry in the result Vector. The add operator is that of the specified
 * Semiring. The default Semiring is the ArithmeticSemiring.
 * @param matrix
 */
template<class SemiRing = ArithmeticSemiring, class Matrix>
NetworKit::Vector columnReduce(const Matrix& matrix) {
	assert(matrix.getZero() == SemiRing::zero());
	NetworKit::Vector columnReduction(matrix.numberOfColumns(), 0.0);

	matrix.forNonZeroElementsInRowOrder([&](NetworKit::index i, NetworKit::index j, double value) {
		columnReduction[j] = SemiRing::add(columnReduction[j], value);
	});

	return columnReduction;
}

}

#endif /* NETWORKIT_CPP_ALGEBRAIC_GRAPHBLAS_H_ */
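All of these operations instantiate one algebraic pattern: with matrices over a semiring $(S, \oplus, \otimes)$, MxM computes

$$(A \cdot B)(i,j) \;=\; \bigoplus_{k} A(i,k) \otimes B(k,j).$$

With the ArithmeticSemiring, $(\oplus,\otimes)=(+,\times)$ gives the ordinary matrix product; a min-plus semiring $(\min,+)$ would turn the same loop structure into one relaxation step of all-pairs shortest paths (whether Semirings.h defines such a tropical semiring is an assumption, offered only as an illustration).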
resample.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS AAA M M PPPP L EEEEE % % R R E SS A A MM MM P P L E % % RRRR EEE SSS AAAAA M M M PPPP L EEE % % R R E SS A A M M P L E % % R R EEEEE SSSSS A A M M P LLLLL EEEEE % % % % % % MagickCore Pixel Resampling Methods % % % % Software Design % % John Cristy % % Anthony Thyssen % % August 2007 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/color-private.h" #include "magick/cache.h" #include "magick/draw.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/pixel.h" #include "magick/pixel-private.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/resample.h" #include "magick/resize.h" #include "magick/resize-private.h" #include "magick/transform.h" #include "magick/signature-private.h" #include "magick/utility.h" /* EWA Resampling Options */ /* select ONE resampling method */ #define EWA 1 /* Normal EWA handling - raw or clamped */ /* if 0 then use "High Quality EWA" */ #define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */ #define FILTER_LUT 1 /* Use a LUT rather then direct filter calls */ /* output debugging information */ #define DEBUG_ELLIPSE 0 /* output ellipse info for debug */ #define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */ #define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */ #if ! FILTER_DIRECT #define WLUT_WIDTH 1024 /* size of the filter cache */ #endif /* Typedef declarations. 
*/ struct _ResampleFilter { CacheView *view; Image *image; ExceptionInfo *exception; MagickBooleanType debug; /* Information about image being resampled */ ssize_t image_area; InterpolatePixelMethod interpolate; VirtualPixelMethod virtual_pixel; FilterTypes filter; /* processing settings needed */ MagickBooleanType limit_reached, do_interpolate, average_defined; MagickPixelPacket average_pixel; /* current ellipitical area being resampled around center point */ double A, B, C, Vlimit, Ulimit, Uwidth, slope; #if FILTER_LUT /* LUT of weights for filtered average in elliptical area */ double filter_lut[WLUT_WIDTH]; #else /* Use a Direct call to the filter functions */ ResizeFilter *filter_def; double F; #endif /* the practical working support of the filter */ double support; size_t signature; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e R e s a m p l e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireResampleFilter() initializes the information resample needs do to a % scaled lookup of a color from an image, using area sampling. % % The algorithm is based on a Elliptical Weighted Average, where the pixels % found in a large elliptical area is averaged together according to a % weighting (filter) function. For more details see "Fundamentals of Texture % Mapping and Image Warping" a master's thesis by Paul.S.Heckbert, June 17, % 1989. Available for free from, http://www.cs.cmu.edu/~ph/ % % As EWA resampling (or any sort of resampling) can require a lot of % calculations to produce a distorted scaling of the source image for each % output pixel, the ResampleFilter structure generated holds that information % between individual image resampling. % % This function will make the appropriate AcquireCacheView() calls % to view the image, calling functions do not need to open a cache view. % % Usage Example... % resample_filter=AcquireResampleFilter(image,exception); % SetResampleFilter(resample_filter, GaussianFilter, 1.0); % for (y=0; y < (ssize_t) image->rows; y++) { % for (x=0; x < (ssize_t) image->columns; x++) { % u= ....; v= ....; % ScaleResampleFilter(resample_filter, ... scaling vectors ...); % (void) ResamplePixelColor(resample_filter,u,v,&pixel); % ... assign resampled pixel value ... % } % } % DestroyResampleFilter(resample_filter); % % The format of the AcquireResampleFilter method is: % % ResampleFilter *AcquireResampleFilter(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ResampleFilter *AcquireResampleFilter(const Image *image, ExceptionInfo *exception) { register ResampleFilter *resample_filter; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); resample_filter=(ResampleFilter *) AcquireMagickMemory( sizeof(*resample_filter)); if (resample_filter == (ResampleFilter *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(resample_filter,0,sizeof(*resample_filter)); resample_filter->exception=exception; resample_filter->image=ReferenceImage((Image *) image); resample_filter->view=AcquireCacheView(resample_filter->image); resample_filter->debug=IsEventLogging(); resample_filter->signature=MagickSignature; resample_filter->image_area=(ssize_t) (image->columns*image->rows); resample_filter->average_defined = MagickFalse; /* initialise the resampling filter settings */ SetResampleFilter(resample_filter, image->filter, image->blur); (void) SetResampleFilterInterpolateMethod(resample_filter, image->interpolate); (void) SetResampleFilterVirtualPixelMethod(resample_filter, GetImageVirtualPixelMethod(image)); return(resample_filter); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y R e s a m p l e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyResampleFilter() finalizes and cleans up the resampling % resample_filter as returned by AcquireResampleFilter(), freeing any memory % or other information as needed. % % The format of the DestroyResampleFilter method is: % % ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter) % % A description of each parameter follows: % % o resample_filter: resampling information structure % */ MagickExport ResampleFilter *DestroyResampleFilter( ResampleFilter *resample_filter) { assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickSignature); assert(resample_filter->image != (Image *) NULL); if (resample_filter->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", resample_filter->image->filename); resample_filter->view=DestroyCacheView(resample_filter->view); resample_filter->image=DestroyImage(resample_filter->image); #if ! FILTER_LUT resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def); #endif resample_filter->signature=(~MagickSignature); resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter); return(resample_filter); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s a m p l e P i x e l C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResamplePixelColor() samples the pixel values surrounding the location % given using an elliptical weighted average, at the scale previously % calculated, and in the most efficent manner possible for the % VirtualPixelMethod setting. % % The format of the ResamplePixelColor method is: % % MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter, % const double u0,const double v0,MagickPixelPacket *pixel) % % A description of each parameter follows: % % o resample_filter: the resample filter. 
% % o u0,v0: A double representing the center of the area to resample, % The distortion transformed transformed x,y coordinate. % % o pixel: the resampled pixel is returned here. % */ MagickExport MagickBooleanType ResamplePixelColor( ResampleFilter *resample_filter,const double u0,const double v0, MagickPixelPacket *pixel) { MagickBooleanType status; ssize_t u,v, v1, v2, uw, hit; double u1; double U,V,Q,DQ,DDQ; double divisor_c,divisor_m; register double weight; register const PixelPacket *pixels; register const IndexPacket *indexes; assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickSignature); status=MagickTrue; GetMagickPixelPacket(resample_filter->image,pixel); if ( resample_filter->do_interpolate ) { status=InterpolateMagickPixelPacket(resample_filter->image, resample_filter->view,resample_filter->interpolate,u0,v0,pixel, resample_filter->exception); return(status); } #if DEBUG_ELLIPSE fprintf(stderr, "u0=%lf; v0=%lf;\n", u0, v0); #endif /* Does resample area Miss the image? And is that area a simple solid color - then return that color */ hit = 0; switch ( resample_filter->virtual_pixel ) { case BackgroundVirtualPixelMethod: case ConstantVirtualPixelMethod: case TransparentVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case WhiteVirtualPixelMethod: case MaskVirtualPixelMethod: if ( resample_filter->limit_reached || u0 + resample_filter->Ulimit < 0.0 || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns || v0 + resample_filter->Vlimit < 0.0 || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows ) hit++; break; case UndefinedVirtualPixelMethod: case EdgeVirtualPixelMethod: if ( ( u0 + resample_filter->Ulimit < 0.0 && v0 + resample_filter->Vlimit < 0.0 ) || ( u0 + resample_filter->Ulimit < 0.0 && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows ) || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns && v0 + resample_filter->Vlimit < 0.0 ) || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows ) ) hit++; break; case HorizontalTileVirtualPixelMethod: if ( v0 + resample_filter->Vlimit < 0.0 || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows ) hit++; /* outside the horizontally tiled images. */ break; case VerticalTileVirtualPixelMethod: if ( u0 + resample_filter->Ulimit < 0.0 || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns ) hit++; /* outside the vertically tiled images. 
*/ break; case DitherVirtualPixelMethod: if ( ( u0 + resample_filter->Ulimit < -32.0 && v0 + resample_filter->Vlimit < -32.0 ) || ( u0 + resample_filter->Ulimit < -32.0 && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+32.0 ) || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+32.0 && v0 + resample_filter->Vlimit < -32.0 ) || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+32.0 && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+32.0 ) ) hit++; break; case TileVirtualPixelMethod: case MirrorVirtualPixelMethod: case RandomVirtualPixelMethod: case HorizontalTileEdgeVirtualPixelMethod: case VerticalTileEdgeVirtualPixelMethod: case CheckerTileVirtualPixelMethod: /* resampling of area is always needed - no VP limits */ break; } if ( hit ) { /* whole area is a solid color -- just return that color */ status=InterpolateMagickPixelPacket(resample_filter->image, resample_filter->view,IntegerInterpolatePixel,u0,v0,pixel, resample_filter->exception); return(status); } /* Scaling limits reached, return an 'averaged' result. */ if ( resample_filter->limit_reached ) { switch ( resample_filter->virtual_pixel ) { /* This is always handled by the above, so no need. case BackgroundVirtualPixelMethod: case ConstantVirtualPixelMethod: case TransparentVirtualPixelMethod: case GrayVirtualPixelMethod, case WhiteVirtualPixelMethod case MaskVirtualPixelMethod: */ case UndefinedVirtualPixelMethod: case EdgeVirtualPixelMethod: case DitherVirtualPixelMethod: case HorizontalTileEdgeVirtualPixelMethod: case VerticalTileEdgeVirtualPixelMethod: /* We need an average edge pixel, from the correct edge! How should I calculate an average edge color? Just returning an averaged neighbourhood, works well in general, but falls down for TileEdge methods. This needs to be done properly!!!!!! */ status=InterpolateMagickPixelPacket(resample_filter->image, resample_filter->view,AverageInterpolatePixel,u0,v0,pixel, resample_filter->exception); break; case HorizontalTileVirtualPixelMethod: case VerticalTileVirtualPixelMethod: /* just return the background pixel - Is there more direct way? 
*/ status=InterpolateMagickPixelPacket(resample_filter->image, resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel, resample_filter->exception); break; case TileVirtualPixelMethod: case MirrorVirtualPixelMethod: case RandomVirtualPixelMethod: case CheckerTileVirtualPixelMethod: default: /* generate a average color of the WHOLE image */ if ( resample_filter->average_defined == MagickFalse ) { Image *average_image; CacheView *average_view; GetMagickPixelPacket(resample_filter->image,(MagickPixelPacket *) &resample_filter->average_pixel); resample_filter->average_defined=MagickTrue; /* Try to get an averaged pixel color of whole image */ average_image=ResizeImage(resample_filter->image,1,1,BoxFilter,1.0, resample_filter->exception); if (average_image == (Image *) NULL) { *pixel=resample_filter->average_pixel; /* FAILED */ break; } average_view=AcquireCacheView(average_image); pixels=(PixelPacket *)GetCacheViewVirtualPixels(average_view,0,0,1,1, resample_filter->exception); if (pixels == (const PixelPacket *) NULL) { average_view=DestroyCacheView(average_view); average_image=DestroyImage(average_image); *pixel=resample_filter->average_pixel; /* FAILED */ break; } indexes=(IndexPacket *) GetCacheViewAuthenticIndexQueue(average_view); SetMagickPixelPacket(resample_filter->image,pixels,indexes, &(resample_filter->average_pixel)); average_view=DestroyCacheView(average_view); average_image=DestroyImage(average_image); if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod ) { /* CheckerTile is avergae of image average half background */ /* FUTURE: replace with a 50% blend of both pixels */ weight = QuantumScale*((MagickRealType)(QuantumRange- resample_filter->average_pixel.opacity)); resample_filter->average_pixel.red *= weight; resample_filter->average_pixel.green *= weight; resample_filter->average_pixel.blue *= weight; divisor_c = weight; weight = QuantumScale*((MagickRealType)(QuantumRange- resample_filter->image->background_color.opacity)); resample_filter->average_pixel.red += weight*resample_filter->image->background_color.red; resample_filter->average_pixel.green += weight*resample_filter->image->background_color.green; resample_filter->average_pixel.blue += weight*resample_filter->image->background_color.blue; resample_filter->average_pixel.opacity += resample_filter->image->background_color.opacity; divisor_c += weight; resample_filter->average_pixel.red /= divisor_c; resample_filter->average_pixel.green /= divisor_c; resample_filter->average_pixel.blue /= divisor_c; resample_filter->average_pixel.opacity /= 2; } } *pixel=resample_filter->average_pixel; break; } return(status); } /* Initialize weighted average data collection */ hit = 0; divisor_c = 0.0; divisor_m = 0.0; pixel->red = pixel->green = pixel->blue = 0.0; if (pixel->matte != MagickFalse) pixel->opacity = 0.0; if (pixel->colorspace == CMYKColorspace) pixel->index = 0.0; /* Determine the parellelogram bounding box fitted to the ellipse centered at u0,v0. This area is bounding by the lines... 
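     (restating the bounds implemented just below; this gloss is an
     editorial illustration, not in the original source)

        v = v0 - Vlimit   and   v = v0 + Vlimit      scan-line range v1..v2
        u = u0 + (v - v0)*slope  +/-  Uwidth          slanted left/right edges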
*/ v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit); /* range of scan lines */ v2 = (ssize_t)floor(v0 + resample_filter->Vlimit); /* scan line start and width accross the parallelogram */ u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth; uw = (ssize_t)(2.0*resample_filter->Uwidth)+1; #if DEBUG_ELLIPSE fprintf(stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2); fprintf(stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw); #else # define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */ #endif /* Do weighted resampling of all pixels, within the scaled ellipse, bound by a Parellelogram fitted to the ellipse. */ DDQ = 2*resample_filter->A; for( v=v1; v<=v2; v++ ) { #if DEBUG_HIT_MISS long uu = ceil(u1); /* actual pixel location (for debug only) */ fprintf(stderr, "# scan line from pixel %ld, %ld\n", (long)uu, (long)v); #endif u = (ssize_t)ceil(u1); /* first pixel in scanline */ u1 += resample_filter->slope; /* start of next scan line */ /* location of this first pixel, relative to u0,v0 */ U = (double)u-u0; V = (double)v-v0; /* Q = ellipse quotent ( if Q<F then pixel is inside ellipse) */ Q = (resample_filter->A*U + resample_filter->B*V)*U + resample_filter->C*V*V; DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V; /* get the scanline of pixels for this v */ pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw, 1,resample_filter->exception); if (pixels == (const PixelPacket *) NULL) return(MagickFalse); indexes=GetCacheViewVirtualIndexQueue(resample_filter->view); /* count up the weighted pixel colors */ for( u=0; u<uw; u++ ) { #if FILTER_LUT /* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */ if ( Q < (double)WLUT_WIDTH ) { weight = resample_filter->filter_lut[(int)Q]; #else /* Note that the ellipse has been pre-scaled so F = support^2 */ if ( Q < (double)resample_filter->F ) { weight = GetResizeFilterWeight(resample_filter->filter_def, sqrt(Q)); /* a SquareRoot! Arrggghhhhh... 
*/ #endif pixel->opacity += weight*pixels->opacity; divisor_m += weight; if (pixel->matte != MagickFalse) weight *= QuantumScale*((MagickRealType)(QuantumRange-pixels->opacity)); pixel->red += weight*pixels->red; pixel->green += weight*pixels->green; pixel->blue += weight*pixels->blue; if (pixel->colorspace == CMYKColorspace) pixel->index += weight*(*indexes); divisor_c += weight; hit++; #if DEBUG_HIT_MISS /* mark the pixel according to hit/miss of the ellipse */ fprintf(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n", (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1); fprintf(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n", (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1); } else { fprintf(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n", (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1); fprintf(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n", (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1); } uu++; #else } #endif pixels++; indexes++; Q += DQ; DQ += DDQ; } } #if DEBUG_ELLIPSE fprintf(stderr, "Hit=%ld; Total=%ld;\n", (long)hit, (long)uw*(v2-v1) ); #endif /* Result sanity check -- this should NOT happen */ if ( hit == 0 ) { /* not enough pixels in resampling, resort to direct interpolation */ #if DEBUG_NO_PIXEL_HIT pixel->opacity = pixel->red = pixel->green = pixel->blue = 0; pixel->red = QuantumRange; /* show pixels for which EWA fails */ #else status=InterpolateMagickPixelPacket(resample_filter->image, resample_filter->view,resample_filter->interpolate,u0,v0,pixel, resample_filter->exception); #endif return status; } /* Finialize results of resampling */ divisor_m = 1.0/divisor_m; pixel->opacity = (MagickRealType) ClampToQuantum(divisor_m*pixel->opacity); divisor_c = 1.0/divisor_c; pixel->red = (MagickRealType) ClampToQuantum(divisor_c*pixel->red); pixel->green = (MagickRealType) ClampToQuantum(divisor_c*pixel->green); pixel->blue = (MagickRealType) ClampToQuantum(divisor_c*pixel->blue); if (pixel->colorspace == CMYKColorspace) pixel->index = (MagickRealType) ClampToQuantum(divisor_c*pixel->index); return(MagickTrue); } #if EWA && EWA_CLAMP /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % - C l a m p U p A x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClampUpAxes() function converts the input vectors into a major and % minor axis unit vectors, and their magnitude. This allows us to % ensure that the ellipse generated is never smaller than the unit % circle and thus never too small for use in EWA resampling. % % This purely mathematical 'magic' was provided by Professor Nicolas % Robidoux and his Masters student Chantal Racette. % % Reference: "We Recommend Singular Value Decomposition", David Austin % http://www.ams.org/samplings/feature-column/fcarc-svd % % By generating major and minor axis vectors, we can actually use the % ellipse in its "canonical form", by remapping the dx,dy of the % sampled point into distances along the major and minor axis unit % vectors. 
% % Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form */ static inline void ClampUpAxes(const double dux, const double dvx, const double duy, const double dvy, double *major_mag, double *minor_mag, double *major_unit_x, double *major_unit_y, double *minor_unit_x, double *minor_unit_y) { /* * ClampUpAxes takes an input 2x2 matrix * * [ a b ] = [ dux duy ] * [ c d ] = [ dvx dvy ] * * and computes from it the major and minor axis vectors [major_x, * major_y] and [minor_x,minor_y] of the smallest ellipse containing * both the unit disk and the ellipse which is the image of the unit * disk by the linear transformation * * [ dux duy ] [S] = [s] * [ dvx dvy ] [T] = [t] * * (The vector [S,T] is the difference between a position in output * space and [X,Y]; the vector [s,t] is the difference between a * position in input space and [x,y].) */ /* * Outputs: * * major_mag is the half-length of the major axis of the "new" * ellipse. * * minor_mag is the half-length of the minor axis of the "new" * ellipse. * * major_unit_x is the x-coordinate of the major axis direction vector * of both the "old" and "new" ellipses. * * major_unit_y is the y-coordinate of the major axis direction vector. * * minor_unit_x is the x-coordinate of the minor axis direction vector. * * minor_unit_y is the y-coordinate of the minor axis direction vector. * * Unit vectors are useful for computing projections, in particular, * to compute the distance between a point in output space and the * center (of a disk) from the position of the corresponding point * in input space. * * Now, if you want to modify the input pair of tangent vectors so * that it defines the modified ellipse, all you have to do is set * * newdux = major_mag * major_unit_x * newdvx = major_mag * major_unit_y * newduy = minor_mag * minor_unit_x = minor_mag * -major_unit_y * newdvy = minor_mag * minor_unit_y = minor_mag * major_unit_x * * and use these tangent vectors as if they were the original ones. * Usually, this is a drastic change in the tangent vectors even if * the singular values are not clamped; for example, the minor axis * vector always points in a direction which is 90 degrees * counterclockwise from the direction of the major axis vector. */ /* * Discussion: * * GOAL: Fix things so that the pullback, in input space, of a disk * of radius r in output space is an ellipse which contains, at * least, a disc of radius r. (Make this hold for any r>0.) * * ESSENCE OF THE METHOD: Compute the product of the first two * factors of an SVD of the linear transformation defining the * ellipse and make sure that both its columns have norm at least 1. * Because rotations and reflexions map disks to themselves, it is * not necessary to compute the third (rightmost) factor of the SVD. * * DETAILS: Find the singular values and (unit) left singular * vectors of Jinv, clampling up the singular values to 1, and * multiply the unit left singular vectors by the new singular * values in order to get the minor and major ellipse axis vectors. * * Image resampling context: * * The Jacobian matrix of the transformation at the output point * under consideration is defined as follows: * * Consider the transformation (x,y) -> (X,Y) from input locations * to output locations. (Anthony Thyssen, elsewhere in resample.c, * uses the notation (u,v) -> (x,y).) 
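 *
 * (Illustrative numeric check, not from the original sources: for a
 * pure anisotropic scaling with Jinv = [ 3 0 ; 0 0.5 ], the singular
 * values are 3 and 0.5. ClampUpAxes keeps the major half-axis at 3 but
 * clamps the minor one up from 0.5 to 1, so the sampling ellipse never
 * collapses below the unit disk along the magnified direction.)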
* * The Jacobian matrix of the transformation at (x,y) is equal to * * J = [ A, B ] = [ dX/dx, dX/dy ] * [ C, D ] [ dY/dx, dY/dy ] * * that is, the vector [A,C] is the tangent vector corresponding to * input changes in the horizontal direction, and the vector [B,D] * is the tangent vector corresponding to input changes in the * vertical direction. * * In the context of resampling, it is natural to use the inverse * Jacobian matrix Jinv because resampling is generally performed by * pulling pixel locations in the output image back to locations in * the input image. Jinv is * * Jinb = [ a, b ] = [ dx/dX, dx/dY ] * [ c, d ] [ dy/dX, dy/dY ] * * Note: Jinv can be computed from J with the following matrix * formula: * * Jinv = 1/(A*D-B*C) [ D, -B ] * [ -C, A ] * * What we do is modify Jinv so that it generates an ellipse which * is as close as possible to the original but which contains the * unit disk. This can be accomplished as follows: * * Let * * Jinv = U Sigma V^T * * be an SVD decomposition of Jinv. (The SVD is not unique, but the * final ellipse does not depend on the particular SVD.) * * We could clamp up the entries of the diagonal matrix Sigma so * that they are at least 1, and then set * * Jinv = U newSigma V^T. * * However, we do not need to compute V for the following reason: * V^T is an orthogonal matrix (that is, it represents a combination * of rotations and reflexions) so that it maps the unit circle to * itself. For this reason, the exact value of V does not affect the * final ellipse, and we can choose V to be the identity * matrix. This gives * * Jinv = U newSigma. * * In the end, we return the two diagonal entries of newSigma * together with the two columns of U. */ /* * ClampUpAxes was written by Nicolas Robidoux and Chantal Racette * of Laurentian University with insightful suggestions from Anthony * Thyssen and funding from the National Science and Engineering * Research Council of Canada. It is distinguished from its * predecessors by its efficient handling of degenerate cases. * * The idea of clamping up the EWA ellipse's major and minor axes so * that the result contains the reconstruction kernel filter support * is taken from Andreas Gustaffson's Masters thesis "Interactive * Image Warping", Helsinki University of Technology, Faculty of * Information Technology, 59 pages, 1993 (see Section 3.6). * * The use of the SVD to clamp up the singular values of the * Jacobian matrix of the pullback transformation for EWA resampling * is taken from the astrophysicist Craig DeForest. It is * implemented in his PDL::Transform code (PDL = Perl Data * Language). */ const double a = dux; const double b = duy; const double c = dvx; const double d = dvy; /* * n is the matrix Jinv * transpose(Jinv). Eigenvalues of n are the * squares of the singular values of Jinv. */ const double aa = a*a; const double bb = b*b; const double cc = c*c; const double dd = d*d; /* * Eigenvectors of n are left singular vectors of Jinv. */ const double n11 = aa+bb; const double n12 = a*c+b*d; const double n21 = n12; const double n22 = cc+dd; const double det = a*d-b*c; const double twice_det = det+det; const double frobenius_squared = n11+n22; const double discriminant = (frobenius_squared+twice_det)*(frobenius_squared-twice_det); const double sqrt_discriminant = sqrt(discriminant); /* * s1 is the largest singular value of the inverse Jacobian * matrix. In other words, its reciprocal is the smallest singular * value of the Jacobian matrix itself. 
* If s1 = 0, both singular values are 0, and any orthogonal pair of * left and right factors produces a singular decomposition of Jinv. */ /* * Initially, we only compute the squares of the singular values. */ const double s1s1 = 0.5*(frobenius_squared+sqrt_discriminant); /* * s2 the smallest singular value of the inverse Jacobian * matrix. Its reciprocal is the largest singular value of the * Jacobian matrix itself. */ const double s2s2 = 0.5*(frobenius_squared-sqrt_discriminant); const double s1s1minusn11 = s1s1-n11; const double s1s1minusn22 = s1s1-n22; /* * u1, the first column of the U factor of a singular decomposition * of Jinv, is a (non-normalized) left singular vector corresponding * to s1. It has entries u11 and u21. We compute u1 from the fact * that it is an eigenvector of n corresponding to the eigenvalue * s1^2. */ const double s1s1minusn11_squared = s1s1minusn11*s1s1minusn11; const double s1s1minusn22_squared = s1s1minusn22*s1s1minusn22; /* * The following selects the largest row of n-s1^2 I as the one * which is used to find the eigenvector. If both s1^2-n11 and * s1^2-n22 are zero, n-s1^2 I is the zero matrix. In that case, * any vector is an eigenvector; in addition, norm below is equal to * zero, and, in exact arithmetic, this is the only case in which * norm = 0. So, setting u1 to the simple but arbitrary vector [1,0] * if norm = 0 safely takes care of all cases. */ const double temp_u11 = ( (s1s1minusn11_squared>=s1s1minusn22_squared) ? n12 : s1s1minusn22 ); const double temp_u21 = ( (s1s1minusn11_squared>=s1s1minusn22_squared) ? s1s1minusn11 : n21 ); const double norm = sqrt(temp_u11*temp_u11+temp_u21*temp_u21); /* * Finalize the entries of first left singular vector (associated * with the largest singular value). */ const double u11 = ( (norm>0.0) ? temp_u11/norm : 1.0 ); const double u21 = ( (norm>0.0) ? temp_u21/norm : 0.0 ); /* * Clamp the singular values up to 1. */ *major_mag = ( (s1s1<=1.0) ? 1.0 : sqrt(s1s1) ); *minor_mag = ( (s2s2<=1.0) ? 1.0 : sqrt(s2s2) ); /* * Return the unit major and minor axis direction vectors. */ *major_unit_x = u11; *major_unit_y = u21; *minor_unit_x = -u21; *minor_unit_y = u11; } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e R e s a m p l e F i l t e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleResampleFilter() does all the calculations needed to resample an image % at a specific scale, defined by two scaling vectors. This not using % a orthogonal scaling, but two distorted scaling vectors, to allow the % generation of a angled ellipse. % % As only two deritive scaling vectors are used the center of the ellipse % must be the center of the lookup. That is any curvature that the % distortion may produce is discounted. % % The input vectors are produced by either finding the derivitives of the % distortion function, or the partial derivitives from a distortion mapping. % They do not need to be the orthogonal dx,dy scaling vectors, but can be % calculated from other derivatives. For example you could use dr,da/r % polar coordinate vector scaling vectors % % If u,v = DistortEquation(x,y) OR u = Fu(x,y); v = Fv(x,y) % Then the scaling vectors are determined from the deritives... % du/dx, dv/dx and du/dy, dv/dy % If the resulting scaling vectors is othogonally aligned then... % dv/dx = 0 and du/dy = 0 % Producing an othogonally alligned ellipse in source space for the area to % be resampled. 
% % Note that scaling vectors are different to argument order. Argument order % is the general order the deritives are extracted from the distortion % equations, and not the scaling vectors. As such the middle two vaules % may be swapped from what you expect. Caution is advised. % % WARNING: It is assumed that any SetResampleFilter() method call will % always be performed before the ScaleResampleFilter() method, so that the % size of the ellipse will match the support for the resampling filter being % used. % % The format of the ScaleResampleFilter method is: % % void ScaleResampleFilter(const ResampleFilter *resample_filter, % const double dux,const double duy,const double dvx,const double dvy) % % A description of each parameter follows: % % o resample_filter: the resampling resample_filterrmation defining the % image being resampled % % o dux,duy,dvx,dvy: % The deritives or scaling vectors defining the EWA ellipse. % NOTE: watch the order, which is based on the order deritives % are usally determined from distortion equations (see above). % The middle two values may need to be swapped if you are thinking % in terms of scaling vectors. % */ MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter, const double dux,const double duy,const double dvx,const double dvy) { double A,B,C,F; assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickSignature); resample_filter->limit_reached = MagickFalse; /* A 'point' filter forces use of interpolation instead of area sampling */ if ( resample_filter->filter == PointFilter ) return; /* EWA turned off - nothing to do */ #if DEBUG_ELLIPSE fprintf(stderr, "# -----\n" ); fprintf(stderr, "dux=%lf; dvx=%lf; duy=%lf; dvy=%lf;\n", dux, dvx, duy, dvy); #endif /* Find Ellipse Coefficents such that A*u^2 + B*u*v + C*v^2 = F With u,v relative to point around which we are resampling. And the given scaling dx,dy vectors in u,v space du/dx,dv/dx and du/dy,dv/dy */ #if EWA /* Direct conversion of derivatives into elliptical coefficients However when magnifying images, the scaling vectors will be small resulting in a ellipse that is too small to sample properly. As such we need to clamp the major/minor axis to a minumum of 1.0 to prevent it getting too small. */ #if EWA_CLAMP { double major_mag, minor_mag, major_x, major_y, minor_x, minor_y; ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag, &major_x, &major_y, &minor_x, &minor_y); major_x *= major_mag; major_y *= major_mag; minor_x *= minor_mag; minor_y *= minor_mag; #if DEBUG_ELLIPSE fprintf(stderr, "major_x=%lf; major_y=%lf; minor_x=%lf; minor_y=%lf;\n", major_x, major_y, minor_x, minor_y); #endif A = major_y*major_y+minor_y*minor_y; B = -2.0*(major_x*major_y+minor_x*minor_y); C = major_x*major_x+minor_x*minor_x; F = major_mag*minor_mag; F *= F; /* square it */ } #else /* raw unclamped EWA */ A = dvx*dvx+dvy*dvy; B = -2.0*(dux*dvx+duy*dvy); C = dux*dux+duy*duy; F = dux*dvy-duy*dvx; F *= F; /* square it */ #endif /* EWA_CLAMP */ #else /* HQ_EWA */ /* This Paul Heckbert's "Higher Quality EWA" formula, from page 60 in his thesis, which adds a unit circle to the elliptical area so as to do both Reconstruction and Prefiltering of the pixels in the resampling. It also means it is always likely to have at least 4 pixels within the area of the ellipse, for weighted averaging. No scaling will result with F == 4.0 and a circle of radius 2.0, and F smaller than this means magnification is being used. 
NOTE: This method produces a very blury result at near unity scale while producing perfect results for strong minitification and magnifications. However filter support is fixed to 2.0 (no good for Windowed Sinc filters) */ A = dvx*dvx+dvy*dvy+1; B = -2.0*(dux*dvx+duy*dvy); C = dux*dux+duy*duy+1; F = A*C - B*B/4; #endif #if DEBUG_ELLIPSE fprintf(stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F); /* Figure out the various information directly about the ellipse. This information currently not needed at this time, but may be needed later for better limit determination. It is also good to have as a record for future debugging */ { double alpha, beta, gamma, Major, Minor; double Eccentricity, Ellipse_Area, Ellipse_Angle; alpha = A+C; beta = A-C; gamma = sqrt(beta*beta + B*B ); if ( alpha - gamma <= MagickEpsilon ) Major = MagickHuge; else Major = sqrt(2*F/(alpha - gamma)); Minor = sqrt(2*F/(alpha + gamma)); fprintf(stderr, "# Major=%lf; Minor=%lf\n", Major, Minor ); /* other information about ellipse include... */ Eccentricity = Major/Minor; Ellipse_Area = MagickPI*Major*Minor; Ellipse_Angle = atan2(B, A-C); fprintf(stderr, "# Angle=%lf Area=%lf\n", RadiansToDegrees(Ellipse_Angle), Ellipse_Area); } #endif /* If one or both of the scaling vectors is impossibly large (producing a very large raw F value), we may as well not bother doing any form of resampling since resampled area is very large. In this case some alternative means of pixel sampling, such as the average of the whole image is needed to get a reasonable result. Calculate only as needed. */ if ( (4*A*C - B*B) > MagickHuge ) { resample_filter->limit_reached = MagickTrue; return; } /* Scale ellipse to match the filters support (that is, multiply F by the square of the support). */ F *= resample_filter->support; F *= resample_filter->support; /* Orthogonal bounds of the ellipse */ resample_filter->Ulimit = sqrt(4*C*F/(4*A*C-B*B)); resample_filter->Vlimit = sqrt(4*A*F/(4*A*C-B*B)); /* Horizontally aligned parallelogram fitted to Ellipse */ resample_filter->Uwidth = sqrt(F/A); /* Half of the parallelogram width */ resample_filter->slope = -B/(2*A); /* Reciprocal slope of the parallelogram */ #if DEBUG_ELLIPSE fprintf(stderr, "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n", resample_filter->Ulimit, resample_filter->Vlimit, resample_filter->Uwidth, resample_filter->slope ); #endif /* Check the absolute area of the parallelogram involved. * This limit needs more work, as it is too slow for larger images * with tiled views of the horizon. */ if ( (resample_filter->Uwidth * resample_filter->Vlimit) > (4.0*resample_filter->image_area)) { resample_filter->limit_reached = MagickTrue; return; } /* Scale ellipse formula to directly index the Filter Lookup Table */ { register double scale; #if FILTER_LUT /* scale so that F = WLUT_WIDTH; -- hardcoded */ scale = (double)WLUT_WIDTH/F; #else /* scale so that F = resample_filter->F (support^2) */ scale = resample_filter->F/F; #endif resample_filter->A = A*scale; resample_filter->B = B*scale; resample_filter->C = C*scale; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t R e s a m p l e F i l t e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetResampleFilter() set the resampling filter lookup table based on a % specific filter. Note that the filter is used as a radial filter not as a % two pass othogonally aligned resampling filter. 
%
%  The default filter is Gaussian, which is the standard filter used by the
%  original paper on the Elliptical Weighted Average Algorithm.  However
%  other filters can also be used.
%
%  The format of the SetResampleFilter method is:
%
%      void SetResampleFilter(ResampleFilter *resample_filter,
%        const FilterTypes filter,const double blur)
%
%  A description of each parameter follows:
%
%    o resample_filter: resampling information structure
%
%    o filter: the resize filter for elliptical weighting LUT
%
%    o blur: filter blur factor (radial scaling) for elliptical weighting LUT
%
*/
MagickExport void SetResampleFilter(ResampleFilter *resample_filter,
  const FilterTypes filter,const double blur)
{
  ResizeFilter
    *resize_filter;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickSignature);

  resample_filter->do_interpolate = MagickFalse;
  resample_filter->filter = filter;

  if ( filter == PointFilter )
    {
      resample_filter->do_interpolate = MagickTrue;
      return;  /* EWA turned off - nothing more to do */
    }

  /* Set a default cylindrical filter of a 'low blur' Jinc windowed Jinc */
  if ( filter == UndefinedFilter )
    resample_filter->filter = RobidouxFilter;

  resize_filter = AcquireResizeFilter(resample_filter->image,
    resample_filter->filter,blur,MagickTrue,resample_filter->exception);
  if (resize_filter == (ResizeFilter *) NULL)
    {
      (void) ThrowMagickException(resample_filter->exception,GetMagickModule(),
        ModuleError, "UnableToSetFilteringValue",
        "Fall back to default EWA gaussian filter");
      resample_filter->filter = PointFilter;
    }

  /* Get the practical working support for the filter,
   * after any API call blur factors have been accounted for.
  */
#if EWA
  resample_filter->support = GetResizeFilterSupport(resize_filter);
#else
  resample_filter->support = 2.0;  /* fixed support size for HQ-EWA */
#endif

#if FILTER_LUT
  /* Fill the LUT with the weights from the selected filter function */
  { register int
      Q;
    double
      r_scale;

    /* Scale radius so the filter LUT covers the full support range */
    r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
    for(Q=0; Q<WLUT_WIDTH; Q++)
      resample_filter->filter_lut[Q] = (double)
           GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale);

    /* finished with the resize filter */
    resize_filter = DestroyResizeFilter(resize_filter);
  }
#else
  /* save the filter and the scaled ellipse bounds needed for filter */
  resample_filter->filter_def = resize_filter;
  resample_filter->F = resample_filter->support*resample_filter->support;
#endif

  /*
    Adjust the scaling of the default unit circle
    This assumes that any real scaling changes will always take place
    AFTER the filter method has been initialized.
  */
  ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0);

#if 0
  /*
    This is old code kept as a reference only.  It is very wrong, and I don't
    understand exactly what it was attempting to do.
  */
  /*
    Create Normal Gaussian 2D Filter Weighted Lookup Table.
    A normal EWA gaussian lookup would use   exp(Q*ALPHA)
    where  Q = distance squared from 0.0 (center) to 1.0 (edge)
    and    ALPHA = -4.0*ln(2.0)  ==>  -2.77258872223978123767
    The table is of length 1024, and equates to a support radius of 2.0,
    thus needs to be scaled by  ALPHA*4/1024  and any blur factor squared.

    The above came from some reference code provided by Fred Weinhaus and
    seems to have been a guess that was appropriate for its use in a 3d
    perspective landscape mapping program.
  */
  r_scale = -2.77258872223978123767/(WLUT_WIDTH*blur*blur);
  for(Q=0; Q<WLUT_WIDTH; Q++)
    resample_filter->filter_lut[Q] = exp((double)Q*r_scale);
  resample_filter->support = WLUT_WIDTH;
  break;
#endif

#if FILTER_LUT
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp single
#endif
  { register int
      Q;
    double
      r_scale;

    /* Scale radius so the filter LUT covers the full support range */
    r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
    if (IsMagickTrue(GetImageArtifact(resample_filter->image,"resample:verbose")) )
      {
        /* Debug output of the filter weighting LUT.
          Gnuplot the LUT with the horizontal axis adjusted to 'r' using...
             plot [0:2][-.2:1] "lut.dat" using (sqrt($0/1024)*2):1 with lines
          The filter values are normalized for comparison.
        */
        printf("#\n");
        printf("# Resampling Filter LUT (%d values)\n", WLUT_WIDTH);
        printf("#\n");
        printf("# Note: values in table are using a squared radius lookup.\n");
        printf("# And the whole table represents the filter's support.\n");
        printf("\n"); /* generates a 'break' in gnuplot if multiple outputs */
        for(Q=0; Q<WLUT_WIDTH; Q++)
          printf("%8.*g %.*g\n",
              GetMagickPrecision(),sqrt((double)Q)*r_scale,
              GetMagickPrecision(),resample_filter->filter_lut[Q] );
      }
    /* output the above once only for each image, and each setting */
    (void) DeleteImageArtifact(resample_filter->image,"resample:verbose");
  }
#endif /* FILTER_LUT */
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   R e s a m p l e   F i l t e r   I n t e r p o l a t e   M e t h o d   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetResampleFilterInterpolateMethod() sets the resample filter
%  interpolation method.
%
%  The format of the SetResampleFilterInterpolateMethod method is:
%
%      MagickBooleanType SetResampleFilterInterpolateMethod(
%        ResampleFilter *resample_filter,const InterpolatePixelMethod method)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resample filter.
%
%    o method: the interpolation method.
%
*/
MagickExport MagickBooleanType SetResampleFilterInterpolateMethod(
  ResampleFilter *resample_filter,const InterpolatePixelMethod method)
{
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  resample_filter->interpolate=method;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   R e s a m p l e   F i l t e r   V i r t u a l   P i x e l   M e t h o d   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetResampleFilterVirtualPixelMethod() changes the virtual pixel method
%  associated with the specified resample filter.
%
%  The format of the SetResampleFilterVirtualPixelMethod method is:
%
%      MagickBooleanType SetResampleFilterVirtualPixelMethod(
%        ResampleFilter *resample_filter,const VirtualPixelMethod method)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resample filter.
%
%    o method: the virtual pixel method.
% */ MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod( ResampleFilter *resample_filter,const VirtualPixelMethod method) { assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickSignature); assert(resample_filter->image != (Image *) NULL); if (resample_filter->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", resample_filter->image->filename); resample_filter->virtual_pixel=method; if (method != UndefinedVirtualPixelMethod) (void) SetCacheViewVirtualPixelMethod(resample_filter->view,method); return(MagickTrue); }
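
The ellipse algebra in ScaleResampleFilter() is easy to get wrong, so here is a minimal standalone sketch (not part of the MagickCore API; all names below are mine) that builds the raw unclamped EWA coefficients quoted above and tests whether an offset (u,v) from the sampling point lies inside the ellipse A*u^2 + B*u*v + C*v^2 <= F.

/* Standalone illustration of the unclamped EWA coefficient formulas used
   above; with the identity transform (du/dx=1, dv/dy=1) the ellipse is the
   unit circle u*u + v*v <= 1. */
#include <stdio.h>

typedef struct { double A, B, C, F; } Ellipse;

static Ellipse ewa_coefficients(double dux, double duy, double dvx, double dvy)
{
  Ellipse e;
  e.A = dvx*dvx + dvy*dvy;
  e.B = -2.0*(dux*dvx + duy*dvy);
  e.C = dux*dux + duy*duy;
  e.F = dux*dvy - duy*dvx;
  e.F *= e.F;  /* square it, as in ScaleResampleFilter() */
  return e;
}

static int inside_ellipse(const Ellipse *e, double u, double v)
{
  return e->A*u*u + e->B*u*v + e->C*v*v <= e->F;
}

int main(void)
{
  Ellipse e = ewa_coefficients(1.0, 0.0, 0.0, 1.0);
  /* expect A=1 B=0 C=1 F=1, (0.5,0.5) inside, (1.0,0.5) outside */
  printf("A=%g B=%g C=%g F=%g inside(0.5,0.5)=%d inside(1.0,0.5)=%d\n",
         e.A, e.B, e.C, e.F,
         inside_ellipse(&e, 0.5, 0.5), inside_ellipse(&e, 1.0, 0.5));
  return 0;
}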
Domain.h
/************************************************************************
 * MechSys - Open Library for Mechanical Systems                        *
 * Copyright (C) 2014 Sergio Galindo                                    *
 *                                                                      *
 * This program is free software: you can redistribute it and/or modify *
 * it under the terms of the GNU General Public License as published by *
 * the Free Software Foundation, either version 3 of the License, or    *
 * any later version.                                                   *
 *                                                                      *
 * This program is distributed in the hope that it will be useful,      *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of       *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the         *
 * GNU General Public License for more details.                         *
 *                                                                      *
 * You should have received a copy of the GNU General Public License    *
 * along with this program. If not, see <http://www.gnu.org/licenses/>  *
 ************************************************************************/

#ifndef MECHSYS_EMLBM_DOMAIN_H
#define MECHSYS_EMLBM_DOMAIN_H

// STD
#include <map>
#include <vector>
#include <utility>
#include <set>

// MechSys
#include <mechsys/emlbm2/Lattice.h>

using std::set;
using std::map;
using std::pair;
using std::make_pair;

namespace EMLBM
{

class Domain
{
public:
    //typedefs
    typedef void (*ptDFun_t) (Domain & Dom, void * UserData);

    //Constructors
    Domain (iVec3_t Ndim,  ///< Cell divisions per side
            double  dx,    ///< Grid spacing
            double  dt);   ///< Time step

    //Methods
#ifdef USE_HDF5
    void WriteXDMF (char const * FileKey);  ///< Write the domain data in an xdmf file
#endif
    void Initialize (double dt=0.0);  ///< Set the particles to an initial state and assign the possible interactions
    void Collide    (size_t Np = 1);  ///< Apply the interaction forces and the collision operator
    void Solve(double Tf, double dtOut, ptDFun_t ptSetup=NULL, ptDFun_t ptReport=NULL,
               char const * FileKey=NULL, bool RenderVideo=true, size_t Nproc=1);  ///< Solve the Domain dynamics

    //Data
    bool    Initialized;  ///< System (particles and interactions) initialized?
bool PrtVec; ///< Print Vector data into the xdmf-h5 files bool Finished; ///< Has the simulation finished String FileKey; ///< File Key for output files Lattice Lat; ///< Fluid Lattices double Time; ///< Time of the simulation double dt; ///< Timestep void * UserData; ///< User Data size_t idx_out; ///< The discrete time step size_t Step; ///< The space step to reduce the size of the h5 file for visualization size_t Nproc; ///< Number of cores used for the simulation }; inline Domain::Domain(iVec3_t Ndim, double Thedx, double Thedt) { Initialized = false; Util::Stopwatch stopwatch; printf("\n%s--- Initializing LBM Domain --------------------------------------------%s\n",TERM_CLR1,TERM_RST); Lat = Lattice(Ndim,Thedx,Thedt); Time = 0.0; dt = Thedt; Step = 1; PrtVec = true; printf("%s Num of cells = %zd%s\n",TERM_CLR2,Lat.Ncells,TERM_RST); } #ifdef USE_HDF5 inline void Domain::WriteXDMF(char const * FileKey) { String fn(FileKey); fn.append(".h5"); hid_t file_id; file_id = H5Fcreate(fn.CStr(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); size_t Nx = Lat.Ndim[0]/Step; size_t Ny = Lat.Ndim[1]/Step; size_t Nz = Lat.Ndim[2]/Step; // Creating data sets float * Sig = new float[ Nx*Ny*Nz]; float * Mu = new float[ Nx*Ny*Nz]; float * Eps = new float[ Nx*Ny*Nz]; float * Rho = new float[ Nx*Ny*Nz]; float * Cur = new float[3*Nx*Ny*Nz]; float * Bvec = new float[3*Nx*Ny*Nz]; float * Evec = new float[3*Nx*Ny*Nz]; float * State = new float[4*Lat.GetCell(iVec3_t(0,0,0))->Nneigh*Nx*Ny*Nz]; size_t i=0; for (size_t m=0;m<Lat.Ndim(2);m+=Step) for (size_t l=0;l<Lat.Ndim(1);l+=Step) for (size_t n=0;n<Lat.Ndim(0);n+=Step) { double sig = 0.0; double mu = 0.0; double eps = 0.0; double cha = 0.0; Vec3_t cur = OrthoSys::O; Vec3_t bvec = OrthoSys::O; Vec3_t evec = OrthoSys::O; for (size_t ni=0;ni<Step;ni++) for (size_t li=0;li<Step;li++) for (size_t mi=0;mi<Step;mi++) { sig += Lat.GetCell(iVec3_t(n+ni,l+li,m+mi))->Sig; mu += Lat.GetCell(iVec3_t(n+ni,l+li,m+mi))->Mu; eps += Lat.GetCell(iVec3_t(n+ni,l+li,m+mi))->Eps; cha += Lat.GetCell(iVec3_t(n+ni,l+li,m+mi))->Rho; cur (0) += Lat.GetCell(iVec3_t(n+ni,l+li,m+mi))->J[0]; cur (1) += Lat.GetCell(iVec3_t(n+ni,l+li,m+mi))->J[1]; cur (2) += Lat.GetCell(iVec3_t(n+ni,l+li,m+mi))->J[2]; bvec(0) += Lat.GetCell(iVec3_t(n+ni,l+li,m+mi))->B[0]; bvec(1) += Lat.GetCell(iVec3_t(n+ni,l+li,m+mi))->B[1]; bvec(2) += Lat.GetCell(iVec3_t(n+ni,l+li,m+mi))->B[2]; evec(0) += Lat.GetCell(iVec3_t(n+ni,l+li,m+mi))->E[0]; evec(1) += Lat.GetCell(iVec3_t(n+ni,l+li,m+mi))->E[1]; evec(2) += Lat.GetCell(iVec3_t(n+ni,l+li,m+mi))->E[2]; } sig /= Step*Step*Step; mu /= Step*Step*Step; eps /= Step*Step*Step; cha /= Step*Step*Step; cur /= Step*Step*Step; bvec /= Step*Step*Step; evec /= Step*Step*Step; Sig [i] = (float) sig; Mu [i] = (float) mu; Eps [i] = (float) eps; Rho [i] = (float) cha; Cur [3*i ] = (float) cur (0); Cur [3*i+1] = (float) cur (1); Cur [3*i+2] = (float) cur (2); Bvec[3*i ] = (float) bvec(0); Bvec[3*i+1] = (float) bvec(1); Bvec[3*i+2] = (float) bvec(2); Evec[3*i ] = (float) evec(0); Evec[3*i+1] = (float) evec(1); Evec[3*i+2] = (float) evec(2); i++; } size_t i_cell=0; for (size_t m=0;m<Lat.Ndim(2);m+=1) for (size_t l=0;l<Lat.Ndim(1);l+=1) for (size_t n=0;n<Lat.Ndim(0);n+=1) for (size_t k=0;k<Lat.GetCell(iVec3_t(0,0,0))->Nneigh;k+=1) { State[i_cell] = Lat.GetCell(iVec3_t(n,l,m))->FE[0][k]; i_cell++; State[i_cell] = Lat.GetCell(iVec3_t(n,l,m))->FE[1][k]; i_cell++; State[i_cell] = Lat.GetCell(iVec3_t(n,l,m))->FB[0][k]; i_cell++; State[i_cell] = Lat.GetCell(iVec3_t(n,l,m))->FB[1][k]; i_cell++; } //Write 
the data
    hsize_t dims[1];
    dims[0] = Nx*Ny*Nz;
    String dsname;
    dsname.Printf("Charge");
    H5LTmake_dataset_float(file_id,dsname.CStr(),1,dims,Rho );
    dsname.Printf("Sigma");
    H5LTmake_dataset_float(file_id,dsname.CStr(),1,dims,Sig );
    dsname.Printf("Mu");
    H5LTmake_dataset_float(file_id,dsname.CStr(),1,dims,Mu  );
    dsname.Printf("Epsilon");
    H5LTmake_dataset_float(file_id,dsname.CStr(),1,dims,Eps );
    if (PrtVec)
    {
        dims[0] = 3*Nx*Ny*Nz;
        dsname.Printf("Current");
        H5LTmake_dataset_float(file_id,dsname.CStr(),1,dims,Cur );
        dsname.Printf("MagField");
        H5LTmake_dataset_float(file_id,dsname.CStr(),1,dims,Bvec);
        dsname.Printf("ElecField");
        H5LTmake_dataset_float(file_id,dsname.CStr(),1,dims,Evec);
    }
    dims[0] = 4*Lat.GetCell(iVec3_t(0,0,0))->Nneigh*Nx*Ny*Nz;
    dsname.Printf("State");
    H5LTmake_dataset_float(file_id,dsname.CStr(),1,dims,State );
    dims[0] = 1;
    int N[1];
    N[0] = Nx;
    dsname.Printf("Nx");
    H5LTmake_dataset_int(file_id,dsname.CStr(),1,dims,N);
    dims[0] = 1;
    N[0] = Ny;
    dsname.Printf("Ny");
    H5LTmake_dataset_int(file_id,dsname.CStr(),1,dims,N);
    dims[0] = 1;
    N[0] = Nz;
    dsname.Printf("Nz");
    H5LTmake_dataset_int(file_id,dsname.CStr(),1,dims,N);

    delete [] Sig  ;
    delete [] Mu   ;
    delete [] Eps  ;
    delete [] Rho  ;
    delete [] Cur  ;
    delete [] Bvec ;
    delete [] Evec ;
    delete [] State;

    //Closing the file
    H5Fflush(file_id,H5F_SCOPE_GLOBAL);
    H5Fclose(file_id);

    // Writing the xmf file
    std::ostringstream oss;
    oss << "<?xml version=\"1.0\" ?>\n";
    oss << "<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\" []>\n";
    oss << "<Xdmf Version=\"2.0\">\n";
    oss << " <Domain>\n";
    oss << "   <Grid Name=\"EMLBM_Mesh\" GridType=\"Uniform\">\n";
    oss << "     <Topology TopologyType=\"3DCoRectMesh\" Dimensions=\"" << Nz << " " << Ny << " " << Nx << "\"/>\n";
    oss << "     <Geometry GeometryType=\"ORIGIN_DXDYDZ\">\n";
    oss << "       <DataItem Format=\"XML\" NumberType=\"Float\" Dimensions=\"3\"> 0.0 0.0 0.0\n";
    oss << "       </DataItem>\n";
    oss << "       <DataItem Format=\"XML\" NumberType=\"Float\" Dimensions=\"3\"> " << Step*Lat.dx << " " << Step*Lat.dx << " " << Step*Lat.dx << "\n";
    oss << "       </DataItem>\n";
    oss << "     </Geometry>\n";
    oss << "     <Attribute Name=\"Sigma" << "\" AttributeType=\"Scalar\" Center=\"Node\">\n";
    oss << "       <DataItem Dimensions=\"" << Nz << " " << Ny << " " << Nx << "\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n";
    oss << "        " << fn.CStr() <<":/Sigma" << "\n";
    oss << "       </DataItem>\n";
    oss << "     </Attribute>\n";
    oss << "     <Attribute Name=\"Mu" << "\" AttributeType=\"Scalar\" Center=\"Node\">\n";
    oss << "       <DataItem Dimensions=\"" << Nz << " " << Ny << " " << Nx << "\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n";
    oss << "        " << fn.CStr() <<":/Mu" << "\n";
    oss << "       </DataItem>\n";
    oss << "     </Attribute>\n";
    oss << "     <Attribute Name=\"Epsilon" << "\" AttributeType=\"Scalar\" Center=\"Node\">\n";
    oss << "       <DataItem Dimensions=\"" << Nz << " " << Ny << " " << Nx << "\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n";
    oss << "        " << fn.CStr() <<":/Epsilon" << "\n";
    oss << "       </DataItem>\n";
    oss << "     </Attribute>\n";
    oss << "     <Attribute Name=\"Charge" << "\" AttributeType=\"Scalar\" Center=\"Node\">\n";
    oss << "       <DataItem Dimensions=\"" << Nz << " " << Ny << " " << Nx << "\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n";
    oss << "        " << fn.CStr() <<":/Charge" << "\n";
    oss << "       </DataItem>\n";
    oss << "     </Attribute>\n";
    if (PrtVec)
    {
    oss << "     <Attribute Name=\"Current" << "\" AttributeType=\"Vector\" Center=\"Node\">\n";
    oss << "       <DataItem Dimensions=\"" << Nz << " " << Ny << " " << Nx << " 3\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n";
oss << " " << fn.CStr() <<":/Current" << "\n"; oss << " </DataItem>\n"; oss << " </Attribute>\n"; oss << " <Attribute Name=\"MagField" << "\" AttributeType=\"Vector\" Center=\"Node\">\n"; oss << " <DataItem Dimensions=\"" << Nz << " " << Ny << " " << Nx << " 3\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n"; oss << " " << fn.CStr() <<":/MagField" << "\n"; oss << " </DataItem>\n"; oss << " </Attribute>\n"; oss << " <Attribute Name=\"ElecField" << "\" AttributeType=\"Vector\" Center=\"Node\">\n"; oss << " <DataItem Dimensions=\"" << Nz << " " << Ny << " " << Nx << " 3\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n"; oss << " " << fn.CStr() <<":/ElecField" << "\n"; oss << " </DataItem>\n"; oss << " </Attribute>\n"; } oss << " </Grid>\n"; oss << " </Domain>\n"; oss << "</Xdmf>\n"; fn = FileKey; fn.append(".xmf"); std::ofstream of(fn.CStr(), std::ios::out); of << oss.str(); of.close(); } #endif void Domain::Collide (size_t Np) { #ifdef USE_OMP #pragma omp parallel for schedule (static) num_threads(Np) #endif for (size_t i=0;i<Lat.Ncells;i++) { Cell * c = Lat.Cells[i]; c->F0[0] = c->F0[0] - 2.0*(c->F0[0] - c->Rho); c->F0[1] = c->F0[1] - 2.0*(c->F0[1] - c->Rho); for (size_t k=0;k<c->Nneigh;k++) { for (size_t mu=0;mu<2;mu++) { c->FEtemp[mu][k] = c->FE[mu][k] - 2.0*(c->FE[mu][k] - c->FEeq(mu,k)); c->FBtemp[mu][k] = c->FB[mu][k] - 2.0*(c->FB[mu][k] - c->FBeq(mu,k)); } } for (size_t k=0;k<c->Nneigh;k++) { for (size_t mu=0;mu<2;mu++) { c->FE[mu][k] = c->FEtemp[mu][k]; c->FB[mu][k] = c->FBtemp[mu][k]; } } } } inline void Domain::Solve(double Tf, double dtOut, ptDFun_t ptSetup, ptDFun_t ptReport, char const * TheFileKey, bool RenderVideo, size_t TheNproc) { idx_out = 0; FileKey.Printf("%s",TheFileKey); Finished = false; // info Util::Stopwatch stopwatch; printf("\n%s--- Solving ---------------------------------------------------------------------%s\n",TERM_CLR1 , TERM_RST); printf("%s Time step = %g%s\n" ,TERM_CLR2, dt , TERM_RST); Nproc = TheNproc; //for (size_t j=0;j<Lat.Size();j++) //{ //for (size_t i=0;i<Lat[j].Ncells;i++) //{ //Lat[j].Cells[i]->Initialize(); //Lat[j].Cells[i]->CalcProp(); //} //} double tout = Time; while (Time < Tf) { if (ptSetup!=NULL) (*ptSetup) ((*this), UserData); if (Time >= tout) { if (TheFileKey!=NULL) { String fn; fn.Printf ("%s_%04d", TheFileKey, idx_out); if ( RenderVideo) { #ifdef USE_HDF5 WriteXDMF(fn.CStr()); #else //WriteVTK (fn.CStr()); #endif } if (ptReport!=NULL) (*ptReport) ((*this), UserData); } tout += dtOut; idx_out++; } #ifdef USE_OMP Collide(Nproc); Lat.Stream1 (Nproc); Lat.Stream2 (Nproc); Lat.CalcField(Nproc); #endif Time += dt; } // last output Finished = true; if (ptReport!=NULL) (*ptReport) ((*this), UserData); printf("%s Final CPU time = %s\n",TERM_CLR2, TERM_RST); } } #endif
randarray.c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <unistd.h>
#include <math.h>
#include <time.h>

#include "timer.h"

#ifdef WITH_OPENMP
#include <omp.h>
#else
// stubs for compiling without omp
#define omp_get_thread_num() 0
#define omp_set_num_threads(...)
#endif

#ifdef WITH_MD5
#include <openssl/md5.h>
#endif

#define RD_STATE_LEN 128

#define CHECKPOINT fprintf(stderr, "%s:%d CHECKPOINT\n", __func__, __LINE__);

#define MALLOC(var, size) do { \
    var = malloc(size); \
    if (var == NULL) { \
        fprintf(stderr, "%s: malloc of %zu bytes failed for " #var "\n", __func__, size); \
        exit(1); \
    }} while(0)

#define CALLOC(var, size1, size2) do { \
    var = calloc(size1, size2); \
    if (var == NULL) { \
        fprintf(stderr, "%s: calloc of %lu bytes failed for " #var "\n", __func__, (long unsigned int)(size1 * size2)); \
        exit(1); \
    }} while(0)

extern double get_hsize(const char *str);

static void usage(void)
{
    static const char usage_text[] =
        "Usage: randarray OPTIONS\n"
        "\n"
        "Arguments:\n"
        " -n SIZE    The size of the array in bytes. Should be a multiple of 4\n"
        "            (suffixes k,m,g,K,M,G for SI and Binary units can be used)\n"
        " -s SEED    The initial seed for repeatable runs\n"
        " -T THREADS number of threads\n"
        " -q         Run quietly and print just the number of seconds to stdout\n"
        " -m         Print the md5 of the generated data\n"
        ;
    puts(usage_text);
}

int main(int argc, char *argv[])
{
    int nthreads = 1;
    size_t arr_size = 1024;
    bool quiet = false;
    unsigned int start_seed = time(NULL);
#ifdef WITH_MD5
    bool use_md5 = false;
#endif
    unsigned int *seeds;
    int32_t *arr;
    atimer_t timer;

    // parse arguments
    int opt;
    while ((opt = getopt(argc, argv, "hmn:qs:T:")) != -1) {
        switch (opt) {
        case 'h':
            usage();
            exit(0);
            break;
        case 'm':
#ifdef WITH_MD5
            use_md5 = true;
#else
            fprintf(stderr, "no MD5 (openssl) support compiled in\n");
#endif
            break;
        case 'n': {
            double size_bytes = get_hsize(optarg);
            if (size_bytes < 0)
                exit(1);
            arr_size = lround(size_bytes / (double)(sizeof(*arr)));
        }
            break;
        case 'q':
            quiet = true;
            break;
        case 's':
            start_seed = strtoul(optarg, NULL, 0);
            break;
        case 'T':
#ifdef WITH_OPENMP
            nthreads = strtol(optarg, NULL, 0);
#else
            fprintf(stderr, "compiled without OMP, single-thread only\n");
#endif
            break;
        default:
            usage();
            exit(1);
            break;
        }
    }

    if (!quiet) {
        printf("Using %d threads\n", nthreads);
        printf("Using array size %zu (%zu bytes)\n", arr_size, arr_size * sizeof(*arr));
    }

    MALLOC(arr, arr_size * sizeof(*arr));

    // init seeds
    MALLOC(seeds, nthreads * sizeof(*seeds));
    srandom(start_seed);
    for (int i = 0; i < nthreads; i++)
        seeds[i] = random();

    omp_set_num_threads(nthreads);

    // init and start timer, immediately before parallel region
    timer_init(&timer);
    timer_start(&timer);

    /* start threads */
#pragma omp parallel
    {
        // everything defined in this block is thread-private
        // everything defined outside of the parallel region is shared
        struct random_data *rd;
        char *rd_state;
        int tnum = omp_get_thread_num();

        // init prng
        CALLOC(rd, 1, sizeof(*rd));
        CALLOC(rd_state, 1, RD_STATE_LEN);
        initstate_r(seeds[tnum], rd_state, RD_STATE_LEN, rd);
        srandom_r(seeds[tnum], rd);

        /* magic happens here */
#pragma omp for
        for (size_t i = 0; i < arr_size; i++)
            random_r(rd, &arr[i]);

        free(rd);
        free(rd_state);
    } /* end parallel region */

    timer_stop(&timer);
    free(seeds);

    if (quiet) {
        timer_print_sec(&timer, stdout);
    } else {
        printf("finished in ");
        timer_print(&timer, stdout);
    }
    printf("\n");

#ifdef WITH_MD5
    if (use_md5) {
        unsigned char md[16];
        if (!quiet)
            printf("calculating MD5...\n");
        timer_init(&timer);
        timer_start(&timer);
        MD5((const unsigned char
*)arr, arr_size * sizeof(*arr), md); timer_stop(&timer); for (int i = 0; i < 16; i++) printf("%02x", (unsigned int)md[i]); printf("\n"); if (!quiet) { printf("md5 calculation took "); timer_print(&timer, stdout); printf("\n"); } } #endif free(arr); return 0; }
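
randarray.c declares get_hsize() extern and defines it elsewhere in the project. As a reference for what the "-n" suffix parsing could look like, here is one plausible stand-in (hence the _sketch suffix); the mapping of lowercase suffixes to SI units and uppercase to binary units is an assumption, not taken from the real implementation.

/* Hypothetical stand-in for get_hsize(): parse "16m", "4K", etc. into bytes.
   Returns a negative value on error, matching the `size_bytes < 0` check in
   main(). Assumption: lowercase = SI (powers of 1000), uppercase = binary
   (powers of 1024) -- verify against the project's real definition. */
#include <stdlib.h>

double get_hsize_sketch(const char *str)
{
    char *end;
    double v = strtod(str, &end);
    if (end == str)
        return -1.0;  /* no leading number */
    switch (*end) {
    case '\0': return v;
    case 'k':  return v * 1e3;
    case 'm':  return v * 1e6;
    case 'g':  return v * 1e9;
    case 'K':  return v * 1024.0;
    case 'M':  return v * 1024.0 * 1024.0;
    case 'G':  return v * 1024.0 * 1024.0 * 1024.0;
    default:   return -1.0;  /* unknown suffix */
    }
}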
bude.c
#include <float.h> #include <math.h> #include <stdio.h> #include <string.h> #include <sys/time.h> #include <sys/stat.h> #if defined(__APPLE__) #include <OpenCL/OpenCL.h> #else #include <CL/cl.h> #include <omp.h> #endif #define MAX_PLATFORMS 8 #define MAX_DEVICES 32 #define MAX_INFO_STRING 256 #define DATA_DIR "../data/bm1" #define FILE_LIGAND "/ligand.in" #define FILE_PROTEIN "/protein.in" #define FILE_FORCEFIELD "/forcefield.in" #define FILE_POSES "/poses.in" #define FILE_REF_ENERGIES "/ref_energies.out" #define REF_NPOSES 65536 #define FILE_KERNEL "budeMultiTD.cl" // Energy evaluation parameters #define CNSTNT 45.0f #define HBTYPE_F 70 #define HBTYPE_E 69 #define HARDNESS 38.0f #define NPNPDIST 5.5f #define NPPDIST 1.0f typedef struct { cl_float x, y, z; cl_int type; } Atom; typedef struct { cl_int hbtype; cl_float radius; cl_float hphb; cl_float elsc; } FFParams; struct { cl_int natlig; cl_int natpro; cl_int ntypes; cl_int nposes; Atom *restrict protein; Atom *restrict ligand; FFParams *restrict forcefield; float *restrict poses[6]; int iterations; int run_omp; } params = {0}; struct { cl_device_id device; cl_context context; cl_command_queue queue; cl_program program; cl_kernel kernel; int deviceIndex; int wgsize; int posesPerWI; char *deckDir; } cl = {0}; double getTimestamp(); void loadParameters(int argc, char *argv[]); void freeParameters(); void printTimings(double start, double end, double poses_per_wi); void initCL(); unsigned getDevices(cl_device_id devices[MAX_DEVICES]); void getDeviceName(cl_device_id device, char name[MAX_INFO_STRING]); void releaseCL(); void checkError(cl_int err, const char *op); void runOpenMP(float *energies); void runOpenCL(float *energies); FILE* openFile(const char *parent, const char *child, const char* mode, long *length) { char name[strlen(parent) + strlen(child) + 1]; strcpy(name, parent); strcat(name, child); FILE *file = NULL; if (!(file = fopen(name, mode))) { fprintf(stderr, "Failed to open '%s'\n", name); exit(1); } if(length){ fseek(file, 0, SEEK_END); *length = ftell(file); rewind(file); } return file; } int main(int argc, char *argv[]) { loadParameters(argc, argv); printf("\n"); printf("Poses : %d\n", params.nposes); printf("Iterations: %d\n", params.iterations); printf("Ligands : %d\n", params.natlig); printf("Proteins : %d\n", params.natpro); printf("Deck : %s\n", cl.deckDir); float maxdiff = -100.0f; size_t n_ref_poses = params.nposes; float *energiesOCL = malloc(params.nposes*sizeof(float)); float *energiesOMP = malloc(params.nposes*sizeof(float)); runOpenCL(energiesOCL); if (params.run_omp) runOpenMP(energiesOMP); else { // Load reference results from file FILE* ref_energies = openFile(cl.deckDir, FILE_REF_ENERGIES, "r", NULL); if (params.nposes > REF_NPOSES) { printf("Only validating the first %d poses.\n", REF_NPOSES); n_ref_poses = REF_NPOSES; } for (size_t i = 0; i < n_ref_poses; i++) fscanf(ref_energies, "%f", &energiesOMP[i]); fclose(ref_energies); } // Verify results if (params.run_omp) printf("\n OpenMP OpenCL (diff)\n"); else printf("\n Reference OpenCL (diff)\n"); for (int i = 0; i < n_ref_poses; i++) { if (fabs(energiesOMP[i]) < 1. 
&& fabs(energiesOCL[i]) < 1.f) continue; float diff = fabs(energiesOMP[i] - energiesOCL[i]) / energiesOCL[i]; if (diff > maxdiff) maxdiff = diff; if (i < 8) { printf("%7.2f vs %7.2f (%5.2f%%)\n", energiesOMP[i], energiesOCL[i], 100*diff); } } printf("\nLargest difference was %.3f%%\n\n", maxdiff); free(energiesOCL); free(energiesOMP); freeParameters(); } void runOpenMP(float *results) { printf("\nRunning C/OpenMP\n"); double start = getTimestamp(); #pragma omp parallel for (int itr = 0; itr < params.iterations; itr++) { #pragma omp for for (unsigned i = 0; i < params.nposes; i++) { float etot = 0; // Compute transformation matrix const float sx = sin(params.poses[0][i]); const float cx = cos(params.poses[0][i]); const float sy = sin(params.poses[1][i]); const float cy = cos(params.poses[1][i]); const float sz = sin(params.poses[2][i]); const float cz = cos(params.poses[2][i]); float transform[3][4]; transform[0][0] = cy*cz; transform[0][1] = sx*sy*cz - cx*sz; transform[0][2] = cx*sy*cz + sx*sz; transform[0][3] = params.poses[3][i]; transform[1][0] = cy*sz; transform[1][1] = sx*sy*sz + cx*cz; transform[1][2] = cx*sy*sz - sx*cz; transform[1][3] = params.poses[4][i]; transform[2][0] = -sy; transform[2][1] = sx*cy; transform[2][2] = cx*cy; transform[2][3] = params.poses[5][i]; // Loop over ligand atoms int il = 0; do { // Load ligand atom data const Atom l_atom = params.ligand[il]; const FFParams l_params = params.forcefield[l_atom.type]; const int lhphb_ltz = l_params.hphb<0.f; const int lhphb_gtz = l_params.hphb>0.f; // Transform ligand atom float lpos_x = transform[0][3] + l_atom.x * transform[0][0] + l_atom.y * transform[0][1] + l_atom.z * transform[0][2]; float lpos_y = transform[1][3] + l_atom.x * transform[1][0] + l_atom.y * transform[1][1] + l_atom.z * transform[1][2]; float lpos_z = transform[2][3] + l_atom.x * transform[2][0] + l_atom.y * transform[2][1] + l_atom.z * transform[2][2]; // Loop over protein atoms int ip = 0; do { // Load protein atom data const Atom p_atom = params.protein[ip]; const FFParams p_params = params.forcefield[p_atom.type]; const float radij = p_params.radius + l_params.radius; const float r_radij = 1.f / radij; const float elcdst = (p_params.hbtype==HBTYPE_F && l_params.hbtype==HBTYPE_F) ? 4.f : 2.f; const float elcdst1 = (p_params.hbtype==HBTYPE_F && l_params.hbtype==HBTYPE_F) ? 0.25f : 0.5f; const int type_E = ((p_params.hbtype==HBTYPE_E || l_params.hbtype==HBTYPE_E)); const int phphb_ltz = p_params.hphb < 0.f; const int phphb_gtz = p_params.hphb > 0.f; const int phphb_nz = p_params.hphb != 0.f; const float p_hphb = p_params.hphb * (phphb_ltz && lhphb_gtz ? -1.f : 1.f); const float l_hphb = l_params.hphb * (phphb_gtz && lhphb_ltz ? -1.f : 1.f); const float distdslv = (phphb_ltz ? (lhphb_ltz ? NPNPDIST : NPPDIST) : (lhphb_ltz ? NPPDIST : -FLT_MAX)); const float r_distdslv = 1.f / distdslv; const float chrg_init = l_params.elsc * p_params.elsc; const float dslv_init = p_hphb + l_hphb; // Calculate distance between atoms const float x = lpos_x - p_atom.x; const float y = lpos_y - p_atom.y; const float z = lpos_z - p_atom.z; const float distij = sqrt(x*x + y*y + z*z); // Calculate the sum of the sphere radii const float distbb = distij - radij; const int zone1 = (distbb < 0.f); // Calculate steric energy etot += (1.f - (distij*r_radij)) * (zone1 ? 2*HARDNESS : 0.f); // Calculate formal and dipole charge interactions float chrg_e = chrg_init * ((zone1 ? 1 : (1.f - distbb*elcdst1)) * (distbb<elcdst ? 1 : 0.f)); float neg_chrg_e = -fabs(chrg_e); chrg_e = type_E ? 
neg_chrg_e : chrg_e; etot += chrg_e*CNSTNT; // Calculate the two cases for Nonpolar-Polar repulsive interactions float coeff = (1.f - (distbb*r_distdslv)); float dslv_e = dslv_init * ((distbb<distdslv && phphb_nz) ? 1 : 0.f); dslv_e *= (zone1 ? 1 : coeff); etot += dslv_e; } while (++ip < params.natpro); // loop over protein atoms } while (++il < params.natlig); // loop over ligand atoms // Write result results[i] = etot*0.5f; } } double end = getTimestamp(); printTimings(start, end, 1); } void runOpenCL(float *results) { printf("\nRunning OpenCL\n"); initCL(); cl_int err; cl_mem protein, ligand, energies, forcefield, poses[6]; // Create buffers protein = clCreateBuffer(cl.context, CL_MEM_READ_ONLY, params.natpro*sizeof(Atom), NULL, &err); checkError(err, "creating protein"); ligand = clCreateBuffer(cl.context, CL_MEM_READ_ONLY, params.natlig*sizeof(Atom), NULL, &err); checkError(err, "creating ligand"); energies = clCreateBuffer(cl.context, CL_MEM_WRITE_ONLY, params.nposes*sizeof(cl_float), NULL, &err); checkError(err, "creating energies"); forcefield = clCreateBuffer(cl.context, CL_MEM_READ_ONLY, params.ntypes*sizeof(FFParams), NULL, &err); checkError(err, "creating forcefield"); for (int i = 0; i < 6; i++) { poses[i] = clCreateBuffer(cl.context, CL_MEM_READ_ONLY, params.nposes*sizeof(cl_float), NULL, &err); } // Write data to device err = clEnqueueWriteBuffer(cl.queue, protein, CL_TRUE, 0, params.natpro*sizeof(Atom), params.protein, 0, NULL, NULL); checkError(err, "writing protein"); err = clEnqueueWriteBuffer(cl.queue, ligand, CL_TRUE, 0, params.natlig*sizeof(Atom), params.ligand, 0, NULL, NULL); checkError(err, "writing ligand"); err = clEnqueueWriteBuffer(cl.queue, forcefield, CL_TRUE, 0, params.ntypes*sizeof(FFParams), params.forcefield, 0, NULL, NULL); checkError(err, "writing forcefield"); for (int i = 0; i < 6; i++) { err = clEnqueueWriteBuffer(cl.queue, poses[i], CL_TRUE, 0, params.nposes*sizeof(cl_float), params.poses[i], 0, NULL, NULL); checkError(err, "writing poses"); } // Set kernel arguments err = clSetKernelArg(cl.kernel, 0, sizeof(cl_int), &params.natlig); err |= clSetKernelArg(cl.kernel, 1, sizeof(cl_int), &params.natpro); err |= clSetKernelArg(cl.kernel, 2, sizeof(cl_mem), &protein); err |= clSetKernelArg(cl.kernel, 3, sizeof(cl_mem), &ligand); err |= clSetKernelArg(cl.kernel, 4, sizeof(cl_mem), poses+0); err |= clSetKernelArg(cl.kernel, 5, sizeof(cl_mem), poses+1); err |= clSetKernelArg(cl.kernel, 6, sizeof(cl_mem), poses+2); err |= clSetKernelArg(cl.kernel, 7, sizeof(cl_mem), poses+3); err |= clSetKernelArg(cl.kernel, 8, sizeof(cl_mem), poses+4); err |= clSetKernelArg(cl.kernel, 9, sizeof(cl_mem), poses+5); err |= clSetKernelArg(cl.kernel, 10, sizeof(cl_mem), &energies); err |= clSetKernelArg(cl.kernel, 11, sizeof(cl_mem), &forcefield); err |= clSetKernelArg(cl.kernel, 12, params.ntypes*sizeof(FFParams), NULL); err |= clSetKernelArg(cl.kernel, 13, sizeof(cl_int), &params.ntypes); err |= clSetKernelArg(cl.kernel, 14, sizeof(cl_int), &params.nposes); checkError(err, "setting arguments"); size_t global = ceil(params.nposes/(double)cl.posesPerWI); global = cl.wgsize * ceil(global/(double)cl.wgsize); size_t local = cl.wgsize; // Warm-up run (not timed) err = clEnqueueNDRangeKernel(cl.queue, cl.kernel, 1, NULL, &global, &local, 0, NULL, NULL); checkError(err, "queuing kernel"); err = clFinish(cl.queue); checkError(err, "running kernel"); double start = getTimestamp(); // Timed runs for (int i = 0; i < params.iterations; i++) { err = clEnqueueNDRangeKernel(cl.queue, 
                                 cl.kernel, 1, NULL, &global, &local, 0, NULL, NULL);
  }
  err = clFinish(cl.queue);
  checkError(err, "running kernel");
  double end = getTimestamp();

  // Read results
  err = clEnqueueReadBuffer(cl.queue, energies, CL_TRUE, 0,
                            params.nposes*sizeof(cl_float), results, 0, NULL, NULL);
  checkError(err, "reading results");

  printTimings(start, end, cl.posesPerWI);

  clReleaseMemObject(protein);
  clReleaseMemObject(ligand);
  clReleaseMemObject(energies);
  clReleaseMemObject(forcefield);
  clReleaseMemObject(poses[0]);
  clReleaseMemObject(poses[1]);
  clReleaseMemObject(poses[2]);
  clReleaseMemObject(poses[3]);
  clReleaseMemObject(poses[4]);
  clReleaseMemObject(poses[5]);

  releaseCL();
}

int parseInt(const char *str)
{
  char *next;
  int value = strtoul(str, &next, 10);
  return strlen(next) ? -1 : value;
}

void loadParameters(int argc, char *argv[])
{
  // Defaults
  params.iterations = 8;
  params.run_omp    = 0;
  cl.wgsize         = 64;
  cl.posesPerWI     = 4;
  cl.deckDir        = DATA_DIR;
  int nposes        = 65536;

  for (int i = 1; i < argc; i++)
  {
    if (!strcmp(argv[i], "--list") || !strcmp(argv[i], "-l"))
    {
      // Get list of devices
      cl_device_id devices[MAX_DEVICES];
      unsigned numDevices = getDevices(devices);

      // Print device names
      if (numDevices == 0)
      {
        printf("No devices found.\n");
      }
      else
      {
        printf("\n");
        printf("Devices:\n");
        for (int i = 0; i < numDevices; i++)
        {
          char name[MAX_INFO_STRING];
          getDeviceName(devices[i], name);
          printf("%2d: %s\n", i, name);
        }
        printf("\n");
      }
      exit(0);
    }
    else if (!strcmp(argv[i], "--device") || !strcmp(argv[i], "-d"))
    {
      if (++i >= argc || (cl.deviceIndex = parseInt(argv[i])) < 0)
      {
        printf("Invalid device index\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--iterations") || !strcmp(argv[i], "-i"))
    {
      if (++i >= argc || (params.iterations = parseInt(argv[i])) < 0)
      {
        printf("Invalid number of iterations\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--numposes") || !strcmp(argv[i], "-n"))
    {
      if (++i >= argc || (nposes = parseInt(argv[i])) < 0)
      {
        printf("Invalid number of poses\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--posesperwi") || !strcmp(argv[i], "-p"))
    {
      if (++i >= argc || (cl.posesPerWI = parseInt(argv[i])) < 0)
      {
        printf("Invalid poses-per-workitem value\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--wgsize") || !strcmp(argv[i], "-w"))
    {
      if (++i >= argc || (cl.wgsize = parseInt(argv[i])) < 0)
      {
        printf("Invalid work-group size\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--deck"))
    {
      if (++i >= argc)
      {
        printf("Invalid deck\n");
        exit(1);
      }
      cl.deckDir = argv[i];
    }
    else if (!strcmp(argv[i], "--openmp"))
    {
      params.run_omp = 1;
    }
    else if (!strcmp(argv[i], "--help") || !strcmp(argv[i], "-h"))
    {
      printf("\n");
      printf("Usage: ./bude [OPTIONS]\n\n");
      printf("Options:\n");
      printf("  -h  --help               Print this message\n");
      printf("      --list               List available devices\n");
      printf("  -d  --device     INDEX   Select device at INDEX\n");
      printf("  -i  --iterations I       Repeat kernel I times\n");
      printf("  -n  --numposes   N       Compute energies for N poses\n");
      printf("  -p  --posesperwi PPWI    Compute PPWI poses per work-item\n");
      printf("  -w  --wgsize     WGSIZE  Run with work-group size WGSIZE\n");
      printf("      --deck       DECK    Use the DECK directory as input deck\n");
      printf("      --openmp             Validate results against a reference OpenMP implementation\n");
      printf("\n");
      exit(0);
    }
    else
    {
      printf("Unrecognized argument '%s' (try '--help')\n", argv[i]);
      exit(1);
    }
  }

  FILE *file = NULL;
  long length;

  struct stat s;
  int e = stat(cl.deckDir, &s);
  if(e == -1 || !S_ISDIR(s.st_mode)){
    printf("Cannot stat or not a directory: %s\n", cl.deckDir);
    exit(1);
  }

  file = openFile(cl.deckDir, FILE_LIGAND, "rb", &length);
  params.natlig = length / sizeof(Atom);
  params.ligand = malloc(params.natlig*sizeof(Atom));
  fread(params.ligand, sizeof(Atom), params.natlig, file);
  fclose(file);

  file = openFile(cl.deckDir, FILE_PROTEIN, "rb", &length);
  params.natpro = length / sizeof(Atom);
  params.protein = malloc(params.natpro*sizeof(Atom));
  fread(params.protein, sizeof(Atom), params.natpro, file);
  fclose(file);

  file = openFile(cl.deckDir, FILE_FORCEFIELD, "rb", &length);
  params.ntypes = length / sizeof(FFParams);
  params.forcefield = malloc(params.ntypes*sizeof(FFParams));
  fread(params.forcefield, sizeof(FFParams), params.ntypes, file);
  fclose(file);

  file = openFile(cl.deckDir, FILE_POSES, "rb", &length);
  for (int i = 0; i < 6; i++)
    params.poses[i] = malloc(nposes*sizeof(float));

  long available = length / 6 / sizeof(float);
  params.nposes = 0;
  while (params.nposes < nposes)
  {
    long fetch = nposes - params.nposes;
    if (fetch > available) fetch = available;

    for (int i = 0; i < 6; i++)
    {
      fseek(file, i*available*sizeof(float), SEEK_SET);
      fread(params.poses[i] + params.nposes, sizeof(float), fetch, file);
    }
    rewind(file);

    params.nposes += fetch;
  }
  fclose(file);
}

void freeParameters()
{
  free(params.ligand);
  free(params.protein);
  free(params.forcefield);
  for (int i = 0; i < 6; i++)
    free(params.poses[i]);
}

void printTimings(double start, double end, double poses_per_wi)
{
  double ms = ((end-start)/params.iterations)*1e-3;

  // Compute FLOP/s
  double runtime    = ms*1e-3;
  double ops_per_wi = 27*poses_per_wi
    + params.natlig*(3 + 18*poses_per_wi + params.natpro*(11 + 30*poses_per_wi))
    + poses_per_wi;
  double total_ops  = ops_per_wi * (params.nposes/poses_per_wi);
  double flops      = total_ops / runtime;
  double gflops     = flops / 1e9;

  double interactions =
    (double)params.nposes * (double)params.natlig * (double)params.natpro;
  double interactions_per_sec = interactions / runtime;

  // Print stats
  printf("- Total time:     %7.2lf ms\n", (end-start)*1e-3);
  printf("- Average time:   %7.2lf ms\n", ms);
  printf("- Interactions/s: %7.2lf billion\n", (interactions_per_sec / 1e9));
  printf("- GFLOP/s:        %7.2lf\n", gflops);
}

void checkError(cl_int err, const char *op)
{
  if (err != CL_SUCCESS)
  {
    printf("Error during operation '%s' (%d)\n", op, err);
    releaseCL();
  }
}

unsigned getDevices(cl_device_id devices[MAX_DEVICES])
{
  cl_int err;

  // Get list of platforms
  cl_uint numPlatforms = 0;
  cl_platform_id platforms[MAX_PLATFORMS];
  err = clGetPlatformIDs(MAX_PLATFORMS, platforms, &numPlatforms);
  checkError(err, "getting platforms");

  // Enumerate devices
  unsigned numDevices = 0;
  for (int i = 0; i < numPlatforms; i++)
  {
    cl_uint num = 0;
    err = clGetDeviceIDs(platforms[i], CL_DEVICE_TYPE_ALL,
                         MAX_DEVICES-numDevices, devices+numDevices, &num);
    checkError(err, "getting devices");
    numDevices += num;
  }

  return numDevices;
}

void getDeviceName(cl_device_id device, char name[MAX_INFO_STRING])
{
  cl_device_info info = CL_DEVICE_NAME;

  // Special case for AMD
#ifdef CL_DEVICE_BOARD_NAME_AMD
  clGetDeviceInfo(device, CL_DEVICE_VENDOR, MAX_INFO_STRING, name, NULL);
  if (strstr(name, "Advanced Micro Devices"))
    info = CL_DEVICE_BOARD_NAME_AMD;
#endif

  clGetDeviceInfo(device, info, MAX_INFO_STRING, name, NULL);
}

double getTimestamp()
{
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return tv.tv_usec + tv.tv_sec*1e6;
}

void initCL()
{
  cl_int err;

  cl_device_id devices[MAX_DEVICES];
  unsigned num = getDevices(devices);
  if (cl.deviceIndex >= num)
  {
    printf("Invalid device index (try '--list')\n");
    exit(1);
  }

  cl.device = devices[cl.deviceIndex];

  char name[128];
  getDeviceName(cl.device, name);
  printf("Using 
device: %s\n", name); cl.context = clCreateContext(NULL, 1, &cl.device, NULL, NULL, &err); checkError(err, "creating context"); cl.queue = clCreateCommandQueue( cl.context, cl.device, CL_QUEUE_PROFILING_ENABLE, &err); checkError(err, "creating queue"); long length; FILE *file = openFile("./", FILE_KERNEL, "r", &length); char *source = malloc(length+1); fread(source, 1, length, file); source[length] = '\0'; fclose(file); cl.program = clCreateProgramWithSource( cl.context, 1, (const char**)&source, NULL, &err); checkError(err, "creating program"); char options[256]; sprintf(options, "-cl-fast-relaxed-math -cl-mad-enable -DNUM_TD_PER_THREAD=%d", cl.posesPerWI); err = clBuildProgram(cl.program, 1, &cl.device, options, NULL, NULL); if (err != CL_SUCCESS) { if (err == CL_BUILD_PROGRAM_FAILURE) { char log[16384]; clGetProgramBuildInfo(cl.program, cl.device, CL_PROGRAM_BUILD_LOG, 16384, log, NULL); printf("%s\n", log); } } free(source); checkError(err, "building program"); cl.kernel = clCreateKernel(cl.program, "fasten_main", &err); checkError(err, "creating kernel"); } #define RELEASE(func, obj) if (obj) {func(obj); obj=NULL;}; void releaseCL() { RELEASE(clReleaseKernel, cl.kernel); RELEASE(clReleaseProgram, cl.program); RELEASE(clReleaseCommandQueue, cl.queue); RELEASE(clReleaseContext, cl.context); }
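
The NDRange sizing in runOpenCL() (first a ceiling division of poses by poses-per-work-item, then rounding up to a multiple of the work-group size) is worth seeing in isolation. The sketch below is an illustration with hypothetical names, using pure integer arithmetic instead of the ceil() calls in the code.

/* Standalone illustration of the global work size computed in runOpenCL():
   each work-item handles posesPerWI poses and the global size must be a
   multiple of the work-group size. */
#include <stdio.h>

static size_t global_size(size_t nposes, size_t posesPerWI, size_t wgsize)
{
    size_t items = (nposes + posesPerWI - 1) / posesPerWI;  /* ceil division */
    return ((items + wgsize - 1) / wgsize) * wgsize;        /* round up to wgsize */
}

int main(void)
{
    /* defaults from loadParameters(): 65536 poses, 4 poses/WI, wgsize 64 */
    printf("%zu\n", global_size(65536, 4, 64));  /* prints 16384 */
    return 0;
}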
cache.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distribute-cache-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const Quantum *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static const void *GetVirtualMetacontentFromCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *, ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,Quantum *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), OpenPixelCacheOnDisk(CacheInfo *,const MapMode), ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), ReadPixelCacheMetacontent(CacheInfo *magick_restrict, NexusInfo *magick_restrict,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict, ExceptionInfo *); static Quantum *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode, const ssize_t,const ssize_t,const size_t,const size_t, const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *) magick_hot_spot; #if defined(MAGICKCORE_OPENCL_SUPPORT) static void CopyOpenCLBuffer(CacheInfo *magick_restrict); #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. */ static SemaphoreInfo *cache_semaphore = (SemaphoreInfo *) NULL; static ssize_t cache_anonymous_memory = (-1); static time_t cache_epoch = 0; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCache() acquires a pixel cache. % % The format of the AcquirePixelCache() method is: % % Cache AcquirePixelCache(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. 
% */ MagickPrivate Cache AcquirePixelCache(const size_t number_threads) { CacheInfo *magick_restrict cache_info; char *value; cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info)); if (cache_info == (CacheInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(cache_info,0,sizeof(*cache_info)); cache_info->type=UndefinedCache; cache_info->mode=IOMode; cache_info->disk_mode=IOMode; cache_info->colorspace=sRGBColorspace; cache_info->file=(-1); cache_info->id=GetMagickThreadId(); cache_info->number_threads=number_threads; if (GetOpenMPMaximumThreads() > cache_info->number_threads) cache_info->number_threads=GetOpenMPMaximumThreads(); if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads) cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource); if (cache_info->number_threads == 0) cache_info->number_threads=1; cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads); if (cache_info->nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); value=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (value != (const char *) NULL) { cache_info->synchronize=IsStringTrue(value); value=DestroyString(value); } value=GetPolicyValue("cache:synchronize"); if (value != (const char *) NULL) { cache_info->synchronize=IsStringTrue(value); value=DestroyString(value); } cache_info->width_limit=GetMagickResourceLimit(WidthResource); cache_info->height_limit=GetMagickResourceLimit(HeightResource); cache_info->semaphore=AcquireSemaphoreInfo(); cache_info->reference_count=1; cache_info->file_semaphore=AcquireSemaphoreInfo(); cache_info->debug=IsEventLogging(); cache_info->signature=MagickCoreSignature; return((Cache ) cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCacheNexus() allocates the NexusInfo structure. % % The format of the AcquirePixelCacheNexus method is: % % NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. % */ MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) { NexusInfo **magick_restrict nexus_info; register ssize_t i; nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2* number_threads,sizeof(*nexus_info))); if (nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); *nexus_info=(NexusInfo *) AcquireQuantumMemory(2*number_threads, sizeof(**nexus_info)); if (*nexus_info == (NexusInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info)); for (i=0; i < (ssize_t) (2*number_threads); i++) { nexus_info[i]=(*nexus_info+i); if (i < (ssize_t) number_threads) nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i); nexus_info[i]->signature=MagickCoreSignature; } return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCachePixels() returns the pixels associated with the specified % image. 
% % The format of the AcquirePixelCachePixels() method is: % % void *AcquirePixelCachePixels(const Image *image,size_t *length, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. % */ MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); (void) exception; cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=0; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); *length=(size_t) cache_info->length; return(cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentGenesis() instantiates the cache component. % % The format of the CacheComponentGenesis method is: % % MagickBooleanType CacheComponentGenesis(void) % */ MagickPrivate MagickBooleanType CacheComponentGenesis(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) cache_semaphore=AcquireSemaphoreInfo(); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t T e r m i n u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentTerminus() destroys the cache component. % % The format of the CacheComponentTerminus() method is: % % CacheComponentTerminus(void) % */ MagickPrivate void CacheComponentTerminus(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&cache_semaphore); /* no op-- nothing to destroy */ RelinquishSemaphoreInfo(&cache_semaphore); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l i p P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClipPixelCacheNexus() clips the cache nexus as defined by the image clip % mask. The method returns MagickTrue if the pixel region is clipped, % otherwise MagickFalse. % % The format of the ClipPixelCacheNexus() method is: % % MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to clip. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ClipPixelCacheNexus(Image *image, NexusInfo *nexus_info,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; register Quantum *magick_restrict p, *magick_restrict q; ssize_t y; /* Apply clip mask. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->channels & WriteMaskChannel) == 0) return(MagickTrue); if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0)) return(MagickTrue); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return(MagickFalse); p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y, nexus_info->region.width,nexus_info->region.height, nexus_info->virtual_nexus,exception); q=nexus_info->pixels; if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickFalse); for (y=0; y < (ssize_t) nexus_info->region.height; y++) { register ssize_t x; for (x=0; x < (ssize_t) nexus_info->region.width; x++) { double mask_alpha; register ssize_t i; mask_alpha=QuantumScale*GetPixelWriteMask(image,p); if (fabs(mask_alpha) >= MagickEpsilon) { for (i=0; i < (ssize_t) image->number_channels; i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha* GetPixelAlpha(image,p),(double) q[i],(double) GetPixelAlpha(image,q))); } SetPixelAlpha(image,GetPixelAlpha(image,p),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(image); } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCache() clones a pixel cache. % % The format of the ClonePixelCache() method is: % % Cache ClonePixelCache(const Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickPrivate Cache ClonePixelCache(const Cache cache) { CacheInfo *magick_restrict clone_info; const CacheInfo *magick_restrict cache_info; assert(cache != NULL); cache_info=(const CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads); clone_info->virtual_pixel_method=cache_info->virtual_pixel_method; return((Cache ) clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheMethods() clones the pixel cache methods from one cache to % another. % % The format of the ClonePixelCacheMethods() method is: % % void ClonePixelCacheMethods(Cache clone,const Cache cache) % % A description of each parameter follows: % % o clone: Specifies a pointer to a Cache structure. % % o cache: the pixel cache. 
% */ MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache) { CacheInfo *magick_restrict cache_info, *magick_restrict source_info; assert(clone != (Cache) NULL); source_info=(CacheInfo *) clone; assert(source_info->signature == MagickCoreSignature); if (source_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", source_info->filename); assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); source_info->methods=cache_info->methods; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e R e p o s i t o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheRepository() clones the source pixel cache to the destination % cache. % % The format of the ClonePixelCacheRepository() method is: % % MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info, % CacheInfo *source_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o source_info: the source pixel cache. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ClonePixelCacheOnDisk( CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info) { MagickSizeType extent; size_t quantum; ssize_t count; struct stat file_stats; unsigned char *buffer; /* Clone pixel cache on disk with identical morphology. */ if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) || (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse)) return(MagickFalse); if ((lseek(cache_info->file,0,SEEK_SET) < 0) || (lseek(clone_info->file,0,SEEK_SET) < 0)) return(MagickFalse); quantum=(size_t) MagickMaxBufferExtent; if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0)) quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent); buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer)); if (buffer == (unsigned char *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); extent=0; while ((count=read(cache_info->file,buffer,quantum)) > 0) { ssize_t number_bytes; number_bytes=write(clone_info->file,buffer,(size_t) count); if (number_bytes != count) break; extent+=number_bytes; } buffer=(unsigned char *) RelinquishMagickMemory(buffer); if (extent != cache_info->length) return(MagickFalse); return(MagickTrue); } static MagickBooleanType ClonePixelCacheRepository( CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info, ExceptionInfo *exception) { #define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource)) #define cache_number_threads(source,destination,chunk,multithreaded) \ num_threads((multithreaded) == 0 ? 1 : \ (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \ (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? 
\ MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \ MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1)) MagickBooleanType optimize, status; NexusInfo **magick_restrict cache_nexus, **magick_restrict clone_nexus; size_t length; ssize_t y; assert(cache_info != (CacheInfo *) NULL); assert(clone_info != (CacheInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); if (cache_info->type == PingCache) return(MagickTrue); length=cache_info->number_channels*sizeof(*cache_info->channel_map); if ((cache_info->storage_class == clone_info->storage_class) && (cache_info->colorspace == clone_info->colorspace) && (cache_info->alpha_trait == clone_info->alpha_trait) && (cache_info->channels == clone_info->channels) && (cache_info->columns == clone_info->columns) && (cache_info->rows == clone_info->rows) && (cache_info->number_channels == clone_info->number_channels) && (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) && (cache_info->metacontent_extent == clone_info->metacontent_extent)) { /* Identical pixel cache morphology. */ if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) && ((clone_info->type == MemoryCache) || (clone_info->type == MapCache))) { (void) memcpy(clone_info->pixels,cache_info->pixels, cache_info->number_channels*cache_info->columns*cache_info->rows* sizeof(*cache_info->pixels)); if ((cache_info->metacontent_extent != 0) && (clone_info->metacontent_extent != 0)) (void) memcpy(clone_info->metacontent,cache_info->metacontent, cache_info->columns*cache_info->rows* clone_info->metacontent_extent*sizeof(unsigned char)); return(MagickTrue); } if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache)) return(ClonePixelCacheOnDisk(cache_info,clone_info)); } /* Mismatched pixel cache morphology. */ cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads); clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads); length=cache_info->number_channels*sizeof(*cache_info->channel_map); optimize=(cache_info->number_channels == clone_info->number_channels) && (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ? MagickTrue : MagickFalse; length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns, clone_info->number_channels*clone_info->columns); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ cache_number_threads(cache_info,clone_info,cache_info->rows,1) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *pixels; register ssize_t x; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y, cache_info->columns,1,MagickFalse,cache_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y, clone_info->columns,1,MagickFalse,clone_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length); if (optimize != MagickFalse) (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length* sizeof(Quantum)); else { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; /* Mismatched pixel channel map. 
*/ p=cache_nexus[id]->pixels; q=clone_nexus[id]->pixels; for (x=0; x < (ssize_t) cache_info->columns; x++) { register ssize_t i; if (x == (ssize_t) clone_info->columns) break; for (i=0; i < (ssize_t) clone_info->number_channels; i++) { PixelChannel channel; PixelTrait traits; channel=clone_info->channel_map[i].channel; traits=cache_info->channel_map[channel].traits; if (traits != UndefinedPixelTrait) *q=*(p+cache_info->channel_map[channel].offset); q++; } p+=cache_info->number_channels; } } status=WritePixelCachePixels(clone_info,clone_nexus[id],exception); } if ((cache_info->metacontent_extent != 0) && (clone_info->metacontent_extent != 0)) { /* Clone metacontent. */ length=(size_t) MagickMin(cache_info->metacontent_extent, clone_info->metacontent_extent); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ cache_number_threads(cache_info,clone_info,cache_info->rows,1) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *pixels; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y, cache_info->columns,1,MagickFalse,cache_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y, clone_info->columns,1,MagickFalse,clone_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; if ((clone_nexus[id]->metacontent != (void *) NULL) && (cache_nexus[id]->metacontent != (void *) NULL)) (void) memcpy(clone_nexus[id]->metacontent, cache_nexus[id]->metacontent,length*sizeof(unsigned char)); status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception); } } clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads); cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads); if (cache_info->debug != MagickFalse) { char message[MagickPathExtent]; (void) FormatLocaleString(message,MagickPathExtent,"%s => %s", CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type), CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type)); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixelCache() deallocates memory associated with the pixel cache. % % The format of the DestroyImagePixelCache() method is: % % void DestroyImagePixelCache(Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void DestroyImagePixelCache(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->cache != (void *) NULL) image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixels() deallocates memory associated with the pixel cache. 
% % The format of the DestroyImagePixels() method is: % % void DestroyImagePixels(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImagePixels(Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL) { cache_info->methods.destroy_pixel_handler(image); return; } image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCache() deallocates memory associated with the pixel cache. % % The format of the DestroyPixelCache() method is: % % Cache DestroyPixelCache(Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info) { int status; status=(-1); if (cache_info->file != -1) { status=close(cache_info->file); cache_info->file=(-1); RelinquishMagickResource(FileResource,1); } return(status == -1 ? MagickFalse : MagickTrue); } static inline void RelinquishPixelCachePixels(CacheInfo *cache_info) { switch (cache_info->type) { case MemoryCache: { #if defined(MAGICKCORE_OPENCL_SUPPORT) if (cache_info->opencl != (MagickCLCacheInfo) NULL) { cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl, MagickTrue); cache_info->pixels=(Quantum *) NULL; break; } #endif if (cache_info->mapped == MagickFalse) cache_info->pixels=(Quantum *) RelinquishAlignedMemory( cache_info->pixels); else (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length); RelinquishMagickResource(MemoryResource,cache_info->length); break; } case MapCache: { (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length); cache_info->pixels=(Quantum *) NULL; if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode)) (void) RelinquishUniqueFileResource(cache_info->cache_filename); *cache_info->cache_filename='\0'; RelinquishMagickResource(MapResource,cache_info->length); } /* intentional fall-through: a map cache is also backed by a disk file, so the DiskCache cleanup below runs as well */ case DiskCache: { if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode)) (void) RelinquishUniqueFileResource(cache_info->cache_filename); *cache_info->cache_filename='\0'; RelinquishMagickResource(DiskResource,cache_info->length); break; } case DistributedCache: { *cache_info->cache_filename='\0'; (void) RelinquishDistributePixelCache((DistributeCacheInfo *) cache_info->server_info); break; } default: break; } cache_info->type=UndefinedCache; cache_info->mapped=MagickFalse; cache_info->metacontent=(void *) NULL; } MagickPrivate Cache DestroyPixelCache(Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); LockSemaphoreInfo(cache_info->semaphore); cache_info->reference_count--; if (cache_info->reference_count != 0) {
UnlockSemaphoreInfo(cache_info->semaphore); return((Cache) NULL); } UnlockSemaphoreInfo(cache_info->semaphore); if (cache_info->debug != MagickFalse) { char message[MagickPathExtent]; (void) FormatLocaleString(message,MagickPathExtent,"destroy %s", cache_info->filename); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } RelinquishPixelCachePixels(cache_info); if (cache_info->server_info != (DistributeCacheInfo *) NULL) cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *) cache_info->server_info); if (cache_info->nexus_info != (NexusInfo **) NULL) cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info, cache_info->number_threads); if (cache_info->random_info != (RandomInfo *) NULL) cache_info->random_info=DestroyRandomInfo(cache_info->random_info); if (cache_info->file_semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&cache_info->file_semaphore); if (cache_info->semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&cache_info->semaphore); cache_info->signature=(~MagickCoreSignature); cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info); cache=(Cache) NULL; return(cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCacheNexus() destroys a pixel cache nexus. % % The format of the DestroyPixelCacheNexus() method is: % % NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info, % const size_t number_threads) % % A description of each parameter follows: % % o nexus_info: the nexus to destroy. % % o number_threads: the number of nexus threads. % */ static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info) { if (nexus_info->mapped == MagickFalse) (void) RelinquishAlignedMemory(nexus_info->cache); else (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length); nexus_info->cache=(Quantum *) NULL; nexus_info->pixels=(Quantum *) NULL; nexus_info->metacontent=(void *) NULL; nexus_info->length=0; nexus_info->mapped=MagickFalse; } MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info, const size_t number_threads) { register ssize_t i; assert(nexus_info != (NexusInfo **) NULL); for (i=0; i < (ssize_t) (2*number_threads); i++) { if (nexus_info[i]->cache != (Quantum *) NULL) RelinquishCacheNexusPixels(nexus_info[i]); nexus_info[i]->signature=(~MagickCoreSignature); } *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info); nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info); return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontent() returns the authentic metacontent corresponding % with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is % returned if the associated pixels are not available. % % The format of the GetAuthenticMetacontent() method is: % % void *GetAuthenticMetacontent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
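%
%  A minimal usage sketch (illustrative only; assumes the image actually
%  carries metacontent, otherwise NULL is returned):
%
%      Quantum *q = GetAuthenticPixels(image, 0, 0, image->columns, 1, exception);
%      void *metacontent = GetAuthenticMetacontent(image);
%      if ((q != (Quantum *) NULL) && (metacontent != (void *) NULL))
%        {
%          /* update the row's pixels and metacontent, then ... */
%          (void) SyncAuthenticPixels(image, exception);
%        }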
% */ MagickExport void *GetAuthenticMetacontent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_metacontent_from_handler != (GetAuthenticMetacontentFromHandler) NULL) { void *metacontent; metacontent=cache_info->methods. get_authentic_metacontent_from_handler(image); return(metacontent); } assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontentFromCache() returns the meta-content corresponding % with the last call to QueueAuthenticPixelsCache() or % GetAuthenticPixelsCache(). % % The format of the GetAuthenticMetacontentFromCache() method is: % % void *GetAuthenticMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void *GetAuthenticMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL % operations. % % The format of the GetAuthenticOpenCLBuffer() method is: % % cl_mem GetAuthenticOpenCLBuffer(const Image *image, % MagickCLDevice device,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o device: the device to use. % % o exception: return any errors or warnings in this structure. 
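%
%  A minimal usage sketch (illustrative only; assumes OpenCL support is
%  compiled in and `device' is a valid MagickCLDevice).  The buffer is
%  retained on behalf of the caller and must be released when done:
%
%      cl_mem buffer = GetAuthenticOpenCLBuffer(image, device, exception);
%      if (buffer != (cl_mem) NULL)
%        {
%          /* enqueue OpenCL kernels that operate on the pixel buffer */
%        }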
% */ MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image, MagickCLDevice device,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(device != (const MagickCLDevice) NULL); cache_info=(CacheInfo *) image->cache; if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1)) { SyncImagePixelCache((Image *) image,exception); cache_info=(CacheInfo *) image->cache; } if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse)) return((cl_mem) NULL); LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->opencl != (MagickCLCacheInfo) NULL) && (cache_info->opencl->device->context != device->context)) cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl); if (cache_info->opencl == (MagickCLCacheInfo) NULL) { assert(cache_info->pixels != (Quantum *) NULL); cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels, cache_info->length); } if (cache_info->opencl != (MagickCLCacheInfo) NULL) RetainOpenCLMemObject(cache_info->opencl->buffer); UnlockSemaphoreInfo(cache_info->semaphore); if (cache_info->opencl == (MagickCLCacheInfo) NULL) return((cl_mem) NULL); assert(cache_info->opencl->pixels == cache_info->pixels); return(cache_info->opencl->buffer); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or % disk pixel cache as defined by the geometry parameters. A pointer to the % pixels is returned if the pixels are transferred, otherwise a NULL is % returned. % % The format of the GetAuthenticPixelCacheNexus() method is: % % Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to return. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; Quantum *magick_restrict pixels; /* Transfer pixels from the cache. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue, nexus_info,exception); if (pixels == (Quantum *) NULL) return((Quantum *) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (nexus_info->authentic_pixel_cache != MagickFalse) return(pixels); if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); if (cache_info->metacontent_extent != 0) if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsFromCache() returns the pixels associated with the last % call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods. % % The format of the GetAuthenticPixelsFromCache() method is: % % Quantum *GetAuthenticPixelsFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static Quantum *GetAuthenticPixelsFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c P i x e l Q u e u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelQueue() returns the authentic pixels associated with the % last call to QueueAuthenticPixels() or GetAuthenticPixels(). % % The format of the GetAuthenticPixelQueue() method is: % % Quantum *GetAuthenticPixelQueue(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Quantum *GetAuthenticPixelQueue(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_pixels_from_handler != (GetAuthenticPixelsFromHandler) NULL) return(cache_info->methods.get_authentic_pixels_from_handler(image)); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixels() obtains a pixel region for read/write access. If the % region is successfully accessed, a pointer to a Quantum array % representing the region is returned, otherwise NULL is returned. % % The returned pointer may point to a temporary working copy of the pixels % or it may point to the original pixels in memory.
Performance is maximized % if the selected region is part of one row, or one or more full rows, since % then there is opportunity to access the pixels in-place (without a copy) % if the image is in memory, or in a memory-mapped file. The returned pointer % must *never* be deallocated by the user. % % Pixels accessed via the returned pointer represent a simple array of type % Quantum. If the image has corresponding metacontent, call % GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the % meta-content corresponding to the region. Once the Quantum array has % been updated, the changes must be saved back to the underlying image using % SyncAuthenticPixels() or they may be lost. % % The format of the GetAuthenticPixels() method is: % % Quantum *GetAuthenticPixels(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *pixels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) { pixels=cache_info->methods.get_authentic_pixels_handler(image,x,y,columns, rows,exception); return(pixels); } assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache % as defined by the geometry parameters. A pointer to the pixels is returned % if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetAuthenticPixelsCache() method is: % % Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure.
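%
%  The access pattern is the same as for the public GetAuthenticPixels()
%  documented above; a minimal sketch (illustrative only; one row, error
%  handling elided):
%
%      Quantum *q = GetAuthenticPixels(image, 0, y, image->columns, 1, exception);
%      if (q != (Quantum *) NULL)
%        {
%          ssize_t x;
%          for (x = 0; x < (ssize_t) image->columns; x++)
%            {
%              SetPixelRed(image, QuantumRange-GetPixelRed(image, q), q);
%              q += GetPixelChannels(image);
%            }
%          (void) SyncAuthenticPixels(image, exception);
%        }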
% */ static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return((Quantum *) NULL); assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageExtent() returns the extent of the pixels associated with the last % call to QueueAuthenticPixels() or GetAuthenticPixels(). % % The format of the GetImageExtent() method is: % % MagickSizeType GetImageExtent(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickSizeType GetImageExtent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCache() ensures that there is only a single reference to the % pixel cache to be modified, updating the provided cache pointer to point to % a clone of the original pixel cache if necessary. % % The format of the GetImagePixelCache() method is: % % Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o clone: any value other than MagickFalse clones the cache pixels. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType ValidatePixelCacheMorphology( const Image *magick_restrict image) { const CacheInfo *magick_restrict cache_info; const PixelChannelMap *magick_restrict p, *magick_restrict q; /* Does the image match the pixel cache morphology?
*/ cache_info=(CacheInfo *) image->cache; p=image->channel_map; q=cache_info->channel_map; if ((image->storage_class != cache_info->storage_class) || (image->colorspace != cache_info->colorspace) || (image->alpha_trait != cache_info->alpha_trait) || (image->channels != cache_info->channels) || (image->columns != cache_info->columns) || (image->rows != cache_info->rows) || (image->number_channels != cache_info->number_channels) || (memcmp(p,q,image->number_channels*sizeof(*p)) != 0) || (image->metacontent_extent != cache_info->metacontent_extent) || (cache_info->nexus_info == (NexusInfo **) NULL)) return(MagickFalse); return(MagickTrue); } static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickBooleanType destroy, status; static MagickSizeType cache_timelimit = MagickResourceInfinity, cpu_throttle = MagickResourceInfinity, cycles = 0; status=MagickTrue; if (cpu_throttle == MagickResourceInfinity) cpu_throttle=GetMagickResourceLimit(ThrottleResource); if ((cpu_throttle != 0) && ((cycles++ % 32) == 0)) MagickDelay(cpu_throttle); if (cache_epoch == 0) { /* Set the expire time in seconds. */ cache_timelimit=GetMagickResourceLimit(TimeResource); cache_epoch=GetMagickTime(); } if ((cache_timelimit != MagickResourceInfinity) && ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit)) { #if defined(ECANCELED) errno=ECANCELED; #endif cache_info=(CacheInfo *) image->cache; if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded"); } LockSemaphoreInfo(image->semaphore); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif destroy=MagickFalse; if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { CacheInfo *clone_info; Image clone_image; /* Clone pixel cache. */ clone_image=(*image); clone_image.semaphore=AcquireSemaphoreInfo(); clone_image.reference_count=1; clone_image.cache=ClonePixelCache(cache_info); clone_info=(CacheInfo *) clone_image.cache; status=OpenPixelCache(&clone_image,IOMode,exception); if (status == MagickFalse) clone_info=(CacheInfo *) DestroyPixelCache(clone_info); else { if (clone != MagickFalse) status=ClonePixelCacheRepository(clone_info,cache_info, exception); if (status == MagickFalse) clone_info=(CacheInfo *) DestroyPixelCache(clone_info); else { destroy=MagickTrue; image->cache=clone_info; } } RelinquishSemaphoreInfo(&clone_image.semaphore); } UnlockSemaphoreInfo(cache_info->semaphore); } if (destroy != MagickFalse) cache_info=(CacheInfo *) DestroyPixelCache(cache_info); if (status != MagickFalse) { /* Ensure the image matches the pixel cache morphology. 
*/ if (image->type != UndefinedType) image->type=UndefinedType; if (ValidatePixelCacheMorphology(image) == MagickFalse) { status=OpenPixelCache(image,IOMode,exception); cache_info=(CacheInfo *) image->cache; if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); } } UnlockSemaphoreInfo(image->semaphore); if (status == MagickFalse) return((Cache) NULL); return(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCacheType() returns the pixel cache type: UndefinedCache, % DiskCache, MemoryCache, MapCache, DistributedCache, or PingCache. % % The format of the GetImagePixelCacheType() method is: % % CacheType GetImagePixelCacheType(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport CacheType GetImagePixelCacheType(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->type); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e A u t h e n t i c P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixel() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixel() method is: % % MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure.
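%
%  A minimal usage sketch (illustrative only; `pixel' must be able to hold
%  MaxPixelChannels quanta):
%
%      Quantum pixel[MaxPixelChannels];
%      if (GetOneAuthenticPixel(image, x, y, pixel, exception) != MagickFalse)
%        {
%          /* pixel[RedPixelChannel], pixel[AlphaPixelChannel], ... are set */
%        }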
% */ static inline MagickBooleanType CopyPixel(const Image *image, const Quantum *source,Quantum *destination) { register ssize_t i; if (source == (const Quantum *) NULL) { destination[RedPixelChannel]=ClampToQuantum(image->background_color.red); destination[GreenPixelChannel]=ClampToQuantum( image->background_color.green); destination[BluePixelChannel]=ClampToQuantum( image->background_color.blue); destination[BlackPixelChannel]=ClampToQuantum( image->background_color.black); destination[AlphaPixelChannel]=ClampToQuantum( image->background_color.alpha); return(MagickFalse); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); destination[channel]=source[i]; } return(MagickTrue); } MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; register Quantum *magick_restrict q; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); if (cache_info->methods.get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,pixel,exception)); q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e A u t h e n t i c P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixelFromCache() method is: % % MagickBooleanType GetOneAuthenticPixelFromCache(Image *image, % const ssize_t x,const ssize_t y,Quantum *pixel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id], exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixel() returns a single virtual pixel at the specified % (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead. % % The format of the GetOneVirtualPixel() method is: % % MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); if (cache_info->methods.get_one_virtual_pixel_from_handler != (GetOneVirtualPixelFromHandler) NULL) return(cache_info->methods.get_one_virtual_pixel_from_handler(image, GetPixelCacheVirtualMethod(image),x,y,pixel,exception)); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y, 1UL,1UL,cache_info->nexus_info[id],exception); return(CopyPixel(image,p,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e V i r t u a l P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixelFromCache() returns a single virtual pixel at the % specified (x,y) location. The image background color is returned if an % error occurs. % % The format of the GetOneVirtualPixelFromCache() method is: % % MagickBooleanType GetOneVirtualPixelFromCache(const Image *image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL, cache_info->nexus_info[id],exception); return(CopyPixel(image,p,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y) % location.
The image background color is returned if an error occurs. If % you plan to modify the pixel, use GetOneAuthenticPixel() instead. % % The format of the GetOneVirtualPixelInfo() method is: % % MagickBooleanType GetOneVirtualPixelInfo(const Image *image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: these values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, PixelInfo *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); GetPixelInfo(image,pixel); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL, cache_info->nexus_info[id],exception); if (p == (const Quantum *) NULL) return(MagickFalse); GetPixelInfoPixel(image,p,pixel); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheColorspace() returns the colorspace of the pixel cache. % % The format of the GetPixelCacheColorspace() method is: % % ColorspaceType GetPixelCacheColorspace(const Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheFilename() returns the filename associated with the pixel % cache. % % The format of the GetPixelCacheFilename() method is: % % const char *GetPixelCacheFilename(const Image *image) % % A description of each parameter follows: % % o image: the image.
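%
%  A minimal usage sketch (illustrative only; the filename is only meaningful
%  when the pixel cache is backed by a file on disk):
%
%      if (GetImagePixelCacheType(image) == DiskCache)
%        (void) FormatLocaleFile(stderr, "cache file: %s\n",
%          GetPixelCacheFilename(image));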
% */ MagickExport const char *GetPixelCacheFilename(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->cache_filename); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheMethods() initializes the CacheMethods structure. % % The format of the GetPixelCacheMethods() method is: % % void GetPixelCacheMethods(CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache_methods: Specifies a pointer to a CacheMethods structure. % */ MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods) { assert(cache_methods != (CacheMethods *) NULL); (void) memset(cache_methods,0,sizeof(*cache_methods)); cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache; cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache; cache_methods->get_virtual_metacontent_from_handler= GetVirtualMetacontentFromCache; cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache; cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache; cache_methods->get_authentic_metacontent_from_handler= GetAuthenticMetacontentFromCache; cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache; cache_methods->get_one_authentic_pixel_from_handler= GetOneAuthenticPixelFromCache; cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache; cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache; cache_methods->destroy_pixel_handler=DestroyImagePixelCache; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e N e x u s E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheNexusExtent() returns the extent of the pixels associated % with the last call to SetPixelCacheNexusPixels() or % GetPixelCacheNexusPixels(). % % The format of the GetPixelCacheNexusExtent() method is: % % MagickSizeType GetPixelCacheNexusExtent(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the nexus info. % */ MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; MagickSizeType extent; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height; if (extent == 0) return((MagickSizeType) cache_info->columns*cache_info->rows); return(extent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCachePixels() returns the pixels associated with the specified image. % % The format of the GetPixelCachePixels() method is: % % void *GetPixelCachePixels(Image *image,MagickSizeType *length, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image.
% % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. % */ MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); assert(length != (MagickSizeType *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=cache_info->length; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); return((void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheStorageClass() returns the class type of the pixel cache. % % The format of the GetPixelCacheStorageClass() method is: % % ClassType GetPixelCacheStorageClass(Cache cache) % % A description of each parameter follows: % % o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass. % % o cache: the pixel cache. % */ MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->storage_class); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e T i l e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheTileSize() returns the pixel cache tile size. % % The format of the GetPixelCacheTileSize() method is: % % void GetPixelCacheTileSize(const Image *image,size_t *width, % size_t *height) % % A description of each parameter follows: % % o image: the image. % % o width: the optimized cache tile width in pixels. % % o height: the optimized cache tile height in pixels. % */ MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width, size_t *height) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *width=2048UL/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum)); if (GetImagePixelCacheType(image) == DiskCache) *width=8192UL/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum)); *height=(*width); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e V i r t u a l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the % pixel cache. A virtual pixel is any pixel access that is outside the % boundaries of the image cache. 
% % The format of the GetPixelCacheVirtualMethod() method is: % % VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->virtual_pixel_method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromCache() returns the meta-content corresponding with % the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualMetacontentFromCache() method is: % % const void *GetVirtualMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const void *GetVirtualMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromNexus() returns the meta-content for the specified % cache nexus. % % The format of the GetVirtualMetacontentFromNexus() method is: % % const void *GetVirtualMetacontentFromNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the meta-content. % */ MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((void *) NULL); return(nexus_info->metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontent() returns the virtual metacontent corresponding with % the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is % returned if the meta-content is not available. % % The format of the GetVirtualMetacontent() method is: % % const void *GetVirtualMetacontent(const Image *image) % % A description of each parameter follows: % % o image: the image.
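%
%  A minimal usage sketch (illustrative only; NULL is returned when the image
%  carries no metacontent):
%
%      const Quantum *p = GetVirtualPixels(image, 0, 0, image->columns, 1, exception);
%      const void *metacontent = GetVirtualMetacontent(image);
%      if ((p != (const Quantum *) NULL) && (metacontent != (const void *) NULL))
%        {
%          /* inspect the region's metacontent here */
%        }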
% */ MagickExport const void *GetVirtualMetacontent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image); if (metacontent != (void *) NULL) return(metacontent); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk % pixel cache as defined by the geometry parameters. A pointer to the pixels % is returned if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetVirtualPixelCacheNexus() method is: % % Quantum *GetVirtualPixelCacheNexus(const Image *image, % const VirtualPixelMethod method,const ssize_t x,const ssize_t y, % const size_t columns,const size_t rows,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to acquire. % % o exception: return any errors or warnings in this structure. % */ static ssize_t DitherMatrix[64] = { 0, 48, 12, 60, 3, 51, 15, 63, 32, 16, 44, 28, 35, 19, 47, 31, 8, 56, 4, 52, 11, 59, 7, 55, 40, 24, 36, 20, 43, 27, 39, 23, 2, 50, 14, 62, 1, 49, 13, 61, 34, 18, 46, 30, 33, 17, 45, 29, 10, 58, 6, 54, 9, 57, 5, 53, 42, 26, 38, 22, 41, 25, 37, 21 }; static inline ssize_t DitherX(const ssize_t x,const size_t columns) { ssize_t index; index=x+DitherMatrix[x & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) columns) return((ssize_t) columns-1L); return(index); } static inline ssize_t DitherY(const ssize_t y,const size_t rows) { ssize_t index; index=y+DitherMatrix[y & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) rows) return((ssize_t) rows-1L); return(index); } static inline ssize_t EdgeX(const ssize_t x,const size_t columns) { if (x < 0L) return(0L); if (x >= (ssize_t) columns) return((ssize_t) (columns-1)); return(x); } static inline ssize_t EdgeY(const ssize_t y,const size_t rows) { if (y < 0L) return(0L); if (y >= (ssize_t) rows) return((ssize_t) (rows-1)); return(y); } static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns) { return((ssize_t) (columns*GetPseudoRandomValue(random_info))); } static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows) { return((ssize_t) (rows*GetPseudoRandomValue(random_info))); } static inline MagickModulo VirtualPixelModulo(const ssize_t offset, const size_t extent) { MagickModulo modulo; modulo.quotient=offset/((ssize_t) extent); modulo.remainder=offset % ((ssize_t) extent); if ((modulo.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0)) { modulo.quotient-=1; modulo.remainder+=((ssize_t) extent); } return(modulo); } MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image, const VirtualPixelMethod 
virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickOffsetType offset; MagickSizeType length, number_pixels; NexusInfo *magick_restrict virtual_nexus; Quantum *magick_restrict pixels, virtual_pixel[MaxPixelChannels]; register const Quantum *magick_restrict p; register const void *magick_restrict r; register Quantum *magick_restrict q; register ssize_t i, u; register unsigned char *magick_restrict s; ssize_t v; void *magick_restrict virtual_metacontent; /* Acquire pixels. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return((const Quantum *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows, ((image->channels & WriteMaskChannel) != 0) || ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse, nexus_info,exception); if (pixels == (Quantum *) NULL) return((const Quantum *) NULL); q=pixels; offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+ nexus_info->region.width-1L; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels)) if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) && (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows)) { MagickBooleanType status; /* Pixel request is inside cache extents. */ if (nexus_info->authentic_pixel_cache != MagickFalse) return(q); status=ReadPixelCachePixels(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); if (cache_info->metacontent_extent != 0) { status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); } return(q); } /* Pixel request is outside cache extents. */ virtual_nexus=nexus_info->virtual_nexus; s=(unsigned char *) nexus_info->metacontent; (void) memset(virtual_pixel,0,cache_info->number_channels* sizeof(*virtual_pixel)); virtual_metacontent=(void *) NULL; switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: case EdgeVirtualPixelMethod: case CheckerTileVirtualPixelMethod: case HorizontalTileVirtualPixelMethod: case VerticalTileVirtualPixelMethod: { if (cache_info->metacontent_extent != 0) { /* Acquire a metacontent buffer. 
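The constant-color virtual pixel methods below resolve every out-of-bounds
coordinate to the same pixel value, so a single zero-filled meta-content
buffer can be shared by all of them.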
*/ virtual_metacontent=(void *) AcquireQuantumMemory(1, cache_info->metacontent_extent); if (virtual_metacontent == (void *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), CacheError,"UnableToGetCacheNexus","`%s'",image->filename); return((const Quantum *) NULL); } (void) memset(virtual_metacontent,0,cache_info->metacontent_extent); } switch (virtual_pixel_method) { case BlackVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case GrayVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange/2, virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case TransparentVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,TransparentAlpha,virtual_pixel); break; } case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } default: { SetPixelRed(image,ClampToQuantum(image->background_color.red), virtual_pixel); SetPixelGreen(image,ClampToQuantum(image->background_color.green), virtual_pixel); SetPixelBlue(image,ClampToQuantum(image->background_color.blue), virtual_pixel); SetPixelBlack(image,ClampToQuantum(image->background_color.black), virtual_pixel); SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha), virtual_pixel); break; } } break; } default: break; } for (v=0; v < (ssize_t) rows; v++) { ssize_t y_offset; y_offset=y+v; if ((virtual_pixel_method == EdgeVirtualPixelMethod) || (virtual_pixel_method == UndefinedVirtualPixelMethod)) y_offset=EdgeY(y_offset,cache_info->rows); for (u=0; u < (ssize_t) columns; u+=length) { ssize_t x_offset; x_offset=x+u; length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u); if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) || ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) || (length == 0)) { MagickModulo x_modulo, y_modulo; /* Transfer a single pixel. 
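The coordinate falls outside the cache extents (or the remaining run length
is zero), so resolve it one pixel at a time according to the active virtual
pixel method.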
*/ length=(MagickSizeType) 1; switch (virtual_pixel_method) { case EdgeVirtualPixelMethod: default: { p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns), EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info, nexus_info->virtual_nexus); break; } case RandomVirtualPixelMethod: { if (cache_info->random_info == (RandomInfo *) NULL) cache_info->random_info=AcquireRandomInfo(); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, RandomX(cache_info->random_info,cache_info->columns), RandomY(cache_info->random_info,cache_info->rows),1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case DitherVirtualPixelMethod: { p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, DitherX(x_offset,cache_info->columns), DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case TileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case MirrorVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); if ((x_modulo.quotient & 0x01) == 1L) x_modulo.remainder=(ssize_t) cache_info->columns- x_modulo.remainder-1L; y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if ((y_modulo.quotient & 0x01) == 1L) y_modulo.remainder=(ssize_t) cache_info->rows- y_modulo.remainder-1L; p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case HorizontalTileEdgeVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case VerticalTileEdgeVirtualPixelMethod: { y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { p=virtual_pixel; r=virtual_metacontent; break; } case CheckerTileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L) { p=virtual_pixel; r=virtual_metacontent; break; } p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case HorizontalTileVirtualPixelMethod: { if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); 
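          /*
            y_offset was bounds-checked above, so y_modulo.remainder equals
            y_offset; only the x coordinate actually wraps here.
          */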
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case VerticalTileVirtualPixelMethod: { if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } } if (p == (const Quantum *) NULL) break; (void) memcpy(q,p,(size_t) (cache_info->number_channels*length* sizeof(*p))); q+=cache_info->number_channels; if ((s != (void *) NULL) && (r != (const void *) NULL)) { (void) memcpy(s,r,(size_t) cache_info->metacontent_extent); s+=cache_info->metacontent_extent; } continue; } /* Transfer a run of pixels. */ p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset, (size_t) length,1UL,virtual_nexus,exception); if (p == (const Quantum *) NULL) break; r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); (void) memcpy(q,p,(size_t) (cache_info->number_channels*length* sizeof(*p))); q+=cache_info->number_channels*length; if ((r != (void *) NULL) && (s != (const void *) NULL)) { (void) memcpy(s,r,(size_t) (length*cache_info->metacontent_extent)); s+=length*cache_info->metacontent_extent; } } if (u < (ssize_t) columns) break; } /* Free resources. */ if (virtual_metacontent != (void *) NULL) virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent); if (v < (ssize_t) rows) return((const Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelCache() gets virtual pixels from the in-memory or disk pixel % cache as defined by the geometry parameters. A pointer to the pixels % is returned if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetVirtualPixelCache() method is: % % const Quantum *GetVirtualPixelCache(const Image *image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure.
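%
%  This method resolves the per-thread cache nexus (indexed by
%  GetOpenMPThreadId()) and delegates to GetVirtualPixelCacheNexus().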
% */ static const Quantum *GetVirtualPixelCache(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows, cache_info->nexus_info[id],exception); return(p); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l P i x e l Q u e u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelQueue() returns the virtual pixels associated with the last % call to QueueAuthenticPixels() or GetVirtualPixels(). % % The format of the GetVirtualPixelQueue() method is: % % const Quantum *GetVirtualPixelQueue(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const Quantum *GetVirtualPixelQueue(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_virtual_pixels_handler != (GetVirtualPixelsHandler) NULL) return(cache_info->methods.get_virtual_pixels_handler(image)); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixels() returns an immutable pixel region. If the % region is successfully accessed, a pointer to it is returned, otherwise % NULL is returned. The returned pointer may point to a temporary working % copy of the pixels or it may point to the original pixels in memory. % Performance is maximized if the selected region is part of one row, or one % or more full rows, since there is opportunity to access the pixels in-place % (without a copy) if the image is in memory, or in a memory-mapped file. The % returned pointer must *never* be deallocated by the user. % % Pixels accessed via the returned pointer represent a simple array of type % Quantum. If the image type is CMYK or the storage class is PseudoClass, % call GetVirtualMetacontent() after invoking GetVirtualPixels() to % access the meta-content (of type void) corresponding to the % region. % % If you plan to modify the pixels, use GetAuthenticPixels() instead. % % Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread- % safe. In a threaded environment, use GetCacheViewVirtualPixels() or % GetCacheViewAuthenticPixels() instead.
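%
%  For example (an illustrative sketch only, assuming an initialized image
%  and exception structure; error handling is abbreviated), to sum the
%  intensity of every pixel one row at a time:
%
%      ssize_t
%        x,
%        y;
%
%      double
%        sum = 0.0;
%
%      for (y=0; y < (ssize_t) image->rows; y++)
%      {
%        const Quantum
%          *p;
%
%        p=GetVirtualPixels(image,0,y,image->columns,1,exception);
%        if (p == (const Quantum *) NULL)
%          break;
%        for (x=0; x < (ssize_t) image->columns; x++)
%        {
%          sum+=(double) GetPixelIntensity(image,p);
%          p+=GetPixelChannels(image);
%        }
%      }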
% % The format of the GetVirtualPixels() method is: % % const Quantum *GetVirtualPixels(const Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport const Quantum *GetVirtualPixels(const Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) return(cache_info->methods.get_virtual_pixel_handler(image, GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception)); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y, columns,rows,cache_info->nexus_info[id],exception); return(p); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsCache() returns the pixels associated with the % last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualPixelsCache() method is: % % const Quantum *GetVirtualPixelsCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const Quantum *GetVirtualPixelsCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsNexus() returns the pixels associated with the specified % cache nexus. % % The format of the GetVirtualPixelsNexus() method is: % % const Quantum *GetVirtualPixelsNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the pixels.
% */ MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((Quantum *) NULL); return((const Quantum *) nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a s k P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MaskPixelCacheNexus() masks the cache nexus as defined by the composite mask. % The method returns MagickTrue if the pixel region is masked, otherwise % MagickFalse. % % The format of the MaskPixelCacheNexus() method is: % % MagickBooleanType MaskPixelCacheNexus(Image *image, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to mask. % % o exception: return any errors or warnings in this structure. % */ static inline Quantum ApplyPixelCompositeMask(const Quantum p, const MagickRealType alpha,const Quantum q,const MagickRealType beta) { double mask_alpha; Quantum pixel; if (fabs(alpha-OpaqueAlpha) < MagickEpsilon) return(p); mask_alpha=1.0-QuantumScale*QuantumScale*alpha*beta; mask_alpha=PerceptibleReciprocal(mask_alpha); pixel=ClampToQuantum(mask_alpha*MagickOver_((double) p,alpha,(double) q, beta)); return(pixel); } static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; register Quantum *magick_restrict p, *magick_restrict q; ssize_t y; /* Apply composite mask. */ if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->channels & CompositeMaskChannel) == 0) return(MagickTrue); if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0)) return(MagickTrue); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return(MagickFalse); p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y, nexus_info->region.width,nexus_info->region.height, nexus_info->virtual_nexus,exception); q=nexus_info->pixels; if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickFalse); for (y=0; y < (ssize_t) nexus_info->region.height; y++) { register ssize_t x; for (x=0; x < (ssize_t) nexus_info->region.width; x++) { double mask_alpha; register ssize_t i; mask_alpha=(double) GetPixelCompositeMask(image,p); for (i=0; i < (ssize_t) image->number_channels; i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],(MagickRealType) GetPixelAlpha(image,q)); } p+=GetPixelChannels(image); q+=GetPixelChannels(image); } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p e n P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpenPixelCache() allocates the pixel cache. This includes defining the cache % dimensions, allocating space for the image pixels and optionally the % metacontent, and memory mapping the cache if it is disk based. The cache % nexus array is initialized as well.
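%
%  Candidate cache types are tried in order: heap or anonymously-mapped
%  memory first, then a distributed cache server when one is registered
%  under "cache:hosts", and finally a disk cache (memory-mapped when the
%  map resource limit permits).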
% % The format of the OpenPixelCache() method is: % % MagickBooleanType OpenPixelCache(Image *image,const MapMode mode, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o mode: ReadMode, WriteMode, or IOMode. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info, const MapMode mode) { int file; /* Open pixel cache on disk. */ if ((cache_info->file != -1) && (cache_info->disk_mode == mode)) return(MagickTrue); /* cache already open and in the proper mode */ if (*cache_info->cache_filename == '\0') file=AcquireUniqueFileResource(cache_info->cache_filename); else switch (mode) { case ReadMode: { file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0); break; } case WriteMode: { file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT | O_BINARY | O_EXCL,S_MODE); if (file == -1) file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE); break; } case IOMode: default: { file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY | O_EXCL,S_MODE); if (file == -1) file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE); break; } } if (file == -1) return(MagickFalse); (void) AcquireMagickResource(FileResource,1); if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); cache_info->file=file; cache_info->disk_mode=mode; return(MagickTrue); } static inline MagickOffsetType WritePixelCacheRegion( const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,const unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PWRITE) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PWRITE) count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX)); #else count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX),offset+i); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length) { CacheInfo *magick_restrict cache_info; MagickOffsetType count, extent, offset; cache_info=(CacheInfo *) image->cache; if (image->debug != MagickFalse) { char format[MagickPathExtent], message[MagickPathExtent]; (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format); (void) FormatLocaleString(message,MagickPathExtent, "extend %s (%s[%d], disk, %s)",cache_info->filename, cache_info->cache_filename,cache_info->file,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } if (length != (MagickSizeType) ((MagickOffsetType) length)) return(MagickFalse); offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END); if (offset < 0) return(MagickFalse); if ((MagickSizeType) offset >= length) count=(MagickOffsetType) 1; else { extent=(MagickOffsetType) length-1; count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *) ""); if (count != 1) return(MagickFalse); #if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE) if (cache_info->synchronize != MagickFalse) if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0) return(MagickFalse); #endif } offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET); if (offset < 0) return(MagickFalse); return(MagickTrue); } static MagickBooleanType OpenPixelCache(Image *image,const 
MapMode mode, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info, source_info; char format[MagickPathExtent], message[MagickPathExtent]; const char *hosts, *type; MagickBooleanType status; MagickSizeType length, number_pixels; size_t columns, packet_size; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (cache_anonymous_memory < 0) { char *value; /* Does the security policy require anonymous mapping for pixel cache? */ cache_anonymous_memory=0; value=GetPolicyValue("pixel-cache-memory"); if (value == (char *) NULL) value=GetPolicyValue("cache:memory-map"); if (LocaleCompare(value,"anonymous") == 0) { #if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS) cache_anonymous_memory=1; #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"DelegateLibrarySupportNotBuiltIn", "'%s' (policy requires anonymous memory mapping)",image->filename); #endif } value=DestroyString(value); } if ((image->columns == 0) || (image->rows == 0)) ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (((MagickSizeType) image->columns > cache_info->width_limit) || ((MagickSizeType) image->rows > cache_info->height_limit)) ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit", image->filename); length=GetImageListLength(image); if (AcquireMagickResource(ListLengthResource,length) == MagickFalse) ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit", image->filename); source_info=(*cache_info); source_info.file=(-1); (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]", image->filename,(double) image->scene); cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->alpha_trait=image->alpha_trait; cache_info->channels=image->channels; cache_info->rows=image->rows; cache_info->columns=image->columns; InitializePixelChannelMap(image); cache_info->number_channels=GetPixelChannels(image); (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels* sizeof(*image->channel_map)); cache_info->metacontent_extent=image->metacontent_extent; cache_info->mode=mode; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; packet_size=cache_info->number_channels*sizeof(Quantum); if (image->metacontent_extent != 0) packet_size+=cache_info->metacontent_extent; length=number_pixels*packet_size; columns=(size_t) (length/cache_info->rows/packet_size); if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) || ((ssize_t) cache_info->rows < 0)) ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed", image->filename); cache_info->length=length; if (image->ping != MagickFalse) { cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->type=PingCache; return(MagickTrue); } status=AcquireMagickResource(AreaResource,(MagickSizeType) cache_info->columns*cache_info->rows); if (cache_info->mode == PersistMode) status=MagickFalse; length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length)) && ((cache_info->type == UndefinedCache) || (cache_info->type == MemoryCache))) { 
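      /*
        Attempt an in-memory pixel cache: heap-allocated, or anonymously
        memory-mapped when the security policy requests it.
      */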
status=AcquireMagickResource(MemoryResource,cache_info->length); if (status != MagickFalse) { status=MagickTrue; if (cache_anonymous_memory <= 0) { cache_info->mapped=MagickFalse; cache_info->pixels=(Quantum *) MagickAssumeAligned( AcquireAlignedMemory(1,(size_t) cache_info->length)); } else { cache_info->mapped=MagickTrue; cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t) cache_info->length); } if (cache_info->pixels == (Quantum *) NULL) { cache_info->mapped=source_info.mapped; cache_info->pixels=source_info.pixels; } else { /* Create memory pixel cache. */ cache_info->type=MemoryCache; cache_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) cache_info->metacontent=(void *) (cache_info->pixels+ cache_info->number_channels*number_pixels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } cache_info->storage_class=image->storage_class; if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } } status=AcquireMagickResource(DiskResource,cache_info->length); hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts", exception); if ((status == MagickFalse) && (hosts != (const char *) NULL)) { DistributeCacheInfo *server_info; /* Distribute the pixel cache to a remote server. */ server_info=AcquireDistributeCacheInfo(exception); if (server_info != (DistributeCacheInfo *) NULL) { status=OpenDistributePixelCache(server_info,image); if (status == MagickFalse) { ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", GetDistributeCacheHostname(server_info)); server_info=DestroyDistributeCacheInfo(server_info); } else { /* Create a distributed pixel cache. 
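The cache filename is repurposed to record the remote endpoint in
"host:port" form.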
*/ status=MagickTrue; cache_info->type=DistributedCache; cache_info->server_info=server_info; (void) FormatLocaleString(cache_info->cache_filename, MagickPathExtent,"%s:%d",GetDistributeCacheHostname( (DistributeCacheInfo *) cache_info->server_info), GetDistributeCachePort((DistributeCacheInfo *) cache_info->server_info)); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, GetDistributeCacheFile((DistributeCacheInfo *) cache_info->server_info),type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } cache_info->type=UndefinedCache; (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } /* Create pixel cache on disk. */ if (status == MagickFalse) { cache_info->type=UndefinedCache; (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) && (cache_info->mode != PersistMode)) { (void) ClosePixelCacheOnDisk(cache_info); *cache_info->cache_filename='\0'; } if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse) { cache_info->type=UndefinedCache; ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", image->filename); return(MagickFalse); } status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+ cache_info->length); if (status == MagickFalse) { cache_info->type=UndefinedCache; ThrowFileException(exception,CacheError,"UnableToExtendCache", image->filename); return(MagickFalse); } cache_info->type=DiskCache; length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if (length == (MagickSizeType) ((size_t) length)) { status=AcquireMagickResource(MapResource,cache_info->length); if (status != MagickFalse) { cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode, cache_info->offset,(size_t) cache_info->length); if (cache_info->pixels == (Quantum *) NULL) { cache_info->mapped=source_info.mapped; cache_info->pixels=source_info.pixels; RelinquishMagickResource(MapResource,cache_info->length); } else { /* Create file-backed memory-mapped pixel cache. 
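The file descriptor is closed once the mapping is established; the mapping
itself keeps the backing store alive.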
*/ (void) ClosePixelCacheOnDisk(cache_info); cache_info->type=MapCache; cache_info->mapped=MagickTrue; cache_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) cache_info->metacontent=(void *) (cache_info->pixels+ cache_info->number_channels*number_pixels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, cache_info->file,type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } } status=MagickTrue; if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info,exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename, cache_info->cache_filename,cache_info->file,type,(double) cache_info->columns,(double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P e r s i s t P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PersistPixelCache() attaches to or initializes a persistent pixel cache. A % persistent pixel cache is one that resides on disk and is not destroyed % when the program exits. % % The format of the PersistPixelCache() method is: % % MagickBooleanType PersistPixelCache(Image *image,const char *filename, % const MagickBooleanType attach,MagickOffsetType *offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o filename: the persistent pixel cache filename. % % o attach: A value other than zero attaches to an existing persistent % pixel cache; zero initializes one. % % o offset: the offset in the persistent cache to store pixels. % % o exception: return any errors or warnings in this structure.
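%
%  For example (an illustrative sketch only; the cache filename is
%  hypothetical and error handling is elided):
%
%      MagickOffsetType offset = 0;
%      MagickBooleanType status=PersistPixelCache(image,"pixels.cache",
%        MagickFalse,&offset,exception);
%
%  On success, offset is advanced past this image's pixels (padded to a page
%  boundary) so a subsequent image can be persisted at the new offset.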
% */ MagickExport MagickBooleanType PersistPixelCache(Image *image, const char *filename,const MagickBooleanType attach,MagickOffsetType *offset, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info, *magick_restrict clone_info; MagickBooleanType status; ssize_t page_size; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (void *) NULL); assert(filename != (const char *) NULL); assert(offset != (MagickOffsetType *) NULL); page_size=GetMagickPageSize(); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif if (attach != MagickFalse) { /* Attach existing persistent pixel cache. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "attach persistent cache"); (void) CopyMagickString(cache_info->cache_filename,filename, MagickPathExtent); cache_info->type=MapCache; cache_info->offset=(*offset); if (OpenPixelCache(image,ReadMode,exception) == MagickFalse) return(MagickFalse); *offset+=cache_info->length+page_size-(cache_info->length % page_size); return(MagickTrue); } /* Clone persistent pixel cache. */ status=AcquireMagickResource(DiskResource,cache_info->length); if (status == MagickFalse) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } clone_info=(CacheInfo *) ClonePixelCache(cache_info); clone_info->type=DiskCache; (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent); clone_info->file=(-1); clone_info->storage_class=cache_info->storage_class; clone_info->colorspace=cache_info->colorspace; clone_info->alpha_trait=cache_info->alpha_trait; clone_info->channels=cache_info->channels; clone_info->columns=cache_info->columns; clone_info->rows=cache_info->rows; clone_info->number_channels=cache_info->number_channels; clone_info->metacontent_extent=cache_info->metacontent_extent; clone_info->mode=PersistMode; clone_info->length=cache_info->length; (void) memcpy(clone_info->channel_map,cache_info->channel_map, MaxPixelChannels*sizeof(*cache_info->channel_map)); clone_info->offset=(*offset); status=ClonePixelCacheRepository(clone_info,cache_info,exception); *offset+=cache_info->length+page_size-(cache_info->length % page_size); clone_info=(CacheInfo *) DestroyPixelCache(clone_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as % defined by the region rectangle and returns a pointer to the region. This % region is subsequently transferred to the pixel cache with % SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the % pixels are transferred, otherwise a NULL is returned. % % The format of the QueueAuthenticPixelCacheNexus() method is: % % Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % const MagickBooleanType clone,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image.
% % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to set. % % o clone: clone the pixel cache. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickOffsetType offset; MagickSizeType number_pixels; Quantum *magick_restrict pixels; /* Validate pixel cache geometry. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception); if (cache_info == (Cache) NULL) return((Quantum *) NULL); assert(cache_info->signature == MagickCoreSignature); if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) || (y < 0) || (x >= (ssize_t) cache_info->columns) || (y >= (ssize_t) cache_info->rows)) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "PixelsAreNotAuthentic","`%s'",image->filename); return((Quantum *) NULL); } offset=(MagickOffsetType) y*cache_info->columns+x; if (offset < 0) return((Quantum *) NULL); number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1; if ((MagickSizeType) offset >= number_pixels) return((Quantum *) NULL); /* Return pixel cache. */ pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows, ((image->channels & WriteMaskChannel) != 0) || ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse, nexus_info,exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u e u e A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixelsCache() allocates a region to store image pixels as % defined by the region rectangle and returns a pointer to the region. This % region is subsequently transferred to the pixel cache with % SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the % pixels are transferred, otherwise a NULL is returned. % % The format of the QueueAuthenticPixelsCache() method is: % % Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure.
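%
%  This method resolves the per-thread cache nexus (indexed by
%  GetOpenMPThreadId()) and delegates to QueueAuthenticPixelCacheNexus()
%  without cloning the cache.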
% */ static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u e u e A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixels() queues a mutable pixel region. If the region is % successfully initialized a pointer to a Quantum array representing the % region is returned, otherwise NULL is returned. The returned pointer may % point to a temporary working buffer for the pixels or it may point to the % final location of the pixels in memory. % % Write-only access means that any existing pixel values corresponding to % the region are ignored. This is useful if the initial image is being % created from scratch, or if the existing pixel values are to be % completely replaced without need to refer to their pre-existing values. % The application is free to read and write the pixel buffer returned by % QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not % initialize the pixel array values. Initializing pixel array values is the % application's responsibility. % % Performance is maximized if the selected region is part of one row, or % one or more full rows, since then there is opportunity to access the % pixels in-place (without a copy) if the image is in memory, or in a % memory-mapped file. The returned pointer must *never* be deallocated % by the user. % % Pixels accessed via the returned pointer represent a simple array of type % Quantum. If the image type is CMYK or the storage class is PseudoClass, % call GetAuthenticMetacontent() after invoking QueueAuthenticPixels() to % obtain the meta-content (of type void) corresponding to the region. % Once the Quantum (and/or meta-content) array has been updated, the % changes must be saved back to the underlying image using % SyncAuthenticPixels() or they may be lost. % % The format of the QueueAuthenticPixels() method is: % % Quantum *QueueAuthenticPixels(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure.
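%
%  For example (an illustrative sketch only, assuming an initialized image
%  and exception structure; error handling is abbreviated), to initialize
%  the first row of an image to opaque black:
%
%      Quantum
%        *q;
%
%      ssize_t
%        x;
%
%      q=QueueAuthenticPixels(image,0,0,image->columns,1,exception);
%      if (q != (Quantum *) NULL)
%        {
%          for (x=0; x < (ssize_t) image->columns; x++)
%          {
%            SetPixelRed(image,0,q);
%            SetPixelGreen(image,0,q);
%            SetPixelBlue(image,0,q);
%            SetPixelAlpha(image,OpaqueAlpha,q);
%            q+=GetPixelChannels(image);
%          }
%          (void) SyncAuthenticPixels(image,exception);
%        }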
% */ MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) { pixels=cache_info->methods.queue_authentic_pixels_handler(image,x,y, columns,rows,exception); return(pixels); } assert(id < (int) cache_info->number_threads); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCacheMetacontent() reads metacontent from the specified region of % the pixel cache. % % The format of the ReadPixelCacheMetacontent() method is: % % MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the metacontent. % % o exception: return any errors or warnings in this structure. % */ static inline MagickOffsetType ReadPixelCacheRegion( const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PREAD) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PREAD) count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX)); #else count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX),offset+i); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType ReadPixelCacheMetacontent( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register ssize_t y; register unsigned char *magick_restrict q; size_t rows; if (cache_info->metacontent_extent == 0) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width* cache_info->metacontent_extent; extent=length*nexus_info->region.height; rows=nexus_info->region.height; y=0; q=(unsigned char *) nexus_info->metacontent; switch (cache_info->type) { case MemoryCache: case MapCache: { register unsigned char *magick_restrict p; /* Read meta-content from memory. 
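When the nexus covers full cache rows, the region is contiguous in the
backing store and can be copied with a single memcpy(); otherwise it is
copied row by row.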
*/ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=(unsigned char *) cache_info->metacontent+offset* cache_info->metacontent_extent; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->metacontent_extent*cache_info->columns; q+=cache_info->metacontent_extent*nexus_info->region.width; } break; } case DiskCache: { /* Read meta content from disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent* cache_info->number_channels*sizeof(Quantum)+offset* cache_info->metacontent_extent,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; offset+=cache_info->columns; q+=cache_info->metacontent_extent*nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read metacontent from distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=cache_info->metacontent_extent*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCachePixels() reads pixels from the specified region of the pixel % cache. % % The format of the ReadPixelCachePixels() method is: % % MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the pixels. % % o exception: return any errors or warnings in this structure. 
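%
%  The offset and extent arithmetic below is checked for overflow; on any
%  wrap the read fails with MagickFalse rather than reading out of bounds.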
% */ static MagickBooleanType ReadPixelCachePixels( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register Quantum *magick_restrict q; register ssize_t y; size_t number_channels, rows; if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns; if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y) return(MagickFalse); offset+=nexus_info->region.x; number_channels=cache_info->number_channels; length=(MagickSizeType) number_channels*nexus_info->region.width* sizeof(Quantum); if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width) return(MagickFalse); rows=nexus_info->region.height; extent=length*rows; if ((extent == 0) || ((extent/length) != rows)) return(MagickFalse); y=0; q=nexus_info->pixels; switch (cache_info->type) { case MemoryCache: case MapCache: { register Quantum *magick_restrict p; /* Read pixels from memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=cache_info->pixels+cache_info->number_channels*offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->number_channels*cache_info->columns; q+=cache_info->number_channels*nexus_info->region.width; } break; } case DiskCache: { /* Read pixels from disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset* cache_info->number_channels*sizeof(*q),length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; offset+=cache_info->columns; q+=cache_info->number_channels*nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read pixels from distributed cache. 
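Unless the whole region is contiguous and within MagickMaxBufferExtent,
pixels are fetched from the remote cache server one row at a time.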
*/ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCachePixels((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=cache_info->number_channels*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e f e r e n c e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferencePixelCache() increments the reference count associated with the % pixel cache, returning a pointer to the cache. % % The format of the ReferencePixelCache method is: % % Cache ReferencePixelCache(Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickPrivate Cache ReferencePixelCache(Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); LockSemaphoreInfo(cache_info->semaphore); cache_info->reference_count++; UnlockSemaphoreInfo(cache_info->semaphore); return(cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t P i x e l C a c h e C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetPixelCacheChannels() resets the pixel cache channels. % % The format of the ResetPixelCacheChannels method is: % % void ResetPixelCacheChannels(Image *) % % A description of each parameter follows: % % o image: the image. % */ MagickPrivate void ResetPixelCacheChannels(Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); cache_info->number_channels=GetPixelChannels(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t C a c h e A n o n y m o u s M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetCacheAnonymousMemory() resets the cache_anonymous_memory value.
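%
%  A zero value selects heap allocation (rather than anonymous memory
%  mapping) for subsequently allocated pixel buffers.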
% % The format of the ResetCacheAnonymousMemory method is: % % void ResetCacheAnonymousMemory(void) % */ MagickPrivate void ResetCacheAnonymousMemory(void) { cache_anonymous_memory=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t P i x e l C a c h e E p o c h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetPixelCacheEpoch() resets the pixel cache epoch. % % The format of the ResetPixelCacheEpoch method is: % % void ResetPixelCacheEpoch(void) % */ MagickPrivate void ResetPixelCacheEpoch(void) { cache_epoch=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheMethods() sets the image pixel methods to the specified ones. % % The format of the SetPixelCacheMethods() method is: % % SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache: the pixel cache. % % o cache_methods: Specifies a pointer to a CacheMethods structure. % */ MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods) { CacheInfo *magick_restrict cache_info; GetOneAuthenticPixelFromHandler get_one_authentic_pixel_from_handler; GetOneVirtualPixelFromHandler get_one_virtual_pixel_from_handler; /* Set cache pixel methods. */ assert(cache != (Cache) NULL); assert(cache_methods != (CacheMethods *) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) cache_info->methods.get_virtual_pixel_handler= cache_methods->get_virtual_pixel_handler; if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL) cache_info->methods.destroy_pixel_handler= cache_methods->destroy_pixel_handler; if (cache_methods->get_virtual_metacontent_from_handler != (GetVirtualMetacontentFromHandler) NULL) cache_info->methods.get_virtual_metacontent_from_handler= cache_methods->get_virtual_metacontent_from_handler; if (cache_methods->get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) cache_info->methods.get_authentic_pixels_handler= cache_methods->get_authentic_pixels_handler; if (cache_methods->queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) cache_info->methods.queue_authentic_pixels_handler= cache_methods->queue_authentic_pixels_handler; if (cache_methods->sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL) cache_info->methods.sync_authentic_pixels_handler= cache_methods->sync_authentic_pixels_handler; if (cache_methods->get_authentic_pixels_from_handler != (GetAuthenticPixelsFromHandler) NULL) cache_info->methods.get_authentic_pixels_from_handler= cache_methods->get_authentic_pixels_from_handler; if (cache_methods->get_authentic_metacontent_from_handler != (GetAuthenticMetacontentFromHandler) NULL) cache_info->methods.get_authentic_metacontent_from_handler= cache_methods->get_authentic_metacontent_from_handler; get_one_virtual_pixel_from_handler= cache_methods->get_one_virtual_pixel_from_handler; if (get_one_virtual_pixel_from_handler != (GetOneVirtualPixelFromHandler) NULL) cache_info->methods.get_one_virtual_pixel_from_handler= 
cache_methods->get_one_virtual_pixel_from_handler; get_one_authentic_pixel_from_handler= cache_methods->get_one_authentic_pixel_from_handler; if (get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) cache_info->methods.get_one_authentic_pixel_from_handler= cache_methods->get_one_authentic_pixel_from_handler; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e N e x u s P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheNexusPixels() defines the region of the cache for the % specified cache nexus. % % The format of the SetPixelCacheNexusPixels() method is: % % Quantum *SetPixelCacheNexusPixels( % const CacheInfo *magick_restrict cache_info,const MapMode mode, % const ssize_t x,const ssize_t y,const size_t width,const size_t height, % const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o mode: ReadMode, WriteMode, or IOMode. % % o x,y,width,height: define the region of this particular cache nexus. % % o buffered: if true, nexus pixels are buffered. % % o nexus_info: the cache nexus to set. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType AcquireCacheNexusPixels( const CacheInfo *magick_restrict cache_info,const MagickSizeType length, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { if (length != (MagickSizeType) ((size_t) length)) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"PixelCacheAllocationFailed","`%s'", cache_info->filename); return(MagickFalse); } nexus_info->length=0; nexus_info->mapped=MagickFalse; if (cache_anonymous_memory <= 0) { nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1, (size_t) length)); if (nexus_info->cache != (Quantum *) NULL) (void) memset(nexus_info->cache,0,(size_t) length); } else { nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length); if (nexus_info->cache != (Quantum *) NULL) nexus_info->mapped=MagickTrue; } if (nexus_info->cache == (Quantum *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"PixelCacheAllocationFailed","`%s'", cache_info->filename); return(MagickFalse); } nexus_info->length=length; return(MagickTrue); } static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info, const MapMode mode) { if (nexus_info->length < CACHE_LINE_SIZE) return; if (mode == ReadMode) { MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE, 0,1); return; } MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,1,1); } static Quantum *SetPixelCacheNexusPixels( const CacheInfo *magick_restrict cache_info,const MapMode mode, const ssize_t x,const ssize_t y,const size_t width,const size_t height, const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickBooleanType status; MagickSizeType length, number_pixels; assert(cache_info != (const CacheInfo *) NULL); assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return((Quantum *) NULL); assert(nexus_info->signature == MagickCoreSignature); (void) memset(&nexus_info->region,0,sizeof(nexus_info->region)); if ((width == 0) || (height == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, 
"NoPixelsDefinedInCache","`%s'",cache_info->filename); return((Quantum *) NULL); } if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) && (buffered == MagickFalse)) { if (((x >= 0) && (y >= 0) && (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) && (((x == 0) && (width == cache_info->columns)) || ((height == 1) && (((ssize_t) width+x-1) < (ssize_t) cache_info->columns)))) { MagickOffsetType offset; /* Pixels are accessed directly from memory. */ offset=(MagickOffsetType) y*cache_info->columns+x; nexus_info->pixels=cache_info->pixels+cache_info->number_channels* offset; nexus_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) nexus_info->metacontent=(unsigned char *) cache_info->metacontent+ offset*cache_info->metacontent_extent; nexus_info->region.width=width; nexus_info->region.height=height; nexus_info->region.x=x; nexus_info->region.y=y; nexus_info->authentic_pixel_cache=MagickTrue; PrefetchPixelCacheNexusPixels(nexus_info,mode); return(nexus_info->pixels); } } /* Pixels are stored in a staging region until they are synced to the cache. */ if (((MagickSizeType) width > cache_info->width_limit) || ((MagickSizeType) height > cache_info->height_limit)) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "WidthOrHeightExceedsLimit","`%s'",cache_info->filename); return((Quantum *) NULL); } number_pixels=(MagickSizeType) width*height; length=MagickMax(number_pixels,MagickMax(cache_info->columns, cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels); if (cache_info->metacontent_extent != 0) length+=number_pixels*cache_info->metacontent_extent; status=MagickTrue; if (nexus_info->cache == (Quantum *) NULL) status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception); else if (nexus_info->length < length) { RelinquishCacheNexusPixels(nexus_info); status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception); } if (status == MagickFalse) return((Quantum *) NULL); nexus_info->pixels=nexus_info->cache; nexus_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) nexus_info->metacontent=(void *) (nexus_info->pixels+ cache_info->number_channels*number_pixels); nexus_info->region.width=width; nexus_info->region.height=height; nexus_info->region.x=x; nexus_info->region.y=y; nexus_info->authentic_pixel_cache=cache_info->type == PingCache ? MagickTrue : MagickFalse; PrefetchPixelCacheNexusPixels(nexus_info,mode); return(nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t P i x e l C a c h e V i r t u a l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the % pixel cache and returns the previous setting. A virtual pixel is any pixel % access that is outside the boundaries of the image cache. % % The format of the SetPixelCacheVirtualMethod() method is: % % VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image, % const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: choose the type of virtual pixel. % % o exception: return any errors or warnings in this structure. 
% */ static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; CacheView *magick_restrict image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); image->alpha_trait=BlendPixelTrait; status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); /* must be virtual */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(image,alpha,q); q+=GetPixelChannels(image); } status=SyncCacheViewAuthenticPixels(image_view,exception); } image_view=DestroyCacheView(image_view); return(status); } MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image, const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; VirtualPixelMethod method; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); method=cache_info->virtual_pixel_method; cache_info->virtual_pixel_method=virtual_pixel_method; if ((image->columns != 0) && (image->rows != 0)) switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: { if ((image->background_color.alpha_trait != UndefinedPixelTrait) && (image->alpha_trait == UndefinedPixelTrait)) (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception); if ((IsPixelInfoGray(&image->background_color) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace,exception); break; } case TransparentVirtualPixelMethod: { if (image->alpha_trait == UndefinedPixelTrait) (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception); break; } default: break; } return(method); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have % been completed and updates the host memory. % % The format of the SyncAuthenticOpenCLBuffer() method is: % % void SyncAuthenticOpenCLBuffer(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info) { assert(cache_info != (CacheInfo *) NULL); assert(cache_info->signature == MagickCoreSignature); if ((cache_info->type != MemoryCache) || (cache_info->opencl == (MagickCLCacheInfo) NULL)) return; /* Ensure single threaded access to OpenCL environment. 
*/ LockSemaphoreInfo(cache_info->semaphore); cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl); UnlockSemaphoreInfo(cache_info->semaphore); } MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); cache_info=(CacheInfo *) image->cache; CopyOpenCLBuffer(cache_info); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the % in-memory or disk cache. The method returns MagickTrue if the pixel region % is synced, otherwise MagickFalse. % % The format of the SyncAuthenticPixelCacheNexus() method is: % % MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to sync. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickBooleanType status; /* Transfer pixels to the cache. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->cache == (Cache) NULL) ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return(MagickFalse); if (image->mask_trait != UpdatePixelTrait) { if (((image->channels & WriteMaskChannel) != 0) && (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse)) return(MagickFalse); if (((image->channels & CompositeMaskChannel) != 0) && (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse)) return(MagickFalse); } if (nexus_info->authentic_pixel_cache != MagickFalse) { if (image->taint == MagickFalse) image->taint=MagickTrue; return(MagickTrue); } assert(cache_info->signature == MagickCoreSignature); status=WritePixelCachePixels(cache_info,nexus_info,exception); if ((cache_info->metacontent_extent != 0) && (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse)) return(MagickFalse); if ((status != MagickFalse) && (image->taint == MagickFalse)) image->taint=MagickTrue; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory % or disk cache. The method returns MagickTrue if the pixel region is synced, % otherwise MagickFalse. % % The format of the SyncAuthenticPixelsCache() method is: % % MagickBooleanType SyncAuthenticPixelsCache(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
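%
%  The usual route into this method is the public SyncAuthenticPixels()
%  below; a typical authentic-pixel update is (sketch):
%
%      Quantum
%        *q;
%
%      q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
%      if (q == (Quantum *) NULL)
%        break;
%      ...modify the row of pixels at q...
%      if (SyncAuthenticPixels(image,exception) == MagickFalse)
%        break;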
% */ static MagickBooleanType SyncAuthenticPixelsCache(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id], exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is flushed, otherwise % MagickFalse. % % The format of the SyncAuthenticPixels() method is: % % MagickBooleanType SyncAuthenticPixels(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SyncAuthenticPixels(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL) { status=cache_info->methods.sync_authentic_pixels_handler(image, exception); return(status); } assert(id < (int) cache_info->number_threads); status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id], exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImagePixelCache() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is flushed, otherwise % MagickFalse. % % The format of the SyncImagePixelCache() method is: % % MagickBooleanType SyncImagePixelCache(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(exception != (ExceptionInfo *) NULL); cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception); return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + W r i t e P i x e l C a c h e M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePixelCacheMetacontent() writes the meta-content to the specified region % of the pixel cache. 
% % The format of the WritePixelCacheMetacontent() method is: % % MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to write the meta-content. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register const unsigned char *magick_restrict p; register ssize_t y; size_t rows; if (cache_info->metacontent_extent == 0) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width* cache_info->metacontent_extent; extent=(MagickSizeType) length*nexus_info->region.height; rows=nexus_info->region.height; y=0; p=(unsigned char *) nexus_info->metacontent; switch (cache_info->type) { case MemoryCache: case MapCache: { register unsigned char *magick_restrict q; /* Write associated pixels to memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } q=(unsigned char *) cache_info->metacontent+offset* cache_info->metacontent_extent; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=nexus_info->region.width*cache_info->metacontent_extent; q+=cache_info->columns*cache_info->metacontent_extent; } break; } case DiskCache: { /* Write associated pixels to disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=WritePixelCacheRegion(cache_info,cache_info->offset+extent* cache_info->number_channels*sizeof(Quantum)+offset* cache_info->metacontent_extent,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->metacontent_extent*nexus_info->region.width; offset+=cache_info->columns; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Write metacontent to distributed cache. 
*/ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *) cache_info->server_info,&region,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->metacontent_extent*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToWritePixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + W r i t e P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePixelCachePixels() writes image pixels to the specified region of the % pixel cache. % % The format of the WritePixelCachePixels() method is: % % MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to write the pixels. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WritePixelCachePixels( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register const Quantum *magick_restrict p; register ssize_t y; size_t rows; if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width* sizeof(Quantum); extent=length*nexus_info->region.height; rows=nexus_info->region.height; y=0; p=nexus_info->pixels; switch (cache_info->type) { case MemoryCache: case MapCache: { register Quantum *magick_restrict q; /* Write pixels to memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } q=cache_info->pixels+cache_info->number_channels*offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->number_channels*nexus_info->region.width; q+=cache_info->number_channels*cache_info->columns; } break; } case DiskCache: { /* Write pixels to disk. 
*/ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WritePixelCacheRegion(cache_info,cache_info->offset+offset* cache_info->number_channels*sizeof(*p),length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->number_channels*nexus_info->region.width; offset+=cache_info->columns; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Write pixels to distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WriteDistributePixelCachePixels((DistributeCacheInfo *) cache_info->server_info,&region,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->number_channels*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToWritePixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); }
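/*
  Illustrative sketch, not part of MagickCore (the function below is
  hypothetical and unreferenced; it exists only for exposition).  Both
  ReadPixelCachePixels() and WritePixelCachePixels() above collapse their
  row-by-row copy into a single transfer whenever the nexus spans full cache
  rows, that is, when cache_info->columns == nexus_info->region.width (which
  implies region->x == 0 for an in-bounds region).  The helper demonstrates
  the same optimization for an in-memory Quantum buffer.
*/
static void SketchCopyCacheRegion(const Quantum *cache,
  const size_t cache_columns,const size_t number_channels,
  const RectangleInfo *region,Quantum *nexus)
{
  const Quantum
    *p;

  Quantum
    *q;

  size_t
    length,
    rows;

  ssize_t
    y;

  length=number_channels*region->width;  /* Quantums per nexus row */
  rows=region->height;
  if (cache_columns == region->width)
    {
      /*
        Rows are contiguous in the cache: move them all with one copy.
      */
      length*=rows;
      rows=1;
    }
  p=cache+number_channels*((size_t) region->y*cache_columns+(size_t)
    region->x);
  q=nexus;
  for (y=0; y < (ssize_t) rows; y++)
  {
    (void) memcpy(q,p,length*sizeof(*q));
    p+=number_channels*cache_columns;
    q+=number_channels*region->width;
  }
}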
draw.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD RRRR AAA W W % % D D R R A A W W % % D D RRRR AAAAA W W W % % D D R R A A WW WW % % DDDD R R A A W W % % % % % % MagickCore Image Drawing Methods % % % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon % rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion", % Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent % (www.appligent.com) contributed the dash pattern, linecap stroking % algorithm, and minor rendering improvements. % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. */ #define BezierQuantum 200 #define PrimitiveExtentPad 4296.0 #define MaxBezierCoordinates 67108864 #define ThrowPointExpectedException(token,exception) \ { \ (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \ "NonconformingDrawingPrimitiveDefinition","`%s'",token); \ status=MagickFalse; \ break; \ } /* Typedef declarations. 
*/ typedef struct _EdgeInfo { SegmentInfo bounds; double scanline; PointInfo *points; size_t number_points; ssize_t direction; MagickBooleanType ghostline; size_t highwater; } EdgeInfo; typedef struct _ElementInfo { double cx, cy, major, minor, angle; } ElementInfo; typedef struct _MVGInfo { PrimitiveInfo **primitive_info; size_t *extent; ssize_t offset; PointInfo point; ExceptionInfo *exception; } MVGInfo; typedef struct _PolygonInfo { EdgeInfo *edges; size_t number_edges; } PolygonInfo; typedef enum { MoveToCode, OpenCode, GhostlineCode, LineToCode, EndCode } PathInfoCode; typedef struct _PathInfo { PointInfo point; PathInfoCode code; } PathInfo; /* Forward declarations. */ static Image *DrawClippingMask(Image *,const DrawInfo *,const char *,const char *, ExceptionInfo *); static MagickBooleanType DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *, ExceptionInfo *), RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *), TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo), TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo, const double,const MagickBooleanType,const MagickBooleanType), TraceBezier(MVGInfo *,const size_t), TraceCircle(MVGInfo *,const PointInfo,const PointInfo), TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo), TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo), TraceSquareLinecap(PrimitiveInfo *,const size_t,const double); static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *,ExceptionInfo *); static ssize_t TracePath(MVGInfo *,const char *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireDrawInfo() returns a DrawInfo structure properly initialized. % % The format of the AcquireDrawInfo method is: % % DrawInfo *AcquireDrawInfo(void) % */ MagickExport DrawInfo *AcquireDrawInfo(void) { DrawInfo *draw_info; draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info)); GetDrawInfo((ImageInfo *) NULL,draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneDrawInfo() makes a copy of the given draw_info structure. If NULL % is specified, a new DrawInfo structure is created initialized to default % values. % % The format of the CloneDrawInfo method is: % % DrawInfo *CloneDrawInfo(const ImageInfo *image_info, % const DrawInfo *draw_info) % % A description of each parameter follows: % % o image_info: the image info. % % o draw_info: the draw info. 
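%
%  A typical call pattern (sketch):
%
%      DrawInfo
%        *clone_info;
%
%      clone_info=CloneDrawInfo(image_info,draw_info);
%      ...use clone_info...
%      clone_info=DestroyDrawInfo(clone_info);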
% */ MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info, const DrawInfo *draw_info) { DrawInfo *clone_info; ExceptionInfo *exception; clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info)); GetDrawInfo(image_info,clone_info); if (draw_info == (DrawInfo *) NULL) return(clone_info); exception=AcquireExceptionInfo(); if (draw_info->id != (char *) NULL) (void) CloneString(&clone_info->id,draw_info->id); if (draw_info->primitive != (char *) NULL) (void) CloneString(&clone_info->primitive,draw_info->primitive); if (draw_info->geometry != (char *) NULL) (void) CloneString(&clone_info->geometry,draw_info->geometry); clone_info->compliance=draw_info->compliance; clone_info->viewbox=draw_info->viewbox; clone_info->affine=draw_info->affine; clone_info->gravity=draw_info->gravity; clone_info->fill=draw_info->fill; clone_info->stroke=draw_info->stroke; clone_info->stroke_width=draw_info->stroke_width; if (draw_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue, exception); if (draw_info->stroke_pattern != (Image *) NULL) clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0, MagickTrue,exception); clone_info->stroke_antialias=draw_info->stroke_antialias; clone_info->text_antialias=draw_info->text_antialias; clone_info->fill_rule=draw_info->fill_rule; clone_info->linecap=draw_info->linecap; clone_info->linejoin=draw_info->linejoin; clone_info->miterlimit=draw_info->miterlimit; clone_info->dash_offset=draw_info->dash_offset; clone_info->decorate=draw_info->decorate; clone_info->compose=draw_info->compose; if (draw_info->text != (char *) NULL) (void) CloneString(&clone_info->text,draw_info->text); if (draw_info->font != (char *) NULL) (void) CloneString(&clone_info->font,draw_info->font); if (draw_info->metrics != (char *) NULL) (void) CloneString(&clone_info->metrics,draw_info->metrics); if (draw_info->family != (char *) NULL) (void) CloneString(&clone_info->family,draw_info->family); clone_info->style=draw_info->style; clone_info->stretch=draw_info->stretch; clone_info->weight=draw_info->weight; if (draw_info->encoding != (char *) NULL) (void) CloneString(&clone_info->encoding,draw_info->encoding); clone_info->pointsize=draw_info->pointsize; clone_info->kerning=draw_info->kerning; clone_info->interline_spacing=draw_info->interline_spacing; clone_info->interword_spacing=draw_info->interword_spacing; clone_info->direction=draw_info->direction; if (draw_info->density != (char *) NULL) (void) CloneString(&clone_info->density,draw_info->density); clone_info->align=draw_info->align; clone_info->undercolor=draw_info->undercolor; clone_info->border_color=draw_info->border_color; if (draw_info->server_name != (char *) NULL) (void) CloneString(&clone_info->server_name,draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) { ssize_t x; for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ; clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2), sizeof(*clone_info->dash_pattern)); if (clone_info->dash_pattern == (double *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)* sizeof(*clone_info->dash_pattern)); (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t) (x+1)*sizeof(*clone_info->dash_pattern)); } clone_info->gradient=draw_info->gradient; if (draw_info->gradient.stops != (StopInfo *) NULL) { size_t number_stops; 
number_stops=clone_info->gradient.number_stops; clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t) number_stops,sizeof(*clone_info->gradient.stops)); if (clone_info->gradient.stops == (StopInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops, (size_t) number_stops*sizeof(*clone_info->gradient.stops)); } clone_info->bounds=draw_info->bounds; clone_info->fill_alpha=draw_info->fill_alpha; clone_info->stroke_alpha=draw_info->stroke_alpha; clone_info->element_reference=draw_info->element_reference; clone_info->clip_path=draw_info->clip_path; clone_info->clip_units=draw_info->clip_units; if (draw_info->clip_mask != (char *) NULL) (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask); if (draw_info->clipping_mask != (Image *) NULL) clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0, MagickTrue,exception); if (draw_info->composite_mask != (Image *) NULL) clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0, MagickTrue,exception); clone_info->render=draw_info->render; clone_info->debug=IsEventLogging(); exception=DestroyExceptionInfo(exception); return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n v e r t P a t h T o P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvertPathToPolygon() converts a path to the more efficient sorted % rendering form. % % The format of the ConvertPathToPolygon method is: % % PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o ConvertPathToPolygon() returns the path in a more efficient sorted % rendering form of type PolygonInfo. % % o path_info: Specifies a pointer to a PathInfo structure. % % o exception: return any errors or warnings in this structure. % % */ static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info) { ssize_t i; if (polygon_info->edges != (EdgeInfo *) NULL) { for (i=0; i < (ssize_t) polygon_info->number_edges; i++) if (polygon_info->edges[i].points != (PointInfo *) NULL) polygon_info->edges[i].points=(PointInfo *) RelinquishMagickMemory(polygon_info->edges[i].points); polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory( polygon_info->edges); } return((PolygonInfo *) RelinquishMagickMemory(polygon_info)); } #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int DrawCompareEdges(const void *p_edge,const void *q_edge) { #define DrawCompareEdge(p,q) \ { \ if (((p)-(q)) < 0.0) \ return(-1); \ if (((p)-(q)) > 0.0) \ return(1); \ } const PointInfo *p, *q; /* Edge sorting for right-handed coordinate system. 
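Edges are keyed on the y, then the x, of their first point, then on their initial slope (compared with the cross product of the two leading segments), and finally on the y and x of their second point, so the scan-line rasterizer can activate edges in increasing y order.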
*/ p=((const EdgeInfo *) p_edge)->points; q=((const EdgeInfo *) q_edge)->points; DrawCompareEdge(p[0].y,q[0].y); DrawCompareEdge(p[0].x,q[0].x); DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)* (q[1].x-q[0].x)); DrawCompareEdge(p[1].y,q[1].y); DrawCompareEdge(p[1].x,q[1].x); return(0); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static void LogPolygonInfo(const PolygonInfo *polygon_info) { EdgeInfo *p; ssize_t i, j; (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge"); p=polygon_info->edges; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) { (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:", (double) i); (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s", p->direction != MagickFalse ? "down" : "up"); (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s", p->ghostline != MagickFalse ? "transparent" : "opaque"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1, p->bounds.x2,p->bounds.y2); for (j=0; j < (ssize_t) p->number_points; j++) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g", p->points[j].x,p->points[j].y); p++; } (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge"); } static void ReversePoints(PointInfo *points,const size_t number_points) { PointInfo point; ssize_t i; for (i=0; i < (ssize_t) (number_points >> 1); i++) { point=points[i]; points[i]=points[number_points-(i+1)]; points[number_points-(i+1)]=point; } } static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info, ExceptionInfo *exception) { long direction, next_direction; PointInfo point, *points; PolygonInfo *polygon_info; SegmentInfo bounds; ssize_t i, n; MagickBooleanType ghostline; size_t edge, number_edges, number_points; /* Convert a path to the more efficient sorted rendering form. */ polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info)); if (polygon_info == (PolygonInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return((PolygonInfo *) NULL); } number_edges=16; polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonInfo(polygon_info)); } (void) memset(polygon_info->edges,0,number_edges* sizeof(*polygon_info->edges)); direction=0; edge=0; ghostline=MagickFalse; n=0; number_points=0; points=(PointInfo *) NULL; (void) memset(&point,0,sizeof(point)); (void) memset(&bounds,0,sizeof(bounds)); polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=0.0; polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) direction; polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->number_edges=0; for (i=0; path_info[i].code != EndCode; i++) { if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) || (path_info[i].code == GhostlineCode)) { /* Move to. 
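A move-to (MoveToCode, OpenCode, or GhostlineCode) flushes any edge collected so far and starts a fresh point list for the new subpath.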
*/ if ((points != (PointInfo *) NULL) && (n >= 2)) { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); points=(PointInfo *) RelinquishMagickMemory(points); return(DestroyPolygonInfo(polygon_info)); } } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; points=(PointInfo *) NULL; ghostline=MagickFalse; edge++; polygon_info->number_edges=edge; } if (points == (PointInfo *) NULL) { number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonInfo(polygon_info)); } } ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse; point=path_info[i].point; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; direction=0; n=1; continue; } /* Line to. */ next_direction=((path_info[i].point.y > point.y) || ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) && (path_info[i].point.x > point.x))) ? 1 : -1; if ((points != (PointInfo *) NULL) && (direction != 0) && (direction != next_direction)) { /* New edge. 
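The direction of travel in y just reversed, so the points gathered so far are closed out as one edge and a new edge starts at the shared point; every stored edge is therefore monotone in y.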
*/ point=points[n-1]; if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); points=(PointInfo *) RelinquishMagickMemory(points); return(DestroyPolygonInfo(polygon_info)); } } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; polygon_info->number_edges=edge+1; points=(PointInfo *) NULL; number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonInfo(polygon_info)); } n=1; ghostline=MagickFalse; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; edge++; } direction=next_direction; if (points == (PointInfo *) NULL) continue; if (n == (ssize_t) number_points) { number_points<<=1; points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonInfo(polygon_info)); } } point=path_info[i].point; points[n]=point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.x > bounds.x2) bounds.x2=point.x; n++; } if (points != (PointInfo *) NULL) { if (n < 2) points=(PointInfo *) RelinquishMagickMemory(points); else { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonInfo(polygon_info)); } } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; points=(PointInfo *) NULL; ghostline=MagickFalse; edge++; polygon_info->number_edges=edge; } } polygon_info->number_edges=edge; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(polygon_info->edges, polygon_info->number_edges,sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonInfo(polygon_info)); } for (i=0; i < (ssize_t) polygon_info->number_edges; i++) { EdgeInfo *edge_info; edge_info=polygon_info->edges+i; edge_info->points=(PointInfo *) ResizeQuantumMemory(edge_info->points, 
edge_info->number_points,sizeof(*edge_info->points)); if (edge_info->points == (PointInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonInfo(polygon_info)); } } qsort(polygon_info->edges,(size_t) polygon_info->number_edges, sizeof(*polygon_info->edges),DrawCompareEdges); if (IsEventLogging() != MagickFalse) LogPolygonInfo(polygon_info); return(polygon_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n v e r t P r i m i t i v e T o P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector % path structure. % % The format of the ConvertPrimitiveToPath method is: % % PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o ConvertPrimitiveToPath() returns a vector path structure of type % PathInfo. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. % */ static void LogPathInfo(const PathInfo *path_info) { const PathInfo *p; (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path"); for (p=path_info; p->code != EndCode; p++) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ? "moveto ghostline" : p->code == OpenCode ? "moveto open" : p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" : "?"); (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path"); } static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { MagickBooleanType closed_subpath; PathInfo *path_info; PathInfoCode code; PointInfo p, q; ssize_t i, n; ssize_t coordinates, start; /* Converts a PrimitiveInfo structure into a vector path structure. */ switch (primitive_info->primitive) { case AlphaPrimitive: case ColorPrimitive: case ImagePrimitive: case PointPrimitive: case TextPrimitive: return((PathInfo *) NULL); default: break; } for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ; path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL), sizeof(*path_info)); if (path_info == (PathInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return((PathInfo *) NULL); } coordinates=0; closed_subpath=MagickFalse; n=0; p.x=(-1.0); p.y=(-1.0); q.x=(-1.0); q.y=(-1.0); start=0; for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { code=LineToCode; if (coordinates <= 0) { /* New subpath. */ coordinates=(ssize_t) primitive_info[i].coordinates; p=primitive_info[i].point; start=n; code=MoveToCode; closed_subpath=primitive_info[i].closed_subpath; } coordinates--; if ((code == MoveToCode) || (coordinates <= 0) || (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) || (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon)) { /* Eliminate duplicate points. */ path_info[n].code=code; path_info[n].point=primitive_info[i].point; q=primitive_info[i].point; n++; } if (coordinates > 0) continue; /* next point in current subpath */ if (closed_subpath != MagickFalse) { closed_subpath=MagickFalse; continue; } /* Mark the p point as open if the subpath is not closed. 
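A ghostline move, plus a synthetic line back to the subpath start, lets the scan-line fill treat the contour as closed while the ghostline flag keeps the synthetic edge itself from being painted.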
*/ path_info[start].code=OpenCode; path_info[n].code=GhostlineCode; path_info[n].point=primitive_info[i].point; n++; path_info[n].code=LineToCode; path_info[n].point=p; n++; } path_info[n].code=EndCode; path_info[n].point.x=0.0; path_info[n].point.y=0.0; if (IsEventLogging() != MagickFalse) LogPathInfo(path_info); path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1), sizeof(*path_info)); return(path_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyDrawInfo() deallocates memory associated with an DrawInfo structure. % % The format of the DestroyDrawInfo method is: % % DrawInfo *DestroyDrawInfo(DrawInfo *draw_info) % % A description of each parameter follows: % % o draw_info: the draw info. % */ MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info) { assert(draw_info != (DrawInfo *) NULL); if (draw_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(draw_info->signature == MagickCoreSignature); if (draw_info->id != (char *) NULL) draw_info->id=DestroyString(draw_info->id); if (draw_info->primitive != (char *) NULL) draw_info->primitive=DestroyString(draw_info->primitive); if (draw_info->text != (char *) NULL) draw_info->text=DestroyString(draw_info->text); if (draw_info->geometry != (char *) NULL) draw_info->geometry=DestroyString(draw_info->geometry); if (draw_info->fill_pattern != (Image *) NULL) draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern); if (draw_info->stroke_pattern != (Image *) NULL) draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern); if (draw_info->font != (char *) NULL) draw_info->font=DestroyString(draw_info->font); if (draw_info->metrics != (char *) NULL) draw_info->metrics=DestroyString(draw_info->metrics); if (draw_info->family != (char *) NULL) draw_info->family=DestroyString(draw_info->family); if (draw_info->encoding != (char *) NULL) draw_info->encoding=DestroyString(draw_info->encoding); if (draw_info->density != (char *) NULL) draw_info->density=DestroyString(draw_info->density); if (draw_info->server_name != (char *) NULL) draw_info->server_name=(char *) RelinquishMagickMemory(draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) draw_info->dash_pattern=(double *) RelinquishMagickMemory( draw_info->dash_pattern); if (draw_info->gradient.stops != (StopInfo *) NULL) draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory( draw_info->gradient.stops); if (draw_info->clip_mask != (char *) NULL) draw_info->clip_mask=DestroyString(draw_info->clip_mask); if (draw_info->clipping_mask != (Image *) NULL) draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask); if (draw_info->composite_mask != (Image *) NULL) draw_info->composite_mask=DestroyImage(draw_info->composite_mask); draw_info->signature=(~MagickCoreSignature); draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w A f f i n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawAffineImage() composites the source over the destination image as % dictated by the affine transform. 
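%
%  Each source coordinate (x,y) maps to the destination as:
%
%      x' = sx*x + ry*y + tx
%      y' = rx*x + sy*y + ty
%
%  The renderer bounds the destination with the four transformed source
%  corners, then walks destination scanlines, sampling the source through
%  the inverse transform.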
% % The format of the DrawAffineImage method is: % % MagickBooleanType DrawAffineImage(Image *image,const Image *source, % const AffineMatrix *affine,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o source: the source image. % % o affine: the affine transform. % % o exception: return any errors or warnings in this structure. % */ static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine, const double y,const SegmentInfo *edge) { double intercept, z; double x; SegmentInfo inverse_edge; /* Determine left and right edges. */ inverse_edge.x1=edge->x1; inverse_edge.y1=edge->y1; inverse_edge.x2=edge->x2; inverse_edge.y2=edge->y2; z=affine->ry*y+affine->tx; if (affine->sx >= MagickEpsilon) { intercept=(-z/affine->sx); x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z+(double) image->columns)/affine->sx; x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if (affine->sx < -MagickEpsilon) { intercept=(-z+(double) image->columns)/affine->sx; x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z/affine->sx); x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns)) { inverse_edge.x2=edge->x1; return(inverse_edge); } /* Determine top and bottom edges. */ z=affine->sy*y+affine->ty; if (affine->rx >= MagickEpsilon) { intercept=(-z/affine->rx); x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z+(double) image->rows)/affine->rx; x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if (affine->rx < -MagickEpsilon) { intercept=(-z+(double) image->rows)/affine->rx; x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z/affine->rx); x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows)) { inverse_edge.x2=edge->x2; return(inverse_edge); } return(inverse_edge); } static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine) { AffineMatrix inverse_affine; double determinant; determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx* affine->ry); inverse_affine.sx=determinant*affine->sy; inverse_affine.rx=determinant*(-affine->rx); inverse_affine.ry=determinant*(-affine->ry); inverse_affine.sy=determinant*affine->sx; inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty* inverse_affine.ry; inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty* inverse_affine.sy; return(inverse_affine); } MagickExport MagickBooleanType DrawAffineImage(Image *image, const Image *source,const AffineMatrix *affine,ExceptionInfo *exception) { AffineMatrix inverse_affine; CacheView *image_view, *source_view; MagickBooleanType status; PixelInfo zero; PointInfo extent[4], min, max; ssize_t i; SegmentInfo edge; ssize_t start, stop, y; /* Determine bounding box. 
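The four source corners are pushed through the forward affine transform; the min/max of the results bound the destination region the composite can touch.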
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(source != (const Image *) NULL); assert(source->signature == MagickCoreSignature); assert(affine != (AffineMatrix *) NULL); extent[0].x=0.0; extent[0].y=0.0; extent[1].x=(double) source->columns-1.0; extent[1].y=0.0; extent[2].x=(double) source->columns-1.0; extent[2].y=(double) source->rows-1.0; extent[3].x=0.0; extent[3].y=(double) source->rows-1.0; for (i=0; i < 4; i++) { PointInfo point; point=extent[i]; extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx; extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty; } min=extent[0]; max=extent[0]; for (i=1; i < 4; i++) { if (min.x > extent[i].x) min.x=extent[i].x; if (min.y > extent[i].y) min.y=extent[i].y; if (max.x < extent[i].x) max.x=extent[i].x; if (max.y < extent[i].y) max.y=extent[i].y; } /* Affine transform image. */ if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; edge.x1=MagickMax(min.x,0.0); edge.y1=MagickMax(min.y,0.0); edge.x2=MagickMin(max.x,(double) image->columns-1.0); edge.y2=MagickMin(max.y,(double) image->rows-1.0); inverse_affine=InverseAffineMatrix(affine); GetPixelInfo(image,&zero); start=CastDoubleToLong(ceil(edge.y1-0.5)); stop=CastDoubleToLong(floor(edge.y2+0.5)); source_view=AcquireVirtualCacheView(source,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source,image,stop-start,1) #endif for (y=start; y <= stop; y++) { PixelInfo composite, pixel; PointInfo point; ssize_t x; Quantum *magick_restrict q; SegmentInfo inverse_edge; ssize_t x_offset; if (status == MagickFalse) continue; inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge); if (inverse_edge.x2 < inverse_edge.x1) continue; q=GetCacheViewAuthenticPixels(image_view,CastDoubleToLong( ceil(inverse_edge.x1-0.5)),y,(size_t) CastDoubleToLong(floor( inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),1,exception); if (q == (Quantum *) NULL) continue; pixel=zero; composite=zero; x_offset=0; for (x=CastDoubleToLong(ceil(inverse_edge.x1-0.5)); x <= CastDoubleToLong(floor(inverse_edge.x2+0.5)); x++) { point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+ inverse_affine.tx; point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+ inverse_affine.ty; status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel, point.x,point.y,&pixel,exception); if (status == MagickFalse) break; GetPixelInfoPixel(image,q,&composite); CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha, &composite); SetPixelViaPixelInfo(image,&composite,q); x_offset++; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w B o u n d i n g R e c t a n g l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawBoundingRectangles() draws the bounding rectangles on the image. This % is only useful for developers debugging the rendering algorithm. 
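%
%  Each polygon edge is outlined in red (#f00) or green (#0f0) according to
%  its direction, and the overall bounds rectangle is outlined in blue
%  (#00f).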
% % The format of the DrawBoundingRectangles method is: % % MagickBooleanType DrawBoundingRectangles(Image *image, % const DrawInfo *draw_info,PolygonInfo *polygon_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o polygon_info: Specifies a pointer to a PolygonInfo structure. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType DrawBoundingRectangles(Image *image, const DrawInfo *draw_info,const PolygonInfo *polygon_info, ExceptionInfo *exception) { double mid; DrawInfo *clone_info; MagickStatusType status; PointInfo end, resolution, start; PrimitiveInfo primitive_info[6]; ssize_t i; SegmentInfo bounds; ssize_t coordinates; (void) memset(primitive_info,0,sizeof(primitive_info)); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill, exception); if (status == MagickFalse) { clone_info=DestroyDrawInfo(clone_info); return(MagickFalse); } resolution.x=96.0; resolution.y=96.0; if (clone_info->density != (char *) NULL) { GeometryInfo geometry_info; MagickStatusType flags; flags=ParseGeometry(clone_info->density,&geometry_info); if ((flags & RhoValue) != 0) resolution.x=geometry_info.rho; resolution.y=resolution.x; if ((flags & SigmaValue) != 0) resolution.y=geometry_info.sigma; } mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)* clone_info->stroke_width/2.0; bounds.x1=0.0; bounds.y1=0.0; bounds.x2=0.0; bounds.y2=0.0; if (polygon_info != (PolygonInfo *) NULL) { bounds=polygon_info->edges[0].bounds; for (i=1; i < (ssize_t) polygon_info->number_edges; i++) { if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1) bounds.x1=polygon_info->edges[i].bounds.x1; if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1) bounds.y1=polygon_info->edges[i].bounds.y1; if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2) bounds.x2=polygon_info->edges[i].bounds.x2; if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2) bounds.y2=polygon_info->edges[i].bounds.y2; } bounds.x1-=mid; bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns ? (double) image->columns-1 : bounds.x1; bounds.y1-=mid; bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows ? (double) image->rows-1 : bounds.y1; bounds.x2+=mid; bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns ? (double) image->columns-1 : bounds.x2; bounds.y2+=mid; bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows ? 
(double) image->rows-1 : bounds.y2; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) { if (polygon_info->edges[i].direction != 0) status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke, exception); else status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke, exception); if (status == MagickFalse) break; start.x=(double) (polygon_info->edges[i].bounds.x1-mid); start.y=(double) (polygon_info->edges[i].bounds.y1-mid); end.x=(double) (polygon_info->edges[i].bounds.x2+mid); end.y=(double) (polygon_info->edges[i].bounds.y2+mid); primitive_info[0].primitive=RectanglePrimitive; status&=TraceRectangle(primitive_info,start,end); primitive_info[0].method=ReplaceMethod; coordinates=(ssize_t) primitive_info[0].coordinates; primitive_info[coordinates].primitive=UndefinedPrimitive; status=DrawPrimitive(image,clone_info,primitive_info,exception); if (status == MagickFalse) break; } if (i < (ssize_t) polygon_info->number_edges) { clone_info=DestroyDrawInfo(clone_info); return(status == 0 ? MagickFalse : MagickTrue); } } status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke, exception); if (status == MagickFalse) { clone_info=DestroyDrawInfo(clone_info); return(MagickFalse); } start.x=(double) (bounds.x1-mid); start.y=(double) (bounds.y1-mid); end.x=(double) (bounds.x2+mid); end.y=(double) (bounds.y2+mid); primitive_info[0].primitive=RectanglePrimitive; status&=TraceRectangle(primitive_info,start,end); primitive_info[0].method=ReplaceMethod; coordinates=(ssize_t) primitive_info[0].coordinates; primitive_info[coordinates].primitive=UndefinedPrimitive; status=DrawPrimitive(image,clone_info,primitive_info,exception); clone_info=DestroyDrawInfo(clone_info); return(status == 0 ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w C l i p P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawClipPath() draws the clip path on the image mask. % % The format of the DrawClipPath method is: % % MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info, % const char *id,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o id: the clip path id. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType DrawClipPath(Image *image, const DrawInfo *draw_info,const char *id,ExceptionInfo *exception) { const char *clip_path; Image *clipping_mask; MagickBooleanType status; clip_path=GetImageArtifact(image,id); if (clip_path == (const char *) NULL) return(MagickFalse); clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path, exception); if (clipping_mask == (Image *) NULL) return(MagickFalse); status=SetImageMask(image,WritePixelMask,clipping_mask,exception); clipping_mask=DestroyImage(clipping_mask); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w C l i p p i n g M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawClippingMask() draws the clip path and returns it as an image clipping % mask. % % The format of the DrawClippingMask method is: % % Image *DrawClippingMask(Image *image,const DrawInfo *draw_info, % const char *id,const char *clip_path,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o clip_path: the clip path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask,
    *separate_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
    }
  /*
    Destroy once on failure only; destroying inside the negate block as well
    would pass a NULL image to DestroyImage() a second time.
  */
  if (status == MagickFalse)
    clip_mask=DestroyImage(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w C o m p o s i t e M a s k                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawCompositeMask() draws the mask path and returns it as an image mask.
%
%  The format of the DrawCompositeMask method is:
%
%      Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *mask_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the mask path id.
%
%    o mask_path: the mask path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path.
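    The mask path is rendered as MVG onto a transparent canvas; the alpha
    channel is then separated into a grayscale image and negated to form
    the composite mask.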
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
    }
  /*
    Destroy once on failure only; destroying inside the negate block as well
    would pass a NULL image to DestroyImage() a second time.
  */
  if (status == MagickFalse)
    composite_mask=DestroyImage(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w D a s h P o l y g o n                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
%  image while respecting the dash offset and dash pattern attributes.
%
%  The format of the DrawDashPolygon method is:
%
%      MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
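%
%  The polygon is walked vertex to vertex; dash and gap lengths are taken
%  from the draw_info dash_pattern array (scaled by the affine expansion
%  factor) and each "on" run is stroked with DrawStrokePolygon().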
% */ static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info, const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception) { double length, maximum_length, offset, scale, total_length; DrawInfo *clone_info; MagickStatusType status; PrimitiveInfo *dash_polygon; double dx, dy; ssize_t i; size_t number_vertices; ssize_t j, n; assert(draw_info != (const DrawInfo *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash"); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ; number_vertices=(size_t) i; dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (2UL*number_vertices+32UL),sizeof(*dash_polygon)); if (dash_polygon == (PrimitiveInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(MagickFalse); } (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)* sizeof(*dash_polygon)); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->miterlimit=0; dash_polygon[0]=primitive_info[0]; scale=ExpandAffine(&draw_info->affine); length=scale*draw_info->dash_pattern[0]; offset=fabs(draw_info->dash_offset) >= MagickEpsilon ? scale*draw_info->dash_offset : 0.0; j=1; for (n=0; offset > 0.0; j=0) { if (draw_info->dash_pattern[n] <= 0.0) break; length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5)); if (offset > length) { offset-=length; n++; length=scale*draw_info->dash_pattern[n]; continue; } if (offset < length) { length-=offset; offset=0.0; break; } offset=0.0; n++; } status=MagickTrue; maximum_length=0.0; total_length=0.0; for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++) { dx=primitive_info[i].point.x-primitive_info[i-1].point.x; dy=primitive_info[i].point.y-primitive_info[i-1].point.y; maximum_length=hypot(dx,dy); if (maximum_length > (double) (MaxBezierCoordinates >> 2)) continue; if (fabs(length) < MagickEpsilon) { if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon) n++; if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon) n=0; length=scale*draw_info->dash_pattern[n]; } for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); ) { total_length+=length; if ((n & 0x01) != 0) { dash_polygon[0]=primitive_info[0]; dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx* total_length*PerceptibleReciprocal(maximum_length)); dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy* total_length*PerceptibleReciprocal(maximum_length)); j=1; } else { if ((j+1) > (ssize_t) number_vertices) break; dash_polygon[j]=primitive_info[i-1]; dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx* total_length*PerceptibleReciprocal(maximum_length)); dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy* total_length*PerceptibleReciprocal(maximum_length)); dash_polygon[j].coordinates=1; j++; dash_polygon[0].coordinates=(size_t) j; dash_polygon[j].primitive=UndefinedPrimitive; status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception); if (status == MagickFalse) break; } if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon) n++; if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon) n=0; length=scale*draw_info->dash_pattern[n]; } length-=(maximum_length-total_length); if ((n & 0x01) != 0) continue; dash_polygon[j]=primitive_info[i]; dash_polygon[j].coordinates=1; j++; } if ((status != MagickFalse) && (total_length < maximum_length) && ((n & 0x01) == 0) && (j > 1)) { dash_polygon[j]=primitive_info[i-1]; 
dash_polygon[j].point.x+=MagickEpsilon; dash_polygon[j].point.y+=MagickEpsilon; dash_polygon[j].coordinates=1; j++; dash_polygon[0].coordinates=(size_t) j; dash_polygon[j].primitive=UndefinedPrimitive; status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception); } dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G r a d i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGradientImage() draws a linear gradient on the image. % % The format of the DrawGradientImage method is: % % MagickBooleanType DrawGradientImage(Image *image, % const DrawInfo *draw_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o exception: return any errors or warnings in this structure. % */ static inline double GetStopColorOffset(const GradientInfo *gradient, const ssize_t x,const ssize_t y) { switch (gradient->type) { case UndefinedGradient: case LinearGradient: { double gamma, length, offset, scale; PointInfo p, q; const SegmentInfo *gradient_vector; gradient_vector=(&gradient->gradient_vector); p.x=gradient_vector->x2-gradient_vector->x1; p.y=gradient_vector->y2-gradient_vector->y1; q.x=(double) x-gradient_vector->x1; q.y=(double) y-gradient_vector->y1; length=sqrt(q.x*q.x+q.y*q.y); gamma=sqrt(p.x*p.x+p.y*p.y)*length; gamma=PerceptibleReciprocal(gamma); scale=p.x*q.x+p.y*q.y; offset=gamma*scale*length; return(offset); } case RadialGradient: { PointInfo v; if (gradient->spread == RepeatSpread) { v.x=(double) x-gradient->center.x; v.y=(double) y-gradient->center.y; return(sqrt(v.x*v.x+v.y*v.y)); } v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians( gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians( gradient->angle))))*PerceptibleReciprocal(gradient->radii.x); v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians( gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians( gradient->angle))))*PerceptibleReciprocal(gradient->radii.y); return(sqrt(v.x*v.x+v.y*v.y)); } } return(0.0); } static int StopInfoCompare(const void *x,const void *y) { StopInfo *stop_1, *stop_2; stop_1=(StopInfo *) x; stop_2=(StopInfo *) y; if (stop_1->offset > stop_2->offset) return(1); if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon) return(0); return(-1); } MagickExport MagickBooleanType DrawGradientImage(Image *image, const DrawInfo *draw_info,ExceptionInfo *exception) { CacheView *image_view; const GradientInfo *gradient; const SegmentInfo *gradient_vector; double length; MagickBooleanType status; PixelInfo zero; PointInfo point; RectangleInfo bounding_box; ssize_t y; /* Draw linear or radial gradient on image. 
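    The gradient stops are first sorted by offset; for each pixel an offset
    is computed with GetStopColorOffset() (normalized by the length of the
    gradient vector for linear gradients), adjusted for the spread method,
    and the color is blended between the two adjacent stops.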
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); gradient=(&draw_info->gradient); qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo), StopInfoCompare); gradient_vector=(&gradient->gradient_vector); point.x=gradient_vector->x2-gradient_vector->x1; point.y=gradient_vector->y2-gradient_vector->y1; length=sqrt(point.x*point.x+point.y*point.y); bounding_box=gradient->bounding_box; status=MagickTrue; GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,bounding_box.height-bounding_box.y,1) #endif for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++) { double alpha, offset; PixelInfo composite, pixel; Quantum *magick_restrict q; ssize_t i, x; ssize_t j; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; composite=zero; offset=GetStopColorOffset(gradient,0,y); if (gradient->type != RadialGradient) offset*=PerceptibleReciprocal(length); for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++) { GetPixelInfoPixel(image,q,&pixel); switch (gradient->spread) { case UndefinedSpread: case PadSpread: { if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) || (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5)))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type != RadialGradient) offset*=PerceptibleReciprocal(length); } for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if ((offset < 0.0) || (i == 0)) composite=gradient->stops[0].color; else if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops)) composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } case ReflectSpread: { if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) || (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5)))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type != RadialGradient) offset*=PerceptibleReciprocal(length); } if (offset < 0.0) offset=(-offset); if ((ssize_t) fmod(offset,2.0) == 0) offset=fmod(offset,1.0); else offset=1.0-fmod(offset,1.0); for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite=gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } case RepeatSpread: { double repeat; MagickBooleanType antialias; antialias=MagickFalse; repeat=0.0; if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) || (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5)))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type == LinearGradient) { repeat=fmod(offset,length); if (repeat < 0.0) repeat=length-fmod(-repeat,length); else 
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat*PerceptibleReciprocal(gradient->radius);
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawImage() draws a graphic primitive on your image.  The primitive
%  may be represented as a string or filename.  Precede the filename with an
%  "at" sign (@) and the contents of the file are drawn on the image.  You
%  can affect how text is drawn by setting one or more members of the draw
%  info structure.
%
%  The format of the DrawImage method is:
%
%      MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const double pad)
{
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives.
  */
  quantum=sizeof(**mvg_info->primitive_info);
  extent=(double) mvg_info->offset+pad+(PrimitiveExtentPad+1)*quantum;
  if (extent <= (double) *mvg_info->extent)
    return(MagickTrue);
  if (extent == (double) CastDoubleToLong(extent))
    {
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) (extent+1),quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          ssize_t
            i;

          *mvg_info->extent=(size_t) extent;
          for (i=mvg_info->offset+1; i <= (ssize_t) extent; i++)
          {
            (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
            (*mvg_info->primitive_info)[i].text=(char *) NULL;
          }
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
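    A minimal PrimitiveExtentPad-sized buffer is acquired so the caller can
    still write the UndefinedPrimitive sentinel and unwind without
    dereferencing a NULL primitive list.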
*/ (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL) *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory( *mvg_info->primitive_info); *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory((size_t) ( (PrimitiveExtentPad+1)*quantum)); (void) memset(*mvg_info->primitive_info,0,(size_t) ((PrimitiveExtentPad+1)* quantum)); *mvg_info->extent=1; mvg_info->offset=0; return(MagickFalse); } static inline double GetDrawValue(const char *magick_restrict string, char **magick_restrict sentinal) { char **magick_restrict q; double value; q=sentinal; value=InterpretLocaleValue(string,q); sentinal=q; return(value); } static int MVGMacroCompare(const void *target,const void *source) { const char *p, *q; p=(const char *) target; q=(const char *) source; return(strcmp(p,q)); } static SplayTreeInfo *GetMVGMacros(const char *primitive) { char *macro, *token; const char *q; size_t extent; SplayTreeInfo *macros; /* Scan graphic primitives for definitions and classes. */ if (primitive == (const char *) NULL) return((SplayTreeInfo *) NULL); macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory, RelinquishMagickMemory); macro=AcquireString(primitive); token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; for (q=primitive; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (*token == '\0') break; if (LocaleCompare("push",token) == 0) { const char *end, *start; (void) GetNextToken(q,&q,extent,token); if (*q == '"') { char name[MagickPathExtent]; const char *p; ssize_t n; /* Named macro (e.g. push graphic-context "wheel"). */ (void) GetNextToken(q,&q,extent,token); start=q; end=q; (void) CopyMagickString(name,token,MagickPathExtent); n=1; for (p=q; *p != '\0'; ) { if (GetNextToken(p,&p,extent,token) < 1) break; if (*token == '\0') break; if (LocaleCompare(token,"pop") == 0) { end=p-strlen(token)-1; n--; } if (LocaleCompare(token,"push") == 0) n++; if ((n == 0) && (end > start)) { /* Extract macro. */ (void) GetNextToken(p,&p,extent,token); (void) CopyMagickString(macro,start,(size_t) (end-start)); (void) AddValueToSplayTree(macros,ConstantString(name), ConstantString(macro)); break; } } } } } token=DestroyString(token); macro=DestroyString(macro); return(macros); } static inline MagickBooleanType IsPoint(const char *point) { char *p; double value; value=GetDrawValue(point,&p); return((fabs(value) < MagickEpsilon) && (p == point) ? 
MagickFalse : MagickTrue); } static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info, const PointInfo point) { primitive_info->coordinates=1; primitive_info->closed_subpath=MagickFalse; primitive_info->point=point; return(MagickTrue); } static MagickBooleanType RenderMVGContent(Image *image, const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char keyword[MagickPathExtent], geometry[MagickPathExtent], *next_token, pattern[MagickPathExtent], *primitive, *token; const char *q; double angle, coordinates, cursor, factor, primitive_extent; DrawInfo *clone_info, **graphic_context; MagickBooleanType proceed; MagickStatusType status; MVGInfo mvg_info; PointInfo point; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; const char *p; ssize_t i, x; SegmentInfo bounds; size_t extent, number_points, number_stops; SplayTreeInfo *macros; ssize_t defsDepth, j, k, n, symbolDepth; StopInfo *stops; TypeMetric metrics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (depth > MagickMaxRecursionDepth) ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply", image->filename); if ((draw_info->primitive == (char *) NULL) || (*draw_info->primitive == '\0')) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image"); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) { status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); if (status == MagickFalse) return(MagickFalse); } if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) && (*(draw_info->primitive+1) != '-') && (depth == 0)) primitive=FileToString(draw_info->primitive+1,~0UL,exception); else primitive=AcquireString(draw_info->primitive); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"mvg:vector-graphics",primitive); n=0; number_stops=0; stops=(StopInfo *) NULL; /* Allocate primitive info memory. 
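    graphic_context[] is a stack of DrawInfo cloned on each
    "push graphic-context" and destroyed on "pop graphic-context";
    primitive_info is grown on demand through mvg_info by
    CheckPrimitiveExtent().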
*/ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=(size_t) PrimitiveExtentPad; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (number_points+1),sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(primitive_info,0,(size_t) (number_points+1)* sizeof(*primitive_info)); (void) memset(&mvg_info,0,sizeof(mvg_info)); mvg_info.primitive_info=(&primitive_info); mvg_info.extent=(&number_points); mvg_info.exception=exception; graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; defsDepth=0; symbolDepth=0; cursor=0.0; macros=GetMVGMacros(primitive); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. */ if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1) break; if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. */ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); *token='\0'; switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.rx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ry=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.tx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("alpha",keyword) == 0) { primitive_type=AlphaPrimitive; break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, 
&graphic_context[n]->border_color,exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("class",keyword) == 0) { const char *mvg_class; (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } if (LocaleCompare(token,graphic_context[n]->id) == 0) break; mvg_class=(const char *) GetValueFromSplayTree(macros,token); if ((graphic_context[n]->render != MagickFalse) && (mvg_class != (const char *) NULL) && (p > primitive)) { char *elements; ssize_t offset; /* Inject class elements in stream. */ offset=(ssize_t) (p-primitive); elements=AcquireString(primitive); elements[offset]='\0'; (void) ConcatenateString(&elements,mvg_class); (void) ConcatenateString(&elements,"\n"); (void) ConcatenateString(&elements,q); primitive=DestroyString(primitive); primitive=elements; q=primitive+offset; } break; } if (LocaleCompare("clip-path",keyword) == 0) { const char *clip_path; /* Take a node from within the MVG document, and duplicate it here. */ (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } (void) CloneString(&graphic_context[n]->clip_mask,token); clip_path=(const char *) GetValueFromSplayTree(macros,token); if (clip_path != (const char *) NULL) { if (graphic_context[n]->clipping_mask != (Image *) NULL) graphic_context[n]->clipping_mask= DestroyImage(graphic_context[n]->clipping_mask); graphic_context[n]->clipping_mask=DrawClippingMask(image, graphic_context[n],token,clip_path,exception); if (graphic_context[n]->compliance != SVGCompliance) { clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image, graphic_context[n]->clip_mask,clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } } break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; (void) GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } if (LocaleCompare("compliance",keyword) == 0) { /* MVG compliance associates a clipping mask with an image; SVG compliance associates a clipping mask with a graphics context. 
*/ (void) GetNextToken(q,&q,extent,token); graphic_context[n]->compliance=(ComplianceType) ParseCommandOption( MagickComplianceOptions,MagickFalse,token); break; } if (LocaleCompare("currentColor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; (void) GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) { status=MagickFalse; break; } graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; (void) GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->fill,exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; } break; } if (LocaleCompare("fill-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->fill_alpha*=opacity; else graphic_context[n]->fill_alpha=QuantumRange*opacity; if (graphic_context[n]->fill.alpha != TransparentAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; else graphic_context[n]->fill.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; (void) GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) { status=MagickFalse; break; } graphic_context[n]->stretch=(StretchType) stretch; break; } if (LocaleCompare("font-style",keyword) == 0) { ssize_t style; (void) GetNextToken(q,&q,extent,token); style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) { status=MagickFalse; break; } graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; (void) GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; (void) GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) { status=MagickFalse; break; } graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; (void) GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) { status=MagickFalse; break; } graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("interword-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=GetDrawValue(token, &next_token); if (token == 
next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->kerning=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("letter-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (IsPoint(token) == MagickFalse) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); clone_info->text=AcquireString(" "); status&=GetTypeMetrics(image,clone_info,&metrics,exception); graphic_context[n]->kerning=metrics.width* GetDrawValue(token,&next_token); clone_info=DestroyDrawInfo(clone_info); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("line",keyword) == 0) { primitive_type=LinePrimitive; break; } status=MagickFalse; break; } case 'm': case 'M': { if (LocaleCompare("mask",keyword) == 0) { const char *mask_path; /* Take a node from within the MVG document, and duplicate it here. */ (void) GetNextToken(q,&q,extent,token); mask_path=(const char *) GetValueFromSplayTree(macros,token); if (mask_path != (const char *) NULL) { if (graphic_context[n]->composite_mask != (Image *) NULL) graphic_context[n]->composite_mask= DestroyImage(graphic_context[n]->composite_mask); graphic_context[n]->composite_mask=DrawCompositeMask(image, graphic_context[n],token,mask_path,exception); if (graphic_context[n]->compliance != SVGCompliance) status=SetImageMask(image,CompositePixelMask, graphic_context[n]->composite_mask,exception); } break; } status=MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) { graphic_context[n]->fill_alpha*=opacity; graphic_context[n]->stroke_alpha*=opacity; } else { graphic_context[n]->fill_alpha=QuantumRange*opacity; graphic_context[n]->stroke_alpha=QuantumRange*opacity; } break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) break; if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) { defsDepth--; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(exception,GetMagickModule(), DrawError,"UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if ((graphic_context[n]->clip_mask != (char *) NULL) && (graphic_context[n]->compliance != SVGCompliance)) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) status=SetImageMask(image,WritePixelMask,(Image *) NULL, exception); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("mask",token) == 0) break; if (LocaleCompare("pattern",token) == 0) break; if (LocaleCompare("symbol",token) == 0) { symbolDepth--; graphic_context[n]->render=symbolDepth > 0 ? MagickFalse : MagickTrue; break; } status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) { /* Class context. */ for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"class") != 0) continue; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("clip-path",token) == 0) { (void) GetNextToken(q,&q,extent,token); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("defs",token) == 0) { defsDepth++; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); segment.x1=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y1=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.x2=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y2=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (LocaleCompare(type,"radial") == 0) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); if (*q == '"') { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->id,token); } break; } if (LocaleCompare("mask",token) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent]; RectangleInfo region; (void) GetNextToken(q,&q,extent,token); (void) 
CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); region.x=CastDoubleToLong(ceil(GetDrawValue(token, &next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); region.y=CastDoubleToLong(ceil(GetDrawValue(token, &next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); region.width=(size_t) CastDoubleToLong(floor(GetDrawValue( token,&next_token)+0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); region.height=(size_t) floor(GetDrawValue(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(token,exception); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) region.width,(double) region.height,(double) region.x,(double) region.y); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("symbol",token) == 0) { symbolDepth++; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("skewX",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops)); else if (number_stops > 2) stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops, sizeof(*stops)); if (stops == (StopInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance,&stop_color, exception); stops[number_stops-1].color=stop_color; (void) GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; stops[number_stops-1].offset=factor*GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->stroke,exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha= graphic_context[n]->stroke_alpha; } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r=q; (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); status=MagickFalse; break; } (void) memset(graphic_context[n]->dash_pattern,0,(size_t) (2*x+2)*sizeof(*graphic_context[n]->dash_pattern)); for (j=0; j < x; j++) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; (void) GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) { status=MagickFalse; break; } graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; (void) GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) { status=MagickFalse; break; } graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->stroke_alpha*=opacity; else graphic_context[n]->stroke_alpha=QuantumRange*opacity; if (graphic_context[n]->stroke.alpha != TransparentAlpha) graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha; else graphic_context[n]->stroke.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("stroke-width",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; graphic_context[n]->stroke_width=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; cursor=0.0; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->undercolor,exception); break; } if (LocaleCompare("translate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.tx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); cursor=0.0; break; } status=MagickFalse; break; } case 'u': case 'U': { if (LocaleCompare("use",keyword) == 0) { const char *use; /* Get a macro from the MVG document, and "use" it here. 
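       A macro is a named MVG fragment stored in the 'macros' splay-tree;
       it is rendered through a cloned DrawInfo so the current graphic
       context is preserved, and depth+1 bounds the recursion when macros
       reference one another.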
*/ (void) GetNextToken(q,&q,extent,token); use=(const char *) GetValueFromSplayTree(macros,token); if (use != (const char *) NULL) { clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); (void) CloneString(&clone_info->primitive,use); status=RenderMVGContent(image,clone_info,depth+1,exception); clone_info=DestroyDrawInfo(clone_info); } break; } status=MagickFalse; break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=CastDoubleToLong(ceil( GetDrawValue(token,&next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=CastDoubleToLong(ceil( GetDrawValue(token,&next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) CastDoubleToLong( floor(GetDrawValue(token,&next_token)+0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) CastDoubleToLong( floor(GetDrawValue(token,&next_token)+0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'w': case 'W': { if (LocaleCompare("word-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= MagickEpsilon) || (fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) || (fabs(affine.sy-1.0) >= MagickEpsilon) || (fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) GradientImage(image,type,PadSpread,stops,number_stops, exception); } if (number_stops > 0) stops=(StopInfo *) RelinquishMagickMemory(stops); } if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),p); continue; } /* Parse the primitive attributes. 
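      Free any text payloads left over from the previous primitive, then
      consume coordinate pairs until a non-point token is reached, growing
      the primitive_info array as needed.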
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); i=0; mvg_info.offset=i; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; primitive_info[0].coordinates=0; primitive_info[0].method=FloodfillMethod; primitive_info[0].closed_subpath=MagickFalse; for (x=0; *q != '\0'; x++) { /* Define points. */ if (IsPoint(q) == MagickFalse) break; (void) GetNextToken(q,&q,extent,token); point.x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); point.y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; primitive_info[i].closed_subpath=MagickFalse; i++; mvg_info.offset=i; if (i < (ssize_t) number_points) continue; status&=CheckPrimitiveExtent(&mvg_info,(double) number_points); } if (status == MagickFalse) break; if ((primitive_info[j].primitive == TextPrimitive) || (primitive_info[j].primitive == ImagePrimitive)) if (primitive_info[j].text != (char *) NULL) primitive_info[j].text=DestroyString(primitive_info[j].text); primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].closed_subpath=MagickFalse; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. */ coordinates=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { coordinates*=5.0; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot(alpha,beta); coordinates*=5.0; coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0* BezierQuantum+360.0; break; } case BezierPrimitive: { coordinates=(BezierQuantum*(double) primitive_info[j].coordinates); break; } case PathPrimitive: { char *s, *t; (void) GetNextToken(q,&q,extent,token); coordinates=1.0; t=token; for (s=token; *s != '\0'; s=t) { double value; value=GetDrawValue(s,&t); (void) value; if (s == t) { t++; continue; } coordinates++; } for (s=token; *s != '\0'; s++) if (strspn(s,"AaCcQqSsTt") != 0) coordinates+=(20.0*BezierQuantum)+360.0; break; } default: break; } if (status == MagickFalse) break; if (((size_t) (i+coordinates)) >= number_points) { /* Resize based on speculative points required by primitive. 
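        If the addition below overflows, number_points wraps and becomes
        smaller than coordinates; that case is reported as a failed memory
        allocation rather than silently under-allocating.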
*/ number_points+=coordinates+1; if (number_points < (size_t) coordinates) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } mvg_info.offset=i; status&=CheckPrimitiveExtent(&mvg_info,(double) number_points); } status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad); if (status == MagickFalse) break; mvg_info.offset=j; switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } status&=TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { double dx, dy, maximum_length; if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } dx=primitive_info[i].point.x-primitive_info[i-1].point.x; dy=primitive_info[i].point.y-primitive_info[i-1].point.y; maximum_length=hypot(dx,dy); if (maximum_length > (MaxBezierCoordinates/100.0)) ThrowPointExpectedException(keyword,exception); status&=TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+2].point.x < 0.0) || (primitive_info[j+2].point.y < 0.0)) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0) { status=MagickFalse; break; } if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0) { status=MagickFalse; break; } status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } status&=TraceArc(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x < 0.0) || (primitive_info[j+1].point.y < 0.0)) { status=MagickFalse; break; } status&=TraceEllipse(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceCircle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 1) { status=MagickFalse; break; } break; } case PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; primitive_info[j].closed_subpath=MagickTrue; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } status&=TraceBezier(&mvg_info,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { coordinates=(double) TracePath(&mvg_info,token,exception); if (coordinates < 0.0) { 
status=MagickFalse; break; } i=(ssize_t) (j+coordinates); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) { status=MagickFalse; break; } primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); /* Compute text cursor offset. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) && (fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon)) { mvg_info.point=primitive_info->point; primitive_info->point.x+=cursor; } else { mvg_info.point=primitive_info->point; cursor=0.0; } clone_info->render=MagickFalse; clone_info->text=AcquireString(token); status&=GetTypeMetrics(image,clone_info,&metrics,exception); clone_info=DestroyDrawInfo(clone_info); cursor+=metrics.width; if (graphic_context[n]->compliance != SVGCompliance) cursor=0.0; break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); break; } } mvg_info.offset=i; if (status == 0) break; primitive_info[i].primitive=UndefinedPrimitive; if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1), p); /* Sanity check. */ status&=CheckPrimitiveExtent(&mvg_info,ExpandAffine( &graphic_context[n]->affine)); if (status == 0) break; status&=CheckPrimitiveExtent(&mvg_info,(double) graphic_context[n]->stroke_width); if (status == 0) break; if (i == 0) continue; /* Transform points. 
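      Each point is mapped through the current affine matrix,
        x' = sx*x + ry*y + tx
        y' = rx*x + sy*y + ty,
      and the graphic context's bounds are grown to enclose the result.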
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) { const char *clip_path; clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image,graphic_context[n]->clip_mask, clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } status&=DrawPrimitive(image,graphic_context[n],primitive_info, exception); } proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. */ macros=DestroySplayTree(macros); token=DestroyString(token); if (primitive_info != (PrimitiveInfo *) NULL) { for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info); } primitive=DestroyString(primitive); if (stops != (StopInfo *) NULL) stops=(StopInfo *) RelinquishMagickMemory(stops); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); if (status == MagickFalse) ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition", keyword); return(status != 0 ? MagickTrue : MagickFalse); } MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info, ExceptionInfo *exception) { return(RenderMVGContent(image,draw_info,0,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P a t t e r n P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPatternPath() draws a pattern. % % The format of the DrawPatternPath method is: % % MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info, % const char *name,Image **pattern,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o name: the pattern name. % % o pattern: the pattern image, rendered from the named pattern path.
% % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType DrawPatternPath(Image *image, const DrawInfo *draw_info,const char *name,Image **pattern, ExceptionInfo *exception) { char property[MagickPathExtent]; const char *geometry, *path, *type; DrawInfo *clone_info; ImageInfo *image_info; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); assert(name != (const char *) NULL); (void) FormatLocaleString(property,MagickPathExtent,"%s",name); path=GetImageArtifact(image,property); if (path == (const char *) NULL) return(MagickFalse); (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name); geometry=GetImageArtifact(image,property); if (geometry == (const char *) NULL) return(MagickFalse); if ((*pattern) != (Image *) NULL) *pattern=DestroyImage(*pattern); image_info=AcquireImageInfo(); image_info->size=AcquireString(geometry); *pattern=AcquireImage(image_info,exception); image_info=DestroyImageInfo(image_info); (void) QueryColorCompliance("#00000000",AllCompliance, &(*pattern)->background_color,exception); (void) SetImageBackgroundColor(*pattern,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), "begin pattern-path %s %s",name,geometry); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); if (clone_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern); if (clone_info->stroke_pattern != (Image *) NULL) clone_info->stroke_pattern=DestroyImage(clone_info->stroke_pattern); (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name); type=GetImageArtifact(image,property); if (type != (const char *) NULL) clone_info->gradient.type=(GradientType) ParseCommandOption( MagickGradientOptions,MagickFalse,type); (void) CloneString(&clone_info->primitive,path); status=RenderMVGContent(*pattern,clone_info,0,exception); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w P o l y g o n P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPolygonPrimitive() draws a polygon on the image. % % The format of the DrawPolygonPrimitive method is: % % MagickBooleanType DrawPolygonPrimitive(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. 
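%
%    The primitive path is first converted to polygon edges (one private
%    copy per worker thread); scanline rows are then rasterized in
%    parallel, with GetFillAlpha() supplying the fill and stroke coverage
%    for each pixel.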
% */ static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info) { ssize_t i; assert(polygon_info != (PolygonInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (polygon_info[i] != (PolygonInfo *) NULL) polygon_info[i]=DestroyPolygonInfo(polygon_info[i]); polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info); return(polygon_info); } static PolygonInfo **AcquirePolygonThreadSet( const PrimitiveInfo *primitive_info,ExceptionInfo *exception) { PathInfo *magick_restrict path_info; PolygonInfo **polygon_info; ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads, sizeof(*polygon_info)); if (polygon_info == (PolygonInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return((PolygonInfo **) NULL); } (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info)); path_info=ConvertPrimitiveToPath(primitive_info,exception); if (path_info == (PathInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); polygon_info[0]=ConvertPathToPolygon(path_info,exception); if (polygon_info[0] == (PolygonInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonThreadSet(polygon_info)); } for (i=1; i < (ssize_t) number_threads; i++) { EdgeInfo *edge_info; ssize_t j; polygon_info[i]=(PolygonInfo *) AcquireMagickMemory( sizeof(*polygon_info[i])); if (polygon_info[i] == (PolygonInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonThreadSet(polygon_info)); } polygon_info[i]->number_edges=0; edge_info=polygon_info[0]->edges; polygon_info[i]->edges=(EdgeInfo *) AcquireQuantumMemory( polygon_info[0]->number_edges,sizeof(*edge_info)); if (polygon_info[i]->edges == (EdgeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonThreadSet(polygon_info)); } (void) memcpy(polygon_info[i]->edges,edge_info, polygon_info[0]->number_edges*sizeof(*edge_info)); for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++) polygon_info[i]->edges[j].points=(PointInfo *) NULL; polygon_info[i]->number_edges=polygon_info[0]->number_edges; for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++) { edge_info=polygon_info[0]->edges+j; polygon_info[i]->edges[j].points=(PointInfo *) AcquireQuantumMemory( edge_info->number_points,sizeof(*edge_info->points)); if (polygon_info[i]->edges[j].points == (PointInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonThreadSet(polygon_info)); } (void) memcpy(polygon_info[i]->edges[j].points,edge_info->points, edge_info->number_points*sizeof(*edge_info->points)); } } path_info=(PathInfo *) RelinquishMagickMemory(path_info); return(polygon_info); } static size_t DestroyEdge(PolygonInfo *polygon_info,const ssize_t edge) { assert(edge < (ssize_t) polygon_info->number_edges); polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory( polygon_info->edges[edge].points); polygon_info->number_edges--; if (edge < (ssize_t) polygon_info->number_edges) (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1, (size_t)
(polygon_info->number_edges-edge)*sizeof(*polygon_info->edges)); return(polygon_info->number_edges); } static double GetFillAlpha(PolygonInfo *polygon_info,const double mid, const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x, const ssize_t y,double *stroke_alpha) { double alpha, beta, distance, subpath_alpha; PointInfo delta; const PointInfo *q; EdgeInfo *p; ssize_t i; ssize_t j, winding_number; /* Compute fill & stroke opacity for this (x,y) point. */ *stroke_alpha=0.0; subpath_alpha=0.0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= (p->bounds.y1-mid-0.5)) break; if ((double) y > (p->bounds.y2+mid+0.5)) { (void) DestroyEdge(polygon_info,j); continue; } if (((double) x <= (p->bounds.x1-mid-0.5)) || ((double) x > (p->bounds.x2+mid+0.5))) continue; i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) p->number_points; i++) { if ((double) y <= (p->points[i-1].y-mid-0.5)) break; if ((double) y > (p->points[i].y+mid+0.5)) continue; if (p->scanline != (double) y) { p->scanline=(double) y; p->highwater=(size_t) i; } /* Compute distance between a point and an edge. */ q=p->points+i-1; delta.x=(q+1)->x-q->x; delta.y=(q+1)->y-q->y; beta=delta.x*(x-q->x)+delta.y*(y-q->y); if (beta <= 0.0) { delta.x=(double) x-q->x; delta.y=(double) y-q->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=delta.x*delta.x+delta.y*delta.y; if (beta >= alpha) { delta.x=(double) x-(q+1)->x; delta.y=(double) y-(q+1)->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=PerceptibleReciprocal(alpha); beta=delta.x*(y-q->y)-delta.y*(x-q->x)+MagickEpsilon; distance=alpha*beta*beta; } } /* Compute stroke & subpath opacity. */ beta=0.0; if (p->ghostline == MagickFalse) { alpha=mid+0.5; if ((*stroke_alpha < 1.0) && (distance <= ((alpha+0.25)*(alpha+0.25)))) { alpha=mid-0.5; if (distance <= ((alpha+0.25)*(alpha+0.25))) *stroke_alpha=1.0; else { beta=1.0; if (fabs(distance-1.0) >= MagickEpsilon) beta=sqrt((double) distance); alpha=beta-mid-0.5; if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25))) *stroke_alpha=(alpha-0.25)*(alpha-0.25); } } } if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0)) continue; if (distance <= 0.0) { subpath_alpha=1.0; continue; } if (distance > 1.0) continue; if (fabs(beta) < MagickEpsilon) { beta=1.0; if (fabs(distance-1.0) >= MagickEpsilon) beta=sqrt(distance); } alpha=beta-1.0; if (subpath_alpha < (alpha*alpha)) subpath_alpha=alpha*alpha; } } /* Compute fill opacity. */ if (fill == MagickFalse) return(0.0); if (subpath_alpha >= 1.0) return(1.0); /* Determine winding number. */ winding_number=0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= p->bounds.y1) break; if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1)) continue; if ((double) x > p->bounds.x2) { winding_number+=p->direction ? 1 : -1; continue; } i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) (p->number_points-1); i++) if ((double) y <= p->points[i].y) break; q=p->points+i-1; if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x))) winding_number+=p->direction ? 
1 : -1; } if (fill_rule != NonZeroRule) { if ((MagickAbsoluteValue(winding_number) & 0x01) != 0) return(1.0); } else if (MagickAbsoluteValue(winding_number) != 0) return(1.0); return(subpath_alpha); } static MagickBooleanType DrawPolygonPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; const char *artifact; MagickBooleanType fill, status; double mid; PolygonInfo **magick_restrict polygon_info; EdgeInfo *p; ssize_t i; SegmentInfo bounds; ssize_t start_y, stop_y, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); assert(primitive_info != (PrimitiveInfo *) NULL); if (primitive_info->coordinates <= 1) return(MagickTrue); /* Compute bounding box. */ polygon_info=AcquirePolygonThreadSet(primitive_info,exception); if (polygon_info == (PolygonInfo **) NULL) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon"); fill=(primitive_info->method == FillToBorderMethod) || (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse; mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; bounds=polygon_info[0]->edges[0].bounds; artifact=GetImageArtifact(image,"draw:render-bounding-rectangles"); if (IsStringTrue(artifact) != MagickFalse) (void) DrawBoundingRectangles(image,draw_info,polygon_info[0],exception); for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++) { p=polygon_info[0]->edges+i; if (p->bounds.x1 < bounds.x1) bounds.x1=p->bounds.x1; if (p->bounds.y1 < bounds.y1) bounds.y1=p->bounds.y1; if (p->bounds.x2 > bounds.x2) bounds.x2=p->bounds.x2; if (p->bounds.y2 > bounds.y2) bounds.y2=p->bounds.y2; } bounds.x1-=(mid+1.0); bounds.y1-=(mid+1.0); bounds.x2+=(mid+1.0); bounds.y2+=(mid+1.0); if ((bounds.x1 >= (double) image->columns) || (bounds.y1 >= (double) image->rows) || (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0)) { polygon_info=DestroyPolygonThreadSet(polygon_info); return(MagickTrue); /* virtual polygon */ } bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x1; bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y1; bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x2; bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y2; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); if ((primitive_info->coordinates == 1) || (polygon_info[0]->number_edges == 0)) { /* Draw point. 
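        A primitive with a single coordinate (or no polygon edges) covers
        at most one pixel: scan the clamped bounds and composite the fill
        color at the matching position.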
*/ start_y=CastDoubleToLong(ceil(bounds.y1-0.5)); stop_y=CastDoubleToLong(floor(bounds.y2+0.5)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y=start_y; y <= stop_y; y++) { MagickBooleanType sync; PixelInfo pixel; ssize_t x; Quantum *magick_restrict q; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=CastDoubleToLong(ceil(bounds.x1-0.5)); stop_x=CastDoubleToLong(floor(bounds.x2+0.5)); x=start_x; q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for ( ; x <= stop_x; x++) { if ((x == CastDoubleToLong(ceil(primitive_info->point.x-0.5))) && (y == CastDoubleToLong(ceil(primitive_info->point.y-0.5)))) { GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-polygon"); return(status); } /* Draw polygon or line. */ start_y=CastDoubleToLong(ceil(bounds.y1-0.5)); stop_y=CastDoubleToLong(floor(bounds.y2+0.5)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y=start_y; y <= stop_y; y++) { const int id = GetOpenMPThreadId(); Quantum *magick_restrict q; ssize_t x; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=CastDoubleToLong(ceil(bounds.x1-0.5)); stop_x=CastDoubleToLong(floor(bounds.x2+0.5)); q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+ 1),1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=start_x; x <= stop_x; x++) { double fill_alpha, stroke_alpha; PixelInfo fill_color, stroke_color; /* Fill and/or stroke. */ fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule, x,y,&stroke_alpha); if (draw_info->stroke_antialias == MagickFalse) { fill_alpha=fill_alpha > 0.5 ? 1.0 : 0.0; stroke_alpha=stroke_alpha > 0.5 ? 1.0 : 0.0; } GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception); CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q, (double) GetPixelAlpha(image,q),q); GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception); CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q, (double) GetPixelAlpha(image,q),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image. 
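%
%  For example, a single point could be rendered with something like this
%  (a minimal sketch; error handling omitted):
%
%    PrimitiveInfo primitives[2];
%    (void) memset(primitives,0,sizeof(primitives));
%    primitives[0].primitive=PointPrimitive;
%    primitives[0].point.x=10.0;
%    primitives[0].point.y=20.0;
%    primitives[0].coordinates=1;
%    primitives[1].primitive=UndefinedPrimitive;
%    (void) DrawPrimitive(image,draw_info,primitives,exception);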
% % The format of the DrawPrimitive method is: % % MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info, % PrimitiveInfo *primitive_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. % */ static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info) { const char *methods[] = { "point", "replace", "floodfill", "filltoborder", "reset", "?" }; PointInfo p, point, q; ssize_t i, x; ssize_t coordinates, y; x=CastDoubleToLong(ceil(primitive_info->point.x-0.5)); y=CastDoubleToLong(ceil(primitive_info->point.y-0.5)); switch (primitive_info->primitive) { case AlphaPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ImagePrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ImagePrimitive %.20g,%.20g",(double) x,(double) y); return; } case PointPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "PointPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case TextPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "TextPrimitive %.20g,%.20g",(double) x,(double) y); return; } default: break; } coordinates=0; p=primitive_info[0].point; q.x=(-1.0); q.y=(-1.0); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; if (coordinates <= 0) { coordinates=(ssize_t) primitive_info[i].coordinates; (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin open (%.20g)",(double) coordinates); p=point; } point=primitive_info[i].point; if ((fabs(q.x-point.x) >= MagickEpsilon) || (fabs(q.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y); else (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y); q=point; coordinates--; if (coordinates > 0) continue; if ((fabs(p.x-point.x) >= MagickEpsilon) || (fabs(p.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)", (double) coordinates); else (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)", (double) coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; MagickStatusType status; ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-primitive"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx, draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy, draw_info->affine.tx,draw_info->affine.ty); } status=MagickTrue; if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) || (IsPixelInfoGray(&draw_info->stroke) == MagickFalse))) status&=SetImageColorspace(image,sRGBColorspace,exception); if (draw_info->compliance == SVGCompliance) { status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask, exception); 
status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask, exception); } x=CastDoubleToLong(ceil(primitive_info->point.x-0.5)); y=CastDoubleToLong(ceil(primitive_info->point.y-0.5)); image_view=AcquireAuthenticCacheView(image,exception); switch (primitive_info->primitive) { case AlphaPrimitive: { if (image->alpha_trait == UndefinedPixelTrait) status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { PixelInfo pixel, target; status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { ChannelType channel_mask; PixelInfo target; status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } channel_mask=SetImageChannelMask(image,AlphaChannel); status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? 
MagickFalse : MagickTrue,exception); (void) SetImageChannelMask(image,channel_mask); break; } case ResetMethod: { PixelInfo pixel; for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } } break; } case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetPixelInfo(image,&pixel); GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { PixelInfo pixel, target; status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { PixelInfo target; status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? 
MagickFalse : MagickTrue,exception); break; } case ResetMethod: { PixelInfo pixel; GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MagickPathExtent]; Image *composite_image, *composite_images; ImageInfo *clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *) NULL) break; clone_info=AcquireImageInfo(); composite_images=(Image *) NULL; if (LocaleNCompare(primitive_info->text,"data:",5) == 0) composite_images=ReadInlineImage(clone_info,primitive_info->text, exception); else if (*primitive_info->text != '\0') { MagickBooleanType path_status; struct stat attributes; (void) CopyMagickString(clone_info->filename,primitive_info->text, MagickPathExtent); path_status=GetPathAttributes(clone_info->filename,&attributes); if ((path_status != MagickFalse) && (S_ISCHR(attributes.st_mode) == 0)) { (void) SetImageInfo(clone_info,1,exception); (void) CopyMagickString(clone_info->filename, primitive_info->text,MagickPathExtent); if (clone_info->size != (char *) NULL) clone_info->size=DestroyString(clone_info->size); if (clone_info->extract != (char *) NULL) clone_info->extract=DestroyString(clone_info->extract); if ((LocaleCompare(clone_info->magick,"file") == 0) || (LocaleCompare(clone_info->magick,"https") == 0) || (LocaleCompare(clone_info->magick,"http") == 0) || (LocaleCompare(clone_info->magick,"mpri") == 0) || (IsPathAccessible(clone_info->filename) != MagickFalse)) composite_images=ReadImage(clone_info,exception); } } clone_info=DestroyImageInfo(clone_info); if (composite_images == (Image *) NULL) { status=MagickFalse; break; } composite_image=RemoveFirstImageFromList(&composite_images); composite_images=DestroyImageList(composite_images); (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor) NULL,(void *) NULL); x1=CastDoubleToLong(ceil(primitive_info[1].point.x-0.5)); y1=CastDoubleToLong(ceil(primitive_info[1].point.y-0.5)); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { /* Resize image. 
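             The second point of the primitive carries the requested width
             and height; the "!" flag in the geometry below forces an exact
             resize, ignoring the source aspect ratio.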
*/ (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y); composite_image->filter=image->filter; status&=TransformImage(&composite_image,(char *) NULL, composite_geometry,exception); } if (composite_image->alpha_trait == UndefinedPixelTrait) status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel, exception); if (draw_info->alpha != OpaqueAlpha) status&=SetImageAlpha(composite_image,draw_info->alpha,exception); SetGeometry(image,&geometry); image->gravity=draw_info->gravity; geometry.x=x; geometry.y=y; (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double) composite_image->rows,(double) geometry.x,(double) geometry.y); (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception); affine=draw_info->affine; affine.tx=(double) geometry.x; affine.ty=(double) geometry.y; composite_image->interpolate=image->interpolate; if ((draw_info->compose == OverCompositeOp) || (draw_info->compose == SrcOverCompositeOp)) status&=DrawAffineImage(image,composite_image,&affine,exception); else status&=CompositeImage(image,composite_image,draw_info->compose, MagickTrue,geometry.x,geometry.y,exception); composite_image=DestroyImage(composite_image); break; } case PointPrimitive: { PixelInfo fill_color; Quantum *q; if ((y < 0) || (y >= (ssize_t) image->rows)) break; if ((x < 0) || (x >= (ssize_t) image->columns)) break; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&fill_color,exception); CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double) GetPixelAlpha(image,q),q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case TextPrimitive: { char geometry[MagickPathExtent]; DrawInfo *clone_info; if (primitive_info->text == (char *) NULL) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->text,primitive_info->text); (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); (void) CloneString(&clone_info->geometry,geometry); status&=AnnotateImage(image,clone_info,exception); clone_info=DestroyDrawInfo(clone_info); break; } default: { double mid, scale; DrawInfo *clone_info; if (IsEventLogging() != MagickFalse) LogPrimitiveInfo(primitive_info); scale=ExpandAffine(&draw_info->affine); if ((draw_info->dash_pattern != (double *) NULL) && (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) && (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) && (draw_info->stroke.alpha != (Quantum) TransparentAlpha)) { /* Draw dash polygon. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); if (status != MagickFalse) status&=DrawDashPolygon(draw_info,primitive_info,image,exception); break; } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; if ((mid > 1.0) && ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) || (draw_info->stroke_pattern != (Image *) NULL))) { double point_x, point_y; MagickBooleanType closed_path; /* Draw strokes while respecting line cap/join attributes. 
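            Round caps and joins (and closed paths that end where they
            begin) can be rendered directly as a polygon; otherwise the
            fill is drawn first with a transparent stroke and the stroked
            outline is traced separately by DrawStrokePolygon().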
*/ closed_path=primitive_info[0].closed_subpath; i=(ssize_t) primitive_info[0].coordinates; point_x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x); point_y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y); if ((point_x < MagickEpsilon) && (point_y < MagickEpsilon)) closed_path=MagickTrue; if ((((draw_info->linecap == RoundCap) || (closed_path != MagickFalse)) && (draw_info->linejoin == RoundJoin)) || (primitive_info[i].primitive != UndefinedPrimitive)) { status&=DrawPolygonPrimitive(image,draw_info,primitive_info, exception); break; } clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); if (status != MagickFalse) status&=DrawStrokePolygon(image,draw_info,primitive_info,exception); break; } status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception); break; } } image_view=DestroyCacheView(image_view); if (draw_info->compliance == SVGCompliance) { status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception); status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception); } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w S t r o k e P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on % the image while respecting the line cap and join attributes. % % The format of the DrawStrokePolygon method is: % % MagickBooleanType DrawStrokePolygon(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType DrawRoundLinecap(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { PrimitiveInfo linecap[5]; ssize_t i; for (i=0; i < 4; i++) linecap[i]=(*primitive_info); linecap[0].coordinates=4; linecap[1].point.x+=2.0*MagickEpsilon; linecap[2].point.x+=2.0*MagickEpsilon; linecap[2].point.y+=2.0*MagickEpsilon; linecap[3].point.y+=2.0*MagickEpsilon; linecap[4].primitive=UndefinedPrimitive; return(DrawPolygonPrimitive(image,draw_info,linecap,exception)); } static MagickBooleanType DrawStrokePolygon(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { DrawInfo *clone_info; MagickBooleanType closed_path; MagickStatusType status; PrimitiveInfo *stroke_polygon; const PrimitiveInfo *p, *q; /* Draw stroked polygon.
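       Each subpath is converted to a closed outline by TraceStrokePolygon()
       and filled using the stroke color as the fill; open subpaths with
       round caps get an explicit cap drawn at each end.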
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-stroke-polygon"); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->fill=draw_info->stroke; if (clone_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern); if (clone_info->stroke_pattern != (Image *) NULL) clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0, MagickTrue,exception); clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; clone_info->stroke_width=0.0; clone_info->fill_rule=NonZeroRule; status=MagickTrue; for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates) { if (p->coordinates == 1) continue; stroke_polygon=TraceStrokePolygon(draw_info,p,exception); if (stroke_polygon == (PrimitiveInfo *) NULL) { status=0; break; } status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception); stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon); if (status == 0) break; q=p+p->coordinates-1; closed_path=p->closed_subpath; if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse)) { status&=DrawRoundLinecap(image,draw_info,p,exception); status&=DrawRoundLinecap(image,draw_info,q,exception); } } clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-stroke-polygon"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A f f i n e M a t r i x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAffineMatrix() returns an AffineMatrix initialized to the identity % matrix. % % The format of the GetAffineMatrix method is: % % void GetAffineMatrix(AffineMatrix *affine_matrix) % % A description of each parameter follows: % % o affine_matrix: the affine matrix. % */ MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(affine_matrix != (AffineMatrix *) NULL); (void) memset(affine_matrix,0,sizeof(*affine_matrix)); affine_matrix->sx=1.0; affine_matrix->sy=1.0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetDrawInfo() initializes draw_info to default values from image_info. % % The format of the GetDrawInfo method is: % % void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info) % % A description of each parameter follows: % % o image_info: the image info. % % o draw_info: the draw info. % */ MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info) { char *next_token; const char *option; ExceptionInfo *exception; ImageInfo *clone_info; /* Initialize draw attributes.
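     Defaults: opaque black fill, transparent stroke of width 1, butt caps,
     miter joins with limit 10, 12-point text, and over compositing; image
     options such as "fill", "stroke", and "strokewidth" override these
     below.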
*/ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(draw_info != (DrawInfo *) NULL); (void) memset(draw_info,0,sizeof(*draw_info)); clone_info=CloneImageInfo(image_info); GetAffineMatrix(&draw_info->affine); exception=AcquireExceptionInfo(); (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill, exception); (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke, exception); draw_info->stroke_antialias=clone_info->antialias; draw_info->stroke_width=1.0; draw_info->fill_rule=EvenOddRule; draw_info->alpha=OpaqueAlpha; draw_info->fill_alpha=OpaqueAlpha; draw_info->stroke_alpha=OpaqueAlpha; draw_info->linecap=ButtCap; draw_info->linejoin=MiterJoin; draw_info->miterlimit=10; draw_info->decorate=NoDecoration; draw_info->pointsize=12.0; draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha; draw_info->compose=OverCompositeOp; draw_info->render=MagickTrue; draw_info->clip_path=MagickFalse; draw_info->debug=IsEventLogging(); if (clone_info->font != (char *) NULL) draw_info->font=AcquireString(clone_info->font); if (clone_info->density != (char *) NULL) draw_info->density=AcquireString(clone_info->density); draw_info->text_antialias=clone_info->antialias; if (fabs(clone_info->pointsize) >= MagickEpsilon) draw_info->pointsize=clone_info->pointsize; draw_info->border_color=clone_info->border_color; if (clone_info->server_name != (char *) NULL) draw_info->server_name=AcquireString(clone_info->server_name); option=GetImageOption(clone_info,"direction"); if (option != (const char *) NULL) draw_info->direction=(DirectionType) ParseCommandOption( MagickDirectionOptions,MagickFalse,option); else draw_info->direction=UndefinedDirection; option=GetImageOption(clone_info,"encoding"); if (option != (const char *) NULL) (void) CloneString(&draw_info->encoding,option); option=GetImageOption(clone_info,"family"); if (option != (const char *) NULL) (void) CloneString(&draw_info->family,option); option=GetImageOption(clone_info,"fill"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill, exception); option=GetImageOption(clone_info,"gravity"); if (option != (const char *) NULL) draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,option); option=GetImageOption(clone_info,"interline-spacing"); if (option != (const char *) NULL) draw_info->interline_spacing=GetDrawValue(option,&next_token); option=GetImageOption(clone_info,"interword-spacing"); if (option != (const char *) NULL) draw_info->interword_spacing=GetDrawValue(option,&next_token); option=GetImageOption(clone_info,"kerning"); if (option != (const char *) NULL) draw_info->kerning=GetDrawValue(option,&next_token); option=GetImageOption(clone_info,"stroke"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke, exception); option=GetImageOption(clone_info,"strokewidth"); if (option != (const char *) NULL) draw_info->stroke_width=GetDrawValue(option,&next_token); option=GetImageOption(clone_info,"style"); if (option != (const char *) NULL) draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions, MagickFalse,option); option=GetImageOption(clone_info,"undercolor"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor, exception); option=GetImageOption(clone_info,"weight"); if (option != (const char *) NULL) { ssize_t weight; weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option); if (weight == -1) 
weight=(ssize_t) StringToUnsignedLong(option); draw_info->weight=(size_t) weight; } exception=DestroyExceptionInfo(exception); draw_info->signature=MagickCoreSignature; clone_info=DestroyImageInfo(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P e r m u t a t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Permutate() returns the number of ways to choose k of n elements, i.e. the % binomial coefficient n!/(k!(n-k)!), used below as the Bernstein coefficients % of a Bezier curve. % % The format of the Permutate method is: % % double Permutate(ssize_t n,ssize_t k) % % A description of each parameter follows: % % o n: the total number of elements. % % o k: the number of elements chosen. % % */ static inline double Permutate(const ssize_t n,const ssize_t k) { double r; ssize_t i; r=1.0; for (i=k+1; i <= n; i++) r*=i; for (i=1; i <= (n-k); i++) r/=i; return(r); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + T r a c e P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TracePrimitive is a collection of methods for generating graphic % primitives such as arcs, ellipses, paths, etc. % */ static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start, const PointInfo end,const PointInfo degrees) { PointInfo center, radius; center.x=0.5*(end.x+start.x); center.y=0.5*(end.y+start.y); radius.x=fabs(center.x-start.x); radius.y=fabs(center.y-start.y); return(TraceEllipse(mvg_info,center,radius,degrees)); } static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start, const PointInfo end,const PointInfo arc,const double angle, const MagickBooleanType large_arc,const MagickBooleanType sweep) { double alpha, beta, delta, factor, gamma, theta; MagickStatusType status; PointInfo center, points[3], radii; double cosine, sine; PrimitiveInfo *primitive_info; PrimitiveInfo *p; ssize_t i; size_t arc_segments; ssize_t offset; offset=mvg_info->offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=0; if ((fabs(start.x-end.x) < MagickEpsilon) && (fabs(start.y-end.y) < MagickEpsilon)) return(TracePoint(primitive_info,end)); radii.x=fabs(arc.x); radii.y=fabs(arc.y); if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon)) return(TraceLine(primitive_info,start,end)); cosine=cos(DegreesToRadians(fmod((double) angle,360.0))); sine=sin(DegreesToRadians(fmod((double) angle,360.0))); center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2); center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2); delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/ (radii.y*radii.y); if (delta < MagickEpsilon) return(TraceLine(primitive_info,start,end)); if (delta > 1.0) { radii.x*=sqrt((double) delta); radii.y*=sqrt((double) delta); } points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x); points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y); points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x); points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y); alpha=points[1].x-points[0].x; beta=points[1].y-points[0].y; if (fabs(alpha*alpha+beta*beta) < MagickEpsilon) return(TraceLine(primitive_info,start,end)); factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25; if (factor <= 0.0) factor=0.0; else { factor=sqrt((double) factor); if (sweep == large_arc) factor=(-factor); } center.x=(double) ((points[0].x+points[1].x)/2-factor*beta); center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha); alpha=atan2(points[0].y-center.y,points[0].x-center.x);
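/* alpha above is the angle of the start point as seen from the arc center; theta below is the signed sweep from there to the end point, wrapped by +/- 2*pi so its direction agrees with the sweep flag. */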
theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha; if ((theta < 0.0) && (sweep != MagickFalse)) theta+=2.0*MagickPI; else if ((theta > 0.0) && (sweep == MagickFalse)) theta-=2.0*MagickPI; arc_segments=(size_t) CastDoubleToLong(ceil(fabs((double) (theta/(0.5* MagickPI+MagickEpsilon))))); status=MagickTrue; p=primitive_info; for (i=0; i < (ssize_t) arc_segments; i++) { beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments)); gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))* sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/ sin(fmod((double) beta,DegreesToRadians(360.0))); points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x; p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y; (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y* points[0].y); (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y* points[0].y); (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y* points[1].y); (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y* points[1].y); (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y* points[2].y); (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y* points[2].y); if (i == (ssize_t) (arc_segments-1)) (p+3)->point=end; status&=TraceBezier(mvg_info,4); if (status == 0) break; p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; p+=p->coordinates; } if (status == 0) return(MagickFalse); mvg_info->offset=offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceBezier(MVGInfo *mvg_info, const size_t number_coordinates) { double alpha, *coefficients, weight; PointInfo end, point, *points; PrimitiveInfo *primitive_info; PrimitiveInfo *p; ssize_t i, j; size_t control_points, quantum; /* Allocate coefficients. 
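The per-coordinate sampling density (quantum) computed below is driven by the largest coordinate spread between any two control points, so longer curves receive proportionally more interpolated points, capped at BezierQuantum.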
*/ primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; quantum=number_coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { for (j=i+1; j < (ssize_t) number_coordinates; j++) { alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x); if (alpha > (double) MAGICK_SSIZE_MAX) { (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(MagickFalse); } if (alpha > (double) quantum) quantum=(size_t) alpha; alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y); if (alpha > (double) MAGICK_SSIZE_MAX) { (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(MagickFalse); } if (alpha > (double) quantum) quantum=(size_t) alpha; } } primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; quantum=MagickMin(quantum/number_coordinates,BezierQuantum); coefficients=(double *) AcquireQuantumMemory(number_coordinates, sizeof(*coefficients)); points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates* sizeof(*points)); if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL)) { if (points != (PointInfo *) NULL) points=(PointInfo *) RelinquishMagickMemory(points); if (coefficients != (double *) NULL) coefficients=(double *) RelinquishMagickMemory(coefficients); (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(MagickFalse); } control_points=quantum*number_coordinates; if (CheckPrimitiveExtent(mvg_info,(double) control_points+1) == MagickFalse) { points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); return(MagickFalse); } primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; /* Compute bezier points. */ end=primitive_info[number_coordinates-1].point; for (i=0; i < (ssize_t) number_coordinates; i++) coefficients[i]=Permutate((ssize_t) number_coordinates-1,i); weight=0.0; for (i=0; i < (ssize_t) control_points; i++) { p=primitive_info; point.x=0.0; point.y=0.0; alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0); for (j=0; j < (ssize_t) number_coordinates; j++) { point.x+=alpha*coefficients[j]*p->point.x; point.y+=alpha*coefficients[j]*p->point.y; alpha*=weight/(1.0-weight); p++; } points[i]=point; weight+=1.0/control_points; } /* Bezier curves are just short segmented polys. 
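Each of the control_points parameter values sampled above becomes one point primitive below; the last point is pinned to the final control point so the traced poly ends exactly on the curve's endpoint.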
*/ p=primitive_info; for (i=0; i < (ssize_t) control_points; i++) { if (TracePoint(p,points[i]) == MagickFalse) { points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); return(MagickFalse); } p+=p->coordinates; } if (TracePoint(p,end) == MagickFalse) { points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); return(MagickFalse); } p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); return(MagickTrue); } static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start, const PointInfo end) { double alpha, beta, radius; PointInfo offset, degrees; alpha=end.x-start.x; beta=end.y-start.y; radius=hypot((double) alpha,(double) beta); offset.x=(double) radius; offset.y=(double) radius; degrees.x=0.0; degrees.y=360.0; return(TraceEllipse(mvg_info,start,offset,degrees)); } static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center, const PointInfo radii,const PointInfo arc) { double coordinates, delta, step, x, y; PointInfo angle, point; PrimitiveInfo *primitive_info; PrimitiveInfo *p; ssize_t i; /* Ellipses are just short segmented polys. */ primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=0; if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon)) return(MagickTrue); delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y)); step=MagickPI/8.0; if ((delta >= 0.0) && (delta < (MagickPI/8.0))) step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0); angle.x=DegreesToRadians(arc.x); y=arc.y; while (y < arc.x) y+=360.0; angle.y=DegreesToRadians(y); coordinates=ceil((angle.y-angle.x)/step+1.0); if (CheckPrimitiveExtent(mvg_info,coordinates) == MagickFalse) return(MagickFalse); primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; for (p=primitive_info; angle.x < angle.y; angle.x+=step) { point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x; point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; } point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x; point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; x=fabs(primitive_info[0].point.x- primitive_info[primitive_info->coordinates-1].point.x); y=fabs(primitive_info[0].point.y- primitive_info[primitive_info->coordinates-1].point.y); if ((x < MagickEpsilon) && (y < MagickEpsilon)) primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info, const PointInfo start,const PointInfo end) { if (TracePoint(primitive_info,start) == MagickFalse) return(MagickFalse); if ((fabs(start.x-end.x) < MagickEpsilon) && (fabs(start.y-end.y) < MagickEpsilon)) { primitive_info->primitive=PointPrimitive; primitive_info->coordinates=1; return(MagickTrue); } if 
(TracePoint(primitive_info+1,end) == MagickFalse) return(MagickFalse); (primitive_info+1)->primitive=primitive_info->primitive; primitive_info->coordinates=2; primitive_info->closed_subpath=MagickFalse; return(MagickTrue); } static ssize_t TracePath(MVGInfo *mvg_info,const char *path, ExceptionInfo *exception) { char *next_token, token[MagickPathExtent]; const char *p; double x, y; int attribute, last_attribute; MagickBooleanType status; PointInfo end = {0.0, 0.0}, points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} }, point = {0.0, 0.0}, start = {0.0, 0.0}; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; PrimitiveInfo *q; ssize_t i; size_t number_coordinates, z_count; ssize_t subpath_offset; subpath_offset=mvg_info->offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; status=MagickTrue; attribute=0; number_coordinates=0; z_count=0; primitive_type=primitive_info->primitive; q=primitive_info; for (p=path; *p != '\0'; ) { if (status == MagickFalse) break; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == '\0') break; last_attribute=attribute; attribute=(int) (*p++); switch (attribute) { case 'a': case 'A': { double angle = 0.0; MagickBooleanType large_arc = MagickFalse, sweep = MagickFalse; PointInfo arc = {0.0, 0.0}; /* Elliptical arc. */ do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); arc.x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); arc.y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse; (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse; if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'A' ? x : point.x+x); end.y=(double) (attribute == (int) 'A' ? y : point.y+y); if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'c': case 'C': { /* Cubic Bézier curve. 
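Each segment consumes three coordinate pairs (two control points plus the endpoint); the current point supplies the first of the four Bezier control points, and lowercase 'c' marks the pairs as relative to the current point.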
*/ do { points[0]=point; for (i=1; i < 4; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'C' ? x : point.x+x); end.y=(double) (attribute == (int) 'C' ? y : point.y+y); points[i]=end; } for (i=0; i < 4; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,4) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'H': case 'h': { do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'H' ? x: point.x+x); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'l': case 'L': { /* Line to. */ do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'L' ? x : point.x+x); point.y=(double) (attribute == (int) 'L' ? y : point.y+y); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'M': case 'm': { /* Move to. */ if (mvg_info->offset != subpath_offset) { primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; primitive_info=q; subpath_offset=mvg_info->offset; } i=0; do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'M' ? x : point.x+x); point.y=(double) (attribute == (int) 'M' ? 
y : point.y+y); if (i == 0) start=point; i++; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'q': case 'Q': { /* Quadratic Bézier curve. */ do { points[0]=point; for (i=1; i < 3; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (*p == ',') p++; end.x=(double) (attribute == (int) 'Q' ? x : point.x+x); end.y=(double) (attribute == (int) 'Q' ? y : point.y+y); points[i]=end; } for (i=0; i < 3; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,3) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 's': case 'S': { /* Cubic Bézier curve. */ do { points[0]=points[3]; points[1].x=2.0*points[3].x-points[2].x; points[1].y=2.0*points[3].y-points[2].y; for (i=2; i < 4; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (*p == ',') p++; end.x=(double) (attribute == (int) 'S' ? x : point.x+x); end.y=(double) (attribute == (int) 'S' ? y : point.y+y); points[i]=end; } if (strchr("CcSs",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 4; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,4) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; last_attribute=attribute; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 't': case 'T': { /* Quadratic Bézier curve. */ do { points[0]=points[2]; points[1].x=2.0*points[2].x-points[1].x; points[1].y=2.0*points[2].y-points[1].y; for (i=2; i < 3; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'T' ? x : point.x+x); end.y=(double) (attribute == (int) 'T' ? 
y : point.y+y); points[i]=end; } if (status == MagickFalse) break; if (strchr("QqTt",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 3; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,3) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; last_attribute=attribute; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'v': case 'V': { /* Line to. */ do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.y=(double) (attribute == (int) 'V' ? y : point.y+y); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'z': case 'Z': { /* Close path. */ point=start; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); primitive_info->closed_subpath=MagickTrue; number_coordinates+=primitive_info->coordinates; primitive_info=q; subpath_offset=mvg_info->offset; z_count++; break; } default: { ThrowPointExpectedException(token,exception); break; } } } if (status == MagickFalse) return(-1); primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { q--; q->primitive=primitive_type; if (z_count > 1) q->method=FillToBorderMethod; } q=primitive_info; return((ssize_t) number_coordinates); } static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info, const PointInfo start,const PointInfo end) { PointInfo point; PrimitiveInfo *p; ssize_t i; p=primitive_info; if (TracePoint(p,start) == MagickFalse) return(MagickFalse); p+=p->coordinates; point.x=start.x; point.y=end.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; if (TracePoint(p,end) == MagickFalse) return(MagickFalse); p+=p->coordinates; point.x=end.x; point.y=start.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; if (TracePoint(p,start) == MagickFalse) return(MagickFalse); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info, const PointInfo start,const PointInfo end,PointInfo arc) { PointInfo degrees, point, segment; PrimitiveInfo *primitive_info; PrimitiveInfo *p; ssize_t i; ssize_t offset; offset=mvg_info->offset; segment.x=fabs(end.x-start.x); segment.y=fabs(end.y-start.y); if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon)) { (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0; 
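/* Degenerate round rectangle (zero width or height): leave an empty primitive so nothing is drawn. */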
return(MagickTrue); } if (arc.x > (0.5*segment.x)) arc.x=0.5*segment.x; if (arc.y > (0.5*segment.y)) arc.y=0.5*segment.y; point.x=start.x+segment.x-arc.x; point.y=start.y+arc.y; degrees.x=270.0; degrees.y=360.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+segment.x-arc.x; point.y=start.y+segment.y-arc.y; degrees.x=0.0; degrees.y=90.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+segment.y-arc.y; degrees.x=90.0; degrees.y=180.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+arc.y; degrees.x=180.0; degrees.y=270.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse) return(MagickFalse); p+=p->coordinates; mvg_info->offset=offset; primitive_info=(*mvg_info->primitive_info)+offset; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info, const size_t number_vertices,const double offset) { double distance; double dx, dy; ssize_t i; ssize_t j; dx=0.0; dy=0.0; for (i=1; i < (ssize_t) number_vertices; i++) { dx=primitive_info[0].point.x-primitive_info[i].point.x; dy=primitive_info[0].point.y-primitive_info[i].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } if (i == (ssize_t) number_vertices) i=(ssize_t) number_vertices-1L; distance=hypot((double) dx,(double) dy); primitive_info[0].point.x=(double) (primitive_info[i].point.x+ dx*(distance+offset)/distance); primitive_info[0].point.y=(double) (primitive_info[i].point.y+ dy*(distance+offset)/distance); for (j=(ssize_t) number_vertices-2; j >= 0; j--) { dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x; dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } distance=hypot((double) dx,(double) dy); primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+ dx*(distance+offset)/distance); primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+ dy*(distance+offset)/distance); return(MagickTrue); } static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info, const PrimitiveInfo *primitive_info,ExceptionInfo *exception) { #define MaxStrokePad (6*BezierQuantum+360) #define CheckPathExtent(pad_p,pad_q) \ { \ if ((pad_p) > MaxBezierCoordinates) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ else \ if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \ { \ if (~extent_p < (pad_p)) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ else \ { \ extent_p+=(pad_p); \ stroke_p=(PointInfo *) 
ResizeQuantumMemory(stroke_p,extent_p+ \ MaxStrokePad,sizeof(*stroke_p)); \ } \ } \ if ((pad_q) > MaxBezierCoordinates) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ else \ if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \ { \ if (~extent_q < (pad_q)) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ else \ { \ extent_q+=(pad_q); \ stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \ MaxStrokePad,sizeof(*stroke_q)); \ } \ } \ if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \ { \ if (stroke_p != (PointInfo *) NULL) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ if (stroke_q != (PointInfo *) NULL) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ polygon_primitive=(PrimitiveInfo *) \ RelinquishMagickMemory(polygon_primitive); \ (void) ThrowMagickException(exception,GetMagickModule(), \ ResourceLimitError,"MemoryAllocationFailed","`%s'",""); \ return((PrimitiveInfo *) NULL); \ } \ } typedef struct _StrokeSegment { double p, q; } StrokeSegment; double delta_theta, dot_product, mid, miterlimit; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *stroke_p, *stroke_q; PrimitiveInfo *polygon_primitive, *stroke_polygon; ssize_t i; size_t arc_segments, extent_p, extent_q, number_vertices; ssize_t j, n, p, q; StrokeSegment dx = {0.0, 0.0}, dy = {0.0, 0.0}, inverse_slope = {0.0, 0.0}, slope = {0.0, 0.0}, theta = {0.0, 0.0}; /* Allocate paths. */ number_vertices=primitive_info->coordinates; polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices+2UL,sizeof(*polygon_primitive)); if (polygon_primitive == (PrimitiveInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return((PrimitiveInfo *) NULL); } (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices* sizeof(*polygon_primitive)); offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x; offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y; closed_path=(fabs(offset.x) < MagickEpsilon) && (fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices]=primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive=UndefinedPrimitive; /* Compute the slope for the first line segment, p. */ dx.p=0.0; dy.p=0.0; for (n=1; n < (ssize_t) number_vertices; n++) { dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x; dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y; if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon)) break; } if (n == (ssize_t) number_vertices) { if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse)) { /* Zero length subpath. 
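With no segment direction to offset against, hand back a single primitive with zero coordinates so the caller draws nothing for this subpath.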
*/ stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory( sizeof(*stroke_polygon)); stroke_polygon[0]=polygon_primitive[0]; stroke_polygon[0].coordinates=0; polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return(stroke_polygon); } n=(ssize_t) number_vertices-1L; } extent_p=2*number_vertices; extent_q=2*number_vertices; stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad, sizeof(*stroke_p)); stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad, sizeof(*stroke_q)); if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) { if (stroke_p != (PointInfo *) NULL) stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); if (stroke_q != (PointInfo *) NULL) stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return((PrimitiveInfo *) NULL); } slope.p=0.0; inverse_slope.p=0.0; if (fabs(dx.p) < MagickEpsilon) { if (dx.p >= 0.0) slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.p) < MagickEpsilon) { if (dy.p >= 0.0) inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.p=dy.p/dx.p; inverse_slope.p=(-1.0/slope.p); } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid); offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0))); offset.y=(double) (offset.x*inverse_slope.p); if ((dy.p*offset.x-dx.p*offset.y) > 0.0) { box_p[0].x=polygon_primitive[0].point.x-offset.x; box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p; box_p[1].x=polygon_primitive[n].point.x-offset.x; box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p; box_q[0].x=polygon_primitive[0].point.x+offset.x; box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p; box_q[1].x=polygon_primitive[n].point.x+offset.x; box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p; } else { box_p[0].x=polygon_primitive[0].point.x+offset.x; box_p[0].y=polygon_primitive[0].point.y+offset.y; box_p[1].x=polygon_primitive[n].point.x+offset.x; box_p[1].y=polygon_primitive[n].point.y+offset.y; box_q[0].x=polygon_primitive[0].point.x-offset.x; box_q[0].y=polygon_primitive[0].point.y-offset.y; box_q[1].x=polygon_primitive[n].point.x-offset.x; box_q[1].y=polygon_primitive[n].point.y-offset.y; } /* Create strokes for the line join attribute: bevel, miter, round. */ p=0; q=0; stroke_q[p++]=box_q[0]; stroke_p[q++]=box_p[0]; for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++) { /* Compute the slope for this line segment, q. */ dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x; dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y; dot_product=dx.q*dx.q+dy.q*dy.q; if (dot_product < 0.25) continue; slope.q=0.0; inverse_slope.q=0.0; if (fabs(dx.q) < MagickEpsilon) { if (dx.q >= 0.0) slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.q=dy.q < 0.0 ? 
1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.q) < MagickEpsilon) { if (dy.q >= 0.0) inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.q=dy.q/dx.q; inverse_slope.q=(-1.0/slope.q); } offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0))); offset.y=(double) (offset.x*inverse_slope.q); dot_product=dy.q*offset.x-dx.q*offset.y; if (dot_product > 0.0) { box_p[2].x=polygon_primitive[n].point.x-offset.x; box_p[2].y=polygon_primitive[n].point.y-offset.y; box_p[3].x=polygon_primitive[i].point.x-offset.x; box_p[3].y=polygon_primitive[i].point.y-offset.y; box_q[2].x=polygon_primitive[n].point.x+offset.x; box_q[2].y=polygon_primitive[n].point.y+offset.y; box_q[3].x=polygon_primitive[i].point.x+offset.x; box_q[3].y=polygon_primitive[i].point.y+offset.y; } else { box_p[2].x=polygon_primitive[n].point.x+offset.x; box_p[2].y=polygon_primitive[n].point.y+offset.y; box_p[3].x=polygon_primitive[i].point.x+offset.x; box_p[3].y=polygon_primitive[i].point.y+offset.y; box_q[2].x=polygon_primitive[n].point.x-offset.x; box_q[2].y=polygon_primitive[n].point.y-offset.y; box_q[3].x=polygon_primitive[i].point.x-offset.x; box_q[3].y=polygon_primitive[i].point.y-offset.y; } if (fabs((double) (slope.p-slope.q)) < MagickEpsilon) { box_p[4]=box_p[1]; box_q[4]=box_q[1]; } else { box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+ box_p[3].y)/(slope.p-slope.q)); box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y); box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+ box_q[3].y)/(slope.p-slope.q)); box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y); } DisableMSCWarning(4127) CheckPathExtent(MaxStrokePad,MaxStrokePad); RestoreMSCWarning dot_product=dx.q*dy.p-dx.p*dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++]=box_p[4]; else { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++]=box_q[4]; stroke_p[p++]=box_p[4]; } else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++]=box_p[4]; else { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x); theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x); if (theta.q < theta.p) theta.q+=2.0*MagickPI; arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta. 
q-theta.p)/(2.0*sqrt(PerceptibleReciprocal(mid)))))); DisableMSCWarning(4127) CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad); RestoreMSCWarning stroke_q[q].x=box_q[1].x; stroke_q[q].y=box_q[1].y; q++; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); stroke_q[q].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_q[q].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); q++; } stroke_q[q++]=box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++]=box_q[4]; else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++]=box_q[4]; stroke_p[p++]=box_p[4]; } else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++]=box_q[4]; else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x); theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x); if (theta.p < theta.q) theta.p+=2.0*MagickPI; arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta.p- theta.q)/(2.0*sqrt((double) (PerceptibleReciprocal(mid))))))); DisableMSCWarning(4127) CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad); RestoreMSCWarning stroke_p[p++]=box_p[1]; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); stroke_p[p].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_p[p].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); p++; } stroke_p[p++]=box_p[2]; break; } default: break; } slope.p=slope.q; inverse_slope.p=inverse_slope.q; box_p[0]=box_p[2]; box_p[1]=box_p[3]; box_q[0]=box_q[2]; box_q[1]=box_q[3]; dx.p=dx.q; dy.p=dy.q; n=i; } stroke_p[p++]=box_p[1]; stroke_q[q++]=box_q[1]; /* Trace stroked polygon. 
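The outline is assembled from the two offset polylines: stroke_p is copied in order, then stroke_q in reverse, and the result is closed on itself; a closed input path contributes one extra seam point on each side.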
*/ stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon)); if (stroke_polygon == (PrimitiveInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return(stroke_polygon); } for (i=0; i < (ssize_t) p; i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; } for ( ; i < (ssize_t) (p+q+closed_path); i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[p+closed_path].point; i++; } stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; stroke_polygon[i].primitive=UndefinedPrimitive; stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1); stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return(stroke_polygon); }
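/*
  A minimal standalone sketch (illustrative only, not ImageMagick code; the
  helper name is hypothetical) of the perpendicular-offset arithmetic that
  TraceStrokePolygon() performs via slope and inverse_slope: each side of the
  stroke lies half the stroke width away from the segment, along the unit
  normal.  It matches offset.x=sqrt(mid*mid/(inverse_slope*inverse_slope+1.0))
  above, up to the sign test the real code applies to pick the side.
*/
#include <math.h>
#include <stdio.h>

static void perpendicular_offset(const double dx,const double dy,
  const double mid,double *ox,double *oy)
{
  /* assumes a non-degenerate segment; the real code special-cases
     near-zero dx and dy with 1/MagickEpsilon instead */
  double length=hypot(dx,dy);

  *ox=mid*dy/length;     /* unit normal is (dy,-dx)/length */
  *oy=(-mid)*dx/length;  /* scaled to half the stroke width */
}

int main(void)
{
  double ox,oy;

  perpendicular_offset(3.0,4.0,2.5,&ox,&oy);   /* a 3-4-5 segment */
  (void) printf("offset = (%g, %g)\n",ox,oy);  /* prints offset = (2, -1.5) */
  return(0);
}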
brilliantrussian.c
/******************************************************************* * * M4RI: Linear Algebra over GF(2) * * Copyright (C) 2007, 2008 Gregory Bard <bard@fordham.edu> * Copyright (C) 2008-2010 Martin Albrecht <M.R.Albrecht@rhul.ac.uk> * * Distributed under the terms of the GNU General Public License (GPL) * version 2 or higher. * * This code is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full text of the GPL is available at: * * http://www.gnu.org/licenses/ * ********************************************************************/ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "brilliantrussian.h" #include "xor.h" #include "graycode.h" #include "echelonform.h" #include "ple_russian.h" /** * \brief Perform Gaussian reduction to reduced row echelon form on a * submatrix. * * The submatrix has dimension at most k starting at r x c of A. Checks * for pivot rows up to row end_row (exclusive). Terminates as soon as * finding a pivot column fails. * * \param A Matrix. * \param r First row. * \param c First column. * \param k Maximal dimension of identity matrix to produce. * \param end_row Maximal row index (exclusive) for rows to consider * for inclusion. */ static inline int _mzd_gauss_submatrix_full(mzd_t *A, rci_t r, rci_t c, rci_t end_row, int k) { assert(k <= m4ri_radix); rci_t start_row = r; rci_t j; for (j = c; j < c + k; ++j) { int found = 0; for (rci_t i = start_row; i < end_row; ++i) { /* first we need to clear the first columns */ word const tmp = mzd_read_bits(A, i, c, j - c + 1); if(tmp) { for (int l = 0; l < j - c; ++l) if (__M4RI_GET_BIT(tmp, l)) mzd_row_add_offset(A, i, r+l, c+l); /* pivot? */ if (mzd_read_bit(A, i, j)) { mzd_row_swap(A, i, start_row); /* clear above */ for (rci_t l = r; l < start_row; ++l) { if (mzd_read_bit(A, l, j)) { mzd_row_add_offset(A, l, start_row, j); } } ++start_row; found = 1; break; } } } if (found == 0) { break; } } __M4RI_DD_MZD(A); __M4RI_DD_INT(j - c); return j - c; } /** * \brief Perform Gaussian reduction to upper triangular matrix on a * submatrix. * * The submatrix has dimension at most k starting at r x c of A. Checks * for pivot rows up to row end_row (exclusive). Terminates as soon as * finding a pivot column fails. * * \param A Matrix. * \param r First row. * \param c First column. * \param k Maximal dimension of identity matrix to produce. * \param end_row Maximal row index (exclusive) for rows to consider * for inclusion. */ static inline int _mzd_gauss_submatrix(mzd_t *A, rci_t r, rci_t c, rci_t end_row, int k) { rci_t start_row = r; int found; rci_t j; for (j = c; j < c+k; ++j) { found = 0; for (rci_t i = start_row; i < end_row; ++i) { /* first we need to clear the first columns */ for (int l = 0; l < j - c; ++l) if (mzd_read_bit(A, i, c+l)) mzd_row_add_offset(A, i, r+l, c+l); /* pivot? */ if (mzd_read_bit(A, i, j)) { mzd_row_swap(A, i, start_row); start_row++; found = 1; break; } } if (found == 0) { break; } } __M4RI_DD_MZD(A); __M4RI_DD_INT(j - c); return j - c; } /** * \brief Given a submatrix in upper triangular form compute the * reduced row echelon form. * * The submatrix has dimension at most k starting at r x c of A; each of * its first k columns is assumed to already hold a pivot. * * \param A Matrix. * \param r First row. * \param c First column. 
* \param k Maximal dimension of identity matrix to produce. */ static inline int _mzd_gauss_submatrix_top(mzd_t *A, rci_t r, rci_t c, int k) { rci_t start_row = r; for (rci_t j = c; j < c + k; ++j) { for (rci_t l = r; l < start_row; ++l) { if (mzd_read_bit(A, l, j)) { mzd_row_add_offset(A, l, start_row, j); } } ++start_row; } __M4RI_DD_MZD(A); __M4RI_DD_INT(k); return k; } static inline void _mzd_copy_back_rows(mzd_t *A, mzd_t const *U, rci_t r, rci_t c, int k) { wi_t const startblock = c / m4ri_radix; wi_t const width = A->width - startblock; for (int i = 0; i < k; ++i) { word const *const src = U->rows[i] + startblock; word *const dst = A->rows[r+i] + startblock; for (wi_t j = 0; j < width; ++j) { dst[j] = src[j]; } } __M4RI_DD_MZD(A); } void mzd_make_table(mzd_t const *M, rci_t r, rci_t c, int k, mzd_t *T, rci_t *L) { wi_t const homeblock = c / m4ri_radix; word const mask_end = __M4RI_LEFT_BITMASK(M->ncols % m4ri_radix); word const pure_mask_begin = __M4RI_RIGHT_BITMASK(m4ri_radix - (c % m4ri_radix)); word const mask_begin = (M->width - homeblock != 1) ? pure_mask_begin : pure_mask_begin & mask_end; wi_t const wide = M->width - homeblock; int const twokay = __M4RI_TWOPOW(k); L[0] = 0; for (rci_t i = 1; i < twokay; ++i) { word *ti = T->rows[i] + homeblock; word *ti1 = T->rows[i-1] + homeblock; rci_t const rowneeded = r + m4ri_codebook[k]->inc[i - 1]; int const id = m4ri_codebook[k]->ord[i]; L[id] = i; if (rowneeded >= M->nrows) continue; word *m = M->rows[rowneeded] + homeblock; *ti++ = (*m++ ^ *ti1++) & mask_begin; wi_t j; for(j = 1; j + 8 <= wide - 1; j += 8) { *ti++ = *m++ ^ *ti1++; *ti++ = *m++ ^ *ti1++; *ti++ = *m++ ^ *ti1++; *ti++ = *m++ ^ *ti1++; *ti++ = *m++ ^ *ti1++; *ti++ = *m++ ^ *ti1++; *ti++ = *m++ ^ *ti1++; *ti++ = *m++ ^ *ti1++; } switch(wide - j) { case 8: *ti++ = *m++ ^ *ti1++; case 7: *ti++ = *m++ ^ *ti1++; case 6: *ti++ = *m++ ^ *ti1++; case 5: *ti++ = *m++ ^ *ti1++; case 4: *ti++ = *m++ ^ *ti1++; case 3: *ti++ = *m++ ^ *ti1++; case 2: *ti++ = *m++ ^ *ti1++; case 1: *ti++ = (*m++ ^ *ti1++) & mask_end; } } __M4RI_DD_MZD(T); __M4RI_DD_RCI_ARRAY(L, twokay); } void mzd_process_rows(mzd_t *M, rci_t startrow, rci_t stoprow, rci_t startcol, int k, mzd_t const *T, rci_t const *L) { wi_t const block = startcol / m4ri_radix; wi_t const wide = M->width - block; wi_t const count = (wide + 7) / 8; /* Unrolled loop count */ int const entry_point = wide % 8; /* Unrolled loop entry point */ if(k == 1) { word const bm = m4ri_one << (startcol % m4ri_radix); rci_t r; for (r = startrow; r + 2 <= stoprow; r += 2) { word const b0 = M->rows[r+0][block] & bm; word const b1 = M->rows[r+1][block] & bm; word *m0 = M->rows[r+0] + block; word *m1 = M->rows[r+1] + block; word *t = T->rows[1] + block; wi_t n = count; if((b0 & b1)) { switch (entry_point) { case 0: do { *m0++ ^= *t; *m1++ ^= *t++; case 7: *m0++ ^= *t; *m1++ ^= *t++; case 6: *m0++ ^= *t; *m1++ ^= *t++; case 5: *m0++ ^= *t; *m1++ ^= *t++; case 4: *m0++ ^= *t; *m1++ ^= *t++; case 3: *m0++ ^= *t; *m1++ ^= *t++; case 2: *m0++ ^= *t; *m1++ ^= *t++; case 1: *m0++ ^= *t; *m1++ ^= *t++; } while (--n > 0); } } else if(b0) { switch (entry_point) { case 0: do { *m0++ ^= *t++; case 7: *m0++ ^= *t++; case 6: *m0++ ^= *t++; case 5: *m0++ ^= *t++; case 4: *m0++ ^= *t++; case 3: *m0++ ^= *t++; case 2: *m0++ ^= *t++; case 1: *m0++ ^= *t++; } while (--n > 0); } } else if(b1) { switch (entry_point) { case 0: do { *m1++ ^= *t++; case 7: *m1++ ^= *t++; case 6: *m1++ ^= 
*t++; case 5: *m1++ ^= *t++; case 4: *m1++ ^= *t++; case 3: *m1++ ^= *t++; case 2: *m1++ ^= *t++; case 1: *m1++ ^= *t++; } while (--n > 0); } } } /* TODO: this code is a bit silly/overkill, it just takes care of the last row */ for( ; r < stoprow; ++r) { rci_t const x0 = L[ mzd_read_bits_int(M, r, startcol, k) ]; word *m0 = M->rows[r] + block; word *t0 = T->rows[x0] + block; wi_t n = count; switch (entry_point) { case 0: do { *m0++ ^= *t0++; case 7: *m0++ ^= *t0++; case 6: *m0++ ^= *t0++; case 5: *m0++ ^= *t0++; case 4: *m0++ ^= *t0++; case 3: *m0++ ^= *t0++; case 2: *m0++ ^= *t0++; case 1: *m0++ ^= *t0++; } while (--n > 0); } } __M4RI_DD_MZD(M); return; } rci_t r; for (r = startrow; r + 2 <= stoprow; r += 2) { rci_t const x0 = L[ mzd_read_bits_int(M, r+0, startcol, k) ]; rci_t const x1 = L[ mzd_read_bits_int(M, r+1, startcol, k) ]; word *m0 = M->rows[r+0] + block; word *t0 = T->rows[x0] + block; word *m1 = M->rows[r+1] + block; word *t1 = T->rows[x1] + block; wi_t n = count; switch (entry_point) { case 0: do { *m0++ ^= *t0++; *m1++ ^= *t1++; case 7: *m0++ ^= *t0++; *m1++ ^= *t1++; case 6: *m0++ ^= *t0++; *m1++ ^= *t1++; case 5: *m0++ ^= *t0++; *m1++ ^= *t1++; case 4: *m0++ ^= *t0++; *m1++ ^= *t1++; case 3: *m0++ ^= *t0++; *m1++ ^= *t1++; case 2: *m0++ ^= *t0++; *m1++ ^= *t1++; case 1: *m0++ ^= *t0++; *m1++ ^= *t1++; } while (--n > 0); } } for( ; r < stoprow; ++r) { rci_t const x0 = L[ mzd_read_bits_int(M, r, startcol, k) ]; word *m0 = M->rows[r] + block; word *t0 = T->rows[x0] + block; wi_t n = count; switch (entry_point) { case 0: do { *m0++ ^= *t0++; case 7: *m0++ ^= *t0++; case 6: *m0++ ^= *t0++; case 5: *m0++ ^= *t0++; case 4: *m0++ ^= *t0++; case 3: *m0++ ^= *t0++; case 2: *m0++ ^= *t0++; case 1: *m0++ ^= *t0++; } while (--n > 0); } } __M4RI_DD_MZD(M); } void mzd_process_rows2(mzd_t *M, rci_t startrow, rci_t stoprow, rci_t startcol, int k, mzd_t const *T0, rci_t const *L0, mzd_t const *T1, rci_t const *L1) { assert(k <= m4ri_radix); wi_t const blocknum = startcol / m4ri_radix; wi_t const wide = M->width - blocknum; int const ka = k / 2; int const kb = k - k / 2; rci_t r; word const ka_bm = __M4RI_LEFT_BITMASK(ka); word const kb_bm = __M4RI_LEFT_BITMASK(kb); #if __M4RI_HAVE_OPENMP #pragma omp parallel for private(r) shared(startrow, stoprow) schedule(static,512) // MAX((__M4RI_CPU_L1_CACHE >> 3) / wide, #endif for(r = startrow; r < stoprow; ++r) { word bits = mzd_read_bits(M, r, startcol, k); rci_t const x0 = L0[ bits & ka_bm ]; bits>>=ka; rci_t const x1 = L1[ bits & kb_bm ]; if((x0 | x1) == 0) // x0 == 0 && x1 == 0 continue; word *m0 = M->rows[r] + blocknum; word const *t[2]; t[0] = T0->rows[x0] + blocknum; t[1] = T1->rows[x1] + blocknum; _mzd_combine_2( m0, t, wide); } __M4RI_DD_MZD(M); } void mzd_process_rows3(mzd_t *M, rci_t startrow, rci_t stoprow, rci_t startcol, int k, mzd_t const *T0, rci_t const *L0, mzd_t const *T1, rci_t const *L1, mzd_t const *T2, rci_t const *L2) { assert(k <= m4ri_radix); wi_t const blocknum = startcol / m4ri_radix; wi_t const wide = M->width - blocknum; int rem = k % 3; int const ka = k / 3 + ((rem >= 2) ? 1 : 0); int const kb = k / 3 + ((rem >= 1) ? 
1 : 0); int const kc = k / 3; rci_t r; word const ka_bm = __M4RI_LEFT_BITMASK(ka); word const kb_bm = __M4RI_LEFT_BITMASK(kb); word const kc_bm = __M4RI_LEFT_BITMASK(kc); #if __M4RI_HAVE_OPENMP #pragma omp parallel for private(r) shared(startrow, stoprow) schedule(static,512) //if(stoprow-startrow > 128) #endif for(r= startrow; r < stoprow; ++r) { word bits = mzd_read_bits(M, r, startcol, k); rci_t const x0 = L0[ bits & ka_bm ]; bits>>=ka; rci_t const x1 = L1[ bits & kb_bm ]; bits>>=kb; rci_t const x2 = L2[ bits & kc_bm ]; if((x0 | x1 | x2) == 0) // x0 == 0 && x1 == 0 && x2 == 0 continue; word *m0 = M->rows[r] + blocknum; word const *t[3]; t[0] = T0->rows[x0] + blocknum; t[1] = T1->rows[x1] + blocknum; t[2] = T2->rows[x2] + blocknum; _mzd_combine_3( m0, t, wide); } __M4RI_DD_MZD(M); } void mzd_process_rows4(mzd_t *M, rci_t startrow, rci_t stoprow, rci_t startcol, int k, mzd_t const *T0, rci_t const *L0, mzd_t const *T1, rci_t const *L1, mzd_t const *T2, rci_t const *L2, mzd_t const *T3, rci_t const *L3) { assert(k <= m4ri_radix); wi_t const blocknum = startcol / m4ri_radix; wi_t const wide = M->width - blocknum; int const rem = k % 4; int const ka = k / 4 + ((rem >= 3) ? 1 : 0); int const kb = k / 4 + ((rem >= 2) ? 1 : 0); int const kc = k / 4 + ((rem >= 1) ? 1 : 0); int const kd = k / 4; rci_t r; word const ka_bm = __M4RI_LEFT_BITMASK(ka); word const kb_bm = __M4RI_LEFT_BITMASK(kb); word const kc_bm = __M4RI_LEFT_BITMASK(kc); word const kd_bm = __M4RI_LEFT_BITMASK(kd); #if __M4RI_HAVE_OPENMP #pragma omp parallel for private(r) shared(startrow, stoprow) schedule(static,512) //if(stoprow-startrow > 128) #endif for(r = startrow; r < stoprow; ++r) { word bits = mzd_read_bits(M, r, startcol, k); rci_t const x0 = L0[ bits & ka_bm ]; bits>>=ka; rci_t const x1 = L1[ bits & kb_bm ]; bits>>=kb; rci_t const x2 = L2[ bits & kc_bm ]; bits>>=kc; rci_t const x3 = L3[ bits & kd_bm ]; if(((x0 | x1) | (x2 | x3)) == 0) // x0 == 0 && x1 == 0 && x2 == 0 && x3 == 0 continue; word *m0 = M->rows[r] + blocknum; word const *t[4]; t[0] = T0->rows[x0] + blocknum; t[1] = T1->rows[x1] + blocknum; t[2] = T2->rows[x2] + blocknum; t[3] = T3->rows[x3] + blocknum; _mzd_combine_4( m0, t, wide); } __M4RI_DD_MZD(M); } void mzd_process_rows5(mzd_t *M, rci_t startrow, rci_t stoprow, rci_t startcol, int k, mzd_t const *T0, rci_t const *L0, mzd_t const *T1, rci_t const *L1, mzd_t const *T2, rci_t const *L2, mzd_t const *T3, rci_t const *L3, mzd_t const *T4, rci_t const *L4) { assert(k <= m4ri_radix); wi_t const blocknum = startcol / m4ri_radix; wi_t const wide = M->width - blocknum; int rem = k % 5; int const ka = k / 5 + ((rem >= 4) ? 1 : 0); int const kb = k / 5 + ((rem >= 3) ? 1 : 0); int const kc = k / 5 + ((rem >= 2) ? 1 : 0); int const kd = k / 5 + ((rem >= 1) ? 
1 : 0); int const ke = k / 5; rci_t r; word const ka_bm = __M4RI_LEFT_BITMASK(ka); word const kb_bm = __M4RI_LEFT_BITMASK(kb); word const kc_bm = __M4RI_LEFT_BITMASK(kc); word const kd_bm = __M4RI_LEFT_BITMASK(kd); word const ke_bm = __M4RI_LEFT_BITMASK(ke); #if __M4RI_HAVE_OPENMP #pragma omp parallel for private(r) shared(startrow, stoprow) schedule(static,512) //if(stoprow-startrow > 128) #endif for(r = startrow; r < stoprow; ++r) { word bits = mzd_read_bits(M, r, startcol, k); rci_t const x0 = L0[ bits & ka_bm ]; bits>>=ka; rci_t const x1 = L1[ bits & kb_bm ]; bits>>=kb; rci_t const x2 = L2[ bits & kc_bm ]; bits>>=kc; rci_t const x3 = L3[ bits & kd_bm ]; bits>>=kd; rci_t const x4 = L4[ bits & ke_bm ]; if(((x0 | x1 | x2) | (x3 | x4)) == 0) // x0 == 0 && x1 == 0 && x2 == 0 && x3 == 0 && x4 == 0 continue; word *m0 = M->rows[r] + blocknum; word const *t[5]; t[0] = T0->rows[x0] + blocknum; t[1] = T1->rows[x1] + blocknum; t[2] = T2->rows[x2] + blocknum; t[3] = T3->rows[x3] + blocknum; t[4] = T4->rows[x4] + blocknum; _mzd_combine_5( m0, t, wide); } __M4RI_DD_MZD(M); } void mzd_process_rows6(mzd_t *M, rci_t startrow, rci_t stoprow, rci_t startcol, int k, mzd_t const *T0, rci_t const *L0, mzd_t const *T1, rci_t const *L1, mzd_t const *T2, rci_t const *L2, mzd_t const *T3, rci_t const *L3, mzd_t const *T4, rci_t const *L4, mzd_t const *T5, rci_t const *L5) { assert(k <= m4ri_radix); wi_t const blocknum = startcol / m4ri_radix; wi_t const wide = M->width - blocknum; int const rem = k % 6; int const ka = k / 6 + ((rem >= 5) ? 1 : 0); int const kb = k / 6 + ((rem >= 4) ? 1 : 0); int const kc = k / 6 + ((rem >= 3) ? 1 : 0); int const kd = k / 6 + ((rem >= 2) ? 1 : 0); int const ke = k / 6 + ((rem >= 1) ? 1 : 0); int const kf = k / 6; rci_t r; word const ka_bm = __M4RI_LEFT_BITMASK(ka); word const kb_bm = __M4RI_LEFT_BITMASK(kb); word const kc_bm = __M4RI_LEFT_BITMASK(kc); word const kd_bm = __M4RI_LEFT_BITMASK(kd); word const ke_bm = __M4RI_LEFT_BITMASK(ke); word const kf_bm = __M4RI_LEFT_BITMASK(kf); #if __M4RI_HAVE_OPENMP #pragma omp parallel for private(r) shared(startrow, stoprow) schedule(static,512) //if(stoprow-startrow > 128) #endif for(r = startrow; r < stoprow; ++r) { word bits = mzd_read_bits(M, r, startcol, k); rci_t const x0 = L0[ bits & ka_bm ]; bits>>=ka; rci_t const x1 = L1[ bits & kb_bm ]; bits>>=kb; rci_t const x2 = L2[ bits & kc_bm ]; bits>>=kc; rci_t const x3 = L3[ bits & kd_bm ]; bits>>=kd; rci_t const x4 = L4[ bits & ke_bm ]; bits>>=ke; rci_t const x5 = L5[ bits & kf_bm ]; /* Waste three clocks on OR-ing (modern CPU can do three in * parallel) to avoid possible multiple conditional jumps. */ if(((x0 | x1) | (x2 | x3) | (x4 | x5)) == 0) // x0 == 0 && x1 == 0 && x2 == 0 && x3 == 0 && x4 == 0 && x5 == 0 continue; word *m0 = M->rows[r] + blocknum; word const *t[6]; t[0] = T0->rows[x0] + blocknum; t[1] = T1->rows[x1] + blocknum; t[2] = T2->rows[x2] + blocknum; t[3] = T3->rows[x3] + blocknum; t[4] = T4->rows[x4] + blocknum; t[5] = T5->rows[x5] + blocknum; _mzd_combine_6( m0, t, wide); } __M4RI_DD_MZD(M); } rci_t _mzd_echelonize_m4ri(mzd_t *A, int const full, int k, int heuristic, double const threshold) { /** * \par General algorithm * \li Step 1. Denote the first column to be processed in a given * iteration as \f$a_i\f$. Then, perform Gaussian elimination on the * first \f$3k\f$ rows after and including the \f$i\f$-th row to * produce an identity matrix in \f$a_{i,i} ... a_{i+k-1,i+k-1},\f$ * and zeroes in \f$a_{i+k,i} ... a_{i+3k-1,i+k-1}\f$. * * \li Step 2. 
Construct a table consisting of the \f$2^k\f$ binary strings of * length k in a Gray code. Thus with only \f$2^k\f$ vector * additions, all possible linear combinations of these k rows * have been precomputed. * * \li Step 3. One can rapidly process the remaining rows from \f$i + * 3k\f$ until row \f$m\f$ (the last row) by using the table. For * example, suppose the \f$j\f$-th row has entries \f$a_{j,i} * ... a_{j,i+k-1}\f$ in the columns being processed. Selecting the * row of the table associated with this k-bit string, and adding it * to row j will force the k columns to zero, and adjust the * remaining columns from \f$ i + k\f$ to n in the appropriate way, * as if Gaussian elimination had been performed. * * \li Step 4. While the above form of the algorithm will reduce a * system of boolean linear equations to unit upper triangular form, * and thus permit a system to be solved with back substitution, the * M4RI algorithm can also be used to invert a matrix, or put the * system into reduced row echelon form (RREF). Simply run Step 3 * on rows \f$0 ... i-1\f$ as well as on rows \f$i + 3k * ... m\f$. This only affects the complexity slightly, changing the * 2.5 coeffcient to 3. * * \attention This function implements a variant of the algorithm * described above. If heuristic is true, then this algorithm, will * switch to PLUQ based echelon form computation once the density * reaches the threshold. */ rci_t const ncols = A->ncols; if (k == 0) { k = m4ri_opt_k(A->nrows, ncols, 0); if (k >= 7) k = 7; if (0.75 * __M4RI_TWOPOW(k) * ncols > __M4RI_CPU_L3_CACHE / 2.0) k -= 1; } int kk = 6 * k; mzd_t *U = mzd_init(kk, ncols); mzd_t *T0 = mzd_init(__M4RI_TWOPOW(k), ncols); mzd_t *T1 = mzd_init(__M4RI_TWOPOW(k), ncols); mzd_t *T2 = mzd_init(__M4RI_TWOPOW(k), ncols); mzd_t *T3 = mzd_init(__M4RI_TWOPOW(k), ncols); mzd_t *T4 = mzd_init(__M4RI_TWOPOW(k), ncols); mzd_t *T5 = mzd_init(__M4RI_TWOPOW(k), ncols); rci_t *L0 = (rci_t*)m4ri_mm_calloc(__M4RI_TWOPOW(k), sizeof(rci_t)); rci_t *L1 = (rci_t*)m4ri_mm_calloc(__M4RI_TWOPOW(k), sizeof(rci_t)); rci_t *L2 = (rci_t*)m4ri_mm_calloc(__M4RI_TWOPOW(k), sizeof(rci_t)); rci_t *L3 = (rci_t*)m4ri_mm_calloc(__M4RI_TWOPOW(k), sizeof(rci_t)); rci_t *L4 = (rci_t*)m4ri_mm_calloc(__M4RI_TWOPOW(k), sizeof(rci_t)); rci_t *L5 = (rci_t*)m4ri_mm_calloc(__M4RI_TWOPOW(k), sizeof(rci_t)); rci_t last_check = 0; rci_t r = 0; rci_t c = 0; if (heuristic) { if (c < ncols && r < A->nrows && _mzd_density(A, 32, 0, 0) >= threshold) { wi_t const tmp = c / m4ri_radix; rci_t const tmp2 = tmp * m4ri_radix; mzd_t *Abar = mzd_init_window(A, r, tmp2, A->nrows, ncols); r += mzd_echelonize_pluq(Abar, full); mzd_free(Abar); c = ncols; } } while(c < ncols) { if (heuristic && c > (last_check + 256)) { last_check = c; if (c < ncols && r < A->nrows && _mzd_density(A, 32, r, c) >= threshold) { mzd_t *Abar = mzd_init_window(A, r, (c / m4ri_radix) * m4ri_radix, A->nrows, ncols); if (!full) { r += mzd_echelonize_pluq(Abar, full); } else { rci_t r2 = mzd_echelonize_pluq(Abar, full); if (r > 0) _mzd_top_echelonize_m4ri(A, 0, r, c, r); r += r2; } mzd_free(Abar); break; } } if(c + kk > ncols) { kk = ncols - c; } int kbar; if (full) { kbar = _mzd_gauss_submatrix_full(A, r, c, A->nrows, kk); } else { kbar = _mzd_gauss_submatrix(A, r, c, A->nrows, kk); /* this isn't necessary, adapt make_table */ U = mzd_submatrix(U, A, r, 0, r + kbar, ncols); _mzd_gauss_submatrix_top(A, r, c, kbar); } if (kbar > 5 * k) { int const rem = kbar % 6; int const ka = kbar / 6 + ((rem >= 5) ? 
1 : 0); int const kb = kbar / 6 + ((rem >= 4) ? 1 : 0); int const kc = kbar / 6 + ((rem >= 3) ? 1 : 0); int const kd = kbar / 6 + ((rem >= 2) ? 1 : 0); int const ke = kbar / 6 + ((rem >= 1) ? 1 : 0);; int const kf = kbar / 6; if(full || kbar == kk) { mzd_make_table(A, r, c, ka, T0, L0); mzd_make_table(A, r+ka, c, kb, T1, L1); mzd_make_table(A, r+ka+kb, c, kc, T2, L2); mzd_make_table(A, r+ka+kb+kc, c, kd, T3, L3); mzd_make_table(A, r+ka+kb+kc+kd, c, ke, T4, L4); mzd_make_table(A, r+ka+kb+kc+kd+ke, c, kf, T5, L5); } if(kbar == kk) mzd_process_rows6(A, r+kbar, A->nrows, c, kbar, T0, L0, T1, L1, T2, L2, T3, L3, T4, L4, T5, L5); if(full) mzd_process_rows6(A, 0, r, c, kbar, T0, L0, T1, L1, T2, L2, T3, L3, T4, L4, T5, L5); } else if (kbar > 4 * k) { int const rem = kbar % 5; int const ka = kbar / 5 + ((rem >= 4) ? 1 : 0); int const kb = kbar / 5 + ((rem >= 3) ? 1 : 0); int const kc = kbar / 5 + ((rem >= 2) ? 1 : 0); int const kd = kbar / 5 + ((rem >= 1) ? 1 : 0); int const ke = kbar / 5; if(full || kbar == kk) { mzd_make_table(A, r, c, ka, T0, L0); mzd_make_table(A, r+ka, c, kb, T1, L1); mzd_make_table(A, r+ka+kb, c, kc, T2, L2); mzd_make_table(A, r+ka+kb+kc, c, kd, T3, L3); mzd_make_table(A, r+ka+kb+kc+kd, c, ke, T4, L4); } if(kbar == kk) mzd_process_rows5(A, r+kbar, A->nrows, c, kbar, T0, L0, T1, L1, T2, L2, T3, L3, T4, L4); if(full) mzd_process_rows5(A, 0, r, c, kbar, T0, L0, T1, L1, T2, L2, T3, L3, T4, L4); } else if (kbar > 3 * k) { int const rem = kbar % 4; int const ka = kbar / 4 + ((rem >= 3) ? 1 : 0); int const kb = kbar / 4 + ((rem >= 2) ? 1 : 0); int const kc = kbar / 4 + ((rem >= 1) ? 1 : 0); int const kd = kbar / 4; if(full || kbar == kk) { mzd_make_table(A, r, c, ka, T0, L0); mzd_make_table(A, r+ka, c, kb, T1, L1); mzd_make_table(A, r+ka+kb, c, kc, T2, L2); mzd_make_table(A, r+ka+kb+kc, c, kd, T3, L3); } if(kbar == kk) mzd_process_rows4(A, r+kbar, A->nrows, c, kbar, T0, L0, T1, L1, T2, L2, T3, L3); if(full) mzd_process_rows4(A, 0, r, c, kbar, T0, L0, T1, L1, T2, L2, T3, L3); } else if (kbar > 2 * k) { int const rem = kbar % 3; int const ka = kbar / 3 + ((rem >= 2) ? 1 : 0); int const kb = kbar / 3 + ((rem >= 1) ? 
1 : 0); int const kc = kbar / 3; if(full || kbar == kk) { mzd_make_table(A, r, c, ka, T0, L0); mzd_make_table(A, r+ka, c, kb, T1, L1); mzd_make_table(A, r+ka+kb, c, kc, T2, L2); } if(kbar == kk) mzd_process_rows3(A, r+kbar, A->nrows, c, kbar, T0, L0, T1, L1, T2, L2); if(full) mzd_process_rows3(A, 0, r, c, kbar, T0, L0, T1, L1, T2, L2); } else if (kbar > k) { int const ka = kbar / 2; int const kb = kbar - ka; if(full || kbar == kk) { mzd_make_table(A, r, c, ka, T0, L0); mzd_make_table(A, r+ka, c, kb, T1, L1); } if(kbar == kk) mzd_process_rows2(A, r+kbar, A->nrows, c, kbar, T0, L0, T1, L1); if(full) mzd_process_rows2(A, 0, r, c, kbar, T0, L0, T1, L1); } else if(kbar > 0) { if(full || kbar == kk) { mzd_make_table(A, r, c, kbar, T0, L0); } if(kbar == kk) mzd_process_rows(A, r+kbar, A->nrows, c, kbar, T0, L0); if(full) mzd_process_rows(A, 0, r, c, kbar, T0, L0); } if (!full) { _mzd_copy_back_rows(A, U, r, c, kbar); } r += kbar; c += kbar; if(kk != kbar) { rci_t cbar; rci_t rbar; if (mzd_find_pivot(A, r, c, &rbar, &cbar)) { c = cbar; mzd_row_swap(A, r, rbar); } else { break; } //c++; } } mzd_free(T0); m4ri_mm_free(L0); mzd_free(T1); m4ri_mm_free(L1); mzd_free(T2); m4ri_mm_free(L2); mzd_free(T3); m4ri_mm_free(L3); mzd_free(T4); m4ri_mm_free(L4); mzd_free(T5); m4ri_mm_free(L5); mzd_free(U); __M4RI_DD_MZD(A); __M4RI_DD_RCI(r); return r; } rci_t _mzd_top_echelonize_m4ri(mzd_t *A, int k, rci_t r, rci_t c, rci_t max_r) { rci_t const ncols = A->ncols; int kbar = 0; if (k == 0) { k = m4ri_opt_k(max_r, A->ncols, 0); if (k >= 7) k = 7; if (0.75 * __M4RI_TWOPOW(k) *A->ncols > __M4RI_CPU_L3_CACHE / 2.0) k -= 1; } int kk = 6 * k; mzd_t *U = mzd_init(kk, A->ncols); mzd_t *T0 = mzd_init(__M4RI_TWOPOW(k), A->ncols); mzd_t *T1 = mzd_init(__M4RI_TWOPOW(k), A->ncols); mzd_t *T2 = mzd_init(__M4RI_TWOPOW(k), A->ncols); mzd_t *T3 = mzd_init(__M4RI_TWOPOW(k), A->ncols); mzd_t *T4 = mzd_init(__M4RI_TWOPOW(k), A->ncols); mzd_t *T5 = mzd_init(__M4RI_TWOPOW(k), A->ncols); rci_t *L0 = (rci_t*)m4ri_mm_calloc(__M4RI_TWOPOW(k), sizeof(rci_t)); rci_t *L1 = (rci_t*)m4ri_mm_calloc(__M4RI_TWOPOW(k), sizeof(rci_t)); rci_t *L2 = (rci_t*)m4ri_mm_calloc(__M4RI_TWOPOW(k), sizeof(rci_t)); rci_t *L3 = (rci_t*)m4ri_mm_calloc(__M4RI_TWOPOW(k), sizeof(rci_t)); rci_t *L4 = (rci_t*)m4ri_mm_calloc(__M4RI_TWOPOW(k), sizeof(rci_t)); rci_t *L5 = (rci_t*)m4ri_mm_calloc(__M4RI_TWOPOW(k), sizeof(rci_t)); while(c < ncols) { if(c+kk > A->ncols) { kk = ncols - c; } kbar = _mzd_gauss_submatrix_full(A, r, c, MIN(A->nrows,r+kk), kk); if (kbar > 5 * k) { int const rem = kbar % 6; int const ka = kbar / 6 + ((rem >= 5) ? 1 : 0); int const kb = kbar / 6 + ((rem >= 4) ? 1 : 0); int const kc = kbar / 6 + ((rem >= 3) ? 1 : 0); int const kd = kbar / 6 + ((rem >= 2) ? 1 : 0); int const ke = kbar / 6 + ((rem >= 1) ? 1 : 0);; int const kf = kbar / 6; mzd_make_table(A, r, c, ka, T0, L0); mzd_make_table(A, r+ka, c, kb, T1, L1); mzd_make_table(A, r+ka+kb, c, kc, T2, L2); mzd_make_table(A, r+ka+kb+kc, c, kd, T3, L3); mzd_make_table(A, r+ka+kb+kc+kd, c, ke, T4, L4); mzd_make_table(A, r+ka+kb+kc+kd+ke, c, kf, T5, L5); mzd_process_rows6(A, 0, MIN(r, max_r), c, kbar, T0, L0, T1, L1, T2, L2, T3, L3, T4, L4, T5, L5); } else if (kbar > 4 * k) { int const rem = kbar % 5; int const ka = kbar / 5 + ((rem >= 4) ? 1 : 0); int const kb = kbar / 5 + ((rem >= 3) ? 1 : 0); int const kc = kbar / 5 + ((rem >= 2) ? 1 : 0); int const kd = kbar / 5 + ((rem >= 1) ? 
1 : 0); int const ke = kbar / 5;
      mzd_make_table(A, r, c, ka, T0, L0);
      mzd_make_table(A, r+ka, c, kb, T1, L1);
      mzd_make_table(A, r+ka+kb, c, kc, T2, L2);
      mzd_make_table(A, r+ka+kb+kc, c, kd, T3, L3);
      mzd_make_table(A, r+ka+kb+kc+kd, c, ke, T4, L4);
      mzd_process_rows5(A, 0, MIN(r, max_r), c, kbar, T0, L0, T1, L1, T2, L2, T3, L3, T4, L4);
    } else if (kbar > 3 * k) {
      const int rem = kbar%4;
      const int ka = kbar/4 + ((rem >= 3) ? 1 : 0);
      const int kb = kbar/4 + ((rem >= 2) ? 1 : 0);
      const int kc = kbar/4 + ((rem >= 1) ? 1 : 0);
      const int kd = kbar/4;
      mzd_make_table(A, r, c, ka, T0, L0);
      mzd_make_table(A, r+ka, c, kb, T1, L1);
      mzd_make_table(A, r+ka+kb, c, kc, T2, L2);
      mzd_make_table(A, r+ka+kb+kc, c, kd, T3, L3);
      mzd_process_rows4(A, 0, MIN(r, max_r), c, kbar, T0, L0, T1, L1, T2, L2, T3, L3);
    } else if (kbar > 2 * k) {
      const int rem = kbar%3;
      const int ka = kbar/3 + ((rem >= 2) ? 1 : 0);
      const int kb = kbar/3 + ((rem >= 1) ? 1 : 0);
      const int kc = kbar/3;
      mzd_make_table(A, r, c, ka, T0, L0);
      mzd_make_table(A, r+ka, c, kb, T1, L1);
      mzd_make_table(A, r+ka+kb, c, kc, T2, L2);
      mzd_process_rows3(A, 0, MIN(r, max_r), c, kbar, T0, L0, T1, L1, T2, L2);
    } else if (kbar > k) {
      const int ka = kbar/2;
      const int kb = kbar - ka;
      mzd_make_table(A, r, c, ka, T0, L0);
      mzd_make_table(A, r+ka, c, kb, T1, L1);
      mzd_process_rows2(A, 0, MIN(r, max_r), c, kbar, T0, L0, T1, L1);
    } else if(kbar > 0) {
      mzd_make_table(A, r, c, kbar, T0, L0);
      mzd_process_rows(A, 0, MIN(r, max_r), c, kbar, T0, L0);
    }
    r += kbar;
    c += kbar;
    if(kk != kbar) {
      c++;
    }
  }
  mzd_free(T0); m4ri_mm_free(L0);
  mzd_free(T1); m4ri_mm_free(L1);
  mzd_free(T2); m4ri_mm_free(L2);
  mzd_free(T3); m4ri_mm_free(L3);
  mzd_free(T4); m4ri_mm_free(L4);
  mzd_free(T5); m4ri_mm_free(L5);
  mzd_free(U);
  __M4RI_DD_MZD(A);
  __M4RI_DD_RCI(r);
  return r;
}

void mzd_top_echelonize_m4ri(mzd_t *M, int k) {
  _mzd_top_echelonize_m4ri(M, k, 0, 0, M->nrows);
}

mzd_t *mzd_inv_m4ri(mzd_t *B, mzd_t const* A, int k) {
  assert(A->nrows == A->ncols);
  if(B == NULL) {
    B = mzd_init(A->nrows, A->ncols);
  } else {
    assert(B->ncols == A->ncols && B->nrows == A->nrows);
  }
  const rci_t n = A->nrows;
  const rci_t nr = m4ri_radix * A->width;
  mzd_t *C = mzd_init(n, 2*nr);
  mzd_t *AW = mzd_init_window(C, 0, 0, n, n);
  mzd_t *BW = mzd_init_window(C, 0, nr, n, nr+n);
  mzd_copy(AW, A);
  mzd_set_ui(BW, 1);
  mzd_echelonize_m4ri(C, TRUE, 0);
  mzd_copy(B, BW);
  mzd_free_window(AW);
  mzd_free_window(BW);
  mzd_free(C);
  __M4RI_DD_MZD(B);
  return B;
}

mzd_t *mzd_mul_m4rm(mzd_t *C, mzd_t const *A, mzd_t const *B, int k) {
  rci_t a = A->nrows;
  rci_t c = B->ncols;
  if(A->ncols != B->nrows)
    m4ri_die("mzd_mul_m4rm: A ncols (%d) needs to match B nrows (%d).\n", A->ncols, B->nrows);
  if (C == NULL) {
    C = mzd_init(a, c);
  } else {
    if (C->nrows != a || C->ncols != c)
      m4ri_die("mzd_mul_m4rm: C (%d x %d) has wrong dimensions.\n", C->nrows, C->ncols);
  }
  return _mzd_mul_m4rm(C, A, B, k, TRUE);
}

mzd_t *mzd_addmul_m4rm(mzd_t *C, mzd_t const *A, mzd_t const *B, int k) {
  rci_t a = A->nrows;
  rci_t c = B->ncols;
  if(A->ncols != B->nrows)
    m4ri_die("mzd_addmul_m4rm: A ncols (%d) needs to match B nrows (%d).\n", A->ncols, B->nrows);
  if (C == NULL) {
    C = mzd_init(a, c);
  } else {
    if (C->nrows != a || C->ncols != c)
      m4ri_die("mzd_addmul_m4rm: C has wrong dimensions.\n");
  }
  /* test the (possibly freshly allocated) C only after the NULL check above */
  if(C->ncols == 0 || C->nrows == 0) return C;
  return _mzd_mul_m4rm(C, A, B, k, FALSE);
}

#define __M4RI_M4RM_NTABLES 8

mzd_t *_mzd_mul_m4rm(mzd_t *C, mzd_t const *A, mzd_t const *B, int k, int clear) {
  /**
   * The algorithm proceeds as follows:
   *
   * Step 1.
Make a Gray code table of all the \f$2^k\f$ linear combinations * of the \f$k\f$ rows of \f$B_i\f$. Call the \f$x\f$-th row * \f$T_x\f$. * * Step 2. Read the entries * \f$a_{j,(i-1)k+1}, a_{j,(i-1)k+2} , ... , a_{j,(i-1)k+k}.\f$ * * Let \f$x\f$ be the \f$k\f$ bit binary number formed by the * concatenation of \f$a_{j,(i-1)k+1}, ... , a_{j,ik}\f$. * * Step 3. for \f$h = 1,2, ... , c\f$ do * calculate \f$C_{jh} = C_{jh} + T_{xh}\f$. */ rci_t x[__M4RI_M4RM_NTABLES]; rci_t *L[__M4RI_M4RM_NTABLES]; word const *t[__M4RI_M4RM_NTABLES]; mzd_t *T[__M4RI_M4RM_NTABLES]; #ifdef __M4RI_HAVE_SSE2 mzd_t *Talign[__M4RI_M4RM_NTABLES]; int c_align = (__M4RI_ALIGNMENT(C->rows[0], 16) == 8); #endif word *c; rci_t const a_nr = A->nrows; rci_t const a_nc = A->ncols; rci_t const b_nc = B->ncols; if (b_nc < m4ri_radix-10 || a_nr < 16) { if(clear) return mzd_mul_naive(C, A, B); else return mzd_addmul_naive(C, A, B); } /* clear first */ if (clear) { mzd_set_ui(C, 0); } const int blocksize = __M4RI_MUL_BLOCKSIZE; if(k==0) { /* __M4RI_CPU_L2_CACHE == 2^k * B->width * 8 * 8 */ k = (int)log2((__M4RI_CPU_L2_CACHE/64)/(double)B->width); if ((__M4RI_CPU_L2_CACHE - 64*__M4RI_TWOPOW(k)*B->width) > (64*__M4RI_TWOPOW(k+1)*B->width - __M4RI_CPU_L2_CACHE)) k++; rci_t const klog = round(0.75 * log2_floor(MIN(MIN(a_nr,a_nc),b_nc))); if(klog < k) k = klog; } if (k<2) k=2; else if(k>8) k=8; const wi_t wide = C->width; const word bm = __M4RI_TWOPOW(k)-1; rci_t *buffer = (rci_t*)m4ri_mm_malloc(__M4RI_M4RM_NTABLES * __M4RI_TWOPOW(k) * sizeof(rci_t)); for(int z=0; z<__M4RI_M4RM_NTABLES; z++) { L[z] = buffer + z*__M4RI_TWOPOW(k); #ifdef __M4RI_HAVE_SSE2 /* we make sure that T are aligned as C */ Talign[z] = mzd_init(__M4RI_TWOPOW(k), b_nc+m4ri_radix); T[z] = mzd_init_window(Talign[z], 0, c_align*m4ri_radix, Talign[z]->nrows, b_nc + c_align*m4ri_radix); #else T[z] = mzd_init(__M4RI_TWOPOW(k), b_nc); #endif } /* process stuff that fits into multiple of k first, but blockwise (babystep-giantstep)*/ int const kk = __M4RI_M4RM_NTABLES * k; assert(kk <= m4ri_radix); rci_t const end = a_nc / kk; for (rci_t giantstep = 0; giantstep < a_nr; giantstep += blocksize) { for(rci_t i = 0; i < end; ++i) { #if __M4RI_HAVE_OPENMP #pragma omp parallel for schedule(static,1) #endif for(int z=0; z<__M4RI_M4RM_NTABLES; z++) { mzd_make_table( B, kk*i + k*z, 0, k, T[z], L[z]); } const rci_t blockend = MIN(giantstep+blocksize, a_nr); #if __M4RI_HAVE_OPENMP #pragma omp parallel for schedule(static,512) private(x,t) #endif for(rci_t j = giantstep; j < blockend; j++) { const word a = mzd_read_bits(A, j, kk*i, kk); switch(__M4RI_M4RM_NTABLES) { case 8: t[7] = T[ 7]->rows[ L[7][ (a >> 7*k) & bm ] ]; case 7: t[6] = T[ 6]->rows[ L[6][ (a >> 6*k) & bm ] ]; case 6: t[5] = T[ 5]->rows[ L[5][ (a >> 5*k) & bm ] ]; case 5: t[4] = T[ 4]->rows[ L[4][ (a >> 4*k) & bm ] ]; case 4: t[3] = T[ 3]->rows[ L[3][ (a >> 3*k) & bm ] ]; case 3: t[2] = T[ 2]->rows[ L[2][ (a >> 2*k) & bm ] ]; case 2: t[1] = T[ 1]->rows[ L[1][ (a >> 1*k) & bm ] ]; case 1: t[0] = T[ 0]->rows[ L[0][ (a >> 0*k) & bm ] ]; break; default: m4ri_die("__M4RI_M4RM_NTABLES must be <= 8 but got %d", __M4RI_M4RM_NTABLES); } c = C->rows[j]; switch(__M4RI_M4RM_NTABLES) { case 8: _mzd_combine_8(c, t, wide); break; case 7: _mzd_combine_7(c, t, wide); break; case 6: _mzd_combine_6(c, t, wide); break; case 5: _mzd_combine_5(c, t, wide); break; case 4: _mzd_combine_4(c, t, wide); break; case 3: _mzd_combine_3(c, t, wide); break; case 2: _mzd_combine_2(c, t, wide); break; case 1: _mzd_combine(c, t[0], wide); break; default: 
m4ri_die("__M4RI_M4RM_NTABLES must be <= 8 but got %d", __M4RI_M4RM_NTABLES); } } } } /* handle stuff that doesn't fit into multiple of kk */ if (a_nc%kk) { rci_t i; for (i = kk / k * end; i < a_nc / k; ++i) { mzd_make_table( B, k*i, 0, k, T[0], L[0]); for(rci_t j = 0; j < a_nr; ++j) { x[0] = L[0][ mzd_read_bits_int(A, j, k*i, k) ]; c = C->rows[j]; t[0] = T[0]->rows[x[0]]; for(wi_t ii = 0; ii < wide; ++ii) { c[ii] ^= t[0][ii]; } } } /* handle stuff that doesn't fit into multiple of k */ if (a_nc%k) { mzd_make_table( B, k*(a_nc/k), 0, a_nc%k, T[0], L[0]); for(rci_t j = 0; j < a_nr; ++j) { x[0] = L[0][ mzd_read_bits_int(A, j, k*i, a_nc%k) ]; c = C->rows[j]; t[0] = T[0]->rows[x[0]]; for(wi_t ii = 0; ii < wide; ++ii) { c[ii] ^= t[0][ii]; } } } } for(int j=0; j<__M4RI_M4RM_NTABLES; j++) { mzd_free(T[j]); #ifdef __M4RI_HAVE_SSE2 mzd_free(Talign[j]); #endif } m4ri_mm_free(buffer); __M4RI_DD_MZD(C); return C; }
rose_scalar_true.c
/* * Scalar-to-scalar dependencies * */ #include "omp.h" int a[100]; void foo2() { int i; int tmp; #pragma omp parallel for private (tmp,i) for (i = 0; i <= 99; i += 1) { tmp = a[i] + i; a[i] = tmp; } } void foo() { int i; int tmp; #pragma omp parallel for private (i) lastprivate (tmp) for (i = 0; i <= 99; i += 1) { tmp = a[i] + i; a[i] = tmp; } i = tmp; }
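/*
 * An illustrative note, not part of the ROSE test case: in foo2() the value
 * of tmp is dead after the loop, so private(tmp) suffices; in foo() the
 * assignment i = tmp after the loop reads the value produced by the final
 * iteration (sequentially, tmp == a[99] + 99), which is exactly what
 * lastprivate(tmp) preserves across the parallel loop.
 */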
GB_binop__lt_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lt_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__lt_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__lt_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__lt_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_uint64) // A*D function (colscale): GB (_AxD__lt_uint64) // D*A function (rowscale): GB (_DxB__lt_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__lt_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__lt_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_uint64) // C=scalar+B GB (_bind1st__lt_uint64) // C=scalar+B' GB (_bind1st_tran__lt_uint64) // C=A+scalar GB (_bind2nd__lt_uint64) // C=A'+scalar GB (_bind2nd_tran__lt_uint64) // C type: bool // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_UINT64 || GxB_NO_LT_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__lt_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lt_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lt_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lt_uint64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lt_uint64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lt_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__lt_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lt_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__lt_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lt_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lt_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lt_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__lt_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { 
// GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__lt_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
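/*
 * A minimal standalone sketch of the bind2nd kernel pattern used by
 * GB (_bind2nd__lt_uint64) above, not library code: apply z = (x < y)
 * with a fixed scalar y across a vector, skipping entries that an
 * optional bitmap marks as absent. The function name is illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

static void demo_bind2nd_lt_uint64 (bool *Cx, const uint64_t *Ax,
    const int8_t *Ab, int64_t anz, uint64_t y, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        /* like GBB (Ab, p) in the real kernel: a NULL bitmap means
         * all entries are present */
        if (Ab != NULL && !Ab [p]) continue ;
        Cx [p] = (Ax [p] < y) ;
    }
}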
example-omp.c
// PWD011: Missing OpenMP lastprivate clause // https://www.appentra.com/knowledge/checks/pwd011 double example(int m, double *A, double *B, double *C) { double liveOut; // liveOut is private but used after the loop, should be lastprivate #pragma omp parallel for private(liveOut) for (int i = 0; i < m; i++) { liveOut = A[i] * B[i]; C[i] = C[i] + liveOut; } liveOut += 5; return liveOut; }
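// A corrected variant, not part of the original check example: the PWD011
// fix is to declare liveOut lastprivate, so the value from the last
// iteration (i == m-1) survives the loop, matching sequential semantics.
double example_fixed(int m, double *A, double *B, double *C) {
  double liveOut = 0.0;
  #pragma omp parallel for lastprivate(liveOut)
  for (int i = 0; i < m; i++) {
    liveOut = A[i] * B[i];
    C[i] = C[i] + liveOut;
  }
  liveOut += 5;
  return liveOut;
}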
data.c
#include "../mesh.h" #include "../params.h" #include "../shared.h" #include "../umesh.h" #include <math.h> #include <stdlib.h> // Checks if two strings match #pragma omp declare target int device_strmatch(const char* str1, const char* str2) { int ii = 0; for (ii = 0; str1[ii] != '\0'; ++ii) { if (str1[ii] != str2[ii]) { return 0; } } return str1[ii] == str2[ii]; } #pragma omp end declare target // Allocates some double precision data size_t allocate_data(double** buf, size_t len) { if(!len) { return 0; } allocate_host_data(buf, len); double* local_buf = *buf; #pragma omp target enter data map(to : local_buf[ : len]) #pragma omp target teams distribute parallel for for (size_t ii = 0; ii < len; ++ii) { local_buf[ii] = 0.0; } return sizeof(double) * len; } // Allocates some int precision data size_t allocate_int_data(int** buf, size_t len) { if(!len) { return 0; } allocate_host_int_data(buf, len); int* local_buf = *buf; #pragma omp target enter data map(to : local_buf[ : len]) #pragma omp target teams distribute parallel for for (size_t ii = 0; ii < len; ++ii) { local_buf[ii] = 0; } return sizeof(int) * len; } // Allocates some int precision data size_t allocate_uint64_data(uint64_t** buf, size_t len) { if(!len) { return 0; } allocate_host_uint64_data(buf, len); uint64_t* local_buf = *buf; #pragma omp target enter data map(to : local_buf[ : len]) #pragma omp target teams distribute parallel for for (size_t ii = 0; ii < len; ++ii) { local_buf[ii] = 0; } return sizeof(uint64_t) * len; } // Allocates a host copy of some buffer void allocate_host_data(double** buf, size_t len) { #ifdef INTEL *buf = (double*)_mm_malloc(sizeof(double) * len, VEC_ALIGN); #else *buf = (double*)malloc(sizeof(double) * len); #endif if (*buf == NULL) { TERMINATE("Failed to allocate a data array.\n"); } } // Allocates a host copy of some integer buffer void allocate_host_int_data(int** buf, size_t len) { #ifdef INTEL *buf = (int*)_mm_malloc(sizeof(int) * len, VEC_ALIGN); #else *buf = (int*)malloc(sizeof(int) * len); #endif if (*buf == NULL) { TERMINATE("Failed to allocate a data array.\n"); } } void allocate_host_uint64_data(uint64_t** buf, const size_t len) { #ifdef INTEL *buf = (uint64_t*)_mm_malloc(sizeof(uint64_t) * len, VEC_ALIGN); #else *buf = (uint64_t*)malloc(sizeof(uint64_t) * len); #endif if (*buf == NULL) { TERMINATE("Failed to allocate a data array.\n"); } } // Allocates a data array void deallocate_data(double* buf) { #pragma omp target exit data map(delete : buf) } // Allocates a data array void deallocate_int_data(int* buf) { #pragma omp target exit data map(delete : buf) } // Allocates a data array void deallocate_host_data(double* buf) { #ifdef INTEL _mm_free(buf); #else free(buf); #endif } // Synchronise data void copy_buffer(const size_t len, double** src, double** dst, int send) { double* local_src = *src; if (send == SEND) { #pragma omp target update to(local_src[ : len]) } else { #pragma omp target update from(local_src[ : len]) } *dst = *src; } // Move a host buffer onto the device void move_host_buffer_to_device(const size_t len, double** src, double** dst) { double* local_src = *src; #pragma omp target enter data map(to : local_src[ : len]) *dst = *src; } // Initialises mesh data in device specific manner void mesh_data_init_2d(const int local_nx, const int local_ny, const int global_nx, const int global_ny, const int pad, const int x_off, const int y_off, const double width, const double height, double* edgex, double* edgey, double* edgedx, double* edgedy, double* celldx, double* celldy) { // 
Simple uniform rectilinear initialisation #pragma omp target teams distribute parallel for for (int ii = 0; ii < local_nx + 1; ++ii) { edgedx[ii] = width / (global_nx); // Note: correcting for padding edgex[ii] = edgedx[ii] * (x_off + ii - pad); } #pragma omp target teams distribute parallel for for (int ii = 0; ii < local_nx; ++ii) { celldx[ii] = width / (global_nx); } #pragma omp target teams distribute parallel for for (int ii = 0; ii < local_ny + 1; ++ii) { edgedy[ii] = height / (global_ny); // Note: correcting for padding edgey[ii] = edgedy[ii] * (y_off + ii - pad); } #pragma omp target teams distribute parallel for for (int ii = 0; ii < local_ny; ++ii) { celldy[ii] = height / (global_ny); } } // Initialises mesh data in device specific manner void mesh_data_init_3d(const int local_nx, const int local_ny, const int local_nz, const int global_nx, const int global_ny, const int global_nz, const int pad, const int x_off, const int y_off, const int z_off, const double width, const double height, const double depth, double* edgex, double* edgey, double* edgez, double* edgedx, double* edgedy, double* edgedz, double* celldx, double* celldy, double* celldz) { // Initialise as in the 2d case mesh_data_init_2d(local_nx, local_ny, global_nx, global_ny, pad, x_off, y_off, width, height, edgex, edgey, edgedx, edgedy, celldx, celldy); // Simple uniform rectilinear initialisation #pragma omp target teams distribute parallel for for (int ii = 0; ii < local_nz + 1; ++ii) { edgedz[ii] = depth / (global_nz); edgez[ii] = edgedz[ii] * (z_off + ii - pad); } #pragma omp target teams distribute parallel for for (int ii = 0; ii < local_nz; ++ii) { celldz[ii] = depth / (global_nz); } } // Initialise state data in device specific manner void set_problem_2d(const int local_nx, const int local_ny, const int pad, const double mesh_width, const double mesh_height, const double* edgex, const double* edgey, const int ndims, const char* problem_def_filename, double* rho, double* e, double* x) { char* keys = (char*)malloc(sizeof(char) * MAX_KEYS * MAX_STR_LEN); #pragma omp target enter data map(to : keys[ : MAX_KEYS* MAX_STR_LEN]) double* values; allocate_data(&values, MAX_KEYS); int nentries = 0; while (1) { char specifier[MAX_STR_LEN]; sprintf(specifier, "problem_%d", nentries++); int nkeys = 0; if (!get_key_value_parameter(specifier, problem_def_filename, keys, values, &nkeys)) { break; } copy_buffer(MAX_KEYS, &values, &values, SEND); #pragma omp target update to(keys[ : MAX_KEYS* MAX_STR_LEN]) // The last four keys are the bound specification double xpos = values[nkeys - 4] * mesh_width; double ypos = values[nkeys - 3] * mesh_height; double width = values[nkeys - 2] * mesh_width; double height = values[nkeys - 1] * mesh_height; int failed = 0; // Loop through the mesh and set the problem #pragma omp target teams distribute parallel for \ map(tofrom : failed) reduction(+ : failed) for (int ii = pad; ii < local_ny - pad; ++ii) { for (int jj = pad; jj < local_nx - pad; ++jj) { double global_xpos = edgex[jj]; double global_ypos = edgey[ii]; // Check we are in bounds of the problem entry if (global_xpos >= xpos && global_ypos >= ypos && global_xpos < xpos + width && global_ypos < ypos + height) { // The upper bound excludes the bounding box for the entry for (int kk = 0; kk < nkeys - (2 * ndims); ++kk) { const char* key = &keys[kk * MAX_STR_LEN]; if (device_strmatch(key, "density")) { rho[ii * local_nx + jj] = values[kk]; } else if (device_strmatch(key, "energy")) { e[ii * local_nx + jj] = values[kk]; } else if 
(device_strmatch(key, "temperature")) { x[ii * local_nx + jj] = values[kk]; } else { failed++; } } } } } if (failed) { TERMINATE("Found unrecognised key in %s.\n", problem_def_filename); } } free(keys); deallocate_data(values); } // Initialise state data in device specific manner void set_problem_3d(const int local_nx, const int local_ny, const int local_nz, const int pad, const double mesh_width, const double mesh_height, const double mesh_depth, const double* edgex, const double* edgey, const double* edgez, const int ndims, const char* problem_def_filename, double* rho, double* e, double* x) { char* keys = (char*)malloc(sizeof(char) * MAX_KEYS * MAX_STR_LEN); #pragma omp target enter data map(to : keys[ : MAX_KEYS* MAX_STR_LEN]) double* values; allocate_data(&values, MAX_KEYS); int nentries = 0; while (1) { char specifier[MAX_STR_LEN]; sprintf(specifier, "problem_%d", nentries++); int nkeys = 0; if (!get_key_value_parameter(specifier, problem_def_filename, keys, values, &nkeys)) { break; } copy_buffer(MAX_KEYS, &values, &values, SEND); #pragma omp target update to(keys[ : MAX_KEYS* MAX_STR_LEN]) // The last four keys are the bound specification double xpos = values[nkeys - 6] * mesh_width; double ypos = values[nkeys - 5] * mesh_height; double zpos = values[nkeys - 4] * mesh_depth; double width = values[nkeys - 3] * mesh_width; double height = values[nkeys - 2] * mesh_height; double depth = values[nkeys - 1] * mesh_depth; int failed = 0; // Loop through the mesh and set the problem #pragma omp target teams distribute parallel for \ map(tofrom: failed) reduction(+: failed) for (int ii = pad; ii < local_nz - pad; ++ii) { for (int jj = pad; jj < local_ny - pad; ++jj) { for (int kk = pad; kk < local_nx - pad; ++kk) { double global_xpos = edgex[kk]; double global_ypos = edgey[jj]; double global_zpos = edgez[ii]; // Check we are in bounds of the problem entry if (global_xpos >= xpos && global_ypos >= ypos && global_zpos >= zpos && global_xpos < xpos + width && global_ypos < ypos + height && global_zpos < zpos + depth) { // The upper bound excludes the bounding box for the entry for (int ee = 0; ee < nkeys - (2 * ndims); ++ee) { const int index = (ii * local_nx * local_ny) + (jj * local_nx) + (kk); const char* key = &keys[ee * MAX_STR_LEN]; if (device_strmatch(key, "density")) { rho[(index)] = values[ee]; } else if (device_strmatch(key, "energy")) { e[(index)] = values[ee]; } else if (device_strmatch(key, "temperature")) { x[(index)] = values[ee]; } else { failed++; } } } } } } if(failed) { TERMINATE("Found unrecognised key in %s.\n", problem_def_filename); } } free(keys); deallocate_data(values); } // Finds the normals for all boundary cells void find_boundary_normals(UnstructuredMesh* umesh, int* boundary_edge_list) { const int nnodes = umesh->nnodes; const int nboundary_nodes = umesh->nboundary_nodes; const int* boundary_index = umesh->boundary_index; const double* nodes_x0 = umesh->nodes_x0; const double* nodes_y0 = umesh->nodes_y0; int* boundary_type = umesh->boundary_type; double* boundary_normal_x = umesh->boundary_normal_x; double* boundary_normal_y = umesh->boundary_normal_y; // Loop through all of the boundary cells and find their normals #pragma omp target teams distribute parallel for for (int nn = 0; nn < nnodes; ++nn) { const int bi = boundary_index[(nn)]; if (bi == IS_INTERIOR) { continue; } double normal_x = 0.0; double normal_y = 0.0; for (int bb1 = 0; bb1 < nboundary_nodes; ++bb1) { const int node0 = boundary_edge_list[bb1 * 2]; const int node1 = boundary_edge_list[bb1 * 2 + 
1]; if (node0 == nn || node1 == nn) { const double node0_x = nodes_x0[(node0)]; const double node0_y = nodes_y0[(node0)]; const double node1_x = nodes_x0[(node1)]; const double node1_y = nodes_y0[(node1)]; normal_x += node0_y - node1_y; normal_y += -(node0_x - node1_x); } } // We are fixed if we are one of the four corners if ((nodes_x0[(nn)] == 0.0 || nodes_x0[(nn)] == 1.0) && (nodes_y0[(nn)] == 0.0 || nodes_y0[(nn)] == 1.0)) { boundary_type[(bi)] = IS_CORNER; } else { boundary_type[(bi)] = IS_BOUNDARY; } const double normal_mag = sqrt(normal_x * normal_x + normal_y * normal_y); boundary_normal_x[(bi)] = normal_x / normal_mag; boundary_normal_y[(bi)] = normal_y / normal_mag; } }
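/*
 * A minimal self-contained sketch of the offload lifecycle the helpers
 * above implement (allocate_data / copy_buffer / deallocate_data), using
 * only standard OpenMP target directives and no project headers; the
 * buffer and function names here are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

int demo_offload(void) {
  const size_t len = 1024;
  double* buf = (double*)malloc(sizeof(double) * len);

  // allocate_data(): create the device copy and initialise it there
  #pragma omp target enter data map(to : buf[ : len])
  #pragma omp target teams distribute parallel for
  for (size_t ii = 0; ii < len; ++ii) {
    buf[ii] = 0.0;
  }

  // copy_buffer(..., RECV): pull the device contents back to the host
  #pragma omp target update from(buf[ : len])
  printf("buf[0] = %f\n", buf[0]);

  // deallocate_data(): drop the device copy, then free the host copy
  #pragma omp target exit data map(delete : buf[ : len])
  free(buf);
  return 0;
}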
NETNTLM_bs_fmt_plug.c
/* * NETNTLM_fmt.c -- NTLM Challenge/Response * * Written by JoMo-Kun <jmk at foofus.net> in 2007 * and placed in the public domain. * * Modified for performance, support for Extended Session Security, OMP * and UTF-8, by magnum 2010-2011. * Modified for using Bitsliced DES by Deepika Dutta Mishra * <dipikadutta at gmail.com> in 2013, no rights reserved. * * This algorithm is designed for performing brute-force cracking of the NTLM * (version 1) challenge/response pairs exchanged during network-based * authentication attempts [1]. The captured challenge/response pairs from these * attempts should be stored using the L0phtCrack 2.0 LC format, specifically: * username:unused:unused:lm response:ntlm response:challenge. For example: * * CORP\Administrator:::25B2B477CE101D83648BB087CE7A1C217F51C7FC64C0EBB1: * C8BD0C1630A9ECF7A95F494A8F0B2CB4A3F25B1225514304:1122334455667788 * * It should be noted that a NTLM authentication response is not same as a NTLM * password hash, which can be extracted using tools such as FgDump [2]. NTLM * responses can be gathered via normal network capture or via tools which * perform layer 2 attacks, such as Ettercap [3] and Cain [4]. The responses can * also be harvested using a modified Samba service [5] in conjunction with * some trickery to convince the user to connect to it. I leave what that * trickery may actually be as an exercise for the reader (HINT: Karma, NMB * broadcasts, IE, Outlook, social engineering, ...). * * [1] http://davenport.sourceforge.net/ntlm.html#theNtlmResponse * [2] http://www.foofus.net/~fizzgig/fgdump/ * [3] http://ettercap.sourceforge.net/ * [4] http://www.oxid.it/cain.html * [5] http://www.foofus.net/jmk/smbchallenge.html * * This version supports Extended Session Security. This is what * is used when the "LM" hash ends in 32 zeros: * * DOMAIN\User:::c70e4fb229437ef300000000000000000000000000000000: * abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9:24ca92fdab441aa4 * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_NETNTLM_old; #elif FMT_REGISTERS_H john_register_one(&fmt_NETNTLM_old); #else #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "DES_std.h" #include "DES_bs.h" #include "misc.h" #include "common.h" #include "formats.h" #include "options.h" #include "md5.h" #include "unicode.h" #include "memdbg.h" #ifndef uchar #define uchar unsigned char #endif #define FORMAT_LABEL "netntlm-naive" #define FORMAT_NAME "NTLMv1 C/R" #define FORMAT_TAG "$NETNTLM$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "MD4 DES (ESS MD5) " DES_BS_ALGORITHM_NAME " naive" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 125 #define BINARY_SIZE 24 #define BINARY_ALIGN 4 #define PARTIAL_BINARY_SIZE 8 #define SALT_SIZE 8 #define SALT_ALIGN 4 #define CIPHERTEXT_LENGTH 48 #define TOTAL_LENGTH (10 + 2 * 2 * SALT_SIZE + CIPHERTEXT_LENGTH) #define MIN_KEYS_PER_CRYPT DES_BS_DEPTH #define MAX_KEYS_PER_CRYPT DES_BS_DEPTH static struct fmt_tests tests[] = { {"", "FooBarGerg", {"User", "", "", "lm-hash", "35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "1122334455667788"} }, {"$NETNTLM$1122334455667788$BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "g3rg3g3rg3g3rg3"}, {"$NETNTLM$1122334455667788$E463FAA5D868ECE20CAE622474A2F440A652D642156AF863", "M1xedC4se%^&*@)##(blahblah!@#"}, {"$NETNTLM$c75c20bff9baa71f4765f360625700b0$81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "password"}, {"$NETNTLM$1122334455667788$35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", 
"FooBarGerg"}, {"$NETNTLM$1122334455667788$A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "visit www.foofus.net"}, {"$NETNTLM$24ca92fdab441aa4c70e4fb229437ef3$abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9", "longpassword"}, {"$NETNTLM$1122334455667788$B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "cory21"}, {"", "g3rg3g3rg3g3rg3", {"User", "", "", "lm-hash", "BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "1122334455667788"} }, {"", "M1xedC4se%^&*@)##(blahblah!@#", {"User", "", "", "lm-hash", "E463FAA5D868ECE20CAE622474A2F440A652D642156AF863", "1122334455667788"} }, {"", "visit www.foofus.net", {"User", "", "", "lm-hash", "A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "1122334455667788"} }, {"", "password", {"ESS", "", "", "4765f360625700b000000000000000000000000000000000", "81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "c75c20bff9baa71f"} }, {"", "cory21", {"User", "", "", "lm-hash", "B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "1122334455667788"} }, {NULL} }; static char (*saved_plain)[PLAINTEXT_LENGTH + 1]; static int (*saved_len); static uchar (*output)[PARTIAL_BINARY_SIZE]; static uchar (*saved_key)[21]; // NT hash static uchar *challenge; static int keys_prepared; static void set_salt(void *salt); static void init(struct fmt_main *self) { /* LM =2 for DES encryption with no salt and no iterations */ DES_bs_init(2, DES_bs_cpt); #if DES_bs_mt self->params.min_keys_per_crypt = DES_bs_min_kpc; self->params.max_keys_per_crypt = DES_bs_max_kpc; #endif saved_plain = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_plain)); saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); output = mem_calloc(self->params.max_keys_per_crypt, sizeof(*output)); saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); } static void done(void) { MEM_FREE(saved_key); MEM_FREE(output); MEM_FREE(saved_len); MEM_FREE(saved_plain); } static int valid(char *ciphertext, struct fmt_main *self) { char *pos; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)!=0) return 0; if ((strlen(ciphertext) != 74) && (strlen(ciphertext) != 90)) return 0; if ((ciphertext[25] != '$') && (ciphertext[41] != '$')) return 0; for (pos = &ciphertext[FORMAT_TAG_LEN]; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++); if (*pos != '$') return 0; for (pos++;atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++); if (!*pos && ((pos - ciphertext - 26 == CIPHERTEXT_LENGTH) || (pos - ciphertext - 42 == CIPHERTEXT_LENGTH))) return 1; else return 0; } static char *prepare(char *split_fields[10], struct fmt_main *self) { char *cp; char clientChal[17]; char *srv_challenge = split_fields[3]; char *nethashv2 = split_fields[4]; char *cli_challenge = split_fields[5]; if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN)) return split_fields[1]; if (!srv_challenge || !nethashv2 || !cli_challenge) return split_fields[1]; if (strlen(nethashv2) != CIPHERTEXT_LENGTH) return split_fields[1]; // this string suggests we have an improperly formatted NTLMv2 if (!strncmp(&nethashv2[32], "0101000000000000", 16)) return split_fields[1]; // Handle ESS (8 byte client challenge in "LM" field padded with zeros) if (strlen(srv_challenge) == 48 && !strncmp(&srv_challenge[16], "00000000000000000000000000000000", 32)) { memcpy(clientChal, srv_challenge,16); clientChal[16] = 0; } else clientChal[0] = 0; cp = mem_alloc(FORMAT_TAG_LEN+strlen(cli_challenge)+strlen(clientChal)+1+strlen(nethashv2)+1); sprintf(cp, "%s%s%s$%s", FORMAT_TAG, cli_challenge, clientChal, nethashv2); if (valid(cp,self)) { char *cp2 = 
str_alloc_copy(cp); MEM_FREE(cp); return cp2; } MEM_FREE(cp); return split_fields[1]; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[TOTAL_LENGTH + 1]; memset(out, 0, TOTAL_LENGTH + 1); strcpy(out, ciphertext); strlwr(&out[FORMAT_TAG_LEN]); /* Exclude: $NETNTLM$ */ return out; } static uint32_t *generate_des_format(uchar* binary) { static uint32_t out[6]; ARCH_WORD block[6]; int chr, src,dst,i; uchar value, mask; ARCH_WORD *ptr; memset(block, 0, sizeof(block)); for (chr = 0; chr < 24; chr=chr + 8) { dst = 0; for (i=0; i<8; i++) { value = binary[chr + i]; mask = 0x80; for (src = 0; src < 8; src++) { if (value & mask) block[(chr/4) + (dst>>5)] |= 1U << (dst & 0x1F); mask >>= 1; dst++; } } } /* Apply initial permutation on ciphertext blocks */ for (i=0; i<6; i=i+2) { ptr = DES_do_IP(&block[i]); out[i] = ptr[1]; out[i+1] = ptr[0]; } return out; } static void *get_binary(char *ciphertext) { uchar binary[BINARY_SIZE]; int i; uint32_t *ptr; ciphertext = strrchr(ciphertext, '$') + 1; for (i=0; i<BINARY_SIZE; i++) { binary[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])])<<4; binary[i] |= (atoi16[ARCH_INDEX(ciphertext[i*2+1])]); } /* Set binary in DES format */ ptr = generate_des_format(binary); return ptr; } inline static void setup_des_key(unsigned char key_56[], int index) { char key[8]; /* Right shift key bytes by 1 to bring in openssl format */ /* Each byte of key is xored with 0x80 to pass check for 0 in DES_bs_set_key() */ key[0] = (key_56[0] >> 1) | 0x80; key[1] = (((key_56[0] << 7) | (key_56[1] >> 1)) >>1) | 0x80; key[2] = (((key_56[1] << 6) | (key_56[2] >> 2)) >>1) | 0x80; key[3] = (((key_56[2] << 5) | (key_56[3] >> 3)) >>1) | 0x80; key[4] = (((key_56[3] << 4) | (key_56[4] >> 4)) >>1) | 0x80; key[5] = (((key_56[4] << 3) | (key_56[5] >> 5)) >>1) | 0x80; key[6] = (((key_56[5] << 2) | (key_56[6] >> 6)) >>1) | 0x80; key[7] = ((key_56[6] << 1) >>1 ) | 0x80; DES_bs_set_key((char*)key, index); } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int i; if (!keys_prepared) { #ifdef _OPENMP #pragma omp parallel for #endif for (i = 0; i < count; i++) { int len; /* Generate 16-byte NTLM hash */ len = E_md4hash((uchar *) saved_plain[i], saved_len[i], saved_key[i]); if (len <= 0) saved_plain[i][-len] = 0; // match truncation /* NULL-padding the 16-byte hash to 21-bytes is made in cmp_exact if needed */ setup_des_key(saved_key[i], i); } keys_prepared = 1; } /* Bitsliced des encryption */ DES_bs_crypt_plain(count); return count; } static int cmp_all(void *binary, int count) { return DES_bs_cmp_all((uint32_t *)binary, count); } static int cmp_one(void *binary, int index) { return DES_bs_cmp_one((uint32_t *)binary, 32, index); } static int cmp_exact(char *source, int index) { uint32_t *binary = get_binary(source); if (!DES_bs_cmp_one(binary, 64, index)) return 0; setup_des_key(&saved_key[index][7], 0); DES_bs_crypt_plain(1); if (!DES_bs_cmp_one(&binary[2], 64, 0)) { setup_des_key(saved_key[0], 0); DES_bs_crypt_plain(1); return 0; } /* NULL-pad 16-byte NTLM hash to 21-bytes (postponed until now) */ memset(&saved_key[index][16], 0, 5); setup_des_key(&saved_key[index][14], 0); DES_bs_crypt_plain(1); if (!DES_bs_cmp_one(&binary[4], 64, 0)) { setup_des_key(saved_key[0], 0); DES_bs_crypt_plain(1); return 0; } setup_des_key(saved_key[0], 0); DES_bs_crypt_plain(1); return 1; } static void *get_salt(char *ciphertext) { static uchar *binary_salt; int i, cnt,j; unsigned char temp[SALT_SIZE]; if (!binary_salt) binary_salt = 
mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD); if (ciphertext[25] == '$') { // Server challenge ciphertext += FORMAT_TAG_LEN; for (i = 0; i < SALT_SIZE; ++i) binary_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])]; } else { uchar es_salt[2*SALT_SIZE], k1[2*SALT_SIZE]; MD5_CTX ctx; ciphertext += FORMAT_TAG_LEN; // Extended Session Security, // Concatenate Server & Client challenges for (i = 0;i < 2 * SALT_SIZE; ++i) es_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])]; // MD5 the concatenated challenges, result is our key MD5_Init(&ctx); MD5_Update(&ctx, es_salt, 16); MD5_Final((void*)k1, &ctx); memcpy(binary_salt, k1, SALT_SIZE); // but only 8 bytes of it } /* Apply IP to salt */ memset(temp, 0, SALT_SIZE); for (i = 0; i < 64; i++) { cnt = DES_IP[i ^ 0x20]; j = (uchar)((binary_salt[cnt >> 3] >> (7 - (cnt & 7))) & 1); temp[i/8] |= j << (7 - (i % 8)); } memcpy(binary_salt, temp, SALT_SIZE); return (void*)binary_salt; } static void set_salt(void *salt) { challenge = salt; DES_bs_generate_plaintext(challenge); } static void netntlm_set_key(char *key, int index) { saved_len[index] = strnzcpyn(saved_plain[index], key, sizeof(*saved_plain)); keys_prepared = 0; } static char *get_key(int index) { return saved_plain[index]; } static int salt_hash(void *salt) { return *(uint32_t *)salt & (SALT_HASH_SIZE - 1); } struct fmt_main fmt_NETNTLM_old = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #if DES_BS FMT_BS | #if DES_bs_mt FMT_OMP | #endif #endif FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, prepare, valid, split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, netntlm_set_key, get_key, fmt_default_clear_keys, crypt_all, { DES_bs_get_hash_0, DES_bs_get_hash_1, DES_bs_get_hash_2, DES_bs_get_hash_3, DES_bs_get_hash_4, DES_bs_get_hash_5, DES_bs_get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
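/*
 * An illustrative helper, not part of the original format code: prepare()
 * above recognises Extended Session Security by an "LM response" field
 * that is 48 hex digits whose last 32 are all zero (the 8-byte client
 * challenge padded with zeros). A minimal standalone check of that rule:
 */
#include <string.h>

static int demo_is_ess(const char *lm_field)
{
	return strlen(lm_field) == 48 &&
	    !strncmp(&lm_field[16], "00000000000000000000000000000000", 32);
}
/* e.g. demo_is_ess("4765f360625700b000000000000000000000000000000000") == 1,
 * matching the ESS entry in the tests[] array above. */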
GB_unop__log1p_fc32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__log1p_fc32_fc32) // op(A') function: GB (_unop_tran__log1p_fc32_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = GB_clog1pf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_clog1pf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = GB_clog1pf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOG1P || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__log1p_fc32_fc32) ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = GB_clog1pf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = GB_clog1pf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__log1p_fc32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
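/*
 * A numerical note with an illustrative sketch, not GraphBLAS code: the
 * kernel above calls GB_clog1pf (the library's complex log1p) rather than
 * the naive clogf(1.0f + z), presumably because forming 1+z discards the
 * low-order digits of z when |z| is tiny. One standard workaround,
 * assuming only <complex.h>, is a low-order series for small arguments:
 */
#include <complex.h>

static float complex demo_clog1pf (float complex z)
{
    if (cabsf (z) < 1e-4f)
    {
        /* log(1+z) = z - z^2/2 + O(z^3): the truncation error is far
         * below single-precision epsilon for |z| < 1e-4 */
        return (z - 0.5f * z * z) ;
    }
    return (clogf (1.0f + z)) ;
}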
constitute.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%         CCCC   OOO   N   N  SSSSS  TTTTT  IIIII  TTTTT  U   U  TTTTT  EEEEE %
%        C      O   O  NN  N  SS       T      I      T    U   U    T    E     %
%        C      O   O  N N N   ESSS    T      I      T    U   U    T    EEE   %
%        C      O   O  N  NN     SS    T      I      T    U   U    T    E     %
%         CCCC   OOO   N   N  SSSSS    T    IIIII    T     UUU     T    EEEEE %
%                                                                             %
%                                                                             %
%                  MagickCore Methods to Constitute an Image                  %
%                                                                             %
%                             Software Design                                 %
%                                  Cristy                                     %
%                               October 1998                                  %
%                                                                             %
%                                                                             %
%  Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.          %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/client.h"
#include "MagickCore/coder-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/constitute-private.h"
#include "MagickCore/delegate.h"
#include "MagickCore/geometry.h"
#include "MagickCore/identify.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/statistic.h"
#include "MagickCore/stream.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o n s t i t u t e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConstituteImage() returns an image from the pixel data you supply.
%  The pixel data must be in scanline order top-to-bottom.  The data can be
%  char, short int, int, float, or double.  Float and double require the
%  pixels to be normalized [0..1], otherwise [0..QuantumRange].  For example,
%  to create a 640x480 image from unsigned red-green-blue character data, use:
%
%      image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception);
%
%  The format of the ConstituteImage method is:
%
%      Image *ConstituteImage(const size_t columns,const size_t rows,
%        const char *map,const StorageType storage,const void *pixels,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o columns: width in pixels of the image.
%
%    o rows: height in pixels of the image.
%
%    o map: This string reflects the expected ordering of the pixel array.
%      It can be any combination or order of R = red, G = green, B = blue,
%      A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
%      Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
%      P = pad.
%
%    o storage: Define the data type of the pixels.  Float and double types
%      are expected to be normalized [0..1], otherwise [0..QuantumRange].
%      Choose from these types: CharPixel, DoublePixel, FloatPixel,
%      IntegerPixel, LongPixel, QuantumPixel, or ShortPixel.
%
%    o pixels: This array of values contains the pixel components as defined
%      by map and type.  You must preallocate this array; its expected length
%      is columns*rows*strlen(map) values of the given storage type.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConstituteImage(const size_t columns,const size_t rows,
  const char *map,const StorageType storage,const void *pixels,
  ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    length;

  /*
    Allocate image structure.
  */
  assert(map != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map);
  assert(pixels != (void *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage((ImageInfo *) NULL,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  length=strlen(map);
  for (i=0; i < (ssize_t) length; i++)
  {
    switch (map[i])
    {
      case 'a':
      case 'A':
      case 'O':
      case 'o':
      {
        image->alpha_trait=BlendPixelTrait;
        break;
      }
      case 'C':
      case 'c':
      case 'm':
      case 'M':
      case 'Y':
      case 'y':
      case 'K':
      case 'k':
      {
        image->colorspace=CMYKColorspace;
        break;
      }
      case 'I':
      case 'i':
      {
        image->colorspace=GRAYColorspace;
        break;
      }
      default:
      {
        if (length == 1)
          image->colorspace=GRAYColorspace;
        break;
      }
    }
  }
  status=SetImageExtent(image,columns,rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,
    exception);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i n g I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PingImage() returns all the properties of an image or image sequence
%  except for the pixels.  It is much faster and consumes far less memory
%  than ReadImage().  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the PingImage method is:
%
%      Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: Ping the image defined by the file or filename members of
%      this structure.
%
%    o exception: return any errors or warnings in this structure.
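%
%  For example, a minimal sketch (variable names are illustrative):
%
%      image_info=AcquireImageInfo();
%      (void) CopyMagickString(image_info->filename,"image.png",
%        MagickPathExtent);
%      image=PingImage(image_info,exception);
%      if (image != (Image *) NULL)
%        (void) fprintf(stdout,"%.20gx%.20g\n",(double) image->columns,
%          (double) image->rows);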
% */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static size_t PingStream(const Image *magick_unused(image), const void *magick_unused(pixels),const size_t columns) { magick_unreferenced(image); magick_unreferenced(pixels); return(columns); } #if defined(__cplusplus) || defined(c_plusplus) } #endif MagickExport Image *PingImage(const ImageInfo *image_info, ExceptionInfo *exception) { Image *image; ImageInfo *ping_info; assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); ping_info=CloneImageInfo(image_info); ping_info->ping=MagickTrue; image=ReadStream(ping_info,&PingStream,exception); if (image != (Image *) NULL) { ResetTimer(&image->timer); if (ping_info->verbose != MagickFalse) (void) IdentifyImage(image,stdout,MagickFalse,exception); } ping_info=DestroyImageInfo(ping_info); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P i n g I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PingImages() pings one or more images and returns them as an image list. % % The format of the PingImage method is: % % Image *PingImages(ImageInfo *image_info,const char *filename, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o filename: the image filename. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *PingImages(ImageInfo *image_info,const char *filename, ExceptionInfo *exception) { char ping_filename[MagickPathExtent]; Image *image, *images; ImageInfo *read_info; /* Ping image list from a file. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); (void) SetImageOption(image_info,"filename",filename); (void) CopyMagickString(image_info->filename,filename,MagickPathExtent); (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename, (int) image_info->scene,ping_filename,exception); if (LocaleCompare(ping_filename,image_info->filename) != 0) { ExceptionInfo *sans; ssize_t extent, scene; /* Images of the form image-%d.png[1-5]. 
*/ read_info=CloneImageInfo(image_info); sans=AcquireExceptionInfo(); (void) SetImageInfo(read_info,0,sans); sans=DestroyExceptionInfo(sans); if (read_info->number_scenes == 0) { read_info=DestroyImageInfo(read_info); return(PingImage(image_info,exception)); } (void) CopyMagickString(ping_filename,read_info->filename, MagickPathExtent); images=NewImageList(); extent=(ssize_t) (read_info->scene+read_info->number_scenes); for (scene=(ssize_t) read_info->scene; scene < (ssize_t) extent; scene++) { (void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename, (int) scene,read_info->filename,exception); image=PingImage(read_info,exception); if (image == (Image *) NULL) continue; AppendImageToList(&images,image); } read_info=DestroyImageInfo(read_info); return(images); } return(PingImage(image_info,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadImage() reads an image or image sequence from a file or file handle. % The method returns a NULL if there is a memory shortage or if the image % cannot be read. On failure, a NULL image is returned and exception % describes the reason for the failure. % % The format of the ReadImage method is: % % Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Read the image defined by the file or filename members of % this structure. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType IsCoderAuthorized(const char *coder, const PolicyRights rights,ExceptionInfo *exception) { if (IsRightsAuthorized(CoderPolicyDomain,rights,coder) == MagickFalse) { errno=EPERM; (void) ThrowMagickException(exception,GetMagickModule(),PolicyError, "NotAuthorized","`%s'",coder); return(MagickFalse); } return(MagickTrue); } MagickExport Image *ReadImage(const ImageInfo *image_info, ExceptionInfo *exception) { char filename[MagickPathExtent], magick[MagickPathExtent], magick_filename[MagickPathExtent]; const char *value; const DelegateInfo *delegate_info; const MagickInfo *magick_info; DecodeImageHandler *decoder; ExceptionInfo *sans_exception; GeometryInfo geometry_info; Image *image, *next; ImageInfo *read_info; MagickBooleanType status; MagickStatusType flags; /* Determine image type from filename prefix or suffix (e.g. image.jpg). */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image_info->filename != (char *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); read_info=CloneImageInfo(image_info); (void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent); (void) SetImageInfo(read_info,0,exception); (void) CopyMagickString(filename,read_info->filename,MagickPathExtent); (void) CopyMagickString(magick,read_info->magick,MagickPathExtent); /* Call appropriate image reader based on image type. 
*/ sans_exception=AcquireExceptionInfo(); magick_info=GetMagickInfo(read_info->magick,sans_exception); if (sans_exception->severity == PolicyError) magick_info=GetMagickInfo(read_info->magick,exception); sans_exception=DestroyExceptionInfo(sans_exception); if (magick_info != (const MagickInfo *) NULL) { if (GetMagickEndianSupport(magick_info) == MagickFalse) read_info->endian=UndefinedEndian; else if ((image_info->endian == UndefinedEndian) && (GetMagickRawSupport(magick_info) != MagickFalse)) { unsigned long lsb_first; lsb_first=1; read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian; } } if ((magick_info != (const MagickInfo *) NULL) && (GetMagickDecoderSeekableStream(magick_info) != MagickFalse)) { image=AcquireImage(read_info,exception); (void) CopyMagickString(image->filename,read_info->filename, MagickPathExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { read_info=DestroyImageInfo(read_info); image=DestroyImage(image); return((Image *) NULL); } if (IsBlobSeekable(image) == MagickFalse) { /* Coder requires a seekable stream. */ *read_info->filename='\0'; status=ImageToFile(image,read_info->filename,exception); if (status == MagickFalse) { (void) CloseBlob(image); read_info=DestroyImageInfo(read_info); image=DestroyImage(image); return((Image *) NULL); } read_info->temporary=MagickTrue; } (void) CloseBlob(image); image=DestroyImage(image); } image=NewImageList(); decoder=GetImageDecoder(magick_info); if (decoder == (DecodeImageHandler *) NULL) { delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { (void) SetImageInfo(read_info,0,exception); (void) CopyMagickString(read_info->filename,filename, MagickPathExtent); magick_info=GetMagickInfo(read_info->magick,exception); decoder=GetImageDecoder(magick_info); } } if (decoder != (DecodeImageHandler *) NULL) { /* Call appropriate image reader based on image type. */ if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception); image=(Image *) NULL; if (status != MagickFalse) image=decoder(read_info,exception); if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } else { delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'", read_info->magick); if (read_info->temporary != MagickFalse) (void) RelinquishUniqueFileResource(read_info->filename); read_info=DestroyImageInfo(read_info); return((Image *) NULL); } /* Let our decoding delegate process the image. 
*/ image=AcquireImage(read_info,exception); if (image == (Image *) NULL) { read_info=DestroyImageInfo(read_info); return((Image *) NULL); } (void) CopyMagickString(image->filename,read_info->filename, MagickPathExtent); *read_info->filename='\0'; if (GetDelegateThreadSupport(delegate_info) == MagickFalse) LockSemaphoreInfo(delegate_info->semaphore); status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL, exception); if (GetDelegateThreadSupport(delegate_info) == MagickFalse) UnlockSemaphoreInfo(delegate_info->semaphore); image=DestroyImageList(image); read_info->temporary=MagickTrue; if (status != MagickFalse) (void) SetImageInfo(read_info,0,exception); magick_info=GetMagickInfo(read_info->magick,exception); decoder=GetImageDecoder(magick_info); if (decoder == (DecodeImageHandler *) NULL) { if (IsPathAccessible(read_info->filename) != MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'", read_info->magick); else ThrowFileException(exception,FileOpenError,"UnableToOpenFile", read_info->filename); read_info=DestroyImageInfo(read_info); return((Image *) NULL); } /* Call appropriate image reader based on image type. */ if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception); image=(Image *) NULL; if (status != MagickFalse) image=(decoder)(read_info,exception); if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } if (read_info->temporary != MagickFalse) { (void) RelinquishUniqueFileResource(read_info->filename); read_info->temporary=MagickFalse; if (image != (Image *) NULL) (void) CopyMagickString(image->filename,filename,MagickPathExtent); } if (image == (Image *) NULL) { read_info=DestroyImageInfo(read_info); return(image); } if (exception->severity >= ErrorException) (void) LogMagickEvent(ExceptionEvent,GetMagickModule(), "Coder (%s) generated an image despite an error (%d), " "notify the developers",image->magick,exception->severity); if (IsBlobTemporary(image) != MagickFalse) (void) RelinquishUniqueFileResource(read_info->filename); if ((IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse) && (GetImageListLength(image) != 1)) { Image *clones; clones=CloneImages(image,read_info->scenes,exception); if (clones != (Image *) NULL) { image=DestroyImageList(image); image=GetFirstImageInList(clones); } } for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { char magick_path[MagickPathExtent], *property, timestamp[MagickPathExtent]; const char *option; const StringInfo *profile; ssize_t option_type; static const char *source_date_epoch = (const char *) NULL; static MagickBooleanType epoch_initalized = MagickFalse; next->taint=MagickFalse; GetPathComponent(magick_filename,MagickPath,magick_path); if (*magick_path == '\0' && *next->magick == '\0') (void) CopyMagickString(next->magick,magick,MagickPathExtent); (void) CopyMagickString(next->magick_filename,magick_filename, MagickPathExtent); if (IsBlobTemporary(image) != MagickFalse) (void) CopyMagickString(next->filename,filename,MagickPathExtent); if (next->magick_columns == 0) next->magick_columns=next->columns; if (next->magick_rows == 0) next->magick_rows=next->rows; (void) GetImageProperty(next,"exif:*",exception); (void) GetImageProperty(next,"icc:*",exception); (void) GetImageProperty(next,"iptc:*",exception); (void) 
GetImageProperty(next,"xmp:*",exception); value=GetImageProperty(next,"exif:Orientation",exception); if (value == (char *) NULL) value=GetImageProperty(next,"tiff:Orientation",exception); if (value != (char *) NULL) { next->orientation=(OrientationType) StringToLong(value); (void) DeleteImageProperty(next,"tiff:Orientation"); (void) DeleteImageProperty(next,"exif:Orientation"); } value=GetImageProperty(next,"exif:XResolution",exception); if (value != (char *) NULL) { geometry_info.rho=next->resolution.x; geometry_info.sigma=1.0; flags=ParseGeometry(value,&geometry_info); if (geometry_info.sigma != 0) next->resolution.x=geometry_info.rho/geometry_info.sigma; if (strchr(value,',') != (char *) NULL) next->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0; (void) DeleteImageProperty(next,"exif:XResolution"); } value=GetImageProperty(next,"exif:YResolution",exception); if (value != (char *) NULL) { geometry_info.rho=next->resolution.y; geometry_info.sigma=1.0; flags=ParseGeometry(value,&geometry_info); if (geometry_info.sigma != 0) next->resolution.y=geometry_info.rho/geometry_info.sigma; if (strchr(value,',') != (char *) NULL) next->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0; (void) DeleteImageProperty(next,"exif:YResolution"); } value=GetImageProperty(next,"exif:ResolutionUnit",exception); if (value == (char *) NULL) value=GetImageProperty(next,"tiff:ResolutionUnit",exception); if (value != (char *) NULL) { option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse, value); if (option_type >= 0) next->units=(ResolutionType) option_type; (void) DeleteImageProperty(next,"exif:ResolutionUnit"); (void) DeleteImageProperty(next,"tiff:ResolutionUnit"); } if (next->page.width == 0) next->page.width=next->columns; if (next->page.height == 0) next->page.height=next->rows; option=GetImageOption(read_info,"caption"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"caption",property,exception); property=DestroyString(property); } option=GetImageOption(read_info,"comment"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"comment",property,exception); property=DestroyString(property); } option=GetImageOption(read_info,"label"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"label",property,exception); property=DestroyString(property); } if (LocaleCompare(next->magick,"TEXT") == 0) (void) ParseAbsoluteGeometry("0x0+0+0",&next->page); if ((read_info->extract != (char *) NULL) && (read_info->stream == (StreamHandler) NULL)) { RectangleInfo geometry; SetGeometry(next,&geometry); flags=ParseAbsoluteGeometry(read_info->extract,&geometry); if ((next->columns != geometry.width) || (next->rows != geometry.height)) { if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { Image *crop_image; crop_image=CropImage(next,&geometry,exception); if (crop_image != (Image *) NULL) ReplaceImageInList(&next,crop_image); } else if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0)) { Image *size_image; flags=ParseRegionGeometry(next,read_info->extract,&geometry, exception); size_image=ResizeImage(next,geometry.width,geometry.height, next->filter,exception); if (size_image != (Image *) NULL) ReplaceImageInList(&next,size_image); } } } profile=GetImageProfile(next,"icc"); if (profile == (const StringInfo *) NULL) 
profile=GetImageProfile(next,"icm"); profile=GetImageProfile(next,"iptc"); if (profile == (const StringInfo *) NULL) profile=GetImageProfile(next,"8bim"); if (epoch_initalized == MagickFalse) { source_date_epoch=getenv("SOURCE_DATE_EPOCH"); epoch_initalized=MagickTrue; } if (source_date_epoch == (const char *) NULL) { (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime, MagickPathExtent,timestamp); (void) SetImageProperty(next,"date:modify",timestamp,exception); (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime, MagickPathExtent,timestamp); (void) SetImageProperty(next,"date:create",timestamp,exception); } option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if (next->delay > (size_t) floor(geometry_info.rho+0.5)) next->delay=(size_t) floor(geometry_info.rho+0.5); } else if ((flags & LessValue) != 0) { if (next->delay < (size_t) floor(geometry_info.rho+0.5)) next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } else next->delay=(size_t) floor(geometry_info.rho+0.5); if ((flags & SigmaValue) != 0) next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) { option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse, option); if (option_type >= 0) next->dispose=(DisposeType) option_type; } if (read_info->verbose != MagickFalse) (void) IdentifyImage(next,stderr,MagickFalse,exception); image=next; } read_info=DestroyImageInfo(read_info); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadImages() reads one or more images and returns them as an image list. % % The format of the ReadImage method is: % % Image *ReadImages(ImageInfo *image_info,const char *filename, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o filename: the image filename. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename, ExceptionInfo *exception) { char read_filename[MagickPathExtent]; Image *image, *images; ImageInfo *read_info; /* Read image list from a file. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); read_info=CloneImageInfo(image_info); *read_info->magick='\0'; (void) SetImageOption(read_info,"filename",filename); (void) CopyMagickString(read_info->filename,filename,MagickPathExtent); (void) InterpretImageFilename(read_info,(Image *) NULL,filename, (int) read_info->scene,read_filename,exception); if (LocaleCompare(read_filename,read_info->filename) != 0) { ExceptionInfo *sans; ssize_t extent, scene; /* Images of the form image-%d.png[1-5]. 
      */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes != 0)
        {
          (void) CopyMagickString(read_filename,read_info->filename,
            MagickPathExtent);
          images=NewImageList();
          extent=(ssize_t) (read_info->scene+read_info->number_scenes);
          scene=(ssize_t) read_info->scene;
          for ( ; scene < (ssize_t) extent; scene++)
          {
            (void) InterpretImageFilename(image_info,(Image *) NULL,
              read_filename,(int) scene,read_info->filename,exception);
            image=ReadImage(read_info,exception);
            if (image == (Image *) NULL)
              continue;
            AppendImageToList(&images,image);
          }
          read_info=DestroyImageInfo(read_info);
          return(images);
        }
    }
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  image=ReadImage(read_info,exception);
  read_info=DestroyImageInfo(read_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d   I n l i n e   I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadInlineImage() reads a Base64-encoded inline image or image sequence.
%  The method returns a NULL if there is a memory shortage or if the image
%  cannot be read.  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the ReadInlineImage method is:
%
%      Image *ReadInlineImage(const ImageInfo *image_info,const char *content,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o content: the image encoded in Base64.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadInlineImage(const ImageInfo *image_info,
  const char *content,ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *read_info;

  unsigned char
    *blob;

  size_t
    length;

  register const char
    *p;

  /*
    Skip over header (e.g. data:image/gif;base64,).
  */
  image=NewImageList();
  for (p=content; (*p != ',') && (*p != '\0'); p++) ;
  if (*p == '\0')
    ThrowReaderException(CorruptImageError,"CorruptImage");
  p++;
  length=0;
  blob=Base64Decode(p,&length);
  if (length == 0)
    {
      blob=(unsigned char *) RelinquishMagickMemory(blob);
      ThrowReaderException(CorruptImageError,"CorruptImage");
    }
  read_info=CloneImageInfo(image_info);
  (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
    (void *) NULL);
  *read_info->filename='\0';
  *read_info->magick='\0';
  image=BlobToImage(read_info,blob,length,exception);
  blob=(unsigned char *) RelinquishMagickMemory(blob);
  read_info=DestroyImageInfo(read_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteImage() writes an image or an image sequence to a file or file
%  handle.  If writing to a file on disk, the name is defined by the filename
%  member of the image structure.  WriteImage() returns MagickFalse if there
%  is a memory shortage or if the image cannot be written.  Check the
%  exception member of image to determine the cause for any failure.
%
%  The format of the WriteImage method is:
%
%      MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
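%
%  For example, a minimal sketch (names are illustrative):
%
%      (void) CopyMagickString(image->filename,"image.png",MagickPathExtent);
%      status=WriteImage(image_info,image,exception);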
% */ MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { char filename[MagickPathExtent]; const char *option; const DelegateInfo *delegate_info; const MagickInfo *magick_info; EncodeImageHandler *encoder; ExceptionInfo *sans_exception; ImageInfo *write_info; MagickBooleanType status, temporary; /* Determine image type from filename prefix or suffix (e.g. image.jpg). */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); sans_exception=AcquireExceptionInfo(); write_info=CloneImageInfo(image_info); (void) CopyMagickString(write_info->filename,image->filename, MagickPathExtent); (void) SetImageInfo(write_info,1,sans_exception); if (*write_info->magick == '\0') (void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent); (void) CopyMagickString(filename,image->filename,MagickPathExtent); (void) CopyMagickString(image->filename,write_info->filename, MagickPathExtent); /* Call appropriate image writer based on image type. */ magick_info=GetMagickInfo(write_info->magick,sans_exception); if (sans_exception->severity == PolicyError) magick_info=GetMagickInfo(write_info->magick,exception); sans_exception=DestroyExceptionInfo(sans_exception); if (magick_info != (const MagickInfo *) NULL) { if (GetMagickEndianSupport(magick_info) == MagickFalse) image->endian=UndefinedEndian; else if ((image_info->endian == UndefinedEndian) && (GetMagickRawSupport(magick_info) != MagickFalse)) { unsigned long lsb_first; lsb_first=1; image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian; } } (void) SyncImageProfiles(image); DisassociateImageStream(image); option=GetImageOption(image_info,"delegate:bimodal"); if ((IsStringTrue(option) != MagickFalse) && (write_info->page == (char *) NULL) && (GetPreviousImageInList(image) == (Image *) NULL) && (GetNextImageInList(image) == (Image *) NULL) && (IsTaintImage(image) == MagickFalse) ) { delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception); if ((delegate_info != (const DelegateInfo *) NULL) && (GetDelegateMode(delegate_info) == 0) && (IsPathAccessible(image->magick_filename) != MagickFalse)) { /* Process image with bi-modal delegate. */ (void) CopyMagickString(image->filename,image->magick_filename, MagickPathExtent); status=InvokeDelegate(write_info,image,image->magick, write_info->magick,exception); write_info=DestroyImageInfo(write_info); (void) CopyMagickString(image->filename,filename,MagickPathExtent); return(status); } } status=MagickFalse; temporary=MagickFalse; if ((magick_info != (const MagickInfo *) NULL) && (GetMagickEncoderSeekableStream(magick_info) != MagickFalse)) { char image_filename[MagickPathExtent]; (void) CopyMagickString(image_filename,image->filename,MagickPathExtent); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); (void) CopyMagickString(image->filename, image_filename,MagickPathExtent); if (status != MagickFalse) { if (IsBlobSeekable(image) == MagickFalse) { /* A seekable stream is required by the encoder. 
*/ write_info->adjoin=MagickTrue; (void) CopyMagickString(write_info->filename,image->filename, MagickPathExtent); (void) AcquireUniqueFilename(image->filename); temporary=MagickTrue; } (void) CloseBlob(image); } } encoder=GetImageEncoder(magick_info); if (encoder != (EncodeImageHandler *) NULL) { /* Call appropriate image writer based on image type. */ if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(write_info->magick,WritePolicyRights,exception); if (status != MagickFalse) status=encoder(write_info,image,exception); if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } else { delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception); if (delegate_info != (DelegateInfo *) NULL) { /* Process the image with delegate. */ *write_info->filename='\0'; if (GetDelegateThreadSupport(delegate_info) == MagickFalse) LockSemaphoreInfo(delegate_info->semaphore); status=InvokeDelegate(write_info,image,(char *) NULL, write_info->magick,exception); if (GetDelegateThreadSupport(delegate_info) == MagickFalse) UnlockSemaphoreInfo(delegate_info->semaphore); (void) CopyMagickString(image->filename,filename,MagickPathExtent); } else { sans_exception=AcquireExceptionInfo(); magick_info=GetMagickInfo(write_info->magick,sans_exception); if (sans_exception->severity == PolicyError) magick_info=GetMagickInfo(write_info->magick,exception); sans_exception=DestroyExceptionInfo(sans_exception); if ((write_info->affirm == MagickFalse) && (magick_info == (const MagickInfo *) NULL)) { (void) CopyMagickString(write_info->magick,image->magick, MagickPathExtent); magick_info=GetMagickInfo(write_info->magick,exception); } encoder=GetImageEncoder(magick_info); if (encoder == (EncodeImageHandler *) NULL) { char extension[MagickPathExtent]; GetPathComponent(image->filename,ExtensionPath,extension); if (*extension != '\0') magick_info=GetMagickInfo(extension,exception); else magick_info=GetMagickInfo(image->magick,exception); (void) CopyMagickString(image->filename,filename, MagickPathExtent); encoder=GetImageEncoder(magick_info); } if (encoder == (EncodeImageHandler *) NULL) { magick_info=GetMagickInfo(image->magick,exception); encoder=GetImageEncoder(magick_info); if (encoder == (EncodeImageHandler *) NULL) (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoEncodeDelegateForThisImageFormat", "`%s'",write_info->magick); } if (encoder != (EncodeImageHandler *) NULL) { /* Call appropriate image writer based on image type. */ if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(write_info->magick,WritePolicyRights, exception); if (status != MagickFalse) status=encoder(write_info,image,exception); if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } } } if (temporary != MagickFalse) { /* Copy temporary image file to permanent. 
*/ status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception); if (status != MagickFalse) { (void) RelinquishUniqueFileResource(write_info->filename); status=ImageToFile(image,write_info->filename,exception); } (void) CloseBlob(image); (void) RelinquishUniqueFileResource(image->filename); (void) CopyMagickString(image->filename,write_info->filename, MagickPathExtent); } if ((LocaleCompare(write_info->magick,"info") != 0) && (write_info->verbose != MagickFalse)) (void) IdentifyImage(image,stdout,MagickFalse,exception); write_info=DestroyImageInfo(write_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteImages() writes an image sequence into one or more files. While % WriteImage() can write an image sequence, it is limited to writing % the sequence into a single file using a format which supports multiple % frames. WriteImages(), however, does not have this limitation, instead it % generates multiple output files if necessary (or when requested). When % ImageInfo's adjoin flag is set to MagickFalse, the file name is expected % to include a printf-style formatting string for the frame number (e.g. % "image%02d.png"). % % The format of the WriteImages method is: % % MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images, % const char *filename,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o images: the image list. % % o filename: the image filename. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info, Image *images,const char *filename,ExceptionInfo *exception) { #define WriteImageTag "Write/Image" ExceptionInfo *sans_exception; ImageInfo *write_info; MagickBooleanType proceed; MagickOffsetType progress; MagickProgressMonitor progress_monitor; MagickSizeType number_images; MagickStatusType status; register Image *p; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); write_info=CloneImageInfo(image_info); *write_info->magick='\0'; images=GetFirstImageInList(images); if (filename != (const char *) NULL) for (p=images; p != (Image *) NULL; p=GetNextImageInList(p)) (void) CopyMagickString(p->filename,filename,MagickPathExtent); (void) CopyMagickString(write_info->filename,images->filename, MagickPathExtent); sans_exception=AcquireExceptionInfo(); (void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images), sans_exception); sans_exception=DestroyExceptionInfo(sans_exception); if (*write_info->magick == '\0') (void) CopyMagickString(write_info->magick,images->magick,MagickPathExtent); p=images; for ( ; GetNextImageInList(p) != (Image *) NULL; p=GetNextImageInList(p)) { register Image *next; next=GetNextImageInList(p); if (next == (Image *) NULL) break; if (p->scene >= next->scene) { register ssize_t i; /* Generate consistent scene numbers. */ i=(ssize_t) images->scene; for (p=images; p != (Image *) NULL; p=GetNextImageInList(p)) p->scene=(size_t) i++; break; } } /* Write images. 
*/ status=MagickTrue; progress_monitor=(MagickProgressMonitor) NULL; progress=0; number_images=GetImageListLength(images); for (p=images; p != (Image *) NULL; p=GetNextImageInList(p)) { if (number_images != 1) progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL, p->client_data); status&=WriteImage(write_info,p,exception); if (number_images != 1) (void) SetImageProgressMonitor(p,progress_monitor,p->client_data); if (write_info->adjoin != MagickFalse) break; if (number_images != 1) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(p,WriteImageTag,progress,number_images); if (proceed == MagickFalse) break; } } write_info=DestroyImageInfo(write_info); return(status != 0 ? MagickTrue : MagickFalse); }
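
/*
  Illustrative usage sketch (deliberately guarded out; not part of
  MagickCore): the typical pairing of ReadImage() and WriteImages() defined
  above.  The filenames are hypothetical.
*/
#if 0
static MagickBooleanType CopySequence(ExceptionInfo *exception)
{
  Image
    *images;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"frames.gif",MagickPathExtent);
  images=ReadImage(image_info,exception);  /* reads the whole sequence */
  if (images == (Image *) NULL)
    {
      image_info=DestroyImageInfo(image_info);
      return(MagickFalse);
    }
  /*
    With adjoin enabled (the default) one multi-frame file is written;
    adjoin=MagickFalse would emit one file per frame instead.
  */
  status=WriteImages(image_info,images,"copy.gif",exception);
  images=DestroyImageList(images);
  image_info=DestroyImageInfo(image_info);
  return(status);
}
#endif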
ppc64le-varargs-f128.c
// RUN: %clang_cc1 -triple powerpc64le-unknown-linux-gnu -emit-llvm \ // RUN: -target-cpu pwr9 -target-feature +float128 -mabi=ieeelongdouble \ // RUN: -o - %s | FileCheck %s -check-prefix=IEEE // RUN: %clang_cc1 -triple powerpc64le-unknown-linux-gnu -emit-llvm \ // RUN: -target-cpu pwr9 -target-feature +float128 \ // RUN: -o - %s | FileCheck %s -check-prefix=IBM // RUN: %clang_cc1 -triple ppc64le -emit-llvm-bc %s -target-cpu pwr9 \ // RUN: -target-feature +float128 -mabi=ieeelongdouble -fopenmp \ // RUN: -fopenmp-targets=ppc64le -o %t-ppc-host.bc // RUN: %clang_cc1 -triple ppc64le -aux-triple ppc64le %s -target-cpu pwr9 \ // RUN: -target-feature +float128 -fopenmp -fopenmp-is-device -emit-llvm \ // RUN: -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s \ // RUN: -check-prefix=OMP-TARGET // RUN: %clang_cc1 -triple ppc64le %t-ppc-host.bc -emit-llvm -o - | FileCheck %s \ // RUN: -check-prefix=OMP-HOST #include <stdarg.h> void foo_ld(long double); void foo_fq(__float128); // Verify cases when OpenMP target's and host's long-double semantics differ. // OMP-TARGET-LABEL: define internal void @.omp_outlined.( // OMP-TARGET: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** // OMP-TARGET: %[[V2:[0-9a-zA-Z_.]+]] = bitcast i8* %[[CUR]] to ppc_fp128* // OMP-TARGET: %[[V3:[0-9a-zA-Z_.]+]] = load ppc_fp128, ppc_fp128* %[[V2]], align 8 // OMP-TARGET: call void @foo_ld(ppc_fp128 %[[V3]]) // OMP-HOST-LABEL: define{{.*}} void @omp( // OMP-HOST: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8* // OMP-HOST: call void @llvm.va_start(i8* %[[AP1]]) // OMP-HOST: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]], align 8 // OMP-HOST: %[[V0:[0-9a-zA-Z_.]+]] = ptrtoint i8* %[[CUR]] to i64 // OMP-HOST: %[[V1:[0-9a-zA-Z_.]+]] = add i64 %[[V0]], 15 // OMP-HOST: %[[V2:[0-9a-zA-Z_.]+]] = and i64 %[[V1]], -16 // OMP-HOST: %[[ALIGN:[0-9a-zA-Z_.]+]] = inttoptr i64 %[[V2]] to i8* // OMP-HOST: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[ALIGN]] to fp128* // OMP-HOST: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, fp128* %[[V3]], align 16 // OMP-HOST: call void @foo_ld(fp128 %[[V4]]) void omp(int n, ...) { va_list ap; va_start(ap, n); foo_ld(va_arg(ap, long double)); #pragma omp target parallel for (int i = 1; i < n; ++i) { foo_ld(va_arg(ap, long double)); } va_end(ap); } // IEEE-LABEL: define{{.*}} void @f128 // IEEE: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8* // IEEE: call void @llvm.va_start(i8* %[[AP1]]) // IEEE: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]] // IEEE: %[[V0:[0-9a-zA-Z_.]+]] = ptrtoint i8* %[[CUR]] to i64 // IEEE: %[[V1:[0-9a-zA-Z_.]+]] = add i64 %[[V0]], 15 // IEEE: %[[V2:[0-9a-zA-Z_.]+]] = and i64 %[[V1]], -16 // IEEE: %[[ALIGN:[0-9a-zA-Z_.]+]] = inttoptr i64 %[[V2]] to i8* // IEEE: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[ALIGN]] to fp128* // IEEE: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, fp128* %[[V3]], align 16 // IEEE: call void @foo_fq(fp128 %[[V4]]) // IEEE: %[[AP2:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP]] to i8* // IEEE: call void @llvm.va_end(i8* %[[AP2]]) void f128(int n, ...) 
{ va_list ap; va_start(ap, n); foo_fq(va_arg(ap, __float128)); va_end(ap); } // IEEE-LABEL: define{{.*}} void @long_double // IEEE: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8* // IEEE: call void @llvm.va_start(i8* %[[AP1]]) // IEEE: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]] // IEEE: %[[V0:[0-9a-zA-Z_.]+]] = ptrtoint i8* %[[CUR]] to i64 // IEEE: %[[V1:[0-9a-zA-Z_.]+]] = add i64 %[[V0]], 15 // IEEE: %[[V2:[0-9a-zA-Z_.]+]] = and i64 %[[V1]], -16 // IEEE: %[[ALIGN:[0-9a-zA-Z_.]+]] = inttoptr i64 %[[V2]] to i8* // IEEE: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[ALIGN]] to fp128* // IEEE: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, fp128* %[[V3]], align 16 // IEEE: call void @foo_ld(fp128 %[[V4]]) // IEEE: %[[AP2:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP]] to i8* // IEEE: call void @llvm.va_end(i8* %[[AP2]]) // IBM-LABEL: define{{.*}} void @long_double // IBM: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8* // IBM: call void @llvm.va_start(i8* %[[AP1]]) // IBM: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]] // IBM: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[CUR]] to ppc_fp128* // IBM: %[[V4:[0-9a-zA-Z_.]+]] = load ppc_fp128, ppc_fp128* %[[V3]], align 8 // IBM: call void @foo_ld(ppc_fp128 %[[V4]]) // IBM: %[[AP2:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP]] to i8* // IBM: call void @llvm.va_end(i8* %[[AP2]]) void long_double(int n, ...) { va_list ap; va_start(ap, n); foo_ld(va_arg(ap, long double)); va_end(ap); }
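
// Note on the checked IR above: the IEEE-float128 path rounds the va_list
// cursor up to the next 16-byte boundary (add 15, then mask with -16)
// because fp128 is 16-byte aligned while PPC64 va_list slots advance in
// 8-byte units.  Worked example with an illustrative address, for
// CUR = 0x28, (0x28 + 15) & -16 = 0x30, so the fp128 is loaded from 0x30.
// The IBM ppc_fp128 path needs no rounding since ppc_fp128 is only 8-byte
// aligned.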
image_input_op.h
#ifndef CAFFE2_IMAGE_IMAGE_INPUT_OP_H_
#define CAFFE2_IMAGE_IMAGE_INPUT_OP_H_

#include <omp.h>
#include <opencv2/opencv.hpp>

#include <iostream>

#include "caffe/proto/caffe.pb.h"
#include "caffe2/core/db.h"
#include "caffe2/utils/math.h"
#include "caffe2/operators/prefetch_op.h"

namespace caffe2 {

template <class Context>
class ImageInputOp final : public PrefetchOperator<Context> {
 public:
  using OperatorBase::OutputSize;
  using PrefetchOperator<Context>::context_;
  using PrefetchOperator<Context>::prefetch_thread_;
  explicit ImageInputOp(const OperatorDef& operator_def, Workspace* ws);
  ~ImageInputOp() {
    PrefetchOperator<Context>::Finalize();
  }

  bool Prefetch() override;
  bool CopyPrefetched() override;

 private:
  bool GetImageAndLabelFromDBValue(
      const string& value, cv::Mat* img, int* label);
  unique_ptr<db::DBReader> owned_reader_;
  const db::DBReader* reader_;
  CPUContext cpu_context_;
  TensorCPU prefetched_image_;
  TensorCPU prefetched_label_;
  Tensor<Context> prefetched_image_on_device_;
  Tensor<Context> prefetched_label_on_device_;
  int batch_size_;
  float mean_;
  float std_;
  bool color_;
  int scale_;
  bool warp_;
  int crop_;
  bool mirror_;
  bool use_caffe_datum_;
};

template <class Context>
ImageInputOp<Context>::ImageInputOp(
    const OperatorDef& operator_def, Workspace* ws)
    : PrefetchOperator<Context>(operator_def, ws),
      reader_(nullptr),
      batch_size_(
          OperatorBase::template GetSingleArgument<int>("batch_size", 0)),
      mean_(OperatorBase::template GetSingleArgument<float>("mean", 0.)),
      std_(OperatorBase::template GetSingleArgument<float>("std", 1.)),
      color_(OperatorBase::template GetSingleArgument<int>("color", 1)),
      scale_(OperatorBase::template GetSingleArgument<int>("scale", -1)),
      warp_(OperatorBase::template GetSingleArgument<int>("warp", 0)),
      crop_(OperatorBase::template GetSingleArgument<int>("crop", -1)),
      mirror_(OperatorBase::template GetSingleArgument<int>("mirror", 0)),
      use_caffe_datum_(OperatorBase::template GetSingleArgument<int>(
          "use_caffe_datum", 0)) {
  if (operator_def.input_size() == 0) {
    LOG(ERROR) << "You are using an old ImageInputOp format that creates "
                  "a local db reader. Consider moving to the new style "
                  "that takes in a DBReader blob instead.";
    string db_name =
        OperatorBase::template GetSingleArgument<string>("db", "");
    CHECK_GT(db_name.size(), 0) << "Must specify a db name.";
    owned_reader_.reset(new db::DBReader(
        OperatorBase::template GetSingleArgument<string>(
            "db_type", "leveldb"),
        db_name));
    reader_ = owned_reader_.get();
  }
  CHECK_GT(batch_size_, 0) << "Batch size should be positive.";
  CHECK_GT(scale_, 0) << "Must provide the scaling factor.";
  CHECK_GT(crop_, 0) << "Must provide the cropping value.";
  CHECK_GE(scale_, crop_)
      << "The scale value must be no smaller than the crop value.";

  LOG(INFO) << "Creating an image input op with the following setting: ";
  LOG(INFO) << "  Outputting in batches of " << batch_size_ << " images;";
  LOG(INFO) << "  Treating input image as "
            << (color_ ? "color " : "grayscale ") << "image;";
  LOG(INFO) << "  Scaling image to " << scale_
            << (warp_ ? " with " : " without ") << "warping;";
  LOG(INFO) << "  Cropping image to " << crop_
            << (mirror_ ? " with " : " without ") << "random mirroring;";
  LOG(INFO) << "  Subtract mean " << mean_ << " and divide by std " << std_
            << ".";
  prefetched_image_.Resize(
      vector<TIndex>{TIndex(batch_size_), TIndex(crop_), TIndex(crop_),
                     TIndex(color_ ? 3 : 1)});
  prefetched_label_.Resize(vector<TIndex>(1, batch_size_));
}

template <class Context>
bool ImageInputOp<Context>::GetImageAndLabelFromDBValue(
    const string& value, cv::Mat* img, int* label) {
  if (use_caffe_datum_) {
    // The input is a caffe datum format.
    caffe::Datum datum;
    CHECK(datum.ParseFromString(value));
    *label = datum.label();
    if (datum.encoded()) {
      // encoded image in datum.
      *img = cv::imdecode(
          cv::Mat(1, datum.data().size(), CV_8UC1,
                  const_cast<char*>(datum.data().data())),
          color_ ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
    } else {
      // Raw image in datum.
      *img = cv::Mat(datum.height(), datum.width(),
                     color_ ? CV_8UC3 : CV_8UC1);
      // Note(Yangqing): I believe that the mat should be created continuous.
      CHECK(img->isContinuous());
      CHECK((color_ && datum.channels() == 3) || datum.channels() == 1);
      if (datum.channels() == 1) {
        memcpy(img->ptr<uchar>(0), datum.data().data(), datum.data().size());
      } else {
        // Datum stores things in CHW order, let's do HWC for images to make
        // things more consistent with conventional image storage.
        for (int c = 0; c < 3; ++c) {
          const char* datum_buffer =
              datum.data().data() + datum.height() * datum.width() * c;
          uchar* ptr = img->ptr<uchar>(0) + c;
          for (int h = 0; h < datum.height(); ++h) {
            for (int w = 0; w < datum.width(); ++w) {
              *ptr = *(datum_buffer++);
              ptr += 3;
            }
          }
        }
      }
    }
  } else {
    // The input is a caffe2 format.
    TensorProtos protos;
    CHECK(protos.ParseFromString(value));
    const TensorProto& image_proto = protos.protos(0);
    const TensorProto& label_proto = protos.protos(1);
    if (image_proto.data_type() == TensorProto::STRING) {
      // encoded image string.
      DCHECK_EQ(image_proto.string_data_size(), 1);
      const string& encoded_image_str = image_proto.string_data(0);
      int encoded_size = encoded_image_str.size();
      // We use a cv::Mat to wrap the encoded str so we do not need a copy.
      *img = cv::imdecode(
          cv::Mat(1, &encoded_size, CV_8UC1,
                  const_cast<char*>(encoded_image_str.data())),
          color_ ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
    } else if (image_proto.data_type() == TensorProto::BYTE) {
      // raw image content.
      CHECK_EQ(image_proto.dims_size(), (color_ ? 3 : 2));
      CHECK_GE(image_proto.dims(0), crop_)
          << "Image height must be at least as large as the crop.";
      CHECK_GE(image_proto.dims(1), crop_)
          << "Image width must be at least as large as the crop.";
      CHECK(!color_ || image_proto.dims(2) == 3);
      *img = cv::Mat(
          image_proto.dims(0), image_proto.dims(1),
          color_ ? CV_8UC3 : CV_8UC1);
      memcpy(img->ptr<uchar>(0), image_proto.byte_data().data(),
             image_proto.byte_data().size());
    } else {
      LOG(FATAL) << "Unknown image data type.";
    }
    DCHECK_EQ(label_proto.data_type(), TensorProto::INT32);
    DCHECK_EQ(label_proto.int32_data_size(), 1);
    *label = label_proto.int32_data(0);
  }
  // TODO(Yangqing): return false if any error happens.
  return true;
}

template <class Context>
bool ImageInputOp<Context>::Prefetch() {
  if (!owned_reader_.get()) {
    // if we are not owning the reader, we will get the reader pointer from
    // input. Otherwise the constructor should have already set the reader
    // pointer.
    reader_ = &OperatorBase::Input<db::DBReader>(0);
  }
  const int channels = color_ ? 3 : 1;
  // Call mutable_data() once to allocate the underlying memory.
  prefetched_image_.mutable_data<float>();
  prefetched_label_.mutable_data<int>();
  // TODO(jiayq): Handle this prefetching with a real thread pool. Currently,
  // with 4 threads we should be able to get a decent speed for AlexNet type
  // training already.
std::mt19937 meta_randgen(time(nullptr)); std::vector<std::mt19937> randgen_per_thread; for (int i = 0; i < 4; ++i) { randgen_per_thread.emplace_back(meta_randgen()); } #pragma omp parallel for num_threads(4) for (int item_id = 0; item_id < batch_size_; ++item_id) { std::bernoulli_distribution mirror_this_image(0.5); std::mt19937& randgen = randgen_per_thread[omp_get_thread_num()]; float* image_data = prefetched_image_.mutable_data<float>() + crop_ * crop_ * channels * item_id; string key, value; cv::Mat img; int label; cv::Mat scaled_img; // process data reader_->Read(&key, &value); CHECK(GetImageAndLabelFromDBValue(value, &img, &label)); // deal with scaling. int scaled_width, scaled_height; if (warp_) { scaled_width = scale_; scaled_height = scale_; } else if (img.rows > img.cols) { scaled_width = scale_; scaled_height = static_cast<float>(img.rows) * scale_ / img.cols; } else { scaled_height = scale_; scaled_width = static_cast<float>(img.cols) * scale_ / img.rows; } if (scaled_height != img.rows || scaled_width != img.cols) { cv::resize(img, scaled_img, cv::Size(scaled_width, scaled_height), 0, 0, cv::INTER_LINEAR); } else { // No scaling needs to be done. scaled_img = img; } // find the cropped region, and copy it to the destination matrix with // mean subtraction and scaling. int width_offset = std::uniform_int_distribution<>(0, scaled_img.cols - crop_)(randgen); int height_offset = std::uniform_int_distribution<>(0, scaled_img.rows - crop_)(randgen); if (mirror_ && mirror_this_image(randgen)) { // Copy mirrored image. for (int h = height_offset; h < height_offset + crop_; ++h) { for (int w = width_offset + crop_ - 1; w >= width_offset; --w) { const cv::Vec3b& cv_data = scaled_img.at<cv::Vec3b>(h, w); for (int c = 0; c < channels; ++c) { *(image_data++) = (static_cast<uint8_t>(cv_data[c]) - mean_) / std_; } } } } else { // Copy normally. for (int h = height_offset; h < height_offset + crop_; ++h) { for (int w = width_offset; w < width_offset + crop_; ++w) { const cv::Vec3b& cv_data = scaled_img.at<cv::Vec3b>(h, w); for (int c = 0; c < channels; ++c) { *(image_data++) = (static_cast<uint8_t>(cv_data[c]) - mean_) / std_; } } } } // Copy the label prefetched_label_.mutable_data<int>()[item_id] = label; } // If the context is not CPUContext, we will need to do a copy in the // prefetch function as well. if (!std::is_same<Context, CPUContext>::value) { prefetched_image_on_device_.CopyFrom(prefetched_image_, &context_); prefetched_label_on_device_.CopyFrom(prefetched_label_, &context_); } return true; } template <class Context> bool ImageInputOp<Context>::CopyPrefetched() { auto* image_output = OperatorBase::Output<Tensor<Context> >(0); auto* label_output = OperatorBase::Output<Tensor<Context> >(1); // Note(jiayq): The if statement below should be optimized away by the // compiler since std::is_same is a constexpr. if (std::is_same<Context, CPUContext>::value) { image_output->CopyFrom(prefetched_image_, &context_); label_output->CopyFrom(prefetched_label_, &context_); } else { image_output->CopyFrom(prefetched_image_on_device_, &context_); label_output->CopyFrom(prefetched_label_on_device_, &context_); } return true; } } // namespace caffe2 #endif // CAFFE2_IMAGE_IMAGE_INPUT_OP_H_
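
// Illustrative note (not part of the op): the aspect-preserving scale rule in
// Prefetch() above maps the shorter image side to scale_ and scales the longer
// side proportionally (warp_ instead forces a scale_ x scale_ square).  Worked
// example with assumed numbers: an image of 640 rows x 480 cols with
// scale_ = 256 takes the rows > cols branch, so scaled_width = 256 and
// scaled_height = 640 * 256 / 480 = 341 (float truncated to int); the random
// crop_ x crop_ window is then sampled from that 341 x 256 image.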
GB_unop__one_int32_int32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__one_int32_int32 // op(A') function: GB_unop_tran__one_int32_int32 // C type: int32_t // A type: int32_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ int32_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CAST(z, aij) \ ; ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ ; ; \ /* Cx [pC] = op (cast (aij)) */ \ ; ; \ Cx [pC] = 1 ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__one_int32_int32 ( int32_t *Cx, // Cx and Ax may be aliased const int32_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; ; ; Cx [p] = 1 ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; ; ; ; ; Cx [p] = 1 ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__one_int32_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
geo_particle_iter.kernel_runtime.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> #include "local_header.h" #include "openmp_pscmc_inc.h" #include "geo_particle_iter.kernel_inc.h" int openmp_geo_rel_1st_bwd_init (openmp_pscmc_env * pe ,openmp_geo_rel_1st_bwd_struct * kerstr ){ return 0 ;} void openmp_geo_rel_1st_bwd_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_geo_rel_1st_bwd_struct )); } int openmp_geo_rel_1st_bwd_get_num_compute_units (openmp_geo_rel_1st_bwd_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_geo_rel_1st_bwd_get_xlen (){ return 1 ;} int openmp_geo_rel_1st_bwd_exec (openmp_geo_rel_1st_bwd_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_geo_rel_1st_bwd_scmc_kernel ( ( kerstr )->inoutput , ( kerstr )->xyzw , ( kerstr )->cu_cache , ( kerstr )->cu_xyzw , ( kerstr )->xoffset , ( kerstr )->yoffset , ( kerstr )->zoffset , ( kerstr )->fieldE , ( kerstr )->fieldB , ( kerstr )->fieldB1 , ( kerstr )->FoutJ , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->grid_cache_len)[0] , ( ( kerstr )->cu_cache_length)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->Mass0)[0] , ( ( kerstr )->Charge0)[0] , ( ( kerstr )->Deltat)[0] , ( ( kerstr )->Tori_X0)[0] , ( ( kerstr )->Solve_Err)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_geo_rel_1st_bwd_scmc_set_parameter_inoutput (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inoutput = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_xyzw (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xyzw = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_cu_cache (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->cu_cache = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_cu_xyzw (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->cu_xyzw = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_xoffset (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xoffset = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_yoffset (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yoffset = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_zoffset (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zoffset = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_fieldE (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->fieldE = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_fieldB (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->fieldB = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_fieldB1 (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->fieldB1 = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_FoutJ (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * 
pm ){ ( ( kerstr )->FoutJ = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_XLEN (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_YLEN (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_ZLEN (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_ovlp (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_numvec (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_num_ele (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_grid_cache_len (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->grid_cache_len = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_cu_cache_length (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->cu_cache_length = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_DELTA_X (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DELTA_X = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_DELTA_Y (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DELTA_Y = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_DELTA_Z (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DELTA_Z = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_Mass0 (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->Mass0 = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_Charge0 (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->Charge0 = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_Deltat (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->Deltat = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_Tori_X0 (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->Tori_X0 = pm->d_data); } int openmp_geo_rel_1st_bwd_scmc_set_parameter_Solve_Err (openmp_geo_rel_1st_bwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->Solve_Err = pm->d_data); } int openmp_geo_rel_1st_fwd_init (openmp_pscmc_env * pe ,openmp_geo_rel_1st_fwd_struct * kerstr ){ return 0 ;} void openmp_geo_rel_1st_fwd_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_geo_rel_1st_fwd_struct )); } int openmp_geo_rel_1st_fwd_get_num_compute_units (openmp_geo_rel_1st_fwd_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_geo_rel_1st_fwd_get_xlen (){ return 1 ;} int openmp_geo_rel_1st_fwd_exec (openmp_geo_rel_1st_fwd_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { 
openmp_geo_rel_1st_fwd_scmc_kernel ( ( kerstr )->inoutput , ( kerstr )->xyzw , ( kerstr )->cu_cache , ( kerstr )->cu_xyzw , ( kerstr )->xoffset , ( kerstr )->yoffset , ( kerstr )->zoffset , ( kerstr )->fieldE , ( kerstr )->fieldB , ( kerstr )->fieldB1 , ( kerstr )->FoutJ , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->grid_cache_len)[0] , ( ( kerstr )->cu_cache_length)[0] , ( ( kerstr )->DELTA_X)[0] , ( ( kerstr )->DELTA_Y)[0] , ( ( kerstr )->DELTA_Z)[0] , ( ( kerstr )->Mass0)[0] , ( ( kerstr )->Charge0)[0] , ( ( kerstr )->Deltat)[0] , ( ( kerstr )->Tori_X0)[0] , ( ( kerstr )->Solve_Err)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_geo_rel_1st_fwd_scmc_set_parameter_inoutput (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inoutput = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_xyzw (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xyzw = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_cu_cache (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->cu_cache = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_cu_xyzw (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->cu_xyzw = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_xoffset (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xoffset = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_yoffset (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yoffset = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_zoffset (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zoffset = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_fieldE (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->fieldE = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_fieldB (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->fieldB = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_fieldB1 (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->fieldB1 = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_FoutJ (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->FoutJ = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_XLEN (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_YLEN (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_ZLEN (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_ovlp (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_numvec (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_num_ele (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_grid_cache_len (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem 
* pm ){ ( ( kerstr )->grid_cache_len = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_cu_cache_length (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->cu_cache_length = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_DELTA_X (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DELTA_X = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_DELTA_Y (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DELTA_Y = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_DELTA_Z (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DELTA_Z = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_Mass0 (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->Mass0 = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_Charge0 (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->Charge0 = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_Deltat (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->Deltat = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_Tori_X0 (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->Tori_X0 = pm->d_data); } int openmp_geo_rel_1st_fwd_scmc_set_parameter_Solve_Err (openmp_geo_rel_1st_fwd_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->Solve_Err = pm->d_data); }
GB_unop__acos_fc64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__acos_fc64_fc64 // op(A') function: GB_unop_tran__acos_fc64_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = cacos (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = cacos (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = cacos (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ACOS || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__acos_fc64_fc64 ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = cacos (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__acos_fc64_fc64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
pt_to_pt_multiPingping.c
/***************************************************************************** * * * Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0 * * * * produced by * * * * Mark Bull, Jim Enright and Fiona Reid * * * * at * * * * Edinburgh Parallel Computing Centre * * * * email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk * * * * * * Copyright 2012, The University of Edinburgh * * * * * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * * * ****************************************************************************/ /*-----------------------------------------------------------*/ /* Contains the point-to-point multi-pingping mixed mode */ /* OpenMP/MPI benchmarks. */ /* This includes: -masteronly multiPingping */ /* -funnelled multiPingping */ /* -multiple multiPingping */ /*-----------------------------------------------------------*/ #include "pt_to_pt_multiPingping.h" /*-----------------------------------------------------------*/ /* multiPingping */ /* */ /* Driver subroutine for the multi-pingping benchmark. */ /*-----------------------------------------------------------*/ int multiPingping(int benchmarkType){ int dataSizeIter; char otherProcName[MPI_MAX_PROCESSOR_NAME]; int balance; pingNodeA = 0; pingNodeB = 1; /* Check if there's a balance in the number of MPI processes on pingNodeA and pingNodeB. */ balance = crossCommBalance(pingNodeA, pingNodeB); /* If not balanced.. */ if (balance == FALSE){ /* ..master prints error */ if (myMPIRank == 0){ printBalanceError(); } /* ..and all processes exit the function. */ return 1; } /* Exchange MPI_COMM_WORLD ranks for processes in same crossComm */ exchangeWorldRanks(pingNodeA, pingNodeB, &otherPingRank); /* Processes on pingNodeB send their processor name to pingNodeA procs. */ sendProcName(pingNodeA, pingNodeB, otherProcName); /* Print comm world ranks & processor name of processes * taking part in the multi-pingping benchmark. */ printMultiProcInfo(pingNodeA, otherPingRank, otherProcName); /* Barrier to ensure that all procs have completed * printMultiProcInfo before printing column headings. 
*/ MPI_Barrier(comm); /* Master process then prints report column headings */ if (myMPIRank == 0){ printBenchHeader(); } /* Initialise repsToDo to defaultReps at start of benchmark */ repsToDo = defaultReps; /* Initialise dataSizeIter */ dataSizeIter = minDataSize; /* Start loop over data sizes */ while (dataSizeIter <= maxDataSize){ /* set size of buffer */ sizeofBuffer = dataSizeIter * numThreads; /* Allocate space for the main data arrays */ allocateMultiPingpingData(sizeofBuffer); /* warm-up */ if (benchmarkType == MASTERONLY){ /* Masteronly warm-up */ masteronlyMultiPingping(warmUpIters, dataSizeIter); } else if (benchmarkType == FUNNELLED){ /* Funnelled warm-up sweep */ funnelledMultiPingping(warmUpIters, dataSizeIter); } else if (benchmarkType == MULTIPLE){ /* Multiple warm-up */ multipleMultiPingping(warmUpIters, dataSizeIter); } /* Verification test for multi-pingping */ testMultiPingping(sizeofBuffer, dataSizeIter); /* Initialise benchmark */ benchComplete = FALSE; /* Keep executing benchmark until target time is reached */ while (benchComplete != TRUE){ /* MPI_Barrier to synchronise processes. Then start the timer. */ MPI_Barrier(comm); startTime = MPI_Wtime(); if (benchmarkType == MASTERONLY){ /* Execute masteronly multi-pingping repsToDo times */ masteronlyMultiPingping(repsToDo, dataSizeIter); } else if (benchmarkType == FUNNELLED){ /* Execute funnelled multi-pingping */ funnelledMultiPingping(repsToDo, dataSizeIter); } else if (benchmarkType == MULTIPLE){ multipleMultiPingping(repsToDo, dataSizeIter); } /* Stop the timer. MPI_Barrier to synchronise processes * for more accurate timing. */ MPI_Barrier(comm); finishTime = MPI_Wtime(); totalTime = finishTime - startTime; /* Call repTimeCheck to check if target time is reached. */ if (myMPIRank==0){ benchComplete = repTimeCheck(totalTime, repsToDo); } /* Ensure all procs have the same value of benchComplete */ /* and repsToDo */ MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm); MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm); } /* End of loop to check if benchComplete is true */ /* Master process sets benchmark results */ if (myMPIRank == 0){ setReportParams(dataSizeIter, repsToDo, totalTime); printReport(); } /* Free the allocated space for the main data arrays */ freeMultiPingpingData(); /* Update dataSize before next iteration */ dataSizeIter = dataSizeIter * 2; } return 0; } /*-----------------------------------------------------------*/ /* masteronlyMultiPingping */ /* */ /* All processes with rank of pingNodeA or pingNodeB in */ /* crossComm send a message to each other. */ /* MPI communication takes place outside of the parallel */ /* region. */ /*-----------------------------------------------------------*/ int masteronlyMultiPingping(int totalReps, int dataSize){ int repIter, i; int destRank; /* set destRank to ID of other process */ if (crossCommRank == pingNodeA){ destRank = pingNodeB; } else if (crossCommRank == pingNodeB){ destRank = pingNodeA; } /* loop totalRep times */ for (repIter=1; repIter<=totalReps; repIter++){ if ((crossCommRank == pingNodeA) || (crossCommRank == pingNodeB) ){ /* Each thread writes its globalID to pingSendBuf * with a parallel for directive. */ #pragma omp parallel for default(none) \ private(i) \ shared(pingSendBuf,dataSize,sizeofBuffer,globalIDarray) \ schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ pingSendBuf[i] = globalIDarray[myThreadID]; } /* Process calls non-blocking send to start transfer of * pingSendBuf to other process. 
*/ MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, destRank, TAG,\ crossComm, &requestID); /* Processes then wait for message from other process. */ MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, destRank, TAG, \ crossComm, &status); /* Finish the send operation with an MPI_Wait */ MPI_Wait(&requestID, &status); /* Threads under the MPI processes read their part of the * received buffer. */ #pragma omp parallel for default(none) \ private(i) \ shared(finalRecvBuf,dataSize,sizeofBuffer,pingRecvBuf) \ schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ finalRecvBuf[i] = pingRecvBuf[i]; } } } /* End repetitions loop */ return 0; } /*-----------------------------------------------------------*/ /* funnelledMultiPingping */ /* */ /* All processes with rank of pingNodeA or pingNodeB in */ /* crossComm send a message to each other. */ /* Inter-process communication takes place inside the */ /* OpenMP parallel region by the master thread. */ /*-----------------------------------------------------------*/ int funnelledMultiPingping(int totalReps, int dataSize){ int repIter, i; int destRank; /* Set destRank to id of other process */ if (crossCommRank == pingNodeA){ destRank = pingNodeB; } else if (crossCommRank == pingNodeB){ destRank = pingNodeA; } /* Open the parallel region */ #pragma omp parallel \ private(i,repIter) \ shared(dataSize,sizeofBuffer,pingSendBuf,globalIDarray) \ shared(pingRecvBuf,finalRecvBuf,status,requestID,destRank) \ shared(crossComm,crossCommRank,pingNodeA,pingNodeB,totalReps) { /* loop totalRep times */ for (repIter = 1; repIter <= totalReps; repIter++){ if (crossCommRank == pingNodeA || crossCommRank == pingNodeB){ /* Each thread writes its globalID to its part of * pingSendBuf with an omp for. */ #pragma omp for schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ pingSendBuf[i] = globalIDarray[myThreadID]; } /* Implicit barrier here takes care of necessary synchronisation. */ #pragma omp master { /* Master thread of each process starts send. */ MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, \ destRank, TAG, crossComm, &requestID); /* Processes then wait for message. */ MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, \ destRank, TAG, crossComm, &status); /* Finish the send operation with an MPI_Wait */ MPI_Wait(&requestID, &status); } /* Barrier to ensure master thread has completed transfer. */ #pragma omp barrier /* Each thread reads its part of the received buffer */ #pragma omp for schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ finalRecvBuf[i] = pingRecvBuf[i]; } } } /* End repetitions loop */ } /* End parallel region */ return 0; } /*-----------------------------------------------------------*/ /* multipleMultiPingping */ /* */ /* All processes with crossCommRank of pingNodeA and */ /* pingNodeB in crossComm send a message to each other. */ /* Multiple threads take part in the communication. 
*/ /*-----------------------------------------------------------*/ int multipleMultiPingping(int totalReps, int dataSize){ int repIter, i; int destRank; int lBound; /* set destRank to be id of other process */ if (crossCommRank == pingNodeA){ destRank = pingNodeB; } else if (crossCommRank == pingNodeB){ destRank = pingNodeA; } /* Open parallel region */ #pragma omp parallel \ private(i,repIter,lBound,requestID,status) \ shared(dataSize,sizeofBuffer,pingSendBuf,globalIDarray) \ shared(pingRecvBuf,finalRecvBuf,destRank,crossComm) \ shared(crossCommRank,pingNodeA,pingNodeB,totalReps) { /* loop totalRep times */ for (repIter = 1; repIter <= totalReps; repIter++){ if (crossCommRank == pingNodeA || crossCommRank == pingNodeB){ /* Calculate lower bound of each thread's portion * of the data array. */ lBound = (myThreadID * dataSize); /* Each thread writes to its part of pingSendBuf */ #pragma omp for nowait schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ pingSendBuf[i] = globalIDarray[myThreadID]; } /* Each thread starts send of dataSize items from * pingSendBuf. */ MPI_Isend(&pingSendBuf[lBound], dataSize, MPI_INT, \ destRank, myThreadID, crossComm, &requestID); /* Thread then waits for message from destRank * with tag equal to its threadID. */ MPI_Recv(&pingRecvBuf[lBound], dataSize, MPI_INT, destRank, \ myThreadID, crossComm, &status); /* Thread completes send using MPI_Wait */ MPI_Wait(&requestID, &status); /* Each thread reads its part of received buffer. */ #pragma omp for nowait schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ finalRecvBuf[i] = pingRecvBuf[i]; } } } /* End repetitions loop */ } return 0; } /*-----------------------------------------------------------*/ /* allocateMultiPingpingData */ /* */ /* Allocates space for the main data arrays. */ /* Size of each array is specified by subroutine argument. */ /*-----------------------------------------------------------*/ int allocateMultiPingpingData(int sizeofBuffer){ if (crossCommRank == pingNodeA || crossCommRank == pingNodeB){ pingSendBuf = (int *)malloc(sizeof(int) * sizeofBuffer); pingRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer); finalRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer); } return 0; } /*-----------------------------------------------------------*/ /* freeMultiPingpingData */ /* */ /* Free allocated memory for main data arrays. */ /*-----------------------------------------------------------*/ int freeMultiPingpingData(){ if (crossCommRank == pingNodeA || crossCommRank == pingNodeB){ free(pingSendBuf); free(pingRecvBuf); free(finalRecvBuf); } return 0; } /*-----------------------------------------------------------*/ /* testMultiPingping */ /* */ /* Verifies that the multi-pingping benchmark worked */ /* correctly. */ /*-----------------------------------------------------------*/ int testMultiPingping(int sizeofBuffer, int dataSize){ int i; int testFlag, localTestFlag; /* set localTestFlag to true */ localTestFlag = TRUE; /* Testing done for processes on pingNodeA & pingNodeB */ if (crossCommRank == pingNodeA || crossCommRank == pingNodeB) { /* allocate space for testBuf */ testBuf = (int *)malloc(sizeof(int) * sizeofBuffer); /* Construct testBuf with correct values */ #pragma omp parallel for default(none) \ private(i) \ shared(otherPingRank,numThreads,dataSize,sizeofBuffer,testBuf) \ schedule(static,dataSize) for (i=0; i<sizeofBuffer; i++){ /* calculate globalID of thread expected in finalRecvBuf. * This is done using otherPingRank. 
*/ testBuf[i] = (otherPingRank * numThreads) + myThreadID; } /* Compare each element of testBuf and finalRecvBuf */ for (i=0; i<sizeofBuffer; i++){ if (testBuf[i] != finalRecvBuf[i]){ localTestFlag = FALSE; } } /* Free space for testBuf */ free(testBuf); } /* Reduce testFlag into master with logical AND */ MPI_Reduce(&localTestFlag, &testFlag, 1, MPI_INT, MPI_LAND, 0, comm); /* master sets testOutcome flag */ if (myMPIRank == 0){ setTestOutcome(testFlag); } return 0; }
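To summarise the three variants above in one place: masteronly keeps all MPI calls outside the OpenMP regions, funnelled issues them from the master thread inside the region, and multiple lets every thread communicate with its own tag. A standalone toy version of the masteronly pattern is sketched below (illustrative only, not part of the suite; the buffer size and the two-rank assumption are invented for the example):

#include <mpi.h>
#include <omp.h>
#include <stdio.h>

#define N 1024

int main(int argc, char *argv[]) {
    int rank, size, provided;
    static int sendBuf[N], recvBuf[N];
    /* FUNNELED (or higher) is the safe request when mixing MPI with OpenMP */
    MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* threads fill the send buffer inside a parallel region */
    #pragma omp parallel for
    for (int i = 0; i < N; i++) sendBuf[i] = rank * N + i;

    /* masteronly: communication happens outside any parallel region */
    if (size == 2) {
        int other = 1 - rank;
        MPI_Sendrecv(sendBuf, N, MPI_INT, other, 0,
                     recvBuf, N, MPI_INT, other, 0,
                     MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }

    /* threads read their share of the received data */
    long sum = 0;
    #pragma omp parallel for reduction(+:sum)
    for (int i = 0; i < N; i++) sum += recvBuf[i];
    printf("rank %d checksum %ld\n", rank, sum);

    MPI_Finalize();
    return 0;
}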
pomp.h
// license:BSD-3-Clause // copyright-holders:Couriersud #ifndef POMP_H_ #define POMP_H_ /// /// \file pomp.h /// /// Wrap all OPENMP stuff here in a hopefully c++ compliant way. /// #include "pconfig.h" #include "ptypes.h" #include <cstdint> #if PHAS_OPENMP #include "omp.h" #endif namespace plib { namespace omp { template <typename I, class T> void for_static(std::size_t numops, const I start, const I end, const T &what) noexcept(noexcept(what)) { if (numops>1000) { #if PHAS_OPENMP && PUSE_OPENMP #pragma omp parallel for schedule(static) #endif for (I i = start; i < end; i++) what(i); } else for (I i = start; i < end; i++) what(i); } template <typename I, class T> void for_static(const I start, const I end, const T &what) noexcept(noexcept(what)) { #if PHAS_OPENMP && PUSE_OPENMP #pragma omp parallel for schedule(static) #endif for (I i = start; i < end; i++) what(i); } template <typename I, class T> void for_static_np(const I start, const I end, const T &what) noexcept(noexcept(what)) { for (I i = start; i < end; i++) what(i); } inline void set_num_threads(const std::size_t threads) noexcept { #if PHAS_OPENMP && PUSE_OPENMP omp_set_num_threads(static_cast<int>(threads)); #else plib::unused_var(threads); #endif } inline std::size_t get_max_threads() noexcept { #if PHAS_OPENMP && PUSE_OPENMP return static_cast<std::size_t>(omp_get_max_threads()); #else return 1; #endif } } // namespace omp } // namespace plib #endif // POMP_H_
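A minimal usage sketch for the wrappers above (a guess at typical use, assuming a build where pconfig.h defines PHAS_OPENMP and PUSE_OPENMP; the code also compiles without OpenMP because the wrappers fall back to a plain serial loop):

#include "pomp.h"
#include <cstdio>
#include <vector>

int main()
{
    std::vector<double> v(1u << 20, 1.0);
    plib::omp::set_num_threads(plib::omp::get_max_threads());
    // numops is above the internal threshold (1000), so this may run in parallel
    plib::omp::for_static(v.size(), std::size_t(0), v.size(),
        [&v](std::size_t i) { v[i] *= 2.0; });
    std::printf("v[0] = %f, max threads = %zu\n", v[0], plib::omp::get_max_threads());
    return 0;
}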
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 24; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + 
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
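The loop nest above is intentionally serial: the #pragma scop region is the input that PLUTO-style source-to-source tools tile (using tile_size) and parallelize. For reference only, a hand-parallelized version of one time step is sketched below (not part of the benchmark); the spatial loops are safe to parallelize because each step writes only the (t+1)%2 buffer while reading the (t)%2 buffer.

/* Illustrative drop-in for the body of the t-loop above */
#pragma omp parallel for collapse(2) private(k)
for (i = 4; i < Nz-4; i++) {
  for (j = 4; j < Ny-4; j++) {
    for (k = 4; k < Nx-4; k++) {
      A[(t+1)%2][i][j][k] =
          coef[0][i][j][k]  *  A[(t)%2][i  ][j  ][k  ]
        + coef[1][i][j][k]  * (A[(t)%2][i-1][j  ][k  ] + A[(t)%2][i+1][j  ][k  ])
        + coef[2][i][j][k]  * (A[(t)%2][i  ][j-1][k  ] + A[(t)%2][i  ][j+1][k  ])
        + coef[3][i][j][k]  * (A[(t)%2][i  ][j  ][k-1] + A[(t)%2][i  ][j  ][k+1])
        + coef[4][i][j][k]  * (A[(t)%2][i-2][j  ][k  ] + A[(t)%2][i+2][j  ][k  ])
        + coef[5][i][j][k]  * (A[(t)%2][i  ][j-2][k  ] + A[(t)%2][i  ][j+2][k  ])
        + coef[6][i][j][k]  * (A[(t)%2][i  ][j  ][k-2] + A[(t)%2][i  ][j  ][k+2])
        + coef[7][i][j][k]  * (A[(t)%2][i-3][j  ][k  ] + A[(t)%2][i+3][j  ][k  ])
        + coef[8][i][j][k]  * (A[(t)%2][i  ][j-3][k  ] + A[(t)%2][i  ][j+3][k  ])
        + coef[9][i][j][k]  * (A[(t)%2][i  ][j  ][k-3] + A[(t)%2][i  ][j  ][k+3])
        + coef[10][i][j][k] * (A[(t)%2][i-4][j  ][k  ] + A[(t)%2][i+4][j  ][k  ])
        + coef[11][i][j][k] * (A[(t)%2][i  ][j-4][k  ] + A[(t)%2][i  ][j+4][k  ])
        + coef[12][i][j][k] * (A[(t)%2][i  ][j  ][k-4] + A[(t)%2][i  ][j  ][k+4]);
    }
  }
}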
index.c
#include <string.h> #include <stdint.h> #include <omp.h> #include "allocator.h" #include "index.h" /* Merge the two sorted lists using temporary array */ static void imerge( uint32_t *b, uint32_t *l1, uint32_t *h1, uint32_t *l2, uint32_t *h2, uint32_t *l_) { for(;(l1 != h1) && (l2 != h2);) { if(*(b + *(l2)) < *(b + *(l1))) memcpy(l_++, l2++, sizeof(uint32_t)); else memcpy(l_++, l1++, sizeof(uint32_t)); } /* Move the leftover data */ memcpy(l_, l1, (size_t) (h1 - l1) * sizeof(uint32_t)); memcpy(l_, l2, (size_t) (h2 - l2) * sizeof(uint32_t)); } /* Sequential sort used to sort a small array list */ static void srt(uint32_t *b, uint32_t *l, uint32_t *h) { uint32_t * i; for(i = l; i < h; i++) { uint32_t * elm = b + *(i); uint32_t * j; for(j = (i+1); j < h; j++) { if(*(elm) > *(b + *(j))) { XCHG(*(i), *(j)); // Swap the elements elm = b + *(i); // Change the base index } } } } /* Index sort: Find the index of an array that gives a * sorted array */ static void isort( uint32_t *b, uint32_t *l, uint32_t *h, uint32_t *l_, uint8_t flg) { if((h - l) <= LIMIT) // Maximum sort limit { srt(b, l, h); if(!flg) memcpy(l_, l, (size_t) (h - l) * sizeof(uint32_t)); } else { /* Use merge-sort to divide the array into subarrays */ uint32_t * m = l + (h - l) / 2; uint32_t * m_ = l_ + (m - l); uint32_t * h_ = l_ + (h - l); /* Launch OpenMP task-based parallelism for each half of * the array */ #pragma omp task isort(b, l, m, l_, (unsigned char) !flg); isort(b, m, h, m_, (unsigned char) !flg); #pragma omp taskwait /* Merge the sorted sequences */ if(flg) imerge(b, l_, m_, m_, h_, l); else imerge(b, l, m, m, h, l_); } } /* Initialize the find indexing function * sz: size of the original array * b: the base array * l: the low address of the output permutation array */ void imain(const size_t sz, uint32_t *b, uint32_t *d) { /* initialize the output array * Assume that the original list is already sorted */ uint32_t i; for(i = 0; i < sz; i++) d[i] = i; /* A temporary buffer used for sorting */ uint32_t *l = (uint32_t *) fun3d_malloc(sz, sizeof(uint32_t)); /* Launch the parallel region and start sorting based on an * index list */ #pragma omp parallel { #pragma omp single { isort(b, d, (d + sz), l, 1); // Index sort } } fun3d_free(l); }
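A small driver sketch for imain (hypothetical usage; it assumes allocator.h/index.h are on the include path, that fun3d_malloc/fun3d_free behave like malloc/free, and that the LIMIT threshold in the headers is at least the array size):

#include <stdio.h>
#include <stdint.h>
#include "index.h"

int main(void)
{
    uint32_t keys[] = { 40, 10, 30, 20 };
    uint32_t perm[4];
    /* perm becomes the permutation that sorts keys: {1, 3, 2, 0} */
    imain(4, keys, perm);
    for (int i = 0; i < 4; i++)
        printf("%u ", keys[perm[i]]);  /* prints: 10 20 30 40 */
    printf("\n");
    return 0;
}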
GB_export.c
//------------------------------------------------------------------------------ // GB_export: export a matrix or vector //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // No conversion is done, except to convert to non-iso if requested. The // matrix is exported in its current sparsity structure and by-row/by-col // format. #include "GB_export.h" #define GB_FREE_ALL \ { \ GB_FREE (&Ap_new, Ap_new_size) ; \ GB_FREE (&Ah_new, Ah_new_size) ; \ } GrB_Info GB_export // export/unpack a matrix in any format ( bool unpacking, // unpack if true, export and free if false GrB_Matrix *A, // handle of matrix to export and free, or unpack GrB_Type *type, // type of matrix to export GrB_Index *vlen, // vector length GrB_Index *vdim, // vector dimension bool is_sparse_vector, // true if A is a sparse GrB_Vector // the 5 arrays: GrB_Index **Ap, // pointers GrB_Index *Ap_size, // size of Ap in bytes GrB_Index **Ah, // vector indices GrB_Index *Ah_size, // size of Ah in bytes int8_t **Ab, // bitmap GrB_Index *Ab_size, // size of Ab in bytes GrB_Index **Ai, // indices GrB_Index *Ai_size, // size of Ai in bytes void **Ax, // values GrB_Index *Ax_size, // size of Ax in bytes // additional information for specific formats: GrB_Index *nvals, // # of entries for bitmap format. bool *jumbled, // if true, sparse/hypersparse may be jumbled. GrB_Index *nvec, // size of Ah for hypersparse format. // information for all formats: int *sparsity, // hypersparse, sparse, bitmap, or full bool *is_csc, // if true then matrix is by-column, else by-row bool *iso, // if true then A is iso and only one entry is returned // in Ax, regardless of nvals(A). GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GrB_Info info ; int64_t *Ap_new = NULL ; size_t Ap_new_size = 0 ; int64_t *Ah_new = NULL ; size_t Ah_new_size = 0 ; ASSERT (A != NULL) ; GB_RETURN_IF_NULL_OR_FAULTY (*A) ; ASSERT_MATRIX_OK (*A, "A to export", GB0) ; ASSERT (!GB_ZOMBIES (*A)) ; ASSERT (GB_JUMBLED_OK (*A)) ; ASSERT (!GB_PENDING (*A)) ; GB_RETURN_IF_NULL (type) ; GB_RETURN_IF_NULL (vlen) ; GB_RETURN_IF_NULL (vdim) ; GB_RETURN_IF_NULL (Ax) ; GB_RETURN_IF_NULL (Ax_size) ; int s = GB_sparsity (*A) ; switch (s) { case GxB_HYPERSPARSE : GB_RETURN_IF_NULL (nvec) ; GB_RETURN_IF_NULL (Ah) ; GB_RETURN_IF_NULL (Ah_size) ; case GxB_SPARSE : if (is_sparse_vector) { GB_RETURN_IF_NULL (nvals) ; } else { GB_RETURN_IF_NULL (Ap) ; GB_RETURN_IF_NULL (Ap_size) ; } GB_RETURN_IF_NULL (Ai) ; GB_RETURN_IF_NULL (Ai_size) ; break ; case GxB_BITMAP : GB_RETURN_IF_NULL (nvals) ; GB_RETURN_IF_NULL (Ab) ; GB_RETURN_IF_NULL (Ab_size) ; case GxB_FULL : break ; default: ; } //-------------------------------------------------------------------------- // allocate new space for Ap and Ah if unpacking //-------------------------------------------------------------------------- int64_t avdim = (*A)->vdim ; int64_t plen_new, nvec_new ; if (unpacking) { plen_new = (avdim == 0) ? 0 : 1 ; nvec_new = (avdim == 1) ? 
1 : 0 ; Ap_new = GB_CALLOC (plen_new+1, int64_t, &(Ap_new_size)) ; if (avdim > 1) { // A is sparse if avdim <= 1, hypersparse if avdim > 1 Ah_new = GB_CALLOC (1, int64_t, &(Ah_new_size)) ; } if (Ap_new == NULL || (avdim > 1 && Ah_new == NULL)) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } } //-------------------------------------------------------------------------- // ensure A is non-iso if requested, or export A as-is //-------------------------------------------------------------------------- if (iso == NULL) { // ensure A is non-iso // set A->iso = false OK if ((*A)->iso) { GBURBLE ("(iso to non-iso export) ") ; } GB_OK (GB_convert_any_to_non_iso (*A, true, Context)) ; ASSERT (!((*A)->iso)) ; } else { // do not convert the matrix; export A as-is, either iso or non-iso (*iso) = (*A)->iso ; if (*iso) { GBURBLE ("(iso export) ") ; } } //-------------------------------------------------------------------------- // export the matrix //-------------------------------------------------------------------------- (*type) = (*A)->type ; (*vlen) = (*A)->vlen ; (*vdim) = avdim ; // export A->x #ifdef GB_MEMDUMP printf ("export A->x from memtable: %p\n", (*A)->x) ; #endif GB_Global_memtable_remove ((*A)->x) ; (*Ax) = (*A)->x ; (*A)->x = NULL ; (*Ax_size) = (*A)->x_size ; switch (s) { case GxB_HYPERSPARSE : (*nvec) = (*A)->nvec ; // export A->h #ifdef GB_MEMDUMP printf ("export A->h from memtable: %p\n", (*A)->h) ; #endif GB_Global_memtable_remove ((*A)->h) ; (*Ah) = (GrB_Index *) ((*A)->h) ; (*A)->h = NULL ; (*Ah_size) = (*A)->h_size ; case GxB_SPARSE : if (jumbled != NULL) { (*jumbled) = (*A)->jumbled ; } // export A->p, unless A is a sparse vector in CSC format if (is_sparse_vector) { (*nvals) = (*A)->p [1] ; } else { #ifdef GB_MEMDUMP printf ("export A->p from memtable: %p\n", (*A)->p) ; #endif GB_Global_memtable_remove ((*A)->p) ; (*Ap) = (GrB_Index *) ((*A)->p) ; (*A)->p = NULL ; (*Ap_size) = (*A)->p_size ; } // export A->i #ifdef GB_MEMDUMP printf ("export A->i from memtable: %p\n", (*A)->i) ; #endif GB_Global_memtable_remove ((*A)->i) ; (*Ai) = (GrB_Index *) ((*A)->i) ; (*A)->i = NULL ; (*Ai_size) = (*A)->i_size ; break ; case GxB_BITMAP : (*nvals) = (*A)->nvals ; // export A->b #ifdef GB_MEMDUMP printf ("export A->b from memtable: %p\n", (*A)->b) ; #endif GB_Global_memtable_remove ((*A)->b) ; (*Ab) = (*A)->b ; (*A)->b = NULL ; (*Ab_size) = (*A)->b_size ; case GxB_FULL : default: ; } if (sparsity != NULL) { (*sparsity) = s ; } if (is_csc != NULL) { (*is_csc) = (*A)->is_csc ; } //-------------------------------------------------------------------------- // free or clear the GrB_Matrix //-------------------------------------------------------------------------- if (unpacking) { // unpack: clear the matrix, leaving it hypersparse (or sparse if // it is a vector (vdim of 1) or has vdim of zero) GB_phbix_free (*A) ; (*A)->plen = plen_new ; (*A)->nvec = nvec_new ; (*A)->p = Ap_new ; (*A)->p_size = Ap_new_size ; (*A)->h = Ah_new ; (*A)->h_size = Ah_new_size ; (*A)->magic = GB_MAGIC ; ASSERT_MATRIX_OK (*A, "A unpacked", GB0) ; } else { // export: free the header of A, and A->p if A is a sparse GrB_Vector GB_Matrix_free (A) ; ASSERT ((*A) == NULL) ; } #pragma omp flush return (GrB_SUCCESS) ; }
workshare1.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> #define CHUNKSIZE 10 #define N 100 int main (int argc, char *argv[]) { int nthreads, tid, i, chunk; float a[N], b[N], c[N]; /* Some initializations */ for (i=0; i < N; i++) a[i] = b[i] = i * 1.0; chunk = CHUNKSIZE; #pragma omp parallel shared(a,b,c,nthreads,chunk) private(i,tid) { tid = omp_get_thread_num(); if (tid == 0) { nthreads = omp_get_num_threads(); printf("Number of threads = %d\n", nthreads); } printf("Thread %d starting...\n",tid); #pragma omp for schedule(dynamic,chunk) for (i=0; i<N; i++) { c[i] = a[i] + b[i]; printf("Thread %d: c[%d]= %f\n",tid,i,c[i]); } } /* end of parallel section */ }
taylor_expand.h
#ifndef MATH_TAYLOR_EXPAND_H #define MATH_TAYLOR_EXPAND_H #include <vector> #include <armadillo> namespace math { namespace details { inline arma::umat indices_with_same_sum(const arma::uword dim, const arma::uword sum) { const arma::umat all = math::space::auto_iteration_over_dims( (sum + 1) * arma::ones<arma::uvec>(dim)); const arma::uvec each_sum = arma::sum(all).t(); return all.cols(arma::find(each_sum == sum)); } } template<typename Function, typename T> auto taylor_expand(const Function & function, const arma::uword grade) { std::vector<std::vector<Function>> function_derivative_map(grade); std::vector<arma::umat> indices(grade); for (arma::uword i = 0; i < grade; i++) { const arma::uword true_i = i + 1; const arma::umat indices_at_true_i = details::indices_with_same_sum(function.dim(), true_i); indices[i] = indices_at_true_i; std::vector<Function> function_derivatives_at_i(indices_at_true_i.n_cols); #pragma omp parallel for for (arma::uword j = 0; j < indices_at_true_i.n_cols; j++) { const arma::uvec derivative_operator = indices_at_true_i.col(j); function_derivatives_at_i[j] = derivative(function, derivative_operator); } function_derivative_map[i] = function_derivatives_at_i; } return [indices, function_derivative_map, function, grade]( const arma::Col<T> & position, const arma::Col<T> & translation) -> auto { auto result = function.at(translation); for (arma::uword i = 0; i < grade; i++) { /* note: accumulate sequentially; a parallel for here would race on result */ for (arma::uword j = 0; j < indices[i].n_cols; j++) { const math::polynomial::Term<T> term(T{1.0}, indices[i].col(j)); const arma::Col<T> translated_position = position - translation; result += function_derivative_map[i][j].at(translation) * term.at(translated_position) / factorial(i + 1); } } return result; }; } template<typename Function, typename T> auto taylor_expand(const Function & function, const arma::vec & translation, const arma::uword grade) { math::Polynomial<T> result(function.dim(), function.at(translation)); for (arma::uword i = 0; i < grade; i++) { const arma::uword true_i = i + 1; const arma::umat indices_at_true_i = details::indices_with_same_sum(function.dim(), true_i); for (arma::uword j = 0; j < indices_at_true_i.n_cols; j++) { const arma::uvec derivative_operator = indices_at_true_i.col(j); const math::polynomial::Term<T> term(derivative(function, derivative_operator).at(translation) / factorial(i + 1), derivative_operator); result = result + term; } } return result; } } #endif //MATH_TAYLOR_EXPAND_H
tree_reduce.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // RUN: %libomp-compile -DNOWAIT && %libomp-run | %sort-threads | FileCheck %s // REQUIRES: ompt // UNSUPPORTED: gcc #include "callback.h" #include <omp.h> #ifdef NOWAIT #define FOR_CLAUSE nowait #else #define FOR_CLAUSE #endif int main() { int sum = 0, a = 0, b = 0; int i; #pragma omp parallel num_threads(5) // for 32-bit architecture we need at least 3 variables to trigger tree #pragma omp for reduction(+ : sum, a, b) FOR_CLAUSE for (i = 0; i < 10000; i++) { a = b = sum += i; } printf("%i\n", sum); // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID:[0-9]+]] // order and distribution to threads not determined // CHECK: {{^}}{{[0-f]+}}: ompt_event_reduction_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}} // CHECK: {{^}}{{[0-f]+}}: ompt_event_reduction_end: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}} // CHECK: {{^}}{{[0-f]+}}: ompt_event_reduction_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}} // CHECK: {{^}}{{[0-f]+}}: ompt_event_reduction_end: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}} // CHECK: {{^}}{{[0-f]+}}: ompt_event_reduction_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}} // CHECK: {{^}}{{[0-f]+}}: ompt_event_reduction_end: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}} // CHECK: {{^}}{{[0-f]+}}: ompt_event_reduction_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}} // CHECK: {{^}}{{[0-f]+}}: ompt_event_reduction_end: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}} return 0; }
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + 
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
nstream-target.c
/// /// Copyright (c) 2019, Intel Corporation /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above /// copyright notice, this list of conditions and the following /// disclaimer in the documentation and/or other materials provided /// with the distribution. /// * Neither the name of Intel Corporation nor the names of its /// contributors may be used to endorse or promote products /// derived from this software without specific prior written /// permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS /// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT /// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE /// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, /// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, /// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; /// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT /// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN /// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. ////////////////////////////////////////////////////////////////////// /// /// NAME: nstream /// /// PURPOSE: To compute memory bandwidth when adding a vector of a given /// number of double precision values to the scalar multiple of /// another vector of the same length, and storing the result in /// a third vector. /// /// USAGE: The program takes as input the number /// of iterations to loop over the triad vectors and /// the length of the vectors. /// /// <progname> <# iterations> <vector length> /// /// The output consists of diagnostics to make sure the /// algorithm worked, and of timing statistics. /// /// NOTES: Bandwidth is determined as the number of words read, plus the /// number of words written, times the size of the words, divided /// by the execution time. For a vector length of N, the total /// number of words read and written is 4*N*sizeof(double). /// /// /// HISTORY: This code is loosely based on the Stream benchmark by John /// McCalpin, but does not follow all the Stream rules. Hence, /// reported results should not be associated with Stream in /// external publications /// /// Converted to C++11 by Jeff Hammond, November 2017. /// Converted to C11 by Jeff Hammond, February 2019. 
/// ////////////////////////////////////////////////////////////////////// #include "prk_util.h" #include "prk_openmp.h" int main(int argc, char * argv[]) { printf("Parallel Research Kernels version %d\n", PRKVERSION ); printf("C11/OpenMP TARGET STREAM triad: A = B + scalar * C\n"); ////////////////////////////////////////////////////////////////////// /// Read and test input parameters ////////////////////////////////////////////////////////////////////// if (argc < 3) { printf("Usage: <# iterations> <vector length>\n"); return 1; } int iterations = atoi(argv[1]); if (iterations < 1) { printf("ERROR: iterations must be >= 1\n"); return 1; } // length of the vector size_t length = atol(argv[2]); if (length == 0) { printf("ERROR: Vector length must be greater than 0\n"); return 1; } int device = (argc > 3) ? atol(argv[3]) : omp_get_default_device(); if ( (device < 0 || omp_get_num_devices() <= device ) && (device != omp_get_default_device()) ) { printf("ERROR: device number %d is not valid.\n", device); return 1; } printf("Number of iterations = %d\n", iterations); printf("Vector length = %zu\n", length); printf("OpenMP Device = %d\n", device); ////////////////////////////////////////////////////////////////////// // Allocate space and perform the computation ////////////////////////////////////////////////////////////////////// double nstream_time = 0.0; size_t bytes = length*sizeof(double); double * restrict A = prk_malloc(bytes); double * restrict B = prk_malloc(bytes); double * restrict C = prk_malloc(bytes); double scalar = 3.0; #pragma omp parallel for simd for (size_t i=0; i<length; i++) { A[i] = 0.0; B[i] = 2.0; C[i] = 2.0; } #pragma omp target data map(tofrom: A[0:length]) map(to: B[0:length], C[0:length]) { for (int iter = 0; iter<=iterations; iter++) { if (iter==1) nstream_time = omp_get_wtime(); #pragma omp target teams distribute parallel for simd for (size_t i=0; i<length; i++) { A[i] += B[i] + scalar * C[i]; } } nstream_time = omp_get_wtime() - nstream_time; } prk_free(C); prk_free(B); ////////////////////////////////////////////////////////////////////// /// Analyze and output results ////////////////////////////////////////////////////////////////////// double ar = 0.0; double br = 2.0; double cr = 2.0; for (int i=0; i<=iterations; i++) { ar += br + scalar * cr; } ar *= length; double asum = 0.0; #pragma omp parallel for reduction(+:asum) for (size_t i=0; i<length; i++) { asum += fabs(A[i]); } prk_free(A); double epsilon=1.e-8; if (fabs(ar-asum)/asum > epsilon) { printf("Failed Validation on output array\n" " Expected checksum: %lf\n" " Observed checksum: %lf\n" "ERROR: solution did not validate\n", ar, asum); return 1; } else { printf("Solution validates\n"); double avgtime = nstream_time/iterations; double nbytes = 4.0 * length * sizeof(double); printf("Rate (MB/s): %lf Avg time (s): %lf\n", 1.e-6*nbytes/avgtime, avgtime); } return 0; }
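To make the bandwidth formula in the NOTES block concrete (illustrative numbers, not measured results): with vector length N = 2^25, each iteration moves 4 * N * sizeof(double) = 4 * 33554432 * 8 = 1,073,741,824 bytes; if the measured average time per iteration were 0.01 s, the program would report a rate of about 1.0737e9 * 1e-6 / 0.01 = 107,374 MB/s.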
flexProxDualDataHuber.h
#ifndef flexProxDualDataHuber_H #define flexProxDualDataHuber_H #include "flexProx.h" //! represents prox for a Huber data term /*! \f$ \alpha\|\cdot-f\|_\epsilon \f$ */ template<typename T> class flexProxDualDataHuber : public flexProx<T> { #ifdef __CUDACC__ typedef thrust::device_vector<T> Tdata; #else typedef std::vector<T> Tdata; #endif private: T huberEpsilon; public: flexProxDualDataHuber(T aHuberEpsilon) : flexProx<T>(dualHuberProx) { huberEpsilon = aHuberEpsilon; } ~flexProxDualDataHuber() { if (VERBOSE > 0) printf("Destructor prox!\n"); } #ifdef __CUDACC__ // Note: this L1-style functor is currently unused; the GPU path in applyProx below only prints a warning. struct flexProxDualDataL1Functor { __host__ __device__ flexProxDualDataL1Functor(T alpha) : alpha(alpha){} template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = min(this->alpha, max(-this->alpha,thrust::get<1>(t) - this->alpha * thrust::get<2>(t) * thrust::get<3>(t))); } T alpha; }; #endif void applyProx(T alpha, flexBoxData<T>* data, const std::vector<int> &dualNumbers, const std::vector<int> &primalNumbers) { } void applyProx(T alpha, flexBoxData<T>* data, const std::vector<int> &dualNumbers, const std::vector<int> &primalNumbers, std::vector<Tdata> &fList) { #ifdef __CUDACC__ printf("flexProxDualDataHuber not implemented on GPU \n"); #else for (size_t i = 0; i < dualNumbers.size(); i++) { T* ptrY = data->y[dualNumbers[i]].data(); T* ptrYtilde = data->yTilde[dualNumbers[i]].data(); T* ptrSigma = data->sigmaElt[dualNumbers[i]].data(); T* ptrF = fList[i].data(); int numElements = (int)data->yTilde[dualNumbers[i]].size(); #pragma omp parallel for for (int j = 0; j < numElements; j++) { T tmp = (ptrYtilde[j] - ptrSigma[j] * ptrF[j]) * alpha / (alpha + this->huberEpsilon * ptrSigma[j]); ptrY[j] = tmp / myMax<T>((T)1, std::abs(tmp) / alpha); } } #endif } }; #endif
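The per-element update in the CPU branch above is a scaled shrinkage toward the data term followed by a projection onto the alpha-ball. A self-contained scalar sketch of that step (the free function and the standalone setting are illustrative, not part of flexBox):

#include <algorithm>
#include <cmath>
#include <cstdio>

// One element of the Huber dual prox: shrink toward the data term f,
// damped by the smoothing parameter epsilon, then clamp to |y| <= alpha.
double huberDualProx(double yTilde, double sigma, double f,
                     double alpha, double epsilon) {
  double tmp = (yTilde - sigma * f) * alpha / (alpha + epsilon * sigma);
  return tmp / std::max(1.0, std::fabs(tmp) / alpha);
}

int main() {
  std::printf("%f\n", huberDualProx(2.0, 1.0, 0.5, 1.0, 0.01));
  return 0;
}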
declare_variant_ast_print.c
// RUN: %clang_cc1 -verify -fopenmp -x c -std=c99 -ast-print %s -o - -Wno-openmp-clauses | FileCheck %s // RUN: %clang_cc1 -verify -fopenmp-simd -x c -std=c99 -ast-print %s -o - -Wno-openmp-clauses | FileCheck %s // expected-no-diagnostics int foo(void); #pragma omp declare variant(foo) match(xxx={}, yyy={ccc}) #pragma omp declare variant(foo) match(xxx={vvv}) #pragma omp declare variant(foo) match(implementation={vendor(score(0):llvm)}, device={kind(fpga)}) #pragma omp declare variant(foo) match(implementation={vendor(llvm), xxx}) #pragma omp declare variant(foo) match(implementation={vendor(unknown)}, device={kind(gpu)}) #pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm, xxx, ibm)}, device={kind(cpu, nohost)}) #pragma omp declare variant(foo) match(device={kind(host)}) #pragma omp declare variant(foo) match(device={kind(nohost), xxx}) #pragma omp declare variant(foo) match(implementation={extension(match_all)}) #pragma omp declare variant(foo) match(implementation={extension(match_any)}) #pragma omp declare variant(foo) match(implementation={extension(match_none)}) int bar(void); // CHECK: int foo(); // CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={extension(match_none)}) // CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={extension(match_any)}) // CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={extension(match_all)}) // CHECK-NEXT: #pragma omp declare variant(foo) match(device={kind(nohost)}) // CHECK-NEXT: #pragma omp declare variant(foo) match(device={kind(host)}) // CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm)}, device={kind(cpu, nohost)}) // CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(unknown)}, device={kind(gpu)}) // CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(llvm)}) // CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(score(0): llvm)}, device={kind(fpga)}) // CHECK-NEXT: int bar();
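For readers unfamiliar with the directive this test pretty-prints, here is a minimal usage sketch (function names are illustrative; requires an OpenMP 5.0 compiler): when the context selector matches, calls to the base function are substituted with the variant, so base() resolves to base_host() in host-compiled code.

int base_host(void) { return 1; }

#pragma omp declare variant(base_host) match(device={kind(host)})
int base(void) { return 0; }

int main(void) { return base(); } // returns 1 when the host selector matches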
thread_scheduling.c
// RUN: %libomp-compile && env KMP_ABT_NUM_ESS=4 %libomp-run // REQUIRES: abt #include "omp_testsuite.h" #include "bolt_scheduling_util.h" int test_thread_scheduling(int num_threads) { int i, vals[num_threads]; memset(vals, 0, sizeof(int) * num_threads); timeout_barrier_t barrier; timeout_barrier_init(&barrier); #pragma omp parallel num_threads(num_threads) { check_num_ess(4); // The barrier must be run by all ESs. timeout_barrier_wait(&barrier, 4); vals[omp_get_thread_num()] += 1; } #pragma omp parallel for num_threads(num_threads) for (i = 0; i < num_threads; i++) { check_num_ess(4); // The barrier must be run by all ESs. timeout_barrier_wait(&barrier, 4); vals[i] += 2; } for (i = 0; i < num_threads; i++) { if (vals[i] != 3) { printf("vals[%d] == %d\n", i, vals[i]); return 0; } } return 1; } int main() { int i, num_failed = 0; for (i = 1; i < 4; i++) { if (!test_thread_scheduling(i * 4)) { num_failed++; } } return num_failed; }
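Stripped of the BOLT/Argobots-specific helpers (check_num_ess, timeout_barrier_*), the pass criterion above reduces to each slot accumulating 1 from the parallel region plus 2 from the parallel for. A plain OpenMP sketch of that check, as a point of reference:

#include <omp.h>
#include <cstdio>
#include <vector>

int main() {
  const int n = 8;
  std::vector<int> vals(n, 0);
  #pragma omp parallel num_threads(n)
  vals[omp_get_thread_num()] += 1;   // +1 per thread slot
  #pragma omp parallel for num_threads(n)
  for (int i = 0; i < n; i++)
    vals[i] += 2;                    // +2 per iteration slot
  for (int i = 0; i < n; i++)
    if (vals[i] != 3) { std::printf("vals[%d] == %d\n", i, vals[i]); return 1; }
  return 0;
}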
no_type_ops.h
/* Copyright 2015 The math21 Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #pragma once #include "inner.h" namespace math21 { template<typename T, typename S, template<typename> class Container1, template<typename> class Container2> void math21_operator_container_assign(const Container1<T> &A, Container2<S> &B) { MATH21_ASSERT(A.size() == B.size()) NumN n = A.size(); //#pragma omp parallel for for (NumN i = 1; i <= n; ++i) { B.at(i) = A(i); } } // Set B using values of A. offset in [0, n). template<typename T, typename S, template<typename> class Container1, template<typename> class Container2> void math21_operator_container_set_partially(const Container1<T> &A, Container2<S> &B, NumN offset1 = 0, NumN offset2 = 0, NumN count = 0) { MATH21_ASSERT(offset1 < A.size() && offset2 < B.size()); if (count == 0) { count = xjmin(A.size() - offset1, B.size() - offset2); } else { MATH21_ASSERT(count + offset1 <= A.size()); MATH21_ASSERT(count + offset2 <= B.size()); } NumN k; for (k = 1; k <= count; ++k) { B(offset2 + k) = A(offset1 + k); } } // set using map template<typename T, typename S, template<typename> class Container1, template<typename> class Container2, template<typename> class Container3> void math21_operator_container_set_by_map(const Container1<T> &A, Container2<S> &B, const Container3<NumN> &map) { MATH21_ASSERT(B.size() == map.size()) NumN n = B.size(); for (NumN i = 1; i <= n; ++i) { B.at(i) = A(map(i)); } } template<typename T, template<typename> class Container> void math21_operator_container_swap(Container<T> &x, NumZ pos, NumZ pos2) { NumN p1 = math21_number_container_pos_check(x.size(), pos); NumN p2 = math21_number_container_pos_check(x.size(), pos2); m21_swap(x(p1), x(p2)); } // abcde => acdbe if from = 2, to = 4 // abcde => adbce if from = 4, to = 2 template<typename VecType> void math21_operator_container_move(VecType &x, NumZ from0, NumZ to0) { MATH21_ASSERT(!x.isEmpty()); NumN n1 = x.size(); NumN from = math21_number_container_pos_check(n1, from0); NumN to = math21_number_container_pos_check(n1, to0); if(from==to){ return; } VecType s; if(from<to){ math21_operator_container_subcontainer(x, s, from+1, to); x(to) = x(from); math21_operator_container_set_partially(s, x, 0, from-1); }else{ math21_operator_container_subcontainer(x, s, to, from-1); x(to) = x(from); math21_operator_container_set_partially(s, x, 0, to); } } ////// template<typename VecType> void math21_operator_container_reverse(const VecType &x, VecType &y) { NumN n = x.size(); MATH21_ASSERT(y.size() == n); for (NumN i = 1, j = n; i <= n; ++i, --j) { y(i) = x(j); } } template<typename VecType> void math21_operator_reverse(VecType &x) { NumN n = x.size(); NumN n2 = n / 2; //#pragma omp parallel for for (NumN k = 1; k <= n2; ++k) { m21_swap(x.at(k), x.at(n + 1 - k)); } } // see math21_op_vector_concatenate template<typename VecType, typename VecType2, typename VecType3> void math21_operator_merge(const VecType &x, const VecType2 &y, VecType3 
&z) { NumN n1 = x.size(); NumN n2 = y.size(); NumN n = n1 + n2; z.setSize(n); //#pragma omp parallel for for (NumN k = 1; k <= n; ++k) { if (k <= n1) { z.at(k) = x(k); } else { z.at(k) = y(k - n1); } } } template<typename VecType> void math21_operator_container_sub_from_start(VecType &x, NumN size = 0) { MATH21_ASSERT(!x.isEmpty()) NumN n1 = x.size(); if (size == 0) { x.clear(); return; } if (n1 == size) { return; } MATH21_ASSERT(size <= n1) NumN n2 = size; VecType y; y.setSize(n2); for (NumN k = 1; k <= n2; ++k) { y.at(k) = x(k); } x.setSize(size); x.assign(y); } // now support negative index // 1 <= from <= to <= x.size(), [from , to] template<typename VecType> void math21_operator_container_subcontainer(const VecType &x, VecType &y, NumZ from, NumZ to = -1) { MATH21_ASSERT(!x.isEmpty()) NumN n1 = x.size(); from = math21_number_container_pos_check(n1, from); to = math21_number_container_pos_check(n1, to); MATH21_ASSERT(from >= 1 && from <= to && to <= n1) NumN n2 = (NumN) (to + 1 - from); if (y.size() != n2) { y.setSize(n2); } NumN offset = (NumN) (from - 1); for (NumN k = 1; k <= n2; ++k) { y.at(k) = x(offset + k); } } // Todo: change NumN to NumZ // u is max, v is number. return 0 if fail. template<template<typename> class Container, template<typename> class Container2> NumB math21_operator_container_increaseNumFromRight(const Container<NumN> &u, Container2<NumN> &v) { MATH21_ASSERT(!v.isEmpty()); MATH21_ASSERT(u.size() == v.size()); for (NumN i = v.size(); i >= 1; --i) { if (v(i) < u(i)) { v.at(i) = v(i) + 1; return 1; } else { MATH21_ASSERT(v(i) == u(i), "v(i) = " << v(i) << ", u(i) = " << u(i) << "\n"); v.at(i) = 1; } } return 0; } // Todo: change NumN to NumZ // u is max, v is number. return 0 if fail. // u is end index, start is start index. v is current index. template<template<typename> class Container, template<typename> class Container2> NumB math21_operator_container_increaseNumFromRight(const Container<NumN> &u, Container2<NumN> &v, const Container<NumN> &start) { MATH21_ASSERT(!v.isEmpty()); MATH21_ASSERT(u.size() == v.size()); MATH21_ASSERT(u.size() == start.size()); for (NumN i = v.size(); i >= 1; --i) { if (v(i) < u(i)) { v.at(i) = v(i) + 1; return 1; } else { MATH21_ASSERT(v(i) == u(i)); v.at(i) = start(i); } } return 0; } // d is shape, a is start index. // y-a(n) = sum ((x(i)-a(i))*k(i)) template<template<typename> class Container, typename VecZType, typename VecZType2> void math21_operator_number_index_1d_to_nd(VecZType &x, NumZ y, const Container<NumN> &d, const VecZType2 &a) { NumN n = d.size(); MATH21_ASSERT(!x.isEmpty() && x.size() == n) Container<NumN> k(n); k(n) = 1; NumN i; for (i = n - 1; i >= 1; --i) { k(i) = d(i + 1) * k(i + 1); } y = y - (NumZ) a(n); for (i = 1; i <= n; ++i) { x(i) = y / k(i) + a(i); y = y % k(i); } } // d is shape, a is start index. // y-a(n) = sum ((x(i)-a(i))*k(i)) template<template<typename> class Container, template<typename> class Container2, typename NumZType> void math21_operator_number_index_1d_to_tensor_nd(Container2<NumZType> &x, NumZ y, const Container<NumN> &d) { Container<NumZType> a(d.size()); a = 1; math21_operator_number_index_1d_to_nd(x, y, d, a); } // see math21_device_index_replace_inc // replace A by R where A(i) = x. 
template<typename T, template<typename> class Container, typename VecType> void math21_operator_container_replace_inc(const Container<T> &A, Container<T> &B, const VecType &R, const T &x) { MATH21_ASSERT(B.size() == A.size()) NumN j = 1; for (NumN i = 1; i <= B.size(); ++i) { if (A(i) == x) { B(i) = R(j); ++j; } else { B(i) = A(i); } } MATH21_ASSERT(j - 1 == R.size(), "j is " << j); } // replace A by R where A(i) = x. template<typename T, template<typename> class Container, typename VecType> void math21_operator_container_replace_by_same_pos(const Container<T> &A, Container<T> &B, const VecType &R, const T &x) { MATH21_ASSERT(B.size() == A.size()) MATH21_ASSERT(R.size() == A.size()) for (NumN i = 1; i <= B.size(); ++i) { if (A(i) == x) { B(i) = R(i); } else { B(i) = A(i); } } } // normal mode // number to representation from right, x, i(k) in [1, ...]. template<template<typename> class Container, template<typename> class Container2> void math21_operator_number_num_to_index_right(NumN x, Container<NumN> &i, const Container2<NumN> &d) { MATH21_ASSERT(d.size() == i.size()) x = x - 1; NumN n = d.size(); for (NumN k = n; k >= 1; --k) { i(k) = x % d(k) + 1; x = x / d(k); } } // number to representation from left, x, i(k) in [1, ...]. template<template<typename> class Container, template<typename> class Container2> void math21_operator_number_num_to_index_left(NumN x, Container<NumN> &i, const Container2<NumN> &d) { printf("Check this please! left is changed to right in code."); MATH21_ASSERT(d.size() == i.size()) x = x - 1; NumN n = d.size(); for (NumN k = 1; k <= n; ++k) { i(k) = x % d(k) + 1; x = x / d(k); } } // representation to number from right, x, i(k) in [1, ...]. template<template<typename> class Container, template<typename> class Container2> NumN math21_operator_number_index_to_num_right(const Container<NumN> &i, const Container2<NumN> &d) { MATH21_ASSERT(d.size() == i.size()) NumN x; x = 0; NumN n = d.size(); for (NumN k = 1; k <= n; ++k) { x = x * d(k) + i(k) - 1; } x = x + 1; return x; } // representation to number from left, x, i(k) in [1, ...]. template<template<typename> class Container, template<typename> class Container2> NumN math21_operator_number_index_to_num_left(const Container<NumN> &i, const Container2<NumN> &d) { MATH21_ASSERT(d.size() == i.size()) NumN x; x = 0; NumN n = d.size(); for (NumN k = n; k >= 1; --k) { x = x * d(k) + i(k) - 1; } x = x + 1; return x; } template<typename VecType, template<typename> class Container> NumN math21_operator_container_2d_size(const Container<VecType> &v) { NumN k = 0; NumN n = v.size(); for (NumN i = 1; i <= n; ++i) { k = k + v(i).size(); } return k; } template<typename VecType, template<typename> class Container> NumN math21_operator_container_2d_element_size_max(const Container<VecType> &v) { // NumN k = 0; NumN size = 0; NumN n = v.size(); for (NumN i = 1; i <= n; ++i) { if (v(i).size() > size) { // k = i; size = v(i).size(); } } return size; } template<typename VecType, template<typename> class Container> void math21_operator_container_element_setSize(Container<VecType> &v, NumN k) { NumN n = v.size(); //#pragma omp parallel for for (NumN i = 1; i <= n; ++i) { v.at(i).setSize(k); } } template<typename T> T math21_operator_number_clip(const T &x0, const NumR &min, const NumR &max) { T x = x0; if (x > max) { x = max; } else if (x < min) { x = min; } return x; } }
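The num/index conversion helpers near the end of this header encode a mixed-radix number system: num_to_index_right peels digits starting from the least-significant (rightmost) dimension, and index_to_num_right folds them back. A standalone sketch with plain std::vector and the library's 1-based convention (the free-function names here are illustrative):

#include <cstddef>
#include <cstdio>
#include <vector>

// 1-based linear index -> 1-based multi-index, least-significant digit last.
void num_to_index_right(unsigned x, std::vector<unsigned>& i,
                        const std::vector<unsigned>& d) {
  x -= 1;
  for (std::size_t k = d.size(); k >= 1; --k) {
    i[k - 1] = x % d[k - 1] + 1;
    x /= d[k - 1];
  }
}

// Inverse: 1-based multi-index -> 1-based linear index.
unsigned index_to_num_right(const std::vector<unsigned>& i,
                            const std::vector<unsigned>& d) {
  unsigned x = 0;
  for (std::size_t k = 0; k < d.size(); ++k) x = x * d[k] + i[k] - 1;
  return x + 1;
}

int main() {
  std::vector<unsigned> d = {2, 3, 4}, i(3);
  num_to_index_right(13, i, d);  // 13 -> (2, 1, 1) for shape 2x3x4
  std::printf("(%u, %u, %u) -> %u\n", i[0], i[1], i[2],
              index_to_num_right(i, d));
  return 0;
}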
adam_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <math.h> // for sqrt in CPU and CUDA #include <Eigen/Dense> #include <string> #include <unordered_map> #include <vector> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/threadpool.h" #include "paddle/fluid/operators/jit/kernels.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/platform/for_range.h" #include "paddle/fluid/platform/profiler.h" #include "paddle/pten/kernels/funcs/algorithm.h" namespace paddle { namespace operators { namespace scatter = paddle::operators::math::scatter; static inline float GetAttrFromTensor(const framework::Tensor* tensor) { const float* tensor_data = tensor->data<float>(); framework::Tensor cpu_tensor; if (platform::is_gpu_place(tensor->place())) { paddle::framework::TensorCopySync(*tensor, platform::CPUPlace(), &cpu_tensor); tensor_data = cpu_tensor.data<float>(); } if (platform::is_xpu_place(tensor->place())) { paddle::framework::TensorCopySync(*tensor, platform::CPUPlace(), &cpu_tensor); tensor_data = cpu_tensor.data<float>(); } return tensor_data[0]; } class AdamOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override; framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override; framework::OpKernelType GetKernelTypeForVar( const std::string& var_name, const framework::Tensor& tensor, const framework::OpKernelType& expected_kernel_type) const override; }; struct GPUAdam; struct CPUAdam; template <typename T, typename Flavour> class AdamFunctor; template <typename T> class AdamFunctor<T, GPUAdam> { private: T beta1_; T beta2_; T epsilon_; const T* beta1_pow_; const T* beta2_pow_; const T* moment1_; T* moment1_out_; const T* moment2_; T* moment2_out_; const T* lr_; const T* grad_; const T* param_; T* param_out_; public: AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow, const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2, T* mom2_out, const T* lr, const T* grad, const T* param, T* param_out) : beta1_(beta1), beta2_(beta2), epsilon_(epsilon), beta1_pow_(beta1_pow), beta2_pow_(beta2_pow), moment1_(mom1), moment1_out_(mom1_out), moment2_(mom2), moment2_out_(mom2_out), lr_(lr), grad_(grad), param_(param), param_out_(param_out) {} inline HOSTDEVICE void operator()(size_t i) const { // Merge all memory access together. 
T g = grad_[i]; T mom1 = moment1_[i]; T mom2 = moment2_[i]; T lr = *lr_; T beta1_pow = *beta1_pow_; T beta2_pow = *beta2_pow_; T p = param_[i]; // Calculation lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); mom1 = beta1_ * mom1 + (1 - beta1_) * g; mom2 = beta2_ * mom2 + (1 - beta2_) * g * g; p -= lr * (mom1 / (sqrt(mom2) + epsilon_ * sqrt(1 - beta2_pow))); // Write back to global memory moment1_out_[i] = mom1; moment2_out_[i] = mom2; param_out_[i] = p; } }; template <typename T> class AdamFunctor<T, CPUAdam> { private: T beta1_; T beta2_; T epsilon_; const T* beta1_pow_; const T* beta2_pow_; const T* moment1_; T* moment1_out_; const T* moment2_; T* moment2_out_; const T* lr_; const T* grad_; const T* param_; T* param_out_; public: AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow, const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2, T* mom2_out, const T* lr, const T* grad, const T* param, T* param_out) : beta1_(beta1), beta2_(beta2), epsilon_(epsilon), beta1_pow_(beta1_pow), beta2_pow_(beta2_pow), moment1_(mom1), moment1_out_(mom1_out), moment2_(mom2), moment2_out_(mom2_out), lr_(lr), grad_(grad), param_(param), param_out_(param_out) {} void operator()(size_t numel) const { Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> g{ grad_, static_cast<Eigen::Index>(numel)}; Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> mom1{ moment1_, static_cast<Eigen::Index>(numel)}; Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> mom2{ moment2_, static_cast<Eigen::Index>(numel)}; Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> param{ param_, static_cast<Eigen::Index>(numel)}; Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> param_out{ param_out_, static_cast<Eigen::Index>(numel)}; Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> moment1_out{ moment1_out_, static_cast<Eigen::Index>(numel)}; Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> moment2_out{ moment2_out_, static_cast<Eigen::Index>(numel)}; T lr = *lr_; T beta1_pow = *beta1_pow_; T beta2_pow = *beta2_pow_; // Calculation lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); moment1_out = beta1_ * mom1 + (1 - beta1_) * g; moment2_out = beta2_ * mom2 + (1 - beta2_) * g * g; param_out = param - lr * (moment1_out / (moment2_out.sqrt() + epsilon_ * sqrt(1 - beta2_pow))); } }; template <typename T, typename Flavour, typename MT = T> class SparseAdamFunctor; template <typename T, typename MT> class SparseAdamFunctor<T, GPUAdam, MT> { private: MT beta1_; MT beta2_; MT epsilon_; const MT* beta1_pow_; const MT* beta2_pow_; const MT* moment1_; MT* moment1_out_; const MT* moment2_; MT* moment2_out_; const MT* lr_; const T* grad_; const T* param_; T* param_out_; const MT* master_param_; MT* master_param_out_; const int64_t* rows_; int64_t row_numel_; int64_t row_count_; bool lazy_mode_; public: SparseAdamFunctor(MT beta1, MT beta2, MT epsilon, const MT* beta1_pow, const MT* beta2_pow, const MT* mom1, MT* mom1_out, const MT* mom2, MT* mom2_out, const MT* lr, const T* grad, const T* param, T* param_out, const MT* master_param, MT* master_param_out, const int64_t* rows, int64_t row_numel, int64_t row_count, bool lazy_mode) : beta1_(beta1), beta2_(beta2), epsilon_(epsilon), beta1_pow_(beta1_pow), beta2_pow_(beta2_pow), moment1_(mom1), moment1_out_(mom1_out), moment2_(mom2), moment2_out_(mom2_out), lr_(lr), grad_(grad), param_(param), param_out_(param_out), master_param_(master_param), master_param_out_(master_param_out), rows_(rows), row_numel_(row_numel), row_count_(row_count), lazy_mode_(lazy_mode) {} inline HOSTDEVICE void adam_update(size_t i, 
MT g) const { // The following code is the same as dense MT mom1 = moment1_[i]; MT mom2 = moment2_[i]; MT lr = *lr_; MT beta1_pow = *beta1_pow_; MT beta2_pow = *beta2_pow_; MT p = master_param_ ? master_param_[i] : static_cast<MT>(param_[i]); // Calculation lr *= sqrt(static_cast<MT>(1.0) - beta2_pow) / (static_cast<MT>(1.0) - beta1_pow); mom1 = beta1_ * mom1 + (static_cast<MT>(1.0) - beta1_) * g; mom2 = beta2_ * mom2 + (static_cast<MT>(1.0) - beta2_) * g * g; p -= lr * (mom1 / (sqrt(mom2) + epsilon_ * sqrt(static_cast<MT>(1.0) - beta2_pow))); // Write back to global memory moment1_out_[i] = mom1; moment2_out_[i] = mom2; param_out_[i] = static_cast<T>(p); if (master_param_out_) { master_param_out_[i] = p; } } inline HOSTDEVICE void operator()(size_t i) const { auto row_idx = pten::funcs::BinarySearch<int64_t>(rows_, row_count_, i / row_numel_); if (lazy_mode_ && row_idx < 0) { return; } else { MT g = row_idx >= 0 ? static_cast<MT>(grad_[row_idx * row_numel_ + i % row_numel_]) : static_cast<MT>(0); adam_update(i, g); } } }; template <typename T> class SparseAdamFunctor<T, CPUAdam, T> { private: T beta1_; T beta2_; T epsilon_; const T* beta1_pow_; const T* beta2_pow_; const T* moment1_; T* moment1_out_; const T* moment2_; T* moment2_out_; const T* lr_; const T* grad_; const T* param_; T* param_out_; const int64_t* rows_; int64_t row_numel_; int64_t row_count_; public: SparseAdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow, const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2, T* mom2_out, const T* lr, const T* grad, const T* param, T* param_out, const int64_t* rows, int64_t row_numel, int64_t row_count, bool lazy_mode) : beta1_(beta1), beta2_(beta2), epsilon_(epsilon), beta1_pow_(beta1_pow), beta2_pow_(beta2_pow), moment1_(mom1), moment1_out_(mom1_out), moment2_(mom2), moment2_out_(mom2_out), lr_(lr), grad_(grad), param_(param), param_out_(param_out), rows_(rows), row_numel_(row_numel), row_count_(row_count) {} inline HOSTDEVICE void adam_update(size_t i, T g) const { // The following code is the same as dense T mom1 = moment1_[i]; T mom2 = moment2_[i]; T lr = *lr_; T beta1_pow = *beta1_pow_; T beta2_pow = *beta2_pow_; T p = param_[i]; // Calculation lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); mom1 = beta1_ * mom1 + (1 - beta1_) * g; mom2 = beta2_ * mom2 + (1 - beta2_) * g * g; p -= lr * (mom1 / (sqrt(mom2) + epsilon_ * sqrt(1 - beta2_pow))); // Write back to global memory moment1_out_[i] = mom1; moment2_out_[i] = mom2; param_out_[i] = p; } inline void operator()(size_t numel) const { // lr could be reuse T lr = *lr_; T beta1_pow = *beta1_pow_; T beta2_pow = *beta2_pow_; lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); int64_t row_count = static_cast<int64_t>(numel / row_numel_); for (int64_t i = 0, j = 0; i != row_count; ++i) { if (i == *(rows_ + j)) { for (int64_t k = 0; k != row_numel_; ++k) { T g = grad_[j * row_numel_ + k]; adam_update(i * row_numel_ + k, g); } ++j; } else { for (int64_t k = 0; k != row_numel_; ++k) { T mom1 = moment1_[i * row_numel_ + k]; T mom2 = moment2_[i * row_numel_ + k]; T p = param_[i * row_numel_ + k]; mom1 = beta1_ * mom1; mom2 = beta2_ * mom2; p -= lr * (mom1 / (sqrt(mom2) + epsilon_)); // Write back to global memory moment1_out_[i * row_numel_ + k] = mom1; moment2_out_[i * row_numel_ + k] = mom2; param_out_[i * row_numel_ + k] = p; } } } } }; template <typename DeviceContext, typename T> class AdamOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const auto* param_var = 
ctx.InputVar("Param"); PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true, platform::errors::InvalidArgument( "The Var(%s)'s type should be LoDTensor, " "but the received is %s", ctx.InputNames("Param").front(), framework::ToTypeName(param_var->Type()))); using paddle::framework::LoDTensor; int64_t min_row_size_to_use_multithread = ctx.Attr<int64_t>("min_row_size_to_use_multithread"); bool lazy_mode = ctx.Attr<bool>("lazy_mode"); bool use_global_beta_pow = ctx.Attr<bool>("use_global_beta_pow"); VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow; auto* param = ctx.Input<LoDTensor>("Param"); auto* grad_var = ctx.InputVar("Grad"); auto* mom1 = ctx.Input<LoDTensor>("Moment1"); auto* mom2 = ctx.Input<LoDTensor>("Moment2"); auto* lr = ctx.Input<LoDTensor>("LearningRate"); auto* beta1_pow = ctx.Input<LoDTensor>("Beta1Pow"); auto* beta2_pow = ctx.Input<LoDTensor>("Beta2Pow"); auto* param_out = ctx.Output<LoDTensor>("ParamOut"); auto* mom1_out = ctx.Output<LoDTensor>("Moment1Out"); auto* mom2_out = ctx.Output<LoDTensor>("Moment2Out"); auto* beta1_pow_out = ctx.Output<LoDTensor>("Beta1PowOut"); auto* beta2_pow_out = ctx.Output<LoDTensor>("Beta2PowOut"); bool skip_update = false; if (ctx.HasInput("SkipUpdate")) { auto* skip_update_tensor = ctx.Input<framework::Tensor>("SkipUpdate"); PADDLE_ENFORCE_EQ(skip_update_tensor->numel(), 1, platform::errors::InvalidArgument( "Input(SkipUpdate) size must be 1, but get %d", skip_update_tensor->numel())); std::vector<bool> skip_update_vec; paddle::framework::TensorToVector(*skip_update_tensor, ctx.device_context(), &skip_update_vec); skip_update = skip_update_vec[0]; } // skip_update=true, just copy input to output, and TensorCopy will call // mutable_data if (skip_update) { VLOG(4) << "Adam skip update"; framework::TensorCopy( *param, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), param_out); framework::TensorCopy( *mom1, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), mom1_out); framework::TensorCopy( *mom2, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), mom2_out); framework::TensorCopy( *beta1_pow, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), beta1_pow_out); framework::TensorCopy( *beta2_pow, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), beta2_pow_out); return; } T beta1 = static_cast<T>(ctx.Attr<float>("beta1")); if (ctx.HasInput("Beta1Tensor")) { auto* beta1_tensor = ctx.Input<framework::Tensor>("Beta1Tensor"); PADDLE_ENFORCE_EQ(beta1_tensor->numel(), 1, platform::errors::InvalidArgument( "Input(Beta1Tensor) size must be 1, but get %d", beta1_tensor->numel())); beta1 = static_cast<T>(GetAttrFromTensor(beta1_tensor)); } T beta2 = static_cast<T>(ctx.Attr<float>("beta2")); if (ctx.HasInput("Beta2Tensor")) { auto* beta2_tensor = ctx.Input<framework::Tensor>("Beta2Tensor"); PADDLE_ENFORCE_EQ(beta2_tensor->numel(), 1, platform::errors::InvalidArgument( "Input(Beta2Tensor) size must be 1, but get %d", beta2_tensor->numel())); beta2 = static_cast<T>(GetAttrFromTensor(beta2_tensor)); } T epsilon = static_cast<T>(ctx.Attr<float>("epsilon")); if (ctx.HasInput("EpsilonTensor")) { auto* epsilon_tensor = ctx.Input<framework::Tensor>("EpsilonTensor"); PADDLE_ENFORCE_EQ(epsilon_tensor->numel(), 1, platform::errors::InvalidArgument( "Input(EpsilonTensor) size must be 1, but get %d", epsilon_tensor->numel())); epsilon = static_cast<T>(GetAttrFromTensor(epsilon_tensor)); } VLOG(3) << "beta1_pow.numel() : " << beta1_pow->numel() << 
"beta2_pow.numel() : " << beta2_pow->numel(); VLOG(3) << "param.numel(): " << param->numel(); PADDLE_ENFORCE_EQ(beta1_pow_out->numel(), 1, platform::errors::InvalidArgument( "beta1 pow output size should be 1, but received " "value is:%d.", beta1_pow_out->numel())); PADDLE_ENFORCE_EQ(beta2_pow_out->numel(), 1, platform::errors::InvalidArgument( "beta2 pow output size should be 1, but received " "value is:%d.", beta2_pow_out->numel())); if (grad_var->IsType<framework::LoDTensor>()) { T beta1_p = beta1_pow->data<T>()[0]; T beta2_p = beta2_pow->data<T>()[0]; if (!use_global_beta_pow) { beta1_pow_out->mutable_data<T>(ctx.GetPlace())[0] = beta1 * beta1_pow->data<T>()[0]; beta2_pow_out->mutable_data<T>(ctx.GetPlace())[0] = beta2 * beta2_pow->data<T>()[0]; } auto* grad = ctx.Input<LoDTensor>("Grad"); T* param_out_ptr = param_out->mutable_data<T>(ctx.GetPlace()); T* mom1_out_ptr = mom1_out->mutable_data<T>(ctx.GetPlace()); T* mom2_out_ptr = mom2_out->mutable_data<T>(ctx.GetPlace()); T learning_rate = lr->data<T>()[0] * (sqrt(1 - beta2_p) / (1 - beta1_p)); T eps = epsilon * sqrt(1 - beta2_p); jit::adam_attr_t attr(beta1, beta2); int64_t numel = param->numel(); const T* param_ptr = param->data<T>(); const T* mom1_ptr = mom1->data<T>(); const T* mom2_ptr = mom2->data<T>(); const T* grad_ptr = grad->data<T>(); auto adam = jit::KernelFuncs<jit::AdamTuple<T>, platform::CPUPlace>::Cache().At( attr); static constexpr int64_t chunk_size = 512; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int64_t i = 0; i < numel / chunk_size; ++i) { const int64_t offset = i * chunk_size; adam(beta1, beta2, -learning_rate, eps, chunk_size, grad_ptr + offset, mom1_ptr + offset, mom2_ptr + offset, param_ptr + offset, mom1_out_ptr + offset, mom2_out_ptr + offset, param_out_ptr + offset); } if (numel % chunk_size != 0) { const int64_t offset = (numel / chunk_size) * chunk_size; const int64_t tail_numel = numel % chunk_size; adam(beta1, beta2, -learning_rate, eps, tail_numel, grad_ptr + offset, mom1_ptr + offset, mom2_ptr + offset, param_ptr + offset, mom1_out_ptr + offset, mom2_out_ptr + offset, param_out_ptr + offset); } } else if (grad_var->IsType<pten::SelectedRows>()) { auto* grad = ctx.Input<pten::SelectedRows>("Grad"); if (grad->rows().size() == 0) { VLOG(3) << "grad row size is 0!!"; return; } std::vector<int64_t> cpu_rows(grad->rows().begin(), grad->rows().end()); bool is_strict_sorted = true; for (size_t i = 1; i < cpu_rows.size(); ++i) { if (cpu_rows[i - 1] >= cpu_rows[i]) { is_strict_sorted = false; break; } } pten::SelectedRows tmp_grad_merge; const pten::SelectedRows* grad_merge_ptr; if (is_strict_sorted) { grad_merge_ptr = grad; } else { // merge duplicated rows if any. 
// The rows of grad_merge have been sorted inside MergeAdd functor scatter::MergeAdd<DeviceContext, T> merge_func; merge_func(ctx.template device_context<DeviceContext>(), *grad, &tmp_grad_merge, true); grad_merge_ptr = &tmp_grad_merge; } auto& grad_merge = *grad_merge_ptr; auto& grad_tensor = grad_merge.value(); const T* grad_data = grad_tensor.template data<T>(); const int64_t* rows = grad_merge.rows().Data(ctx.GetPlace()); auto row_numel = grad_tensor.numel() / grad_merge.rows().size(); SparseAdamFunctor<T, CPUAdam> functor( beta1, beta2, epsilon, beta1_pow->data<T>(), beta2_pow->data<T>(), mom1->data<T>(), mom1_out->mutable_data<T>(ctx.GetPlace()), mom2->data<T>(), mom2_out->mutable_data<T>(ctx.GetPlace()), lr->data<T>(), grad_data, param->data<T>(), param_out->mutable_data<T>(ctx.GetPlace()), rows, row_numel, grad_merge.rows().size(), lazy_mode); // update beta1 and beta2 if (!use_global_beta_pow) { beta1_pow_out->mutable_data<T>(ctx.GetPlace())[0] = beta1 * beta1_pow->data<T>()[0]; beta2_pow_out->mutable_data<T>(ctx.GetPlace())[0] = beta2 * beta2_pow->data<T>()[0]; } if (lazy_mode) { VLOG(3) << "run cpu lazy mode"; size_t row_count = grad_merge.rows().size(); std::vector<int64_t> cpu_rows(grad_merge.rows()); for (size_t row_index = 0; row_index < row_count; ++row_index) { for (size_t offset = 0; offset < row_numel; ++offset) { size_t i = cpu_rows[row_index] * row_numel + offset; functor.adam_update(i, grad_data[row_index * row_numel + offset]); } } } #ifndef _WIN32 else if (FLAGS_inner_op_parallelism > 1 && // NOLINT min_row_size_to_use_multithread > 0 && param->dims()[0] > min_row_size_to_use_multithread) { VLOG(3) << "use multi thread, inner_op_parallelism=" << FLAGS_inner_op_parallelism << " min_row_size_to_use_multithread=" << min_row_size_to_use_multithread; if (FLAGS_inner_op_parallelism > 10) { VLOG(1) << "FLAGS_inner_op_parallelism " << FLAGS_inner_op_parallelism << " is too large!"; } auto& grad_rows = grad_merge.rows(); std::unordered_map<size_t, int> row_id_to_grad_row_offset; size_t param_row_count = param->numel() / row_numel; if (param_row_count < 1000) { VLOG(1) << "param_row_count should be larger than 1000 to use " "multi thread, currently " << param_row_count; } for (size_t i = 0; i < grad_rows.size(); ++i) { row_id_to_grad_row_offset[grad_rows[i]] = i; } std::vector<std::future<void>> fs; int64_t line_in_each_thread = param_row_count / FLAGS_inner_op_parallelism + 1; for (int i = 0; i < FLAGS_inner_op_parallelism; ++i) { int64_t start = i * line_in_each_thread; int64_t end = (i + 1) * line_in_each_thread; if (start >= static_cast<int64_t>(param_row_count)) { break; } if (end > static_cast<int64_t>(param_row_count)) { end = static_cast<int64_t>(param_row_count); } fs.push_back(framework::Async([&functor, &row_id_to_grad_row_offset, &grad_data, row_numel, start, end]() { for (int64_t row_id = start; row_id < end; ++row_id) { auto iter = row_id_to_grad_row_offset.find(row_id); if (iter != row_id_to_grad_row_offset.end()) { for (size_t row_offset = 0U; row_offset < row_numel; ++row_offset) { functor.adam_update( row_id * row_numel + row_offset, grad_data[iter->second * row_numel + row_offset]); } } else { for (size_t row_offset = 0U; row_offset < row_numel; ++row_offset) { functor.adam_update(row_id * row_numel + row_offset, 0); } } } })); } for (size_t i = 0; i < fs.size(); ++i) fs[i].wait(); } #endif // !_WIN32 else { // NOLINT functor(param->numel()); } } else { PADDLE_THROW(platform::errors::InvalidArgument( "Variable type not supported by adam_op")); } } }; } //
namespace operators } // namespace paddle
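All of the functors above share one scalar recurrence; the GPU, CPU, and sparse variants differ only in how elements are gathered and vectorized. A standalone sketch of that per-element step (the free function is illustrative), written in the same reformulated shape the kernels use, with the learning rate pre-scaled by sqrt(1-beta2^t)/(1-beta1^t) and epsilon scaled by sqrt(1-beta2^t):

#include <cmath>
#include <cstdio>

// One Adam step for a single parameter p with gradient g.
void adam_step(double g, double& mom1, double& mom2, double& p,
               double lr, double beta1, double beta2, double epsilon,
               double beta1_pow, double beta2_pow) {
  lr *= std::sqrt(1 - beta2_pow) / (1 - beta1_pow);
  mom1 = beta1 * mom1 + (1 - beta1) * g;
  mom2 = beta2 * mom2 + (1 - beta2) * g * g;
  p -= lr * (mom1 / (std::sqrt(mom2) + epsilon * std::sqrt(1 - beta2_pow)));
}

int main() {
  double mom1 = 0.0, mom2 = 0.0, p = 1.0;
  adam_step(0.1, mom1, mom2, p, 1e-3, 0.9, 0.999, 1e-8, 0.9, 0.999);
  std::printf("p = %.9f\n", p); // first step moves p by roughly lr
  return 0;
}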
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; /* default to 0 so missing command-line arguments cannot leave the sizes uninitialized */ int Nx = 0, Ny = 0, Nz = 0, Nt = 0; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); /* allocate roc2 once; the original code first allocated a single element here and then leaked it */ double ***roc2 = (double ***) malloc(sizeof(double**)*Nz); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 8; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables, from index 0: the stencil's -4 offsets reach the boundary planes // srand(42); for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { for (k = 0; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); /* the first update also reads the second time level, so it must be initialized */ A[1][i][j][k] = 0.0; roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=2*Nt-2;t1++) { lbp=ceild(t1+2,2); ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1,4),ceild(4*t2-Nz+5,8));t3<=min(min(floord(4*Nt+Ny-9,8),floord(2*t1+Ny-3,8)),floord(4*t2+Ny-9,8));t3++) { for (t4=max(max(ceild(t1-124,128),ceild(4*t2-Nz-243,256)),ceild(8*t3-Ny-243,256));t4<=min(min(min(floord(4*Nt+Nx-9,256),floord(2*t1+Nx-3,256)),floord(4*t2+Nx-9,256)),floord(8*t3+Nx-5,256));t4++) { for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(256*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) { for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(256*t4,4*t5+4); ubv=min(256*t4+255,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ 
(-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
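The time-skewed, tiled loop nest generated by PLUTO/CLooG above is hard to read; the computation it performs is the plain 25-point wave-equation update (center plus six neighbors at each radius 1 through 4). An untiled reference sketch on a small flat array, for comparison (dimensions and values are hypothetical):

#include <cstdio>
#include <vector>

int main() {
  const int N = 12; // small cube with the required halo of 4 on each side
  const double c[5] = {-0.28472, 0.16000, -0.02000, 0.00254, -0.00018};
  std::vector<double> prev(N * N * N, 1.0), cur(N * N * N, 1.0),
                      next(N * N * N, 0.0), roc2(N * N * N, 2.0);
  auto at = [N](int i, int j, int k) { return (i * N + j) * N + k; };
  for (int i = 4; i < N - 4; i++)
    for (int j = 4; j < N - 4; j++)
      for (int k = 4; k < N - 4; k++) {
        double s = c[0] * cur[at(i, j, k)];
        for (int r = 1; r <= 4; r++) // 6 neighbors per radius, 25 points total
          s += c[r] * (cur[at(i - r, j, k)] + cur[at(i + r, j, k)] +
                       cur[at(i, j - r, k)] + cur[at(i, j + r, k)] +
                       cur[at(i, j, k - r)] + cur[at(i, j, k + r)]);
        // Second order in time: next = 2*cur - prev + roc2 * laplacian term.
        next[at(i, j, k)] = 2.0 * cur[at(i, j, k)] - prev[at(i, j, k)]
                          + roc2[at(i, j, k)] * s;
      }
  std::printf("center = %f\n", next[at(N / 2, N / 2, N / 2)]);
  return 0;
}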
convolution_1x1.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON #include "mat.h" namespace ncnn { static void conv1x1s1_sgemm_transform_kernel_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch) { const float* kernel = _kernel; // interleave #if __ARM_NEON && __aarch64__ kernel_tm.create(4 * 8, inch / 4 + inch % 4, outch / 8 + (outch % 8) / 4 + outch % 4); #else kernel_tm.create(4 * 4, inch / 4 + inch % 4, outch / 4 + outch % 4); #endif // __ARM_NEON && __aarch64__ int p = 0; #if __ARM_NEON && __aarch64__ for (; p + 7 < outch; p += 8) { const float* kernel0 = kernel + (p + 0)*inch; const float* kernel1 = kernel + (p + 1)*inch; const float* kernel2 = kernel + (p + 2)*inch; const float* kernel3 = kernel + (p + 3)*inch; const float* kernel4 = kernel + (p + 4)*inch; const float* kernel5 = kernel + (p + 5)*inch; const float* kernel6 = kernel + (p + 6)*inch; const float* kernel7 = kernel + (p + 7)*inch; float* ktmp = kernel_tm.channel(p / 8); for (int q = 0; q < inch; q++) { // kernel0...7 0 ktmp[0] = kernel0[0]; ktmp[1] = kernel1[0]; ktmp[2] = kernel2[0]; ktmp[3] = kernel3[0]; ktmp[4] = kernel4[0]; ktmp[5] = kernel5[0]; ktmp[6] = kernel6[0]; ktmp[7] = kernel7[0]; ktmp += 8; kernel0 += 1; kernel1 += 1; kernel2 += 1; kernel3 += 1; kernel4 += 1; kernel5 += 1; kernel6 += 1; kernel7 += 1; } } #endif // __ARM_NEON && __aarch64__ for (; p + 3 < outch; p += 4) { const float* kernel0 = kernel + (p + 0)*inch; const float* kernel1 = kernel + (p + 1)*inch; const float* kernel2 = kernel + (p + 2)*inch; const float* kernel3 = kernel + (p + 3)*inch; #if __ARM_NEON && __aarch64__ float* ktmp = kernel_tm.channel(p / 8 + (p % 8) / 4); #else float* ktmp = kernel_tm.channel(p / 4); #endif // __ARM_NEON && __aarch64__ for (int q = 0; q < inch; q++) { // kernel0...3 0 ktmp[0] = kernel0[0]; ktmp[1] = kernel1[0]; ktmp[2] = kernel2[0]; ktmp[3] = kernel3[0]; ktmp += 4; kernel0 += 1; kernel1 += 1; kernel2 += 1; kernel3 += 1; } } for (; p < outch; p++) { const float* kernel0 = kernel + p*inch; #if __ARM_NEON && __aarch64__ float* ktmp = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4); #else float* ktmp = kernel_tm.channel(p / 4 + p % 4); #endif // __ARM_NEON && __aarch64__ for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[0]; ktmp++; kernel0++; } } } static void conv1x1s1_sgemm_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; const int size = w * h; const float* bias = _bias; // interleave Mat tmp(8 * 4, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + size % 4, 4u); { int nn_size = size >> 3; int remain_size_start = nn_size << 3; //#pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const float* img0 = bottom_blob.channel(0); img0 += i; 
float* tmpptr = tmp.channel(i / 8); for (int q = 0; q < inch; q++) { #if __ARM_NEON #if __aarch64__ vst1q_f32(tmpptr, vld1q_f32(img0)); vst1q_f32(tmpptr + 4, vld1q_f32(img0 + 4)); tmpptr += 8; img0 += bottom_blob.cstep; #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0 :128] \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1" ); img0 += bottom_blob.cstep; #endif // __aarch64__ #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; tmpptr += 8; img0 += bottom_blob.cstep; #endif // __ARM_NEON } } nn_size = (size - remain_size_start) >> 2; //#pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const float* img0 = bottom_blob.channel(0); img0 += i; float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q < inch; q++) { #if __ARM_NEON #if __aarch64__ vst1q_f32(tmpptr, vld1q_f32(img0)); tmpptr += 4; img0 += bottom_blob.cstep; #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0" ); img0 += bottom_blob.cstep; #endif // __aarch64__ #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; #endif // __ARM_NEON } } remain_size_start += nn_size << 2; //#pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const float* img0 = bottom_blob.channel(0); img0 += i; float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr++; img0 += bottom_blob.cstep; } } } int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; //#pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; float* outptr0 = top_blob.channel(p); float* outptr1 = top_blob.channel(p + 1); float* outptr2 = top_blob.channel(p + 2); float* outptr3 = top_blob.channel(p + 3); float* outptr4 = top_blob.channel(p + 4); float* outptr5 = top_blob.channel(p + 5); float* outptr6 = top_blob.channel(p + 6); float* outptr7 = top_blob.channel(p + 7); const float zeros[8] = { 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f }; const float* biasptr = bias ? 
bias + p : zeros; int i = 0; for (; i + 7 < size; i += 8) { const float* tmpptr = tmp.channel(i / 8); const float* kptr = kernel.channel(p / 8); asm volatile( "ld1 {v0.4s, v1.4s}, [%20] \n" "dup v16.4s, v0.s[0] \n" "dup v17.4s, v0.s[0] \n" "dup v18.4s, v0.s[1] \n" "dup v19.4s, v0.s[1] \n" "dup v20.4s, v0.s[2] \n" "dup v21.4s, v0.s[2] \n" "dup v22.4s, v0.s[3] \n" "dup v23.4s, v0.s[3] \n" "dup v24.4s, v1.s[0] \n" "dup v25.4s, v1.s[0] \n" "dup v26.4s, v1.s[1] \n" "dup v27.4s, v1.s[1] \n" "dup v28.4s, v1.s[2] \n" "dup v29.4s, v1.s[2] \n" "dup v30.4s, v1.s[3] \n" "dup v31.4s, v1.s[3] \n" // inch loop "lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v18.4s, v8.4s, v0.s[1] \n" "fmla v20.4s, v8.4s, v0.s[2] \n" "fmla v22.4s, v8.4s, v0.s[3] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "fmla v21.4s, v9.4s, v0.s[2] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "fmla v24.4s, v8.4s, v1.s[0] \n" "fmla v26.4s, v8.4s, v1.s[1] \n" "fmla v28.4s, v8.4s, v1.s[2] \n" "fmla v30.4s, v8.4s, v1.s[3] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v27.4s, v9.4s, v1.s[1] \n" "fmla v29.4s, v9.4s, v1.s[2] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n" "fmla v16.4s, v10.4s, v2.s[0] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v20.4s, v10.4s, v2.s[2] \n" "fmla v22.4s, v10.4s, v2.s[3] \n" "fmla v17.4s, v11.4s, v2.s[0] \n" "fmla v19.4s, v11.4s, v2.s[1] \n" "fmla v21.4s, v11.4s, v2.s[2] \n" "fmla v23.4s, v11.4s, v2.s[3] \n" "fmla v24.4s, v10.4s, v3.s[0] \n" "fmla v26.4s, v10.4s, v3.s[1] \n" "fmla v28.4s, v10.4s, v3.s[2] \n" "fmla v30.4s, v10.4s, v3.s[3] \n" "fmla v25.4s, v11.4s, v3.s[0] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v29.4s, v11.4s, v3.s[2] \n" "fmla v31.4s, v11.4s, v3.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "fmla v16.4s, v12.4s, v4.s[0] \n" "fmla v18.4s, v12.4s, v4.s[1] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v22.4s, v12.4s, v4.s[3] \n" "fmla v17.4s, v13.4s, v4.s[0] \n" "fmla v19.4s, v13.4s, v4.s[1] \n" "fmla v21.4s, v13.4s, v4.s[2] \n" "fmla v23.4s, v13.4s, v4.s[3] \n" "fmla v24.4s, v12.4s, v5.s[0] \n" "fmla v26.4s, v12.4s, v5.s[1] \n" "fmla v28.4s, v12.4s, v5.s[2] \n" "fmla v30.4s, v12.4s, v5.s[3] \n" "fmla v25.4s, v13.4s, v5.s[0] \n" "fmla v27.4s, v13.4s, v5.s[1] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v31.4s, v13.4s, v5.s[3] \n" "subs w4, w4, #1 \n" "fmla v16.4s, v14.4s, v6.s[0] \n" "fmla v18.4s, v14.4s, v6.s[1] \n" "fmla v20.4s, v14.4s, v6.s[2] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v17.4s, v15.4s, v6.s[0] \n" "fmla v19.4s, v15.4s, v6.s[1] \n" "fmla v21.4s, v15.4s, v6.s[2] \n" "fmla v23.4s, v15.4s, v6.s[3] \n" "fmla v24.4s, v14.4s, v7.s[0] \n" "fmla v26.4s, v14.4s, v7.s[1] \n" "fmla v28.4s, v14.4s, v7.s[2] \n" "fmla v30.4s, v14.4s, v7.s[3] \n" "fmla v25.4s, v15.4s, v7.s[0] \n" "fmla v27.4s, v15.4s, v7.s[1] \n" "fmla v29.4s, v15.4s, v7.s[2] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w21, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v8.4s, v9.4s}, [%8], #32 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v18.4s, v8.4s, v0.s[1] \n" "fmla v20.4s, v8.4s, v0.s[2] \n" "fmla 
v22.4s, v8.4s, v0.s[3] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "fmla v21.4s, v9.4s, v0.s[2] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v24.4s, v8.4s, v1.s[0] \n" "fmla v26.4s, v8.4s, v1.s[1] \n" "fmla v28.4s, v8.4s, v1.s[2] \n" "fmla v30.4s, v8.4s, v1.s[3] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v27.4s, v9.4s, v1.s[1] \n" "fmla v29.4s, v9.4s, v1.s[2] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0], #32 \n" "st1 {v18.4s, v19.4s}, [%1], #32 \n" "st1 {v20.4s, v21.4s}, [%2], #32 \n" "st1 {v22.4s, v23.4s}, [%3], #32 \n" "st1 {v24.4s, v25.4s}, [%4], #32 \n" "st1 {v26.4s, v27.4s}, [%5], #32 \n" "st1 {v28.4s, v29.4s}, [%6], #32 \n" "st1 {v30.4s, v31.4s}, [%7], #32 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(biasptr), // %20 "r"(inch) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } for (; i + 3 < size; i += 4) { const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const float* kptr = kernel.channel(p / 8); asm volatile( "ld1 {v0.4s, v1.4s}, [%20] \n" "dup v16.4s, v0.s[0] \n" "dup v17.4s, v0.s[1] \n" "dup v18.4s, v0.s[2] \n" "dup v19.4s, v0.s[3] \n" "dup v20.4s, v1.s[0] \n" "dup v21.4s, v1.s[1] \n" "dup v22.4s, v1.s[2] \n" "dup v23.4s, v1.s[3] \n" // inch loop "lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "fmla v16.4s, v9.4s, v2.s[0] \n" "fmla v17.4s, v9.4s, v2.s[1] \n" "fmla v18.4s, v9.4s, v2.s[2] \n" "fmla v19.4s, v9.4s, v2.s[3] \n" "fmla v20.4s, v9.4s, v3.s[0] \n" "fmla v21.4s, v9.4s, v3.s[1] \n" "fmla v22.4s, v9.4s, v3.s[2] \n" "fmla v23.4s, v9.4s, v3.s[3] \n" "subs w4, w4, #1 \n" "fmla v16.4s, v10.4s, v4.s[0] \n" "fmla v17.4s, v10.4s, v4.s[1] \n" "fmla v18.4s, v10.4s, v4.s[2] \n" "fmla v19.4s, v10.4s, v4.s[3] \n" "fmla v20.4s, v10.4s, v5.s[0] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v5.s[2] \n" "fmla v23.4s, v10.4s, v5.s[3] \n" "fmla v16.4s, v11.4s, v6.s[0] \n" "fmla v17.4s, v11.4s, v6.s[1] \n" "fmla v18.4s, v11.4s, v6.s[2] \n" "fmla v19.4s, v11.4s, v6.s[3] \n" "fmla v20.4s, v11.4s, v7.s[0] \n" "fmla v21.4s, v11.4s, v7.s[1] \n" "fmla v22.4s, v11.4s, v7.s[2] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w21, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v8.4s}, [%8], #16 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, 
v8.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "bne 2b \n" "3: \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "st1 {v20.4s}, [%4], #16 \n" "st1 {v21.4s}, [%5], #16 \n" "st1 {v22.4s}, [%6], #16 \n" "st1 {v23.4s}, [%7], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(biasptr), // %20 "r"(inch) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); } for (; i < size; i++) { const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const float* kptr = kernel.channel(p / 8); asm volatile( "ld1 {v24.4s, v25.4s}, [%20] \n" // inch loop "lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v8.4s}, [%8], #16 \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v16.4s, v0.4s, v8.s[0] \n" "fmla v17.4s, v1.4s, v8.s[0] \n" "fmla v18.4s, v2.4s, v8.s[1] \n" "fmla v19.4s, v3.4s, v8.s[1] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "subs w4, w4, #1 \n" "fmla v20.4s, v4.4s, v8.s[2] \n" "fmla v21.4s, v5.4s, v8.s[2] \n" "fmla v22.4s, v6.4s, v8.s[3] \n" "fmla v23.4s, v7.4s, v8.s[3] \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "fadd v16.4s, v16.4s, v20.4s \n" "fadd v17.4s, v17.4s, v21.4s \n" "fadd v24.4s, v24.4s, v16.4s \n" "fadd v25.4s, v25.4s, v17.4s \n" "1: \n" // remain loop "and w4, %w21, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #32] \n" "ld1r {v8.4s}, [%8], #4 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "subs w4, w4, #1 \n" "fmla v24.4s, v8.4s, v0.4s \n" "fmla v25.4s, v8.4s, v1.4s \n" "bne 2b \n" "3: \n" "st1 {v24.s}[0],[%0], #4 \n" "st1 {v24.s}[1],[%1], #4 \n" "st1 {v24.s}[2],[%2], #4 \n" "st1 {v24.s}[3],[%3], #4 \n" "st1 {v25.s}[0],[%4], #4 \n" "st1 {v25.s}[1],[%5], #4 \n" "st1 {v25.s}[2],[%6], #4 \n" "st1 {v25.s}[3],[%7], #4 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(biasptr), // %20 "r"(inch) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25" ); } } #endif // __ARM_NEON && __aarch64__ nn_outch = (outch - remain_outch_start) >> 2; //#pragma omp parallel for 
num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; float* outptr0 = top_blob.channel(p); float* outptr1 = top_blob.channel(p + 1); float* outptr2 = top_blob.channel(p + 2); float* outptr3 = top_blob.channel(p + 3); const float zeros[4] = { 0.f, 0.f, 0.f, 0.f }; const float* biasptr = bias ? bias + p : zeros; int i = 0; for (; i + 7 < size; i += 8) { const float* tmpptr = tmp.channel(i / 8); #if __ARM_NEON && __aarch64__ const float* kptr = kernel.channel(p / 8 + (p % 8) / 4); #else const float* kptr = kernel.channel(p / 4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%12] \n" "dup v8.4s, v0.s[0] \n" "dup v9.4s, v0.s[0] \n" "dup v10.4s, v0.s[1] \n" "dup v11.4s, v0.s[1] \n" "dup v12.4s, v0.s[2] \n" "dup v13.4s, v0.s[2] \n" "dup v14.4s, v0.s[3] \n" "dup v15.4s, v0.s[3] \n" // inch loop "lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v10.4s, v4.4s, v0.s[1] \n" "fmla v12.4s, v4.4s, v0.s[2] \n" "fmla v14.4s, v4.4s, v0.s[3] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v11.4s, v5.4s, v0.s[1] \n" "fmla v13.4s, v5.4s, v0.s[2] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v10.4s, v6.4s, v1.s[1] \n" "fmla v12.4s, v6.4s, v1.s[2] \n" "fmla v14.4s, v6.4s, v1.s[3] \n" "fmla v9.4s, v7.4s, v1.s[0] \n" "fmla v11.4s, v7.4s, v1.s[1] \n" "fmla v13.4s, v7.4s, v1.s[2] \n" "fmla v15.4s, v7.4s, v1.s[3] \n" "subs w4, w4, #1 \n" "fmla v8.4s, v16.4s, v2.s[0] \n" "fmla v10.4s, v16.4s, v2.s[1] \n" "fmla v12.4s, v16.4s, v2.s[2] \n" "fmla v14.4s, v16.4s, v2.s[3] \n" "fmla v9.4s, v17.4s, v2.s[0] \n" "fmla v11.4s, v17.4s, v2.s[1] \n" "fmla v13.4s, v17.4s, v2.s[2] \n" "fmla v15.4s, v17.4s, v2.s[3] \n" "fmla v8.4s, v18.4s, v3.s[0] \n" "fmla v10.4s, v18.4s, v3.s[1] \n" "fmla v12.4s, v18.4s, v3.s[2] \n" "fmla v14.4s, v18.4s, v3.s[3] \n" "fmla v9.4s, v19.4s, v3.s[0] \n" "fmla v11.4s, v19.4s, v3.s[1] \n" "fmla v13.4s, v19.4s, v3.s[2] \n" "fmla v15.4s, v19.4s, v3.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w13, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4s, v5.4s}, [%4], #32 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v10.4s, v4.4s, v0.s[1] \n" "fmla v12.4s, v4.4s, v0.s[2] \n" "fmla v14.4s, v4.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v11.4s, v5.4s, v0.s[1] \n" "fmla v13.4s, v5.4s, v0.s[2] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "bne 2b \n" "3: \n" "st1 {v8.4s, v9.4s}, [%0], #32 \n" "st1 {v10.4s, v11.4s}, [%1], #32 \n" "st1 {v12.4s, v13.4s}, [%2], #32 \n" "st1 {v14.4s, v15.4s}, [%3], #32 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); #else // __aarch64__ asm volatile( "vld1.f32 {d0-d1}, [%12] \n" "vdup.f32 q8, d0[0] \n" "vdup.f32 q9, d0[0] \n" "vdup.f32 q10, d0[1] \n" "vdup.f32 
q11, d0[1] \n" "vdup.f32 q12, d1[0] \n" "vdup.f32 q13, d1[0] \n" "vdup.f32 q14, d1[1] \n" "vdup.f32 q15, d1[1] \n" // inch loop "lsr r4, %13, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" // "vld1.f32 {d0-d3}, [%5 :128]! \n" // "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q15, q5, d1[1] \n" "vmla.f32 q8, q6, d2[0] \n" "vmla.f32 q10, q6, d2[1] \n" "vmla.f32 q12, q6, d3[0] \n" "vmla.f32 q14, q6, d3[1] \n" "vmla.f32 q9, q7, d2[0] \n" "vmla.f32 q11, q7, d2[1] \n" "vmla.f32 q13, q7, d3[0] \n" "vmla.f32 q15, q7, d3[1] \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "vmla.f32 q8, q4, d4[0] \n" "vmla.f32 q10, q4, d4[1] \n" "vmla.f32 q12, q4, d5[0] \n" "vmla.f32 q14, q4, d5[1] \n" "vmla.f32 q9, q5, d4[0] \n" "vmla.f32 q11, q5, d4[1] \n" "vmla.f32 q13, q5, d5[0] \n" "vmla.f32 q15, q5, d5[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q6, d6[0] \n" "vmla.f32 q10, q6, d6[1] \n" "vmla.f32 q12, q6, d7[0] \n" "vmla.f32 q14, q6, d7[1] \n" "vmla.f32 q9, q7, d6[0] \n" "vmla.f32 q11, q7, d6[1] \n" "vmla.f32 q13, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %13, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #256] \n" "vld1.f32 {d8-d11}, [%4 :128]! \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q15, q5, d1[1] \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d19}, [%0 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" "vst1.f32 {d24-d27}, [%2 :128]! \n" "vst1.f32 {d28-d31}, [%3 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else float sum0_0 = biasptr[0]; float sum0_1 = biasptr[0]; float sum0_2 = biasptr[0]; float sum0_3 = biasptr[0]; float sum0_4 = biasptr[0]; float sum0_5 = biasptr[0]; float sum0_6 = biasptr[0]; float sum0_7 = biasptr[0]; float sum1_0 = biasptr[1]; float sum1_1 = biasptr[1]; float sum1_2 = biasptr[1]; float sum1_3 = biasptr[1]; float sum1_4 = biasptr[1]; float sum1_5 = biasptr[1]; float sum1_6 = biasptr[1]; float sum1_7 = biasptr[1]; float sum2_0 = biasptr[2]; float sum2_1 = biasptr[2]; float sum2_2 = biasptr[2]; float sum2_3 = biasptr[2]; float sum2_4 = biasptr[2]; float sum2_5 = biasptr[2]; float sum2_6 = biasptr[2]; float sum2_7 = biasptr[2]; float sum3_0 = biasptr[3]; float sum3_1 = biasptr[3]; float sum3_2 = biasptr[3]; float sum3_3 = biasptr[3]; float sum3_4 = biasptr[3]; float sum3_5 = biasptr[3]; float sum3_6 = biasptr[3]; float sum3_7 = biasptr[3]; for (int q = 0; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum0_4 += tmpptr[4] * kptr[0]; sum0_5 += tmpptr[5] * kptr[0]; sum0_6 += tmpptr[6] * kptr[0]; sum0_7 += tmpptr[7] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum1_4 += tmpptr[4] * kptr[1]; sum1_5 += tmpptr[5] * kptr[1]; sum1_6 += tmpptr[6] * kptr[1]; sum1_7 += tmpptr[7] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum2_4 += tmpptr[4] * kptr[2]; sum2_5 += tmpptr[5] * kptr[2]; sum2_6 += tmpptr[6] * kptr[2]; sum2_7 += tmpptr[7] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; sum3_4 += tmpptr[4] * kptr[3]; sum3_5 += tmpptr[5] * kptr[3]; sum3_6 += tmpptr[6] * kptr[3]; sum3_7 += tmpptr[7] * kptr[3]; tmpptr += 8; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr0[4] = sum0_4; outptr0[5] = sum0_5; outptr0[6] = sum0_6; outptr0[7] = sum0_7; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr1[4] = sum1_4; outptr1[5] = sum1_5; outptr1[6] = sum1_6; outptr1[7] = sum1_7; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr2[4] = sum2_4; outptr2[5] = sum2_5; outptr2[6] = sum2_6; outptr2[7] = sum2_7; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr3[4] = sum3_4; outptr3[5] = sum3_5; outptr3[6] = sum3_6; outptr3[7] = sum3_7; outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; #endif // __ARM_NEON } for (; i + 3 < size; i += 4) { const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); #if __ARM_NEON && __aarch64__ const float* kptr = kernel.channel(p / 8 + (p % 8) / 4); #else const float* kptr = kernel.channel(p / 4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%12] \n" "dup v8.4s, v0.s[0] \n" "dup v9.4s, v0.s[1] \n" "dup v10.4s, v0.s[2] \n" "dup v11.4s, v0.s[3] \n" // inch loop "lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2 
"cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v8.4s, v5.4s, v1.s[0] \n" "fmla v9.4s, v5.4s, v1.s[1] \n" "fmla v10.4s, v5.4s, v1.s[2] \n" "fmla v11.4s, v5.4s, v1.s[3] \n" "subs w4, w4, #1 \n" "fmla v8.4s, v6.4s, v2.s[0] \n" "fmla v9.4s, v6.4s, v2.s[1] \n" "fmla v10.4s, v6.4s, v2.s[2] \n" "fmla v11.4s, v6.4s, v2.s[3] \n" "fmla v8.4s, v7.4s, v3.s[0] \n" "fmla v9.4s, v7.4s, v3.s[1] \n" "fmla v10.4s, v7.4s, v3.s[2] \n" "fmla v11.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w13, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v4.4s}, [%4], #16 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "bne 2b \n" "3: \n" "st1 {v8.4s}, [%0], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v10.4s}, [%2], #16 \n" "st1 {v11.4s}, [%3], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11" ); #else // __aarch64__ asm volatile( "vld1.f32 {d0-d1}, [%12] \n" "vdup.f32 q8, d0[0] \n" "vdup.f32 q9, d0[1] \n" "vdup.f32 q10, d1[0] \n" "vdup.f32 q11, d1[1] \n" // inch loop "lsr r4, %13, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" // "vld1.f32 {d0-d3}, [%5 :128]! \n" // "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q8, q5, d2[0] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d3[0] \n" "vmla.f32 q11, q5, d3[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q6, d4[0] \n" "vmla.f32 q9, q6, d4[1] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d5[1] \n" "vmla.f32 q8, q7, d6[0] \n" "vmla.f32 q9, q7, d6[1] \n" "vmla.f32 q10, q7, d7[0] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %13, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #128] \n" "vld1.f32 {d8-d9}, [%4 :128]! \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11" ); #endif // __aarch64__ #else float sum0_0 = biasptr[0]; float sum0_1 = biasptr[0]; float sum0_2 = biasptr[0]; float sum0_3 = biasptr[0]; float sum1_0 = biasptr[1]; float sum1_1 = biasptr[1]; float sum1_2 = biasptr[1]; float sum1_3 = biasptr[1]; float sum2_0 = biasptr[2]; float sum2_1 = biasptr[2]; float sum2_2 = biasptr[2]; float sum2_3 = biasptr[2]; float sum3_0 = biasptr[3]; float sum3_1 = biasptr[3]; float sum3_2 = biasptr[3]; float sum3_3 = biasptr[3]; for (int q = 0; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #endif // __ARM_NEON } for (; i < size; i++) { const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); #if __ARM_NEON && __aarch64__ const float* kptr = kernel.channel(p / 8 + (p % 8) / 4); #else const float* kptr = kernel.channel(p / 4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v12.4s}, [%12] \n" // inch loop "lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v4.4s}, [%4], #16 \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[1] \n" "fmla v10.4s, v2.4s, v4.s[2] \n" "fmla v11.4s, v3.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "fadd v12.4s, v12.4s, v8.4s \n" "1: \n" // remain loop "and w4, %w13, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #32] \n" "ld1r {v4.4s}, [%4], #4 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "subs w4, w4, #1 \n" "fmla v12.4s, v4.4s, v0.4s \n" "bne 2b \n" "3: \n" "st1 {v12.s}[0], [%0], #4 \n" "st1 {v12.s}[1], [%1], #4 \n" "st1 {v12.s}[2], [%2], #4 \n" "st1 {v12.s}[3], [%3], #4 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12" ); #else // __aarch64__ asm volatile( 
"vld1.f32 {d24-d25}, [%12] \n" // inch loop "lsr r4, %13, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "veor q10, q10, q10 \n" "veor q11, q11, q11 \n" "0: \n" "pld [%4, #128] \n" "vld1.f32 {d8-d9}, [%4 :128]! \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" // "vld1.f32 {d0-d3}, [%5 :128]! \n" // "vld1.f32 {d4-d7}, [%5 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[1] \n" "vmla.f32 q10, q2, d9[0] \n" "vmla.f32 q11, q3, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q12, q12, q8 \n" "1: \n" // remain loop "and r4, %13, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #32] \n" "vld1.f32 {d8[],d9[]}, [%4]! \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q12, q4, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d24[0]}, [%0]! \n" "vst1.f32 {d24[1]}, [%1]! \n" "vst1.f32 {d25[0]}, [%2]! \n" "vst1.f32 {d25[1]}, [%3]! \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr++; kptr += 4; } outptr0[0] = sum0; outptr1[0] = sum1; outptr2[0] = sum2; outptr3[0] = sum3; outptr0++; outptr1++; outptr2++; outptr3++; #endif // __ARM_NEON } } remain_outch_start += nn_outch << 2; //#pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; float* outptr0 = out0; int i = 0; for (; i + 7 < size; i += 8) { const float* tmpptr = tmp.channel(i / 8); #if __ARM_NEON && __aarch64__ const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4); #else const float* kptr = kernel.channel(p / 4 + p % 4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "dup v8.4s, %w6 \n" "dup v9.4s, %w6 \n" // inch loop "lsr w4, %w7, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "fmla v8.4s, v6.4s, v0.s[1] \n" "fmla v9.4s, v7.4s, v0.s[1] \n" "subs w4, w4, #1 \n" "fmla v8.4s, v12.4s, v0.s[2] \n" "fmla v9.4s, v13.4s, v0.s[2] \n" "fmla v8.4s, v14.4s, v0.s[3] \n" "fmla v9.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w7, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v4.4s, v5.4s}, [%1], #32 \n" "prfm pldl1keep, [%2, #32] \n" "ld1r {v0.4s}, [%2], #4 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.4s \n" "fmla v9.4s, v5.4s, v0.4s \n" "bne 2b \n" "3: \n" "st1 {v8.4s, v9.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v12", "v13", "v14", "v15" ); #else // __aarch64__ asm volatile( "vdup.f32 q8, %6 \n" "vdup.f32 q9, %6 \n" // inch loop "lsr r4, %7, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%1 :128]! \n" // "vld1.f32 {d12-d15}, [%1 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "pld [%1, #512] \n" "vldm %1!, {d24-d31} \n" // "vld1.f32 {d24-d27}, [%1 :128]! \n" // "vld1.f32 {d28-d31}, [%1 :128]! \n" "vmla.f32 q8, q6, d0[1] \n" "vmla.f32 q9, q7, d0[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q12, d1[0] \n" "vmla.f32 q9, q13, d1[0] \n" "vmla.f32 q8, q14, d1[1] \n" "vmla.f32 q9, q15, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %7, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #256] \n" "vld1.f32 {d8-d11}, [%1 :128]! \n" "pld [%2, #32] \n" "vld1.f32 {d0[],d1[]}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "vmla.f32 q9, q5, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d19}, [%0 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else float sum0 = bias0; float sum1 = bias0; float sum2 = bias0; float sum3 = bias0; float sum4 = bias0; float sum5 = bias0; float sum6 = bias0; float sum7 = bias0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; sum4 += tmpptr[4] * kptr[0]; sum5 += tmpptr[5] * kptr[0]; sum6 += tmpptr[6] * kptr[0]; sum7 += tmpptr[7] * kptr[0]; tmpptr += 8; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0[4] = sum4; outptr0[5] = sum5; outptr0[6] = sum6; outptr0[7] = sum7; outptr0 += 8; #endif // __ARM_NEON } for (; i + 3 < size; i += 4) { const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); #if __ARM_NEON && __aarch64__ const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4); #else const float* kptr = kernel.channel(p / 4 + p % 4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "dup v8.4s, %w6 \n" // inch loop "lsr w4, %w7, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v8.4s, v5.4s, v0.s[1] \n" "fmla v8.4s, v6.4s, v0.s[2] \n" "fmla v8.4s, v7.4s, v0.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w7, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v4.4s}, [%1], #16 \n" "prfm pldl1keep, [%2, #32] \n" "ld1r {v0.4s}, [%2], #4 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.4s \n" "bne 2b \n" "3: \n" "st1 {v8.4s}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8" ); #else // __aarch64__ asm volatile( "vdup.f32 q8, %6 \n" // inch loop "lsr r4, %7, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%1 :128]! \n" // "vld1.f32 {d12-d15}, [%1 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %7, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #128] \n" "vld1.f32 {d8-d9}, [%1 :128]! \n" "pld [%2, #32] \n" "vld1.f32 {d0[],d1[]}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d17}, [%0 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8" ); #endif // __aarch64__ #else float sum0 = bias0; float sum1 = bias0; float sum2 = bias0; float sum3 = bias0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0 += 4; #endif // __ARM_NEON } for (; i < size; i++) { const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); #if __ARM_NEON && __aarch64__ const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4); #else const float* kptr = kernel.channel(p / 4 + p % 4); #endif // __ARM_NEON && __aarch64__ int q = 0; #if __ARM_NEON float32x4_t _sum0 = vdupq_n_f32(0.f); for (; q + 3 < inch; q += 4) { float32x4_t _p0 = vld1q_f32(tmpptr); tmpptr += 4; float32x4_t _k0 = vld1q_f32(kptr); kptr += 4; #if __aarch64__ _sum0 = vfmaq_f32(_sum0, _p0, _k0); #else _sum0 = vmlaq_f32(_sum0, _p0, _k0); #endif } #if __aarch64__ float sum0 = bias0 + vaddvq_f32(_sum0); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float sum0 = bias0 + vget_lane_f32(vpadd_f32(_ss, _ss), 0); #endif #else float sum0 = bias0; #endif // __ARM_NEON for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = sum0; outptr0++; } } // // NOTE sgemm // for (; p<outch; p++) // { // Mat out0 = top_blob.channel(p); // // const float bias0 = bias ? bias[p] : 0.f; // // float* outptr0 = out0; // // for (int i=0; i<size; i++) // { // float sum = bias0; // // const float* kptr = _kernel.channel(p/8 + p%8); // // for (int q=0; q<inch; q++) // { // const float* img0 = bottom_blob.channel(q); // // sum += img0[i] * kptr[0]; // kptr ++; // } // // outptr0[i] = sum; // } // } } }
TemporalMaxPooling.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/TemporalMaxPooling.c" #else static inline void THNN_(TemporalMaxPooling_shapeCheck)( THNNState *state, THTensor *input, THTensor *gradOutput, THIndexTensor *indices, int kW, int dW) { int64_t niframe; int64_t framesize; int64_t noframe; int dimS = 0; // sequence dimension int dimF = 1; // feature dimension int ndims = input->nDimension; if (input->nDimension == 3) { dimS = 1; dimF = 2; } niframe = input->size[dimS]; framesize = input->size[dimF]; noframe = (niframe - kW) / dW + 1; THArgCheck(kW > 0, 5, "kernel size should be greater than zero, but got kW: %d", kW); THArgCheck(dW > 0, 6, "stride should be greater than zero, but got dW: %d", dW); THNN_ARGCHECK(input->nDimension == 2 || input->nDimension == 3, 2, input, "2D or 3D (batch mode) tensor expected for input, but got: %s"); THArgCheck(input->size[dimS] >= kW, 2, "input sequence smaller than kernel size. Got: %d, Expected: %d", input->size[dimS], kW); if (gradOutput != NULL) { THNN_CHECK_DIM_SIZE(gradOutput, ndims, dimS, noframe); THNN_CHECK_DIM_SIZE(gradOutput, ndims, dimF, framesize) } if (indices != NULL) { THNN_CHECK_DIM_SIZE_INDICES(indices, ndims, dimS, noframe); THNN_CHECK_DIM_SIZE_INDICES(indices, ndims, dimF, framesize); } } void THNN_(TemporalMaxPooling_updateOutput)( THNNState *state, THTensor *input, THTensor *output, THIndexTensor *indices, int kW, int dW) { int64_t niframe; int64_t framesize; int64_t noframe; real *input_data; real *output_data; THIndex_t *indices_data; int64_t t, y; int dimS = 0; // sequence dimension int dimF = 1; // feature dimension THNN_(TemporalMaxPooling_shapeCheck)(state, input, NULL, NULL, kW, dW); if (input->nDimension == 3) { dimS = 1; dimF = 2; } /* sizes */ niframe = input->size[dimS]; framesize = input->size[dimF]; noframe = (niframe - kW) / dW + 1; /* get contiguous input */ input = THTensor_(newContiguous)(input); if (input->nDimension == 2) { /* resize output */ THTensor_(resize2d)(output, noframe, framesize); /* indices will contain index locations for each output point */ THIndexTensor_(resize2d)(indices, noframe, framesize); /* get raw pointers */ input_data = THTensor_(data)(input); output_data = THTensor_(data)(output); indices_data = THIndexTensor_(data)(indices); for(t = 0; t < noframe; t++) { real *ip = input_data + t*framesize*dW; real *op = output_data + t*framesize; THIndex_t *xp = indices_data + t*framesize; #pragma omp parallel for private(y) for(y = 0; y < framesize; y++) { /* compute local max: */ int64_t maxindex = -1; real maxval = -THInf; int64_t x; for(x = 0; x < kW; x++) { real val = ip[x*framesize+y]; if (val > maxval) { maxval = val; maxindex = x; } } /* set output to local max */ op[y] = maxval; xp[y] = (real)maxindex; } } } else { /* number of batch frames */ int64_t nbframe = input->size[0]; int64_t i; /* resize output */ THTensor_(resize3d)(output, nbframe, noframe, framesize); /* indices will contain index locations for each output point */ THIndexTensor_(resize3d)(indices, nbframe, noframe, framesize); /* get raw pointers */ input_data = THTensor_(data)(input); output_data = THTensor_(data)(output); indices_data = THIndexTensor_(data)(indices); for(i = 0; i < nbframe; i++) { real *inputSample_data = input_data + i*niframe*framesize; real *outputSample_data = output_data + i*noframe*framesize; THIndex_t *indicesSample_data = indices_data + i*noframe*framesize; for(t = 0; t < noframe; t++) { real *ip = inputSample_data + t*framesize*dW; real *op = outputSample_data + t*framesize; THIndex_t *xp = 
indicesSample_data + t*framesize; #pragma omp parallel for private(y) for(y = 0; y < framesize; y++) { /* compute local max: */ int64_t maxindex = -1; real maxval = -THInf; int64_t x; for(x = 0; x < kW; x++) { real val = ip[x*framesize+y]; if (val > maxval) { maxval = val; maxindex = x; } } /* set output to local max */ op[y] = maxval; xp[y] = (real)maxindex; } } } } /* cleanup */ THTensor_(free)(input); } void THNN_(TemporalMaxPooling_updateGradInput)( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THIndexTensor *indices, int kW, int dW) { int64_t niframe; int64_t noframe; int64_t framesize; real *gradInput_data; real *gradOutput_data; THIndex_t *indices_data; int64_t t, y; THNN_(TemporalMaxPooling_shapeCheck)(state, input, gradOutput, indices, kW, dW); /* get contiguous gradOutput */ gradOutput = THTensor_(newContiguous)(gradOutput); /* resize and zero */ THTensor_(resizeAs)(gradInput, input); THTensor_(zero)(gradInput); int dimS = 0; // sequence dimension int dimF = 1; // feature dimension if (input->nDimension == 3) { dimS = 1; dimF = 2; } /* sizes */ niframe = input->size[dimS]; noframe = gradOutput->size[dimS]; framesize = gradOutput->size[dimF]; /* get raw pointers */ gradInput_data = THTensor_(data)(gradInput); gradOutput_data = THTensor_(data)(gradOutput); indices_data = THIndexTensor_(data)(indices); if (input->nDimension == 2) { for(t = 0; t < noframe; t++) { real *gip = gradInput_data + t*framesize*dW; real *gop = gradOutput_data + t*framesize; THIndex_t *xp = indices_data + t*framesize; #pragma omp parallel for private(y) for(y = 0; y < framesize; y++) { /* route the gradient to the recorded argmax location: */ int64_t maxindex = (int64_t)xp[y]; if (maxindex != -1) gip[maxindex*framesize+y] += gop[y]; } } } else { /* number of batch frames */ int64_t nbframe = input->size[0]; int64_t i; for(i = 0; i < nbframe; i++) { real *gradInputSample_data = gradInput_data + i*niframe*framesize; real *gradOutputSample_data = gradOutput_data + i*noframe*framesize; THIndex_t *indicesSample_data = indices_data + i*noframe*framesize; for(t = 0; t < noframe; t++) { real *gip = gradInputSample_data + t*framesize*dW; real *gop = gradOutputSample_data + t*framesize; THIndex_t *xp = indicesSample_data + t*framesize; #pragma omp parallel for private(y) for(y = 0; y < framesize; y++) { /* route the gradient to the recorded argmax location: */ int64_t maxindex = (int64_t)xp[y]; if (maxindex != -1) gip[maxindex*framesize+y] += gop[y]; } } } } /* cleanup */ THTensor_(free)(gradOutput); } #endif
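/*
 * Editor's sketch (not part of THNN): the output-length formula used by
 * TemporalMaxPooling above, noframe = (niframe - kW) / dW + 1, i.e. the
 * number of kernel windows of width kW at stride dW that fit entirely
 * inside the sequence (no padding). temporal_out_frames() is a
 * hypothetical helper name.
 */
#include <stdint.h>
#include <stdio.h>

static int64_t temporal_out_frames(int64_t niframe, int kW, int dW)
{
    return (niframe - kW) / dW + 1;
}

int main(void)
{
    /* 10 frames, kernel 3, stride 2: windows start at t = 0, 2, 4, 6 */
    printf("%lld\n", (long long)temporal_out_frames(10, 3, 2)); /* 4 */
    printf("%lld\n", (long long)temporal_out_frames(10, 2, 1)); /* 9 */
    return 0;
}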
ppc64le-varargs-f128.c
// RUN: %clang_cc1 -triple powerpc64le-unknown-linux-gnu -emit-llvm \ // RUN: -target-cpu pwr9 -target-feature +float128 -mabi=ieeelongdouble \ // RUN: -o - %s | FileCheck %s -check-prefix=IEEE // RUN: %clang_cc1 -triple powerpc64le-unknown-linux-gnu -emit-llvm \ // RUN: -target-cpu pwr9 -target-feature +float128 \ // RUN: -o - %s | FileCheck %s -check-prefix=IBM // RUN: %clang_cc1 -triple ppc64le -emit-llvm-bc %s -target-cpu pwr9 \ // RUN: -target-feature +float128 -mabi=ieeelongdouble -fopenmp \ // RUN: -fopenmp-targets=ppc64le -o %t-ppc-host.bc // RUN: %clang_cc1 -triple ppc64le -aux-triple ppc64le %s -target-cpu pwr9 \ // RUN: -target-feature +float128 -fopenmp -fopenmp-is-device -emit-llvm \ // RUN: -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s \ // RUN: -check-prefix=OMP-TARGET // RUN: %clang_cc1 -triple ppc64le %t-ppc-host.bc -emit-llvm -o - | FileCheck %s \ // RUN: -check-prefix=OMP-HOST #include <stdarg.h> void foo_ld(long double); void foo_fq(__float128); // Verify cases when OpenMP target's and host's long-double semantics differ. // OMP-TARGET-LABEL: define internal void @.omp_outlined.( // OMP-TARGET: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** // OMP-TARGET: %[[V2:[0-9a-zA-Z_.]+]] = bitcast i8* %[[CUR]] to ppc_fp128* // OMP-TARGET: %[[V3:[0-9a-zA-Z_.]+]] = load ppc_fp128, ppc_fp128* %[[V2]], align 8 // OMP-TARGET: call void @foo_ld(ppc_fp128 %[[V3]]) // OMP-HOST-LABEL: define{{.*}} void @omp( // OMP-HOST: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8* // OMP-HOST: call void @llvm.va_start(i8* %[[AP1]]) // OMP-HOST: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]], align 8 // OMP-HOST: %[[V0:[0-9a-zA-Z_.]+]] = ptrtoint i8* %[[CUR]] to i64 // OMP-HOST: %[[V1:[0-9a-zA-Z_.]+]] = add i64 %[[V0]], 15 // OMP-HOST: %[[V2:[0-9a-zA-Z_.]+]] = and i64 %[[V1]], -16 // OMP-HOST: %[[ALIGN:[0-9a-zA-Z_.]+]] = inttoptr i64 %[[V2]] to i8* // OMP-HOST: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[ALIGN]] to fp128* // OMP-HOST: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, fp128* %[[V3]], align 16 // OMP-HOST: call void @foo_ld(fp128 %[[V4]]) void omp(int n, ...) { va_list ap; va_start(ap, n); foo_ld(va_arg(ap, long double)); #pragma omp target parallel for (int i = 1; i < n; ++i) { foo_ld(va_arg(ap, long double)); } va_end(ap); } // IEEE-LABEL: define{{.*}} void @f128 // IEEE: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8* // IEEE: call void @llvm.va_start(i8* %[[AP1]]) // IEEE: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]] // IEEE: %[[V0:[0-9a-zA-Z_.]+]] = ptrtoint i8* %[[CUR]] to i64 // IEEE: %[[V1:[0-9a-zA-Z_.]+]] = add i64 %[[V0]], 15 // IEEE: %[[V2:[0-9a-zA-Z_.]+]] = and i64 %[[V1]], -16 // IEEE: %[[ALIGN:[0-9a-zA-Z_.]+]] = inttoptr i64 %[[V2]] to i8* // IEEE: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[ALIGN]] to fp128* // IEEE: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, fp128* %[[V3]], align 16 // IEEE: call void @foo_fq(fp128 %[[V4]]) // IEEE: %[[AP2:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP]] to i8* // IEEE: call void @llvm.va_end(i8* %[[AP2]]) void f128(int n, ...) 
{ va_list ap; va_start(ap, n); foo_fq(va_arg(ap, __float128)); va_end(ap); } // IEEE-LABEL: define{{.*}} void @long_double // IEEE: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8* // IEEE: call void @llvm.va_start(i8* %[[AP1]]) // IEEE: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]] // IEEE: %[[V0:[0-9a-zA-Z_.]+]] = ptrtoint i8* %[[CUR]] to i64 // IEEE: %[[V1:[0-9a-zA-Z_.]+]] = add i64 %[[V0]], 15 // IEEE: %[[V2:[0-9a-zA-Z_.]+]] = and i64 %[[V1]], -16 // IEEE: %[[ALIGN:[0-9a-zA-Z_.]+]] = inttoptr i64 %[[V2]] to i8* // IEEE: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[ALIGN]] to fp128* // IEEE: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, fp128* %[[V3]], align 16 // IEEE: call void @foo_ld(fp128 %[[V4]]) // IEEE: %[[AP2:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP]] to i8* // IEEE: call void @llvm.va_end(i8* %[[AP2]]) // IBM-LABEL: define{{.*}} void @long_double // IBM: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8* // IBM: call void @llvm.va_start(i8* %[[AP1]]) // IBM: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]] // IBM: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[CUR]] to ppc_fp128* // IBM: %[[V4:[0-9a-zA-Z_.]+]] = load ppc_fp128, ppc_fp128* %[[V3]], align 8 // IBM: call void @foo_ld(ppc_fp128 %[[V4]]) // IBM: %[[AP2:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP]] to i8* // IBM: call void @llvm.va_end(i8* %[[AP2]]) void long_double(int n, ...) { va_list ap; va_start(ap, n); foo_ld(va_arg(ap, long double)); va_end(ap); }
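/*
 * Editor's sketch (not part of the clang test): the 16-byte rounding the
 * IEEE-mode va_arg lowering above is checked for -- "add i64 %v, 15"
 * followed by "and i64 %v, -16" -- written out on plain integers.
 * align16() is a hypothetical helper name.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t align16(uint64_t p)
{
    return (p + 15) & ~(uint64_t)15; /* ~15 == -16 in two's complement */
}

int main(void)
{
    assert(align16(0x1000) == 0x1000); /* already aligned: unchanged */
    assert(align16(0x1001) == 0x1010); /* rounds up to the next 16 */
    assert(align16(0x100f) == 0x1010);
    return 0;
}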
PoW.c
// Copyright (c) 2016-2018 The Ulord Core Foundation #include "PoW.h" #include <stdio.h> #include <stdint.h> #include <string.h> #include <stdlib.h> #include <assert.h> #ifndef MAC_OSX #include <omp.h> #endif #include "my_time.h" #include "common.h" #include "my_rand48_r.h" #include "oneWayFunction.h" // #define SSE_VERSION /* * Step 1: Initialize working memory. */ void initWorkMemory(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, const uint32_t K) { uint32_t i, j; uint8_t a[OUTPUT_LEN], b[OUTPUT_LEN]; funcInfor[0].func(input, inputLen, a); uint64_t randSeed[4] = {0, 0, 0, 0}; #ifndef SSE_VERSION struct my_rand48_data randBuffer[4]; #else struct vrand48_data randBuffer[2]; #endif const uint32_t iterNum = WORK_MEMORY_SIZE >> 5; for (i = 0; i < iterNum; ++i) { if (i % K) { #ifndef SSE_VERSION uint64_t num = 0; for (j = 0; j < 4; ++j) { my_rand64_r(&randBuffer[j], &num); memcpy(b + (j << 3), (uint8_t *)&num, 8*sizeof(uint8_t)); } #else vrand64(b, randBuffer); #endif uint8_t shift_num; uint8_t result[OUTPUT_LEN]; reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8); rrs(b, OUTPUT_LEN, result, shift_num); memcpy(Maddr + (i << 5), result, OUTPUT_LEN*sizeof(uint8_t)); for (j = 0; j < 32; ++j) { a[j] ^= result[j]; } } else { uint8_t t = 0, shift_num = 0; reduce_bit(a, 32, (uint8_t *)&t, 8); t = (t & 0x0f) ^ (t >> 4); reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8); uint8_t a_rrs[INPUT_LEN]; rrs(a, OUTPUT_LEN, a_rrs, shift_num); funcInfor[t].func(a_rrs, 32, a); reduce_bit(a, 8, (uint8_t *)&randSeed[0], 48); reduce_bit(a + 8, 8, (uint8_t *)&randSeed[1], 48); reduce_bit(a + 16, 8, (uint8_t *)&randSeed[2], 48); reduce_bit(a + 24, 8, (uint8_t *)&randSeed[3], 48); #ifndef SSE_VERSION my_seed48_r(randSeed[0], &randBuffer[0]); my_seed48_r(randSeed[1], &randBuffer[1]); my_seed48_r(randSeed[2], &randBuffer[2]); my_seed48_r(randSeed[3], &randBuffer[3]); #else vseed48(randSeed , &randBuffer[0]); vseed48(randSeed + 2, &randBuffer[1]); #endif memcpy(Maddr + (i << 5), a, 32*sizeof(uint8_t)); } } } /* * Step 2: Modify the working memory contents. */ void modifyWorkMemory(uint8_t *Maddr, const uint32_t L, const uint32_t C, uint8_t *result) { uint32_t i, j; uint8_t a[OUTPUT_LEN], b[64]; funcInfor[0].func(Maddr + WORK_MEMORY_SIZE - 32, 32, a); memcpy(result, a, OUTPUT_LEN*sizeof(uint8_t)); uint64_t r = 0; reduce_bit(a, 32, (uint8_t *)&r, 64); const uint32_t iterNum = L << 6; for (i = 0; i < C; ++i) { uint64_t randSeed = 0; reduce_bit(a, 32, (uint8_t *)&randSeed, 48); struct my_rand48_data randBuffer; my_seed48_r(randSeed, &randBuffer); uint8_t t1, t2, s; uint64_t randNum = 0, base = 0; for (j = 0; j < iterNum; ++j) { my_rand48_r(&randBuffer, &randNum); base = randNum + r; uint64_t offset = 0; reduce_bit((uint8_t *)&r, 8, (uint8_t *)&offset, 8); offset = (offset << 8) + 1; uint64_t addr1 = (base + WORK_MEMORY_SIZE - offset) % WORK_MEMORY_SIZE; uint64_t addr2 = (base + offset) % WORK_MEMORY_SIZE; t1 = Maddr[addr1]; t2 = Maddr[addr2]; s = a[j & 0x1f]; Maddr[addr1] = t2 ^ s; Maddr[addr2] = t1 ^ s; b[j & 0x3f] = t1 ^ t2; r = r + s + t1 + t2; } uint8_t t = 0; reduce_bit((uint8_t *)&r, 8, (uint8_t *)&t, 8); t = (t & 0x0f) ^ (t >> 4); reduce_bit(b, 64, a, 256); uint8_t shift_num = 0; uint64_t ir = r + i; reduce_bit((uint8_t *)&ir, 8, (uint8_t *)&shift_num, 8); uint8_t a_rrs[INPUT_LEN]; rrs(a, OUTPUT_LEN, a_rrs, shift_num); funcInfor[t].func(a_rrs, 32, a); for (j = 0; j < OUTPUT_LEN; ++j) { result[j] ^= a[j]; } } } /* * Step 3: Calculate the final result. 
*/ void calculateFinalResult(uint8_t *Maddr, uint8_t *c, const uint32_t D, uint8_t *result) { uint32_t i = 0, j = 0, k = 0; memcpy(result, c, OUTPUT_LEN*sizeof(uint8_t)); const uint32_t num = (WORK_MEMORY_SIZE >> 5) - 1; uint32_t it = 0; uint8_t result_rrs[OUTPUT_LEN]; while(1) { uint8_t t = 0, shift_num = 0; uint32_t d = 0; reduce_bit(result, 32, (uint8_t *)&t, 8); t = (t & 0x0f) ^ (t >> 4); reduce_bit(result, 32, (uint8_t *)&d, D); ++d; for (j = 0; j < d; ++j) { uint32_t index = i << 5; for (k = 0; k < 32; ++k) { result[k] ^= Maddr[index + k]; } ++i; if (i == num) { it = i + t; reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8); rrs(result, OUTPUT_LEN, result_rrs, shift_num); funcInfor[0].func(result_rrs, 32, result); return; } } it = t + i; reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8); rrs(result, OUTPUT_LEN, result_rrs, shift_num); funcInfor[t].func(result_rrs, 32, result); } } /* * Correctness & Performance test for Proof of work */ void testPowFunction(uint8_t *mess, uint32_t messLen, const int64_t iterNum) { int64_t j; uint32_t inputLen = messLen; uint8_t input[INPUT_LEN], output[OUTPUT_LEN]; memset(input, 0, INPUT_LEN*sizeof(uint8_t)); memcpy(input, mess, messLen*sizeof(char)); // Init all one-way function initOneWayFunction(); uint8_t *Maddr = (uint8_t *)malloc(64 * WORK_MEMORY_SIZE*sizeof(uint8_t)); assert(NULL != Maddr); memset(Maddr, 0, 64 * WORK_MEMORY_SIZE*sizeof(uint8_t)); printf("****************************** Correctness test (PoW function) ******************************\n"); printf("Test message: %s\n", mess); powFunction(input, inputLen, Maddr, output); view_data_u8("PoW", output, OUTPUT_LEN); printf("*********************************************************************************************\n"); /* printf("*************************************************** Performance test (PoW function) ***************************************************\n"); uint8_t *result = (uint8_t *)malloc(iterNum * OUTPUT_LEN * sizeof(uint8_t)); assert(NULL != result); memset(result, 0, iterNum * OUTPUT_LEN * sizeof(uint8_t)); uint32_t threadNumArr[] = {1, 4, 8, 12, 16, 20, 24, 32, 48, 64}; uint32_t threadNumTypes = sizeof(threadNumArr) / sizeof(uint32_t); printf(" %-18s", "Algorithm"); for (uint32_t ix = 0; ix < threadNumTypes; ++ix) printf("%12d", threadNumArr[ix]); printf("\n"); printf("00 %-18s\t", "PoW"); for (uint32_t ix = 0; ix < threadNumTypes; ++ix) { omp_set_num_threads(threadNumArr[ix]); double startTime = get_wall_time(); if (threadNumArr[ix] == 1) { for (j = 0; j < iterNum; ++j) { powFunction(input, inputLen, Maddr, result + j * OUTPUT_LEN); } } else { #pragma omp parallel for firstprivate(input), private(j) shared(result) for (j = 0; j < iterNum; ++j) { powFunction(input, inputLen, Maddr + omp_get_thread_num() * WORK_MEMORY_SIZE, result + j * OUTPUT_LEN); } } double endTime = get_wall_time(); double costTime = endTime - startTime; printf("%5.0f bps ", iterNum / costTime); fflush(stdout); // Check result for (j = 0; j < iterNum; j += 1) { if (memcmp(output, result + j * OUTPUT_LEN, OUTPUT_LEN)) { printf("Thread num: %d, j: %ld\n", threadNumArr[ix], j); view_data_u8("output", output, OUTPUT_LEN); view_data_u8("result", result + j * OUTPUT_LEN, OUTPUT_LEN); abort(); } } } printf("\n"); printf("***************************************************************************************************************************************\n"); if (NULL != result) { free(result); result = NULL; } */ if (NULL != Maddr) { free(Maddr); Maddr = NULL; } } #define OUTPUT_BUFFER_SIZE 
(32 * 1024UL * 1024UL) #define MAX_TEST_INPUT_LEN 140 #define MAX_OUT_FILE_NAME_LEN 25 const char testInputCase[][MAX_TEST_INPUT_LEN] = { "", "HelloWorld", "0123456789" }; void powNistTest(const char *outFileName) { const uint64_t iterNum = 1024UL * 1024UL; // const uint64_t iterNum = 1024UL; uint8_t *outputBuffer = (uint8_t *)malloc(OUTPUT_BUFFER_SIZE * sizeof(uint8_t)); assert(NULL != outputBuffer); memset(outputBuffer, 0, OUTPUT_BUFFER_SIZE * sizeof(uint8_t)); uint8_t *Maddr = (uint8_t *)malloc(WORK_MEMORY_SIZE*sizeof(uint8_t)); assert(NULL != Maddr); memset(Maddr, 0, WORK_MEMORY_SIZE*sizeof(uint8_t)); initOneWayFunction(); uint32_t testInputCaseNum = sizeof(testInputCase) / sizeof(const char [MAX_TEST_INPUT_LEN]); for (uint32_t testCaseIx = 0; testCaseIx < testInputCaseNum; ++testCaseIx) { char curOutFileName[MAX_OUT_FILE_NAME_LEN] = ""; sprintf(curOutFileName, "%s-%u.txt", outFileName, testCaseIx); FILE *fp = NULL; if (NULL != (fp = fopen(curOutFileName, "wb"))) { const uint32_t testInputCaseLen = strlen((char *)testInputCase[testCaseIx]); uint8_t input[MAX_TEST_INPUT_LEN]; memset(input, 0, MAX_TEST_INPUT_LEN*sizeof(uint8_t)); memcpy(input, testInputCase[testCaseIx], testInputCaseLen*sizeof(uint8_t)); double startTime = get_wall_time(); powFunction(input, testInputCaseLen, Maddr, outputBuffer); for (uint64_t i = 1, j = 0; i < iterNum; ++i) { memcpy(input, outputBuffer + j, OUTPUT_LEN * sizeof(uint8_t)); /* chain OUTPUT_LEN bytes; sizeof(uint32_t) here copied four times too many and read past the end of outputBuffer on the last iterations */ j += OUTPUT_LEN; powFunction(input, OUTPUT_LEN, Maddr, outputBuffer + j); /* if (j == OUTPUT_BUFFER_SIZE) { fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp); j = 0; } */ } double endTime = get_wall_time(); double costTime = endTime - startTime; fprintf(stdout, "TestCaseIx: %u, Input: %s, IterNum: %llu, Time: %4.2f, Performance: %5.2f bps\n", testCaseIx, \ testInputCase[testCaseIx], (unsigned long long)iterNum, costTime, ((double)(iterNum * OUTPUT_LEN)) / costTime); fflush(stdout); fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp); fclose(fp); } else { fprintf(stderr, "Error: Open %s failed!\n", curOutFileName); abort(); } } if (NULL != outputBuffer) { free(outputBuffer); outputBuffer = NULL; } if (NULL != Maddr) { free(Maddr); Maddr = NULL; } } void helloHash(const uint8_t *mess, uint32_t messLen, uint8_t output[OUTPUT_LEN]) { if(messLen != INPUT_LEN) { printf("helloHash: invalid message length %u\n", messLen); return; } int64_t j; uint32_t inputLen = messLen; uint8_t input[INPUT_LEN]; memset(input, 0, INPUT_LEN*sizeof(uint8_t)); memcpy(input, mess, inputLen*sizeof(char)); //operation: input uint8_t *Maddr = (uint8_t *)malloc(WORK_MEMORY_SIZE*sizeof(uint8_t)); //1024*1024*1 assert(NULL != Maddr); memset(Maddr, 0, WORK_MEMORY_SIZE*sizeof(uint8_t)); powFunction(input, inputLen, Maddr, output); //view_data_u8("PoW", output, OUTPUT_LEN); //output if (NULL != Maddr) { free(Maddr); Maddr = NULL; } } int my_rand64_r (struct my_rand48_data *buffer, uint64_t *result) { uint64_t X = buffer->__x; X = (X * buffer->__a + buffer->__c) & 0xffffffffffffULL; buffer->__x = X; buffer->__x = (X * buffer->__a + buffer->__c) & 0xffffffffffffULL; X ^= buffer->__x << 16; *result = X; return 0; } int my_seed48_r (uint64_t seedval, struct my_rand48_data *buffer) { buffer->__x = seedval & 0xffffffffffffULL; buffer->__a = 0x5deece66dULL; buffer->__c = 0xb; return 0; } void powFunction(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, uint8_t *output) { uint8_t c[OUTPUT_LEN]; // Step 1: Initialize working memory. 
initWorkMemory(input, inputLen, Maddr, 128); // view_data_u8("Maddr", Maddr, OUTPUT_LEN); // Step 2: Modify the working memory contents. modifyWorkMemory(Maddr, 4, WORK_MEMORY_SIZE >> 11, c); // view_data_u8("c", c, OUTPUT_LEN); // Step 3: Calculate the final result. calculateFinalResult(Maddr, c, 8, output); // view_data_u8("output", output, OUTPUT_LEN); } int my_rand48_r (struct my_rand48_data *buffer, uint64_t *result) { *result = (buffer->__x * buffer->__a + buffer->__c) & 0xffffffffffffULL; buffer->__x = *result; return 0; }
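/*
 * Editor's sketch (not part of PoW.c): minimal usage of the
 * my_seed48_r / my_rand48_r pair defined above -- the classic rand48
 * LCG x' = (a*x + c) mod 2^48 with a = 0x5deece66d, c = 0xb. The struct
 * layout below is an assumption so the sketch is self-contained; the
 * real definition lives in my_rand48_r.h.
 */
#include <stdint.h>
#include <stdio.h>

struct my_rand48_data { uint64_t __x, __a, __c; }; /* assumed layout */

static void seed48_sketch(uint64_t seedval, struct my_rand48_data *b)
{
    b->__x = seedval & 0xffffffffffffULL;
    b->__a = 0x5deece66dULL;
    b->__c = 0xb;
}

static uint64_t rand48_sketch(struct my_rand48_data *b)
{
    b->__x = (b->__x * b->__a + b->__c) & 0xffffffffffffULL;
    return b->__x; /* 48 significant bits, as in my_rand48_r */
}

int main(void)
{
    struct my_rand48_data buf;
    seed48_sketch(12345, &buf);
    for (int i = 0; i < 3; i++)
        printf("%012llx\n", (unsigned long long)rand48_sketch(&buf));
    return 0;
}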
relu_kernel_arm.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: haitao@openailab.com */ #include "relu_kernel_arm.h" #include <math.h> #include <arm_neon.h> #define MIN(a, b) ((a) < (b) ? (a) : (b)) #define MAX(a, b) ((a) > (b) ? (a) : (b)) static inline int relu_kernel(const int i, const int id, const void* data, const float* input, float* output, const float slope) { float32x4_t _zero = vdupq_n_f32(0.f); int step = ((int*)data)[0]; const float* cur_input = input + id * step; float* cur_output = output + id * step; if (slope == 0) { for (int l = 0; l < (step & -4); l += 4) { float32x4_t _p = vld1q_f32(cur_input); _p = vmaxq_f32(_p, _zero); vst1q_f32(cur_output, _p); cur_input += 4; cur_output += 4; } for (int i = step & ~3; i < step; i++) { *cur_output++ = MAX(*cur_input++, 0.f); } } else { float32x4_t _slope = vdupq_n_f32(slope); for (int l = 0; l < (step & -4); l += 4) { float32x4_t _p = vld1q_f32(cur_input); // ri = ai <= bi ? 1...1:0...0 uint32x4_t _lemask = vcleq_f32(_p, _zero); float32x4_t _ps = vmulq_f32(_p, _slope); // bitwise select _p = vbslq_f32(_lemask, _ps, _p); vst1q_f32(cur_output, _p); cur_input += 4; cur_output += 4; } for (int i = step & ~3; i < step; i++) { *cur_output++ = MAX(cur_input[0], 0.f) + slope * MIN(cur_input[0], 0.f); cur_input++; } } return 0; } int relu_arm_run(struct tensor* output_tensor, struct tensor* input_tensor, struct relu_param* relu_param, int num_thread) { float* data = (float*)input_tensor->data; float* out_data = (float*)output_tensor->data; float negativeslope = relu_param->negative_slope; int chan_num = input_tensor->dims[0] * input_tensor->dims[1]; int chan_size = input_tensor->dims[2] * input_tensor->dims[3]; // #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < chan_num; i++) { int offset = i * chan_size; relu_kernel(0, 0, &chan_size, data + offset, out_data + offset, negativeslope); } return 0; }
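/*
 * Editor's sketch (not part of the Tengine API): a scalar reference for
 * the NEON leaky-ReLU path above (the vcleq_f32 mask + vbslq_f32
 * select), handy for validating the vector kernel. relu_ref() and the
 * buffers in main() are illustrative assumptions.
 */
#include <stddef.h>
#include <stdio.h>

static void relu_ref(const float* in, float* out, size_t n, float slope)
{
    for (size_t i = 0; i < n; i++)
    {
        /* same select as vbslq_f32 with the vcleq_f32(x, 0) mask:
         * x <= 0 ? slope * x : x; slope == 0 reduces to max(x, 0) */
        out[i] = in[i] <= 0.f ? slope * in[i] : in[i];
    }
}

int main(void)
{
    float in[5] = {-2.f, -0.5f, 0.f, 0.5f, 2.f};
    float out[5];
    relu_ref(in, out, 5, 0.1f);
    for (int i = 0; i < 5; i++)
        printf("%g -> %g\n", in[i], out[i]);
    return 0;
}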
781.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "correlation.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < m; i++) for (j = 0; j < n; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_correlation(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m), DATA_TYPE POLYBENCH_1D(stddev,M,m)) { int i, j, j1, j2; DATA_TYPE eps = 0.1f; #define sqrt_of_array_cell(x,j) sqrt(x[j]) #pragma scop /* Determine mean of column vectors of input data matrix */ #pragma omp parallel private(i, j, j2) num_threads(#P11) { #pragma omp target teams distribute #p #p for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Determine standard deviations of column vectors of data matrix. */ #pragma omp target teams distribute #p #p for (j = 0; j < _PB_M; j++) { stddev[j] = 0.0; for (i = 0; i < _PB_N; i++) stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]); stddev[j] /= float_n; stddev[j] = sqrt_of_array_cell(stddev, j); /* The following is an inelegant but usual way to handle near-zero std. dev. values, which below would cause a zero-divide. */ stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j]; } /* Center and reduce the column vectors. */ #pragma omp target teams distribute #p #p for (i = 0; i < _PB_N; i++) { #pragma omp for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; data[i][j] /= sqrt(float_n) * stddev[j]; } } /* Calculate the m * m correlation matrix. */ #pragma omp target teams distribute #p #p for (j1 = 0; j1 < _PB_M-1; j1++) { symmat[j1][j1] = 1.0; for (j2 = j1+1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += (data[i][j1] * data[i][j2]); symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop symmat[_PB_M-1][_PB_M-1] = 1.0; } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_correlation (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean), POLYBENCH_ARRAY(stddev)); /* Stop and print timer. 
*/ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); POLYBENCH_FREE_ARRAY(stddev); return 0; }
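/*
 * Editor's sketch (not part of PolyBench): the kernel above computes
 * Pearson correlation -- once each column is centered by its mean and
 * divided by sqrt(float_n) * stddev, the dot product of two columns is
 * their correlation coefficient. pearson() is a hypothetical helper
 * doing the same for two plain arrays, including the near-zero-stddev
 * guard (eps = 0.1, as in the kernel).
 */
#include <math.h>
#include <stdio.h>

static double pearson(const double *x, const double *y, int n)
{
    double mx = 0.0, my = 0.0;
    for (int i = 0; i < n; i++) { mx += x[i]; my += y[i]; }
    mx /= n; my /= n;
    double sx = 0.0, sy = 0.0, sxy = 0.0;
    for (int i = 0; i < n; i++)
    {
        sx  += (x[i] - mx) * (x[i] - mx);
        sy  += (y[i] - my) * (y[i] - my);
        sxy += (x[i] - mx) * (y[i] - my);
    }
    sx = sqrt(sx / n); sy = sqrt(sy / n); /* population stddev, as above */
    if (sx <= 0.1) sx = 1.0; /* same guard as the kernel */
    if (sy <= 0.1) sy = 1.0;
    return (sxy / n) / (sx * sy);
}

int main(void)
{
    double x[] = {1, 2, 3, 4}, y[] = {2, 4, 6, 8};
    printf("%f\n", pearson(x, y, 4)); /* 1.0: perfectly correlated */
    return 0;
}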
callback.h
#ifndef _BSD_SOURCE #define _BSD_SOURCE #endif #ifndef _DEFAULT_SOURCE #define _DEFAULT_SOURCE #endif #include <stdio.h> #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif #include <inttypes.h> #include <omp.h> #include <omp-tools.h> #include "ompt-signal.h" // Used to detect architecture #include "../../src/kmp_platform.h" #ifndef _TOOL_PREFIX #define _TOOL_PREFIX "" // If no _TOOL_PREFIX is set, we assume that we run as part of an OMPT test #define _OMPT_TESTS #endif static const char *ompt_thread_t_values[] = { "ompt_thread_UNDEFINED", "ompt_thread_initial", "ompt_thread_worker", "ompt_thread_other"}; static const char *ompt_task_status_t_values[] = { "ompt_task_UNDEFINED", "ompt_task_complete", // 1 "ompt_task_yield", // 2 "ompt_task_cancel", // 3 "ompt_task_detach", // 4 "ompt_task_early_fulfill", // 5 "ompt_task_late_fulfill", // 6 "ompt_task_switch" // 7 }; static const char* ompt_cancel_flag_t_values[] = { "ompt_cancel_parallel", "ompt_cancel_sections", "ompt_cancel_loop", "ompt_cancel_taskgroup", "ompt_cancel_activated", "ompt_cancel_detected", "ompt_cancel_discarded_task" }; static const char *ompt_dependence_type_t_values[] = { "ompt_dependence_type_UNDEFINED", "ompt_dependence_type_in", // 1 "ompt_dependence_type_out", // 2 "ompt_dependence_type_inout", // 3 "ompt_dependence_type_mutexinoutset", // 4 "ompt_dependence_type_source", // 5 "ompt_dependence_type_sink", // 6 "ompt_dependence_type_inoutset" // 7 }; static void format_task_type(int type, char *buffer) { char *progress = buffer; if (type & ompt_task_initial) progress += sprintf(progress, "ompt_task_initial"); if (type & ompt_task_implicit) progress += sprintf(progress, "ompt_task_implicit"); if (type & ompt_task_explicit) progress += sprintf(progress, "ompt_task_explicit"); if (type & ompt_task_target) progress += sprintf(progress, "ompt_task_target"); if (type & ompt_task_undeferred) progress += sprintf(progress, "|ompt_task_undeferred"); if (type & ompt_task_untied) progress += sprintf(progress, "|ompt_task_untied"); if (type & ompt_task_final) progress += sprintf(progress, "|ompt_task_final"); if (type & ompt_task_mergeable) progress += sprintf(progress, "|ompt_task_mergeable"); if (type & ompt_task_merged) progress += sprintf(progress, "|ompt_task_merged"); } static ompt_set_callback_t ompt_set_callback; static ompt_get_callback_t ompt_get_callback; static ompt_get_state_t ompt_get_state; static ompt_get_task_info_t ompt_get_task_info; static ompt_get_task_memory_t ompt_get_task_memory; static ompt_get_thread_data_t ompt_get_thread_data; static ompt_get_parallel_info_t ompt_get_parallel_info; static ompt_get_unique_id_t ompt_get_unique_id; static ompt_finalize_tool_t ompt_finalize_tool; static ompt_get_num_procs_t ompt_get_num_procs; static ompt_get_num_places_t ompt_get_num_places; static ompt_get_place_proc_ids_t ompt_get_place_proc_ids; static ompt_get_place_num_t ompt_get_place_num; static ompt_get_partition_place_nums_t ompt_get_partition_place_nums; static ompt_get_proc_id_t ompt_get_proc_id; static ompt_enumerate_states_t ompt_enumerate_states; static ompt_enumerate_mutex_impls_t ompt_enumerate_mutex_impls; static void print_ids(int level) { int task_type, thread_num; ompt_frame_t *frame; ompt_data_t *task_parallel_data; ompt_data_t *task_data; int exists_task = ompt_get_task_info(level, &task_type, &task_data, &frame, &task_parallel_data, &thread_num); char buffer[2048]; format_task_type(task_type, buffer); if (frame) printf("%" PRIu64 ": task level %d: parallel_id=%" PRIu64 ", task_id=%" 
PRIu64 ", exit_frame=%p, reenter_frame=%p, " "task_type=%s=%d, thread_num=%d\n", ompt_get_thread_data()->value, level, exists_task ? task_parallel_data->value : 0, exists_task ? task_data->value : 0, frame->exit_frame.ptr, frame->enter_frame.ptr, buffer, task_type, thread_num); } #define get_frame_address(level) __builtin_frame_address(level) #define print_frame(level) \ printf("%" PRIu64 ": __builtin_frame_address(%d)=%p\n", \ ompt_get_thread_data()->value, level, get_frame_address(level)) // clang (version 5.0 and above) adds an intermediate function call with debug flag (-g) #if defined(TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN) #if defined(DEBUG) && defined(__clang__) && __clang_major__ >= 5 #define print_frame_from_outlined_fn(level) print_frame(level+1) #else #define print_frame_from_outlined_fn(level) print_frame(level) #endif #if defined(__clang__) && __clang_major__ >= 5 #warning "Clang 5.0 and later add an additional wrapper for outlined functions when compiling with debug information." #warning "Please define -DDEBUG iff you manually pass in -g to make the tests succeed!" #endif #endif // This macro helps to define a label at the current position that can be used // to get the current address in the code. // // For print_current_address(): // To reliably determine the offset between the address of the label and the // actual return address, we insert a NOP instruction as a jump target as the // compiler would otherwise insert an instruction that we can't control. The // instruction length is target dependent and is explained below. // // (The empty block between "#pragma omp ..." and the __asm__ statement is a // workaround for a bug in the Intel Compiler.) #define define_ompt_label(id) \ {} \ __asm__("nop"); \ ompt_label_##id: // This macro helps to get the address of a label that is inserted by the above // macro define_ompt_label(). The address is obtained with a GNU extension // (&&label) that has been tested with gcc, clang and icc. #define get_ompt_label_address(id) (&& ompt_label_##id) // This macro prints the exact address that a previously called runtime function // returns to. #define print_current_address(id) \ define_ompt_label(id) \ print_possible_return_addresses(get_ompt_label_address(id)) #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // On X86 the NOP instruction is 1 byte long. In addition, the compiler inserts // a MOV instruction for non-void runtime functions which is 3 bytes long. #define print_possible_return_addresses(addr) \ printf("%" PRIu64 ": current_address=%p or %p for non-void functions\n", \ ompt_get_thread_data()->value, ((char *)addr) - 1, ((char *)addr) - 4) #elif KMP_ARCH_PPC64 // On Power the NOP instruction is 4 bytes long. In addition, the compiler // inserts a second NOP instruction (another 4 bytes). For non-void runtime // functions Clang inserts a STW instruction (but only if compiling under // -fno-PIC which will be the default with Clang 8.0, another 4 bytes). #define print_possible_return_addresses(addr) \ printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \ ((char *)addr) - 8, ((char *)addr) - 12) #elif KMP_ARCH_AARCH64 // On AArch64 the NOP instruction is 4 bytes long, can be followed by inserted // store instruction (another 4 bytes long). #define print_possible_return_addresses(addr) \ printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \ ((char *)addr) - 4, ((char *)addr) - 8) #elif KMP_ARCH_RISCV64 #if __riscv_compressed // On RV64GC the C.NOP instruction is 2 byte long. 
In addition, the compiler // inserts a J instruction (targeting the successor basic block), which // accounts for another 4 bytes. Finally, an additional J instruction may // appear (adding 4 more bytes) when the C.NOP is referenced elsewhere (ie. // another branch). #define print_possible_return_addresses(addr) \ printf("%" PRIu64 ": current_address=%p or %p\n", \ ompt_get_thread_data()->value, ((char *)addr) - 6, ((char *)addr) - 10) #else // On RV64G the NOP instruction is 4 byte long. In addition, the compiler // inserts a J instruction (targeting the successor basic block), which // accounts for another 4 bytes. Finally, an additional J instruction may // appear (adding 4 more bytes) when the NOP is referenced elsewhere (ie. // another branch). #define print_possible_return_addresses(addr) \ printf("%" PRIu64 ": current_address=%p or %p\n", \ ompt_get_thread_data()->value, ((char *)addr) - 8, ((char *)addr) - 12) #endif #else #error Unsupported target architecture, cannot determine address offset! #endif // This macro performs a somewhat similar job to print_current_address(), except // that it discards a certain number of nibbles from the address and only prints // the most significant bits / nibbles. This can be used for cases where the // return address can only be approximated. // // To account for overflows (ie the most significant bits / nibbles have just // changed as we are a few bytes above the relevant power of two) the addresses // of the "current" and of the "previous block" are printed. #define print_fuzzy_address(id) \ define_ompt_label(id) \ print_fuzzy_address_blocks(get_ompt_label_address(id)) // If you change this define you need to adapt all capture patterns in the tests // to include or discard the new number of nibbles! #define FUZZY_ADDRESS_DISCARD_NIBBLES 2 #define FUZZY_ADDRESS_DISCARD_BYTES (1 << ((FUZZY_ADDRESS_DISCARD_NIBBLES) * 4)) #define print_fuzzy_address_blocks(addr) \ printf("%" PRIu64 ": fuzzy_address=0x%" PRIx64 " or 0x%" PRIx64 \ " or 0x%" PRIx64 " or 0x%" PRIx64 " (%p)\n", \ ompt_get_thread_data()->value, \ ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES - 1, \ ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES, \ ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 1, \ ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 2, addr) #define register_ompt_callback_t(name, type) \ do { \ type f_##name = &on_##name; \ if (ompt_set_callback(name, (ompt_callback_t)f_##name) == ompt_set_never) \ printf("0: Could not register callback '" #name "'\n"); \ } while (0) #define register_ompt_callback(name) register_ompt_callback_t(name, name##_t) #ifndef USE_PRIVATE_TOOL static void on_ompt_callback_mutex_acquire( ompt_mutex_t kind, unsigned int hint, unsigned int impl, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_wait_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_wait_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_critical: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_wait_critical: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_atomic: printf("%" PRIu64 ":" 
_TOOL_PREFIX " ompt_event_wait_atomic: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_ordered: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_wait_ordered: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; default: break; } } static void on_ompt_callback_mutex_acquired( ompt_mutex_t kind, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_acquired_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_acquired_nest_lock_first: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_critical: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_acquired_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_atomic: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_acquired_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_ordered: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_acquired_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; default: break; } } static void on_ompt_callback_mutex_released( ompt_mutex_t kind, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_release_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_release_nest_lock_last: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_critical: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_release_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_atomic: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_release_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_ordered: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_release_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; default: break; } } static void on_ompt_callback_nest_lock( ompt_scope_endpoint_t endpoint, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_acquired_nest_lock_next: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_scope_end: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_release_nest_lock_prev: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_scope_beginend: printf("ompt_scope_beginend should never be passed to %s\n", __func__); exit(-1); } } static void on_ompt_callback_sync_region( ompt_sync_region_t kind, ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: switch(kind) { case 
ompt_sync_region_barrier: case ompt_sync_region_barrier_implicit: case ompt_sync_region_barrier_implicit_workshare: case ompt_sync_region_barrier_implicit_parallel: case ompt_sync_region_barrier_teams: case ompt_sync_region_barrier_explicit: case ompt_sync_region_barrier_implementation: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); print_ids(0); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_reduction: printf("ompt_sync_region_reduction should never be passed to " "on_ompt_callback_sync_region\n"); exit(-1); break; } break; case ompt_scope_end: switch(kind) { case ompt_sync_region_barrier: case ompt_sync_region_barrier_implicit: case ompt_sync_region_barrier_explicit: case ompt_sync_region_barrier_implicit_workshare: case ompt_sync_region_barrier_implicit_parallel: case ompt_sync_region_barrier_teams: case ompt_sync_region_barrier_implementation: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data) ? parallel_data->value : 0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data) ? parallel_data->value : 0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data) ? 
parallel_data->value : 0, task_data->value, codeptr_ra); break; case ompt_sync_region_reduction: printf("ompt_sync_region_reduction should never be passed to " "on_ompt_callback_sync_region\n"); exit(-1); break; } break; case ompt_scope_beginend: printf("ompt_scope_beginend should never be passed to %s\n", __func__); exit(-1); } } static void on_ompt_callback_sync_region_wait( ompt_sync_region_t kind, ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: switch(kind) { case ompt_sync_region_barrier: case ompt_sync_region_barrier_implicit: case ompt_sync_region_barrier_implicit_workshare: case ompt_sync_region_barrier_implicit_parallel: case ompt_sync_region_barrier_teams: case ompt_sync_region_barrier_explicit: case ompt_sync_region_barrier_implementation: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_wait_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_wait_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_wait_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_reduction: printf("ompt_sync_region_reduction should never be passed to " "on_ompt_callback_sync_region_wait\n"); exit(-1); break; } break; case ompt_scope_end: switch(kind) { case ompt_sync_region_barrier: case ompt_sync_region_barrier_implicit: case ompt_sync_region_barrier_implicit_workshare: case ompt_sync_region_barrier_implicit_parallel: case ompt_sync_region_barrier_teams: case ompt_sync_region_barrier_explicit: case ompt_sync_region_barrier_implementation: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_wait_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data) ? parallel_data->value : 0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_wait_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data) ? parallel_data->value : 0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_wait_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data) ? 
parallel_data->value : 0, task_data->value, codeptr_ra); break; case ompt_sync_region_reduction: printf("ompt_sync_region_reduction should never be passed to " "on_ompt_callback_sync_region_wait\n"); exit(-1); break; } break; case ompt_scope_beginend: printf("ompt_scope_beginend should never be passed to %s\n", __func__); exit(-1); } } static void on_ompt_callback_reduction(ompt_sync_region_t kind, ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, const void *codeptr_ra) { switch (endpoint) { case ompt_scope_begin: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_reduction_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data) ? parallel_data->value : 0, task_data->value, codeptr_ra); break; case ompt_scope_end: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_reduction_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data) ? parallel_data->value : 0, task_data->value, codeptr_ra); break; case ompt_scope_beginend: printf("ompt_scope_beginend should never be passed to %s\n", __func__); exit(-1); } } static void on_ompt_callback_flush( ompt_data_t *thread_data, const void *codeptr_ra) { printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_flush: codeptr_ra=%p\n", thread_data->value, codeptr_ra); } static void on_ompt_callback_cancel( ompt_data_t *task_data, int flags, const void *codeptr_ra) { const char* first_flag_value; const char* second_flag_value; if(flags & ompt_cancel_parallel) first_flag_value = ompt_cancel_flag_t_values[0]; else if(flags & ompt_cancel_sections) first_flag_value = ompt_cancel_flag_t_values[1]; else if(flags & ompt_cancel_loop) first_flag_value = ompt_cancel_flag_t_values[2]; else if(flags & ompt_cancel_taskgroup) first_flag_value = ompt_cancel_flag_t_values[3]; if(flags & ompt_cancel_activated) second_flag_value = ompt_cancel_flag_t_values[4]; else if(flags & ompt_cancel_detected) second_flag_value = ompt_cancel_flag_t_values[5]; else if(flags & ompt_cancel_discarded_task) second_flag_value = ompt_cancel_flag_t_values[6]; printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_cancel: task_data=%" PRIu64 ", flags=%s|%s=%" PRIu32 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, task_data->value, first_flag_value, second_flag_value, flags, codeptr_ra); } static void on_ompt_callback_implicit_task( ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, unsigned int team_size, unsigned int thread_num, int flags) { switch(endpoint) { case ompt_scope_begin: if(task_data->ptr) printf("%s\n", "0: task_data initially not null"); task_data->value = ompt_get_unique_id(); //there is no parallel_begin callback for implicit parallel region //thus it is initialized in initial task if(flags & ompt_task_initial) { char buffer[2048]; format_task_type(flags, buffer); // Only check initial task not created by teams construct if (team_size == 1 && thread_num == 1 && parallel_data->ptr) printf("%s\n", "0: parallel_data initially not null"); parallel_data->value = ompt_get_unique_id(); printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_initial_task_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", actual_parallelism=%" PRIu32 ", index=%" PRIu32 ", flags=%" PRIu32 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, team_size, thread_num, flags); } else { printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_implicit_task_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", 
thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, team_size, thread_num); } break; case ompt_scope_end: if(flags & ompt_task_initial){ printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_initial_task_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", actual_parallelism=%" PRIu32 ", index=%" PRIu32 "\n", ompt_get_thread_data()->value, (parallel_data) ? parallel_data->value : 0, task_data->value, team_size, thread_num); } else { printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_implicit_task_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, (parallel_data) ? parallel_data->value : 0, task_data->value, team_size, thread_num); } break; case ompt_scope_beginend: printf("ompt_scope_beginend should never be passed to %s\n", __func__); exit(-1); } } static void on_ompt_callback_lock_init( ompt_mutex_t kind, unsigned int hint, unsigned int impl, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_init_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_init_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; default: break; } } static void on_ompt_callback_lock_destroy( ompt_mutex_t kind, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_destroy_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_destroy_nest_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; default: break; } } static void on_ompt_callback_work( ompt_work_t wstype, ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, uint64_t count, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: switch(wstype) { case ompt_work_loop: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_loop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_sections: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_sections_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_single_executor: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_single_in_block_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_single_other: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_single_others_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_workshare: //impl break; case ompt_work_distribute: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_distribute_begin: parallel_id=%" 
PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_taskloop: //impl printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_taskloop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_scope: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_scope_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; } break; case ompt_scope_end: switch(wstype) { case ompt_work_loop: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_loop_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_sections: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_sections_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_single_executor: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_single_in_block_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_single_other: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_single_others_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_workshare: //impl break; case ompt_work_distribute: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_distribute_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_taskloop: //impl printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_taskloop_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_scope: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_scope_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; } break; case ompt_scope_beginend: printf("ompt_scope_beginend should never be passed to %s\n", __func__); exit(-1); } } static void on_ompt_callback_masked(ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_masked_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_scope_end: printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_masked_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_scope_beginend: printf("ompt_scope_beginend should 
never be passed to %s\n", __func__); exit(-1); } } static void on_ompt_callback_parallel_begin( ompt_data_t *encountering_task_data, const ompt_frame_t *encountering_task_frame, ompt_data_t *parallel_data, uint32_t requested_team_size, int flag, const void *codeptr_ra) { if(parallel_data->ptr) printf("0: parallel_data initially not null\n"); parallel_data->value = ompt_get_unique_id(); int invoker = flag & 0xF; const char *event = (flag & ompt_parallel_team) ? "parallel" : "teams"; const char *size = (flag & ompt_parallel_team) ? "team_size" : "num_teams"; printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s_begin: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, " "parallel_id=%" PRIu64 ", requested_%s=%" PRIu32 ", codeptr_ra=%p, invoker=%d\n", ompt_get_thread_data()->value, event, encountering_task_data->value, encountering_task_frame->exit_frame.ptr, encountering_task_frame->enter_frame.ptr, parallel_data->value, size, requested_team_size, codeptr_ra, invoker); } static void on_ompt_callback_parallel_end(ompt_data_t *parallel_data, ompt_data_t *encountering_task_data, int flag, const void *codeptr_ra) { int invoker = flag & 0xF; const char *event = (flag & ompt_parallel_team) ? "parallel" : "teams"; printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", invoker=%d, codeptr_ra=%p\n", ompt_get_thread_data()->value, event, parallel_data->value, encountering_task_data->value, invoker, codeptr_ra); } static void on_ompt_callback_task_create( ompt_data_t *encountering_task_data, const ompt_frame_t *encountering_task_frame, ompt_data_t* new_task_data, int type, int has_dependences, const void *codeptr_ra) { if(new_task_data->ptr) printf("0: new_task_data initially not null\n"); new_task_data->value = ompt_get_unique_id(); char buffer[2048]; format_task_type(type, buffer); printf( "%" PRIu64 ":" _TOOL_PREFIX " ompt_event_task_create: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, " "new_task_id=%" PRIu64 ", codeptr_ra=%p, task_type=%s=%d, has_dependences=%s\n", ompt_get_thread_data()->value, encountering_task_data ? encountering_task_data->value : 0, encountering_task_frame ? encountering_task_frame->exit_frame.ptr : NULL, encountering_task_frame ? encountering_task_frame->enter_frame.ptr : NULL, new_task_data->value, codeptr_ra, buffer, type, has_dependences ? "yes" : "no"); } static void on_ompt_callback_task_schedule( ompt_data_t *first_task_data, ompt_task_status_t prior_task_status, ompt_data_t *second_task_data) { printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_task_schedule: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 ", prior_task_status=%s=%d\n", ompt_get_thread_data()->value, first_task_data->value, (second_task_data ? 
second_task_data->value : -1), ompt_task_status_t_values[prior_task_status], prior_task_status); if (prior_task_status == ompt_task_complete || prior_task_status == ompt_task_late_fulfill) { printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_task_end: task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value); } } static void on_ompt_callback_dependences( ompt_data_t *task_data, const ompt_dependence_t *deps, int ndeps) { char buffer[2048]; char *progress = buffer; for (int i = 0; i < ndeps && progress < buffer + 2000; i++) { if (deps[i].dependence_type == ompt_dependence_type_source || deps[i].dependence_type == ompt_dependence_type_sink) progress += sprintf(progress, "(%" PRIu64 ", %s), ", deps[i].variable.value, ompt_dependence_type_t_values[deps[i].dependence_type]); else progress += sprintf(progress, "(%p, %s), ", deps[i].variable.ptr, ompt_dependence_type_t_values[deps[i].dependence_type]); } if (ndeps > 0) progress[-2] = 0; printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_dependences: task_id=%" PRIu64 ", deps=[%s], ndeps=%d\n", ompt_get_thread_data()->value, task_data->value, buffer, ndeps); } static void on_ompt_callback_task_dependence( ompt_data_t *first_task_data, ompt_data_t *second_task_data) { printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_task_dependence_pair: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value, second_task_data->value); } static void on_ompt_callback_thread_begin( ompt_thread_t thread_type, ompt_data_t *thread_data) { if(thread_data->ptr) printf("%s\n", "0: thread_data initially not null"); thread_data->value = ompt_get_unique_id(); printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, ompt_thread_t_values[thread_type], thread_type, thread_data->value); } static void on_ompt_callback_thread_end( ompt_data_t *thread_data) { printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_thread_end: thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, thread_data->value); } static int on_ompt_callback_control_tool( uint64_t command, uint64_t modifier, void *arg, const void *codeptr_ra) { ompt_frame_t* omptTaskFrame; ompt_get_task_info(0, NULL, (ompt_data_t**) NULL, &omptTaskFrame, NULL, NULL); printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_control_tool: command=%" PRIu64 ", modifier=%" PRIu64 ", arg=%p, codeptr_ra=%p, current_task_frame.exit=%p, " "current_task_frame.reenter=%p \n", ompt_get_thread_data()->value, command, modifier, arg, codeptr_ra, omptTaskFrame->exit_frame.ptr, omptTaskFrame->enter_frame.ptr); // the following would interfere with expected output for OMPT tests, so skip #ifndef _OMPT_TESTS // print task data int task_level = 0; ompt_data_t *task_data; while (ompt_get_task_info(task_level, NULL, (ompt_data_t **)&task_data, NULL, NULL, NULL)) { printf("%" PRIu64 ":" _TOOL_PREFIX " task level %d: task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, task_level, task_data->value); task_level++; } // print parallel data int parallel_level = 0; ompt_data_t *parallel_data; while (ompt_get_parallel_info(parallel_level, (ompt_data_t **)&parallel_data, NULL)) { printf("%" PRIu64 ":" _TOOL_PREFIX " parallel level %d: parallel_id=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_level, parallel_data->value); parallel_level++; } #endif return 0; //success } int ompt_initialize( ompt_function_lookup_t lookup, int initial_device_num, ompt_data_t *tool_data) { ompt_set_callback = (ompt_set_callback_t) 
lookup("ompt_set_callback"); ompt_get_callback = (ompt_get_callback_t) lookup("ompt_get_callback"); ompt_get_state = (ompt_get_state_t) lookup("ompt_get_state"); ompt_get_task_info = (ompt_get_task_info_t) lookup("ompt_get_task_info"); ompt_get_task_memory = (ompt_get_task_memory_t)lookup("ompt_get_task_memory"); ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data"); ompt_get_parallel_info = (ompt_get_parallel_info_t) lookup("ompt_get_parallel_info"); ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id"); ompt_finalize_tool = (ompt_finalize_tool_t)lookup("ompt_finalize_tool"); ompt_get_unique_id(); ompt_get_num_procs = (ompt_get_num_procs_t) lookup("ompt_get_num_procs"); ompt_get_num_places = (ompt_get_num_places_t) lookup("ompt_get_num_places"); ompt_get_place_proc_ids = (ompt_get_place_proc_ids_t) lookup("ompt_get_place_proc_ids"); ompt_get_place_num = (ompt_get_place_num_t) lookup("ompt_get_place_num"); ompt_get_partition_place_nums = (ompt_get_partition_place_nums_t) lookup("ompt_get_partition_place_nums"); ompt_get_proc_id = (ompt_get_proc_id_t) lookup("ompt_get_proc_id"); ompt_enumerate_states = (ompt_enumerate_states_t) lookup("ompt_enumerate_states"); ompt_enumerate_mutex_impls = (ompt_enumerate_mutex_impls_t) lookup("ompt_enumerate_mutex_impls"); register_ompt_callback(ompt_callback_mutex_acquire); register_ompt_callback_t(ompt_callback_mutex_acquired, ompt_callback_mutex_t); register_ompt_callback_t(ompt_callback_mutex_released, ompt_callback_mutex_t); register_ompt_callback(ompt_callback_nest_lock); register_ompt_callback(ompt_callback_sync_region); register_ompt_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t); register_ompt_callback_t(ompt_callback_reduction, ompt_callback_sync_region_t); register_ompt_callback(ompt_callback_control_tool); register_ompt_callback(ompt_callback_flush); register_ompt_callback(ompt_callback_cancel); register_ompt_callback(ompt_callback_implicit_task); register_ompt_callback_t(ompt_callback_lock_init, ompt_callback_mutex_acquire_t); register_ompt_callback_t(ompt_callback_lock_destroy, ompt_callback_mutex_t); register_ompt_callback(ompt_callback_work); register_ompt_callback(ompt_callback_masked); register_ompt_callback(ompt_callback_parallel_begin); register_ompt_callback(ompt_callback_parallel_end); register_ompt_callback(ompt_callback_task_create); register_ompt_callback(ompt_callback_task_schedule); register_ompt_callback(ompt_callback_dependences); register_ompt_callback(ompt_callback_task_dependence); register_ompt_callback(ompt_callback_thread_begin); register_ompt_callback(ompt_callback_thread_end); printf("0: NULL_POINTER=%p\n", (void*)NULL); return 1; //success } void ompt_finalize(ompt_data_t *tool_data) { printf("0: ompt_event_runtime_shutdown\n"); } #ifdef __cplusplus extern "C" { #endif ompt_start_tool_result_t* ompt_start_tool( unsigned int omp_version, const char *runtime_version) { static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0}; return &ompt_start_tool_result; } #ifdef __cplusplus } #endif #endif // ifndef USE_PRIVATE_TOOL #ifdef _OMPT_TESTS #undef _OMPT_TESTS #endif
recon3d.c
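/* recon3d.c -- super-voxel MBIR reconstruction driver.
 * MBIRReconstruct() reads or computes the system matrix, forms the sinogram
 * error e = y - Ax, and iterates ICD updates over super-voxels (SVs) in
 * parallel with OpenMP, alternating homogeneous sweeps over all SVs with
 * non-homogeneous sweeps driven by a priority heap of recent SV changes.
 * super_voxel_recon() updates the voxels of one SV, using either the qGGMRF
 * prior or, in plug-and-play (PandP) mode, a proximal-map term. */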
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <omp.h> #ifndef MSVC /* not included in MS Visual C++ */ #include <sys/time.h> #endif #include "MBIRModularDefs.h" #include "MBIRModularUtils.h" #include "allocate.h" #include "icd3d.h" #include "heap.h" #include "A_comp.h" #include "initialize.h" #include "recon3d.h" #define TEST //#define COMP_COST //#define COMP_RMSE /* Internal functions */ void super_voxel_recon(int jj,struct SVParams svpar,unsigned long *NumUpdates,float *totalValue,float *totalChange,int iter, char *phaseMap,long *order,int *indexList,float *weight,float *sinoerr, struct AValues_char **A_Padded_Map,float *Aval_max_ptr,struct heap_node *headNodeArray, struct SinoParams3DParallel sinoparams,struct ReconParams reconparams,struct ParamExt param_ext,float *image, struct ImageParams3D imgparams, float *proximalmap, char *group_array,int group_id); void SVproject(float *proj,float *image,struct AValues_char **A_Padded_Map,float *Aval_max_ptr, struct ImageParams3D imgparams,struct SinoParams3DParallel sinoparams,struct SVParams svpar,char backproject_flag); void coordinateShuffle(int *order1, int *order2,int len); void three_way_shuffle(long *order1, char *order2, struct heap_node *headNodeArray,int len); float MAPCostFunction3D(float *x,float *e,float *w,struct ImageParams3D imgparams,struct SinoParams3DParallel sinoparams, struct ReconParams reconparams,struct ParamExt param_ext); void MBIRReconstruct( float *image, float *sino, float *weight, float *proj_init, float *proximalmap, struct ImageParams3D imgparams, struct SinoParams3DParallel sinoparams, struct ReconParams reconparams, char *Amatrix_fname, char verboseLevel) { float *sinoerr, *proximalmap_loc=NULL; int i,j,jj,p,t,iter,it_print=1; size_t k; #ifndef MSVC /* not included in MS Visual C++ */ struct timeval tm1,tm2; #endif /* image/sino/recon parameters */ int Nx = imgparams.Nx; int Ny = imgparams.Ny; int Nz = imgparams.Nz; int Nxy = Nx*Ny; int Nvc = sinoparams.NViews * sinoparams.NChannels; int MaxIterations = reconparams.MaxIterations; float StopThreshold = reconparams.StopThreshold; /* Initialize/allocate SV parameters */ struct AValues_char **A_Padded_Map; float *Aval_max_ptr; struct SVParams svpar; initSVParams(&svpar, imgparams, sinoparams); int Nsv = svpar.Nsv; int SVLength = svpar.SVLength; int SV_per_Z = svpar.SV_per_Z; int SVsPerRow = svpar.SVsPerRow; /* Activate proximal map mode if given as input */ if(proximalmap != NULL) { reconparams.ReconType = MBIR_MODULAR_RECONTYPE_PandP; /* 'image' is reconstructed in place, so if proximal map is the same array, make a local copy */ if(proximalmap == image) { proximalmap_loc = (float *) mget_spc((size_t)Nx*Ny*Nz,sizeof(float)); for(k=0; k<(size_t)Nx*Ny*Nz; k++) proximalmap_loc[k] = proximalmap[k]; } else proximalmap_loc = proximalmap; } /* print summary to stdout */ if(verboseLevel>1) { fprintf(stdout,"MBIRReconstruct() -- build time: %s, %s\n", __DATE__, __TIME__); printSinoParams3DParallel(&sinoparams); printImageParams3D(&imgparams); if(reconparams.ReconType == MBIR_MODULAR_RECONTYPE_QGGMRF_3D) printReconParamsQGGMRF3D(&reconparams); if(reconparams.ReconType == MBIR_MODULAR_RECONTYPE_PandP) printReconParamsPandP(&reconparams); } /* Allocate and generate recon mask based on ROIRadius */ char * ImageReconMask = GenImageReconMask(&imgparams); /* Read/compute/write System Matrix */ A_Padded_Map = (struct AValues_char **)multialloc(sizeof(struct AValues_char),2,Nsv,(2*SVLength+1)*(2*SVLength+1)); Aval_max_ptr = (float *) 
mget_spc(Nx*Ny,sizeof(float)); if(Amatrix_fname != NULL) { if(verboseLevel) fprintf(stdout,"Reading system matrix...\n"); readAmatrix(Amatrix_fname, A_Padded_Map, Aval_max_ptr, &imgparams, &sinoparams, svpar); } else { if(verboseLevel) fprintf(stdout,"Computing system matrix...\n"); A_comp(A_Padded_Map,Aval_max_ptr,svpar,&sinoparams,ImageReconMask,&imgparams); } /* Project image for sinogram error */ if(proj_init != NULL) sinoerr = proj_init; else { if(verboseLevel) fprintf(stdout,"Projecting image...\n"); sinoerr = (float *) mget_spc((size_t)Nz*Nvc,sizeof(float)); SVproject(sinoerr,image,A_Padded_Map,Aval_max_ptr,imgparams,sinoparams,svpar,0); } for(k=0; k<(size_t)Nz*Nvc; k++) sinoerr[k] = sino[k]-sinoerr[k]; /* Recon parameters */ NormalizePriorWeights3D(&reconparams); struct ParamExt param_ext; param_ext.pow_sigmaX_p = powf(reconparams.SigmaX,reconparams.p); param_ext.pow_sigmaX_q = powf(reconparams.SigmaX,reconparams.q); param_ext.pow_T_qmp = powf(reconparams.T,reconparams.q - reconparams.p); param_ext.SigmaXsq = reconparams.SigmaX * reconparams.SigmaX; unsigned long NumUpdates=0; float totalValue=0,totalChange=0,equits=0; float avg_update=0,avg_update_rel=0; float c_ratio=0.07; float convergence_rho=0.7; int rep_num=(int)ceil(1/(4*c_ratio*convergence_rho)); struct heap priorityheap; initialize_heap(&priorityheap); long *order; char *phaseMap, **group_id_list; int NumMaskVoxels=0; for(j=0;j<Nxy;j++) if(ImageReconMask[j]) NumMaskVoxels++; #ifdef COMP_RMSE struct Image3D Image_ref; Image_ref.imgparams.Nx = imgparams.Nx; Image_ref.imgparams.Ny = imgparams.Ny; Image_ref.imgparams.Nz = imgparams.Nz; Image_ref.imgparams.FirstSliceNumber = imgparams.FirstSliceNumber; Image_ref.imgparams.NumSliceDigits = imgparams.NumSliceDigits; AllocateImageData3D(&Image_ref); ReadImage3D("ref/ref",&Image_ref); float ** image_ref = Image_ref.image; double rms_err=0,rms_val=0; int jz, Nz0=0, Nz1=Nz; for(jz=Nz0; jz<Nz1; jz++) for(j=0; j<Nxy; j++) if(ImageReconMask[j]) { rms_val += image_ref[jz][j]*image_ref[jz][j]; rms_err += (image[(size_t)jz*Nxy+j]-image_ref[jz][j])*(image[(size_t)jz*Nxy+j]-image_ref[jz][j]); } rms_val = sqrt(rms_val/((float)NumMaskVoxels*(Nz1-Nz0))); rms_err = sqrt(rms_err/((float)NumMaskVoxels*(Nz1-Nz0))); FILE *fp_mse=fopen("rmse.txt","w"); fprintf(fp_mse,"equits|rms_err|rms_val|rms_err/rms_val\n"); fprintf(fp_mse,"%.2f %g %g %g\n",equits,rms_err,rms_val,rms_err/rms_val); #endif order = (long *) mget_spc(Nsv*SV_per_Z,sizeof(long)); phaseMap = (char *) mget_spc(Nsv*SV_per_Z,sizeof(char)); group_id_list = (char **) multialloc(sizeof(char),2,SV_per_Z,4); /* Order of pixel updates need NOT be raster order, just initialize */ t=0; for(p=0;p<Nz;p+=svpar.SVDepth) for(i=0;i<Ny;i+=(SVLength*2-svpar.overlap)) for(j=0;j<Nx;j+=(SVLength*2-svpar.overlap)) { order[t]=(long)p*Nxy+i*Nx+j; /* order is the first voxel coordinate, not the center */ t++; } for(i=0;i<SV_per_Z;i++) for(jj=0;jj<Nsv;jj++) { if((jj/SVsPerRow)%2==0) { if((jj%SVsPerRow)%2==0) phaseMap[i*Nsv+jj]=0; else phaseMap[i*Nsv+jj]=1; } else { if((jj%SVsPerRow)%2==0) phaseMap[i*Nsv+jj]=2; else phaseMap[i*Nsv+jj]=3; } } for(i=0;i<SV_per_Z;i++) { if(i%4==0){ group_id_list[i][0]=0; group_id_list[i][1]=3; group_id_list[i][2]=1; group_id_list[i][3]=2; } else if(i%4==1) { group_id_list[i][0]=3; group_id_list[i][1]=0; group_id_list[i][2]=2; group_id_list[i][3]=1; } else if(i%4==2) { group_id_list[i][0]=1; group_id_list[i][1]=2; group_id_list[i][2]=3; group_id_list[i][3]=0; } else{ group_id_list[i][0]=2; group_id_list[i][1]=1; group_id_list[i][2]=0; 
group_id_list[i][3]=3; } } #ifdef TEST srand(0); #else srand(time(NULL)); #endif struct heap_node *headNodeArray; headNodeArray = (struct heap_node *) mget_spc(Nsv*SV_per_Z,sizeof(struct heap_node)); for(i=0;i<SV_per_Z;i++) for(jj=0;jj<Nsv;jj++) { headNodeArray[i*Nsv+jj].pt=i*Nsv+jj; headNodeArray[i*Nsv+jj].x=0.0; } //int indexList_size=(int) Nsv*SV_per_Z*4*c_ratio*(1-convergence_rho); int indexList_size= Nsv*SV_per_Z/4; int * indexList = (int *) mget_spc(indexList_size,sizeof(int)); //coordinateShuffle(&order[0],&phaseMap[0],Nsv*SV_per_Z); long tmp_long; char tmp_char; for(i=0; i<Nsv*SV_per_Z-1; i++) { j = i + (rand() % (Nsv*SV_per_Z-i)); tmp_long = order[j]; order[j] = order[i]; order[i] = tmp_long; tmp_char = phaseMap[j]; phaseMap[j] = phaseMap[i]; phaseMap[i] = tmp_char; } iter=0; char stop_FLAG=0; int startIndex=0; int endIndex=0; if(verboseLevel) { fprintf(stdout,"Reconstructing...\n"); #ifndef MSVC /* not included in MS Visual C++ */ gettimeofday(&tm1,NULL); #endif } // Limit threads for smaller problem size if no positivity constraint int max_threads = omp_get_max_threads(); if(reconparams.Positivity==0) { i = ((Nx < Ny) ? Nx : Ny) / (2*SVLength+1) * SV_per_Z; max_threads = ( i < max_threads) ? i : max_threads ; } #pragma omp parallel num_threads(max_threads) { while(stop_FLAG==0 && equits<MaxIterations && iter<100*MaxIterations) { #pragma omp single { if(iter==0) { startIndex=0; endIndex=Nsv*SV_per_Z; } else { if((iter-1)%(2*rep_num)==0 && iter!=1) three_way_shuffle(&order[0],&phaseMap[0],&headNodeArray[0],Nsv*SV_per_Z); if(iter%2==1) { priorityheap.size=0; for(jj=0;jj<Nsv*SV_per_Z;jj++){ heap_insert(&priorityheap, &(headNodeArray[jj])); } startIndex=0; endIndex=indexList_size; for(i=0;i<endIndex;i++) { struct heap_node tempNode; get_heap_max(&priorityheap, &tempNode); indexList[i]=tempNode.pt; } } else { startIndex=((iter-2)/2)%rep_num*Nsv*SV_per_Z/rep_num; endIndex=(((iter-2)/2)%rep_num+1)*Nsv*SV_per_Z/rep_num; } } } int group=0; for (group = 0; group < 4; group++) { if(iter%2 == 1 && reconparams.Positivity==0) { // Non-homogeneous update + no positivity constraint // SJK: using static scheduling for non-homogeneous case because // structural features can cause spatial alignment among SVs at // the top of the priority queue. 
Dynamic scheduling will tend to // schedule these at the same time--bad because of potential instability // in the case positivity constraint is turned off #pragma omp for schedule(static) reduction(+:NumUpdates) reduction(+:totalValue) reduction(+:totalChange) for (jj = startIndex; jj < endIndex; jj+=1) super_voxel_recon(jj,svpar,&NumUpdates,&totalValue,&totalChange,iter, &phaseMap[0],order,&indexList[0],weight,sinoerr,A_Padded_Map,&Aval_max_ptr[0], &headNodeArray[0],sinoparams,reconparams,param_ext,image,imgparams,proximalmap_loc, &group_id_list[0][0],group); } else // iter%2==0 Homogeneous update { #pragma omp for schedule(dynamic) reduction(+:NumUpdates) reduction(+:totalValue) reduction(+:totalChange) for (jj = startIndex; jj < endIndex; jj+=1) super_voxel_recon(jj,svpar,&NumUpdates,&totalValue,&totalChange,iter, &phaseMap[0],order,&indexList[0],weight,sinoerr,A_Padded_Map,&Aval_max_ptr[0], &headNodeArray[0],sinoparams,reconparams,param_ext,image,imgparams,proximalmap_loc, &group_id_list[0][0],group); } } #pragma omp single { avg_update=avg_update_rel=0.0; if(NumUpdates>0) { avg_update = totalChange/NumUpdates; float avg_value = totalValue/NumUpdates; if(avg_value > 0.0) avg_update_rel = avg_update/avg_value * 100; else avg_update_rel = avg_update; //printf("avg_update %f, avg_value %f, avg_update_rel %f\n",avg_update,avg_value,avg_update_rel); } #ifdef COMP_COST float cost = MAPCostFunction3D(image,sinoerr,weight,imgparams,sinoparams,reconparams,param_ext); fprintf(stdout, "it %d cost = %-15f, avg_update %f \n", iter, cost, avg_update); #endif if (avg_update_rel < StopThreshold && (endIndex!=0)) stop_FLAG = 1; iter++; equits += (float)NumUpdates/((float)NumMaskVoxels*Nz); if(verboseLevel && equits > it_print) { fprintf(stdout,"\titeration %d, average change %.4f %%\n",it_print,avg_update_rel); it_print++; } #ifdef COMP_RMSE rms_err=0; for(jz=Nz0; jz<Nz1; jz++) for(j=0; j<Nxy; j++) if(ImageReconMask[j]) rms_err += (image[(size_t)jz*Nxy+j]-image_ref[jz][j])*(image[(size_t)jz*Nxy+j]-image_ref[jz][j]); rms_err = sqrt(rms_err/((float)NumMaskVoxels*(Nz1-Nz0))); fprintf(fp_mse,"%.2f %g %g %g\n",equits,rms_err,rms_val,rms_err/rms_val); #endif NumUpdates=0; totalValue=0; totalChange=0; } } } if(verboseLevel) { if(StopThreshold <= 0) fprintf(stdout,"\tNo stopping condition--running fixed iterations\n"); else if(stop_FLAG == 1) fprintf(stdout,"\tReached stopping condition\n"); else fprintf(stdout,"\tWarning: Didn't reach stopping condition\n"); if(verboseLevel>1) { fprintf(stdout,"\tEquivalent iterations = %.1f, (non-homogeneous iterations = %d)\n",equits,iter); fprintf(stdout,"\tAverage update in last iteration (relative) = %f %%\n",avg_update_rel); fprintf(stdout,"\tAverage update in last iteration (magnitude) = %.4g\n",avg_update); } #ifndef MSVC /* not included in MS Visual C++ */ gettimeofday(&tm2,NULL); unsigned long long tt = 1000 * (tm2.tv_sec - tm1.tv_sec) + (tm2.tv_usec - tm1.tv_usec) / 1000; printf("\tReconstruction time = %llu ms (iterations only)\n", tt); #endif } /* If initial projection was supplied, update to return final projection */ if(proj_init != NULL) { for(k=0; k<(size_t)Nz*Nvc; k++) proj_init[k] = sino[k]-sinoerr[k]; } else free((void *)sinoerr); /* If local copy of proximal map was made, free it */ if(proximalmap == image) free((void *)proximalmap_loc); free((void *)headNodeArray); free_heap((void *)&priorityheap); free((void *)order); free((void *)phaseMap); multifree(group_id_list,2); free((void *)indexList); #ifdef COMP_RMSE FreeImageData3D(&Image_ref); 
fclose(fp_mse); #endif /* Free SV memory */ for(i=0;i<Nsv;i++) { free((void *)svpar.bandMinMap[i].bandMin); free((void *)svpar.bandMaxMap[i].bandMax); } free((void *)svpar.bandMinMap); free((void *)svpar.bandMaxMap); /* Free system matrix */ for(i=0;i<Nsv;i++) for(j=0;j<(2*SVLength+1)*(2*SVLength+1);j++) if(A_Padded_Map[i][j].length>0) { free((void *)A_Padded_Map[i][j].val); free((void *)A_Padded_Map[i][j].pieceWiseMin); free((void *)A_Padded_Map[i][j].pieceWiseWidth); } multifree(A_Padded_Map,2); free((void *)Aval_max_ptr); free((void *)ImageReconMask); } /* END MBIRReconstruct() */ void super_voxel_recon( int jj, struct SVParams svpar, unsigned long *NumUpdates, float *totalValue, float *totalChange, int iter, char *phaseMap, long *order, int *indexList, float *weight, float *sinoerr, struct AValues_char ** A_Padded_Map, float *Aval_max_ptr, struct heap_node *headNodeArray, struct SinoParams3DParallel sinoparams, struct ReconParams reconparams, struct ParamExt param_ext, float *image, struct ImageParams3D imgparams, float *proximalmap, char *group_array, int group_id) { int p,i,q,t,j,currentSlice; float *tempProxMap=NULL; int NumUpdates_loc=0; float totalValue_loc=0,totalChange_loc=0; int Nx = imgparams.Nx; int Ny = imgparams.Ny; int Nz = imgparams.Nz; int Nxy = Nx*Ny; int Nvc = sinoparams.NViews * sinoparams.NChannels; char PositivityFlag = reconparams.Positivity; int SV_depth_modified, SVPosition; int SVLength = svpar.SVLength; int overlappingDistance = svpar.overlap; int SV_depth = svpar.SVDepth; int SVsPerRow = svpar.SVsPerRow; struct minStruct * bandMinMap = svpar.bandMinMap; struct maxStruct * bandMaxMap = svpar.bandMaxMap; int pieceLength = svpar.pieceLength; int NViewSets = sinoparams.NViews/pieceLength; int jj_new; if(iter%2==0) jj_new=jj; else jj_new=indexList[jj]; int startSlice = order[jj_new] / Nxy; if(phaseMap[jj_new] != group_array[startSlice/SV_depth*4+group_id]) return; int jy = (order[jj_new] - startSlice*Nxy) / Nx; int jx = (order[jj_new] - startSlice*Nxy) % Nx; if((startSlice+SV_depth)>Nz) SV_depth_modified=Nz-startSlice; else SV_depth_modified=SV_depth; SVPosition = jy/(2*SVLength-overlappingDistance)*SVsPerRow+jx/(2*SVLength-overlappingDistance); int countNumber=0; /* number of voxels in given SV */ int coordinateSize=(2*SVLength+1)*(2*SVLength+1); int * k_newCoordinate = (int *) mget_spc(coordinateSize,sizeof(int)); int * j_newCoordinate = (int *) mget_spc(coordinateSize,sizeof(int)); int j_newAA,k_newAA; int voxelIncrement=0; /* choosing the voxels locations in an SV */ for(j_newAA=jy;j_newAA<=(jy+2*SVLength);j_newAA++) for(k_newAA=jx;k_newAA<=(jx+2*SVLength);k_newAA++) { if(j_newAA>=0 && k_newAA >=0 && j_newAA <Ny && k_newAA < Nx) { if(A_Padded_Map[SVPosition][voxelIncrement].length >0) { j_newCoordinate[countNumber]=j_newAA; k_newCoordinate[countNumber]=k_newAA; countNumber++; } } voxelIncrement++; } /* if no voxels in this region skip this loop iteration */ if(countNumber==0) { free((void *)k_newCoordinate); free((void *)j_newCoordinate); return; } coordinateShuffle(&j_newCoordinate[0],&k_newCoordinate[0],countNumber); /*XW: for a supervoxel, bandMin records the starting position of the sinogram band at each view*/ /*XW: for a supervoxel, bandMax records the end position of the sinogram band at each view */ channel_t * bandMin = (channel_t *) mget_spc(sinoparams.NViews,sizeof(channel_t)); channel_t * bandMax = (channel_t *) mget_spc(sinoparams.NViews,sizeof(channel_t)); channel_t * bandWidthTemp = (channel_t *) mget_spc(sinoparams.NViews,sizeof(channel_t)); 
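/* Views are grouped into NViewSets blocks of pieceLength consecutive views;
 * bandWidth[p] (computed just below) is the widest sinogram band over group
 * p, so each group's working buffer can be laid out as a dense
 * bandWidth[p] x pieceLength x SV_depth block. */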
channel_t * bandWidth = (channel_t *) mget_spc(NViewSets,sizeof(channel_t)); memcpy(&bandMin[0],&bandMinMap[SVPosition].bandMin[0],sizeof(channel_t)*(sinoparams.NViews)); memcpy(&bandMax[0],&bandMaxMap[SVPosition].bandMax[0],sizeof(channel_t)*(sinoparams.NViews)); //#pragma vector aligned for(p=0;p< sinoparams.NViews;p++) bandWidthTemp[p]=bandMax[p]-bandMin[p]; for (p = 0; p < NViewSets; p++) { int bandWidthMax=bandWidthTemp[p*pieceLength]; for(t=0;t<pieceLength;t++){ if(bandWidthTemp[p*pieceLength+t]>bandWidthMax) bandWidthMax=bandWidthTemp[p*pieceLength+t]; } bandWidth[p]=bandWidthMax; } float ** newWArray = (float **)malloc(sizeof(float *) * NViewSets); float ** newEArray = (float **)malloc(sizeof(float *) * NViewSets); float ** CopyNewEArray = (float **)malloc(sizeof(float *) * NViewSets); for (p = 0; p < NViewSets; p++) { newWArray[p] = (float *)malloc(sizeof(float)*bandWidth[p]*pieceLength*SV_depth_modified); newEArray[p] = (float *)malloc(sizeof(float)*bandWidth[p]*pieceLength*SV_depth_modified); CopyNewEArray[p] = (float *)malloc(sizeof(float)*bandWidth[p]*pieceLength*SV_depth_modified); } float *newWArrayPointer; float *newEArrayPointer; float **newWArrayTransposed; float **newEArrayTransposed; float *WTransposeArrayPointer; float *ETransposeArrayPointer; /*XW: copy the interlaced we into the memory buffer*/ for (p = 0; p < NViewSets; p++) { newWArrayPointer=&newWArray[p][0]; newEArrayPointer=&newEArray[p][0]; for(i=0;i<SV_depth_modified;i++) for(q=0;q<pieceLength;q++) { memcpy(newWArrayPointer,&weight[(startSlice+i)*Nvc+p*pieceLength*sinoparams.NChannels+q*sinoparams.NChannels+bandMin[p*pieceLength+q]],sizeof(float)*(bandWidth[p])); memcpy(newEArrayPointer,&sinoerr[(startSlice+i)*Nvc+p*pieceLength*sinoparams.NChannels+q*sinoparams.NChannels+bandMin[p*pieceLength+q]],sizeof(float)*(bandWidth[p])); newWArrayPointer+=bandWidth[p]; newEArrayPointer+=bandWidth[p]; } } for (p = 0; p < NViewSets; p++) memcpy(&CopyNewEArray[p][0],&newEArray[p][0],sizeof(float)*bandWidth[p]*pieceLength*SV_depth_modified); newWArrayTransposed = (float **)malloc(sizeof(float *) * NViewSets); newEArrayTransposed = (float **)malloc(sizeof(float *) * NViewSets); for (p = 0; p < NViewSets; p++) { newWArrayTransposed[p] = (float *)malloc(sizeof(float)*bandWidth[p]*pieceLength*SV_depth_modified); newEArrayTransposed[p] = (float *)malloc(sizeof(float)*bandWidth[p]*pieceLength*SV_depth_modified); } for (p = 0; p < NViewSets; p++) for(currentSlice=0;currentSlice<(SV_depth_modified);currentSlice++) { WTransposeArrayPointer=&newWArrayTransposed[p][currentSlice*bandWidth[p]*pieceLength]; ETransposeArrayPointer=&newEArrayTransposed[p][currentSlice*bandWidth[p]*pieceLength]; newEArrayPointer=&newEArray[p][currentSlice*bandWidth[p]*pieceLength]; newWArrayPointer=&newWArray[p][currentSlice*bandWidth[p]*pieceLength]; for(q=0;q<bandWidth[p];q++) { #pragma vector aligned for(t=0;t<pieceLength;t++) { ETransposeArrayPointer[q*pieceLength+t]=newEArrayPointer[bandWidth[p]*t+q]; WTransposeArrayPointer[q*pieceLength+t]=newWArrayPointer[bandWidth[p]*t+q]; } } } WTransposeArrayPointer=&newWArrayTransposed[0][0]; ETransposeArrayPointer=&newEArrayTransposed[0][0]; newEArrayPointer=&newEArray[0][0]; for (p = 0; p < NViewSets; p++) free((void *)newWArray[p]); free((void **)newWArray); /* Turn off zero-skipping for 1st iteration */ char zero_skip_enable=0; // 1: enable, 0: disable if(iter>0 && PositivityFlag) zero_skip_enable=1; /*XW: the start of the loop to compute theta1, theta2*/ float * THETA1 = (float *) 
get_spc(SV_depth_modified,sizeof(float)); float * THETA2 = (float *) get_spc(SV_depth_modified,sizeof(float)); float * tempV = (float *) mget_spc(SV_depth_modified,sizeof(float)); float * diff = (float *) mget_spc(SV_depth_modified,sizeof(float)); float ** neighbors = (float **) multialloc(sizeof(float),2,SV_depth_modified,10); char * zero_skip_FLAG = (char *) mget_spc(SV_depth_modified,sizeof(char)); if(reconparams.ReconType == MBIR_MODULAR_RECONTYPE_PandP) tempProxMap = (float *) mget_spc(SV_depth_modified,sizeof(float)); for(i=0;i<countNumber;i++) { const short j_new = j_newCoordinate[i]; /*XW: get the voxel's x,y location*/ const short k_new = k_newCoordinate[i]; float Aval_max = Aval_max_ptr[j_new*Nx+k_new]; for(p=0;p<SV_depth_modified;p++) THETA1[p]=THETA2[p]=0.0; int theVoxelPosition=(j_new-jy)*(2*SVLength+1)+(k_new-jx); unsigned char * A_padd_Tranpose_pointer = &A_Padded_Map[SVPosition][theVoxelPosition].val[0]; for(currentSlice=0;currentSlice<SV_depth_modified;currentSlice++) { tempV[currentSlice] = (float)(image[(size_t)(startSlice+currentSlice)*Nxy + j_new*Nx+k_new]); /* current voxel value */ zero_skip_FLAG[currentSlice] = 0; if(reconparams.ReconType == MBIR_MODULAR_RECONTYPE_QGGMRF_3D) { ExtractNeighbors3D(&neighbors[currentSlice][0],k_new,j_new,&image[(size_t)(startSlice+currentSlice)*Nxy],imgparams); if((startSlice+currentSlice)==0) neighbors[currentSlice][8]=0.0; else neighbors[currentSlice][8]=image[(size_t)(startSlice+currentSlice-1)*Nxy + j_new*Nx+k_new]; if((startSlice+currentSlice)<(Nz-1)) neighbors[currentSlice][9]=image[(size_t)(startSlice+currentSlice+1)*Nxy + j_new*Nx+k_new]; else neighbors[currentSlice][9]=0.0; if(zero_skip_enable) if(tempV[currentSlice] == 0.0) { zero_skip_FLAG[currentSlice] = 1; for (j = 0; j < 10; j++) { if (neighbors[currentSlice][j] != 0.0) { zero_skip_FLAG[currentSlice] = 0; break; } } } } if(reconparams.ReconType == MBIR_MODULAR_RECONTYPE_PandP) tempProxMap[currentSlice] = proximalmap[(startSlice+currentSlice)*Nxy + j_new*Nx+k_new]; } A_padd_Tranpose_pointer = &A_Padded_Map[SVPosition][theVoxelPosition].val[0]; for(p=0;p<NViewSets;p++) { int myCount=A_Padded_Map[SVPosition][theVoxelPosition].pieceWiseWidth[p]; int pieceMin=A_Padded_Map[SVPosition][theVoxelPosition].pieceWiseMin[p]; #pragma vector aligned for(currentSlice=0;currentSlice<SV_depth_modified;currentSlice++) if(zero_skip_FLAG[currentSlice] == 0) { WTransposeArrayPointer=&newWArrayTransposed[p][currentSlice*bandWidth[p]*pieceLength]; ETransposeArrayPointer=&newEArrayTransposed[p][currentSlice*bandWidth[p]*pieceLength]; WTransposeArrayPointer+=pieceMin*pieceLength; ETransposeArrayPointer+=pieceMin*pieceLength; float tempTHETA1=0.0; float tempTHETA2=0.0; //Not finding evidence this makes a difference --SJK //Deprecated by Intel anyway //#pragma vector aligned //#pragma simd reduction(+:tempTHETA2,tempTHETA1) for(t=0;t<myCount*pieceLength;t++) { /* summing over voxels which are not skipped or masked*/ tempTHETA1 += A_padd_Tranpose_pointer[t]*WTransposeArrayPointer[t]*ETransposeArrayPointer[t]; tempTHETA2 += A_padd_Tranpose_pointer[t]*WTransposeArrayPointer[t]*A_padd_Tranpose_pointer[t]; } THETA1[currentSlice]+=tempTHETA1; THETA2[currentSlice]+=tempTHETA2; } A_padd_Tranpose_pointer += myCount*pieceLength; } for(currentSlice=0;currentSlice<SV_depth_modified;currentSlice++) { THETA1[currentSlice]=-THETA1[currentSlice]*Aval_max*(1.0/255); THETA2[currentSlice]=THETA2[currentSlice]*Aval_max*(1.0/255)*Aval_max*(1.0/255); } A_padd_Tranpose_pointer = 
&A_Padded_Map[SVPosition][theVoxelPosition].val[0]; ETransposeArrayPointer = &newEArrayTransposed[0][0]; for(currentSlice=0;currentSlice<SV_depth_modified;currentSlice++) if(zero_skip_FLAG[currentSlice] == 0) { float pixel,step; if(reconparams.ReconType == MBIR_MODULAR_RECONTYPE_QGGMRF_3D) { step = QGGMRF3D_Update(reconparams,param_ext,tempV[currentSlice],&neighbors[currentSlice][0],THETA1[currentSlice],THETA2[currentSlice]); } else if(reconparams.ReconType == MBIR_MODULAR_RECONTYPE_PandP) { step = PandP_Update(param_ext.SigmaXsq,tempV[currentSlice],tempProxMap[currentSlice],THETA1[currentSlice],THETA2[currentSlice]); } else { fprintf(stderr,"Error** Unrecognized ReconType in ICD update\n"); exit(-1); } pixel = tempV[currentSlice] + step; /* can apply over-relaxation to the step size here */ if(PositivityFlag) image[(size_t)(startSlice+currentSlice)*Nxy + j_new*Nx+k_new] = ((pixel < 0.0) ? 0.0 : pixel); else image[(size_t)(startSlice+currentSlice)*Nxy + j_new*Nx+k_new] = pixel; diff[currentSlice] = image[(size_t)(startSlice+currentSlice)*Nxy + j_new*Nx+k_new] - tempV[currentSlice]; totalChange_loc += fabs(diff[currentSlice]); totalValue_loc += fabs(tempV[currentSlice]); NumUpdates_loc++; diff[currentSlice]=diff[currentSlice]*Aval_max*(1.0/255); } for(p=0;p<NViewSets;p++) { int myCount=A_Padded_Map[SVPosition][theVoxelPosition].pieceWiseWidth[p]; int pieceMin=A_Padded_Map[SVPosition][theVoxelPosition].pieceWiseMin[p]; #pragma vector aligned for(currentSlice=0;currentSlice<SV_depth_modified;currentSlice++) if(fabsf(diff[currentSlice])>0 && zero_skip_FLAG[currentSlice] == 0) { ETransposeArrayPointer=&newEArrayTransposed[p][currentSlice*bandWidth[p]*pieceLength]; ETransposeArrayPointer+=pieceMin*pieceLength; #pragma vector aligned for(t=0;t<(myCount*pieceLength);t++) ETransposeArrayPointer[t]= ETransposeArrayPointer[t]-A_padd_Tranpose_pointer[t]*diff[currentSlice]; } A_padd_Tranpose_pointer+=myCount*pieceLength; } } free((void *)THETA1); free((void *)THETA2); free((void *)tempV); free((void *)diff); multifree(neighbors,2); free((void *)zero_skip_FLAG); if(reconparams.ReconType == MBIR_MODULAR_RECONTYPE_PandP) free((void *)tempProxMap); for (p = 0; p < NViewSets; p++) free((void *)newWArrayTransposed[p]); free((void **)newWArrayTransposed); free((void *)k_newCoordinate); free((void *)j_newCoordinate); for (p = 0; p < NViewSets; p++) for(currentSlice=0;currentSlice<SV_depth_modified;currentSlice++) { ETransposeArrayPointer=&newEArrayTransposed[p][currentSlice*bandWidth[p]*pieceLength]; newEArrayPointer=&newEArray[p][currentSlice*bandWidth[p]*pieceLength]; for(q=0;q<bandWidth[p];q++) { #pragma vector aligned for(t=0;t<pieceLength;t++) newEArrayPointer[bandWidth[p]*t+q]=ETransposeArrayPointer[q*pieceLength+t]; } } for (p = 0; p < NViewSets; p++) free((void *)newEArrayTransposed[p]); free((void **)newEArrayTransposed); for (p = 0; p < NViewSets; p++) /*XW: update the error term in the memory buffer*/ { float *CopyNewEArrayPointer; float *eArrayPointer; newEArrayPointer=&newEArray[p][0]; CopyNewEArrayPointer=&CopyNewEArray[p][0]; for (currentSlice=0; currentSlice< SV_depth_modified;currentSlice++) { //#pragma vector aligned for(q=0;q<pieceLength;q++) { eArrayPointer=&sinoerr[(startSlice+currentSlice)*Nvc+p*pieceLength*sinoparams.NChannels+q*sinoparams.NChannels+bandMin[p*pieceLength+q]]; for(t=0;t<bandWidth[p];t++) { #pragma omp atomic *eArrayPointer += (*newEArrayPointer)-(*CopyNewEArrayPointer); newEArrayPointer++; CopyNewEArrayPointer++; eArrayPointer++; } } } } for (p = 0; p < NViewSets; p++) { 
free((void *)newEArray[p]); free((void *)CopyNewEArray[p]); } free((void **)newEArray); free((void **)CopyNewEArray); free((void *)bandMin); free((void *)bandMax); free((void *)bandWidth); free((void *)bandWidthTemp); headNodeArray[jj_new].x=totalChange_loc; *NumUpdates += NumUpdates_loc; *totalValue += totalValue_loc; *totalChange += totalChange_loc; } /* END super_voxel_recon() */ void coordinateShuffle(int *order1, int *order2,int len) { int i, j, tmp1,tmp2; for (i = 0; i < len-1; i++) { j = i + (rand() % (len-i)); tmp1 = order1[j]; tmp2 = order2[j]; order1[j] = order1[i]; order2[j] = order2[i]; order1[i] = tmp1; order2[i] = tmp2; } } void three_way_shuffle(long *order1, char *order2, struct heap_node *headNodeArray, int len) { int i,j; long tmp_long; char tmp_char; float temp_x; for (i = 0; i < len-1; i++) { j = i + (rand() % (len-i)); tmp_long = order1[j]; order1[j] = order1[i]; order1[i] = tmp_long; tmp_char = order2[j]; order2[j] = order2[i]; order2[i] = tmp_char; temp_x=headNodeArray[j].x; headNodeArray[j].x=headNodeArray[i].x; headNodeArray[i].x=temp_x; } } float MAPCostFunction3D( float *x, float *e, float *w, struct ImageParams3D imgparams, struct SinoParams3DParallel sinoparams, struct ReconParams reconparams, struct ParamExt param_ext) { int i, M, j, jx, jy, jz, Nx, Ny, Nz, Nxy, plusx, minusx, plusy, plusz; float nloglike, nlogprior_nearest, nlogprior_diag, nlogprior_interslice, x0; M = sinoparams.NViews * sinoparams.NChannels ; Nx = imgparams.Nx; Ny = imgparams.Ny; Nz = imgparams.Nz; Nxy = Nx*Ny; nloglike = 0.0; for (i = 0; i <sinoparams.NSlices; i++) for (j = 0; j < M; j++) nloglike += e[i*M+j]*w[i*M+j]*e[i*M+j]; nloglike /= 2.0; nlogprior_nearest = 0.0; nlogprior_diag = 0.0; nlogprior_interslice = 0.0; for (jz = 0; jz < Nz; jz++) for (jy = 0; jy < Ny; jy++) for (jx = 0; jx < Nx; jx++) { plusx = jx + 1; plusx = ((plusx < Nx) ? plusx : 0); minusx = jx - 1; minusx = ((minusx < 0) ? Nx-1 : minusx); plusy = jy + 1; plusy = ((plusy < Ny) ? plusy : 0); plusz = jz + 1; plusz = ((plusz < Nz) ? 
plusz : 0); j = jy*Nx + jx; x0 = x[jz*Nxy+j]; nlogprior_nearest += QGGMRF_Potential((x0-x[jz*Nxy+jy*Nx+plusx]),reconparams,param_ext); nlogprior_nearest += QGGMRF_Potential((x0-x[jz*Nxy+plusy*Nx+jx]),reconparams,param_ext); nlogprior_diag += QGGMRF_Potential((x0-x[jz*Nxy+plusy*Nx+minusx]),reconparams,param_ext); nlogprior_diag += QGGMRF_Potential((x0-x[jz*Nxy+plusy*Nx+plusx]),reconparams,param_ext); nlogprior_interslice += QGGMRF_Potential((x0-x[plusz*Nxy+jy*Nx+jx]),reconparams,param_ext); } return (nloglike + reconparams.b_nearest * nlogprior_nearest + reconparams.b_diag * nlogprior_diag + reconparams.b_interslice * nlogprior_interslice) ; } /* Forward projection using input SV system matrix */ void SVproject( float *proj, float *image, struct AValues_char **A_Padded_Map, float *Aval_max_ptr, struct ImageParams3D imgparams, struct SinoParams3DParallel sinoparams, struct SVParams svpar, char backproject_flag) { size_t i; int jz; int Nx = imgparams.Nx; int Ny = imgparams.Ny; int Nz = imgparams.Nz; int NChannels = sinoparams.NChannels; int Nvc = sinoparams.NViews * sinoparams.NChannels; int SVLength = svpar.SVLength; int pieceLength = svpar.pieceLength; int SVsPerRow = svpar.SVsPerRow; int NViewSets = sinoparams.NViews/pieceLength; struct minStruct * bandMinMap = svpar.bandMinMap; /* initialize output */ if(backproject_flag) for (i = 0; i < (size_t)Nx*Ny*Nz; i++) image[i] = 0.0; else for (i = 0; i < (size_t)Nvc*Nz; i++) proj[i] = 0.0; #pragma omp parallel for schedule(dynamic) for(jz=0;jz<Nz;jz++) { int jx,jy,k,r,p; for (jy = 0; jy < Ny; jy++) for (jx = 0; jx < Nx; jx++) { int SV_ind_y = jy/(2*SVLength-svpar.overlap); int SV_ind_x = jx/(2*SVLength-svpar.overlap); int SVPosition = SV_ind_y*SVsPerRow + SV_ind_x; int SV_jy = SV_ind_y*(2*SVLength-svpar.overlap); int SV_jx = SV_ind_x*(2*SVLength-svpar.overlap); int VoxelPosition = (jy-SV_jy)*(2*SVLength+1)+(jx-SV_jx); // The second condition should always be true if (A_Padded_Map[SVPosition][VoxelPosition].length > 0 && VoxelPosition < ((2*SVLength+1)*(2*SVLength+1))) { unsigned char* A_padd_Tr_ptr = &A_Padded_Map[SVPosition][VoxelPosition].val[0]; float rescale = Aval_max_ptr[jy*Nx+jx]*(1.0/255); size_t image_idx = (size_t)jz*Nx*Ny + jy*Nx + jx; float xval = image[image_idx]; for(p=0;p<NViewSets;p++) { int myCount = A_Padded_Map[SVPosition][VoxelPosition].pieceWiseWidth[p]; int pieceWiseMin = A_Padded_Map[SVPosition][VoxelPosition].pieceWiseMin[p]; int position = p*pieceLength*NChannels + pieceWiseMin; for(r=0;r<myCount;r++) for(k=0;k<pieceLength;k++) { channel_t bandMin = bandMinMap[SVPosition].bandMin[p*pieceLength+k]; size_t proj_idx = jz*Nvc + position + k*NChannels + bandMin + r; if((pieceWiseMin + bandMin + r) >= NChannels || (position + k*NChannels + bandMin + r) >= Nvc ) { fprintf(stderr,"SVproject() out of bounds: p %d r %d k %d\n",p,r,k); fprintf(stderr,"SVproject() out of bounds: total_1 %d total_2 %d\n",pieceWiseMin+bandMin+r,position+k*NChannels+bandMin+r); exit(-1); } else { if(backproject_flag) image[image_idx] += A_padd_Tr_ptr[r*pieceLength+k]*rescale * proj[proj_idx]; else proj[proj_idx] += A_padd_Tr_ptr[r*pieceLength+k]*rescale * xval; } } A_padd_Tr_ptr += myCount*pieceLength; } } } } } /* Forward projection wrapper that first reads or computes SV matrix */ void forwardProject( float *proj, float *image, struct ImageParams3D imgparams, struct SinoParams3DParallel sinoparams, char *Amatrix_fname, char backproject_flag, char verboseLevel) { int i,j; struct AValues_char **A_Padded_Map; float *Aval_max_ptr; struct SVParams svpar; 
/* print summary to stdout */ if(verboseLevel>1) { fprintf(stdout,"forwardProject() -- build time: %s, %s\n", __DATE__, __TIME__); printSinoParams3DParallel(&sinoparams); printImageParams3D(&imgparams); } /* Initialize/allocate SV parameters */ initSVParams(&svpar, imgparams, sinoparams); int Nsv = svpar.Nsv; int SVLength = svpar.SVLength; /* Read/compute/write System Matrix */ A_Padded_Map = (struct AValues_char **)multialloc(sizeof(struct AValues_char),2,Nsv,(2*SVLength+1)*(2*SVLength+1)); Aval_max_ptr = (float *) mget_spc(imgparams.Nx*imgparams.Ny,sizeof(float)); if(Amatrix_fname != NULL) { if(verboseLevel) fprintf(stdout,"Reading system matrix...\n"); readAmatrix(Amatrix_fname, A_Padded_Map, Aval_max_ptr, &imgparams, &sinoparams, svpar); } else { if(verboseLevel) fprintf(stdout,"Computing system matrix...\n"); char * ImageReconMask = GenImageReconMask(&imgparams); A_comp(A_Padded_Map,Aval_max_ptr,svpar,&sinoparams,ImageReconMask,&imgparams); free((void *)ImageReconMask); } /* Project */ if(verboseLevel) { if(backproject_flag) fprintf(stdout,"Back-projecting sinogram...\n"); else fprintf(stdout,"Projecting image...\n"); } SVproject(proj,image,A_Padded_Map,Aval_max_ptr,imgparams,sinoparams,svpar,backproject_flag); /* Free SV memory */ for(i=0;i<Nsv;i++) { free((void *)svpar.bandMinMap[i].bandMin); free((void *)svpar.bandMaxMap[i].bandMax); } free((void *)svpar.bandMinMap); free((void *)svpar.bandMaxMap); /* Free system matrix */ for(i=0;i<Nsv;i++) for(j=0;j<(2*SVLength+1)*(2*SVLength+1);j++) if(A_Padded_Map[i][j].length>0) { free((void *)A_Padded_Map[i][j].val); free((void *)A_Padded_Map[i][j].pieceWiseMin); free((void *)A_Padded_Map[i][j].pieceWiseWidth); } multifree(A_Padded_Map,2); free((void *)Aval_max_ptr); } /* END forwardProject() */
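/* Illustrative aside (all names hypothetical, not part of this library):
 * stripped of the super-voxel band bookkeeping, each matrix entry handled by
 * SVproject() contributes the classic projection pair -- proj += a*x in
 * forward mode, image += a*proj in backprojection mode -- with every 8-bit
 * stored entry rescaled by Aval_max/255 as in the code above. A dense toy
 * version makes the A versus A-transpose duality explicit: */
#include <cstdio>

static void dense_project(float *proj, float *image, const float *A,
                          int nRays, int nVoxels, char backproject_flag)
{
    for (int i = 0; i < nRays; i++)
        for (int j = 0; j < nVoxels; j++) {
            if (backproject_flag)
                image[j] += A[i * nVoxels + j] * proj[i];  /* x += A^T y */
            else
                proj[i] += A[i * nVoxels + j] * image[j];  /* y += A x   */
        }
}

int main(void)
{
    const float A[2 * 3] = {1, 0, 1,  0, 2, 0};  /* 2 rays x 3 voxels */
    float x[3] = {1, 2, 3}, y[2] = {0, 0};
    dense_project(y, x, A, 2, 3, 0);             /* forward:  y  = (4, 4)    */
    float xb[3] = {0, 0, 0};
    dense_project(y, xb, A, 2, 3, 1);            /* backward: xb = (4, 8, 4) */
    std::printf("y=(%g,%g) xb=(%g,%g,%g)\n", y[0], y[1], xb[0], xb[1], xb[2]);
    return 0;
}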
initialize.c
//-------------------------------------------------------------------------//
//                                                                         //
//  This benchmark is a serial C version of the NPB SP code. This C        //
//  version is developed by the Center for Manycore Programming at Seoul   //
//  National University and derived from the serial Fortran versions in    //
//  "NPB3.3-SER" developed by NAS.                                         //
//                                                                         //
//  Permission to use, copy, distribute and modify this software for any   //
//  purpose with or without fee is hereby granted. This software is        //
//  provided "as is" without express or implied warranty.                  //
//                                                                         //
//  Information on NPB 3.3, including the technical report, the original   //
//  specifications, source code, results and information on how to submit  //
//  new results, is available at:                                          //
//                                                                         //
//           http://www.nas.nasa.gov/Software/NPB/                         //
//                                                                         //
//  Send comments or suggestions for this C version to cmp@aces.snu.ac.kr  //
//                                                                         //
//          Center for Manycore Programming                                //
//          School of Computer Science and Engineering                     //
//          Seoul National University                                      //
//          Seoul 151-744, Korea                                           //
//                                                                         //
//          E-mail:  cmp@aces.snu.ac.kr                                    //
//                                                                         //
//-------------------------------------------------------------------------//

//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo,    //
//          and Jaejin Lee                                                 //
//-------------------------------------------------------------------------//

#include "header.h"

//---------------------------------------------------------------------
// This subroutine initializes the field variable u using
// tri-linear transfinite interpolation of the boundary values
//---------------------------------------------------------------------
void initialize()
{
  int i, j, k, m, ix, iy, iz;
  double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];

  //---------------------------------------------------------------------
  // Later (in compute_rhs) we compute 1/u for every element. A few of
  // the corner elements are not used, but it is convenient (and faster)
  // to compute the whole thing with a simple loop. Make sure those
  // values are nonzero by initializing the whole thing here.
  //---------------------------------------------------------------------
  for (k = 0; k <= grid_points[2]-1; k++) {
    for (j = 0; j <= grid_points[1]-1; j++) {
      for (i = 0; i <= grid_points[0]-1; i++) {
        u[0][k][j][i] = 1.0;
        u[1][k][j][i] = 0.0;
        u[2][k][j][i] = 0.0;
        u[3][k][j][i] = 0.0;
        u[4][k][j][i] = 1.0;
      }
    }
  }

  //---------------------------------------------------------------------
  // first store the "interpolated" values everywhere on the grid
  //---------------------------------------------------------------------
  for (k = 0; k <= grid_points[2]-1; k++) {
    zeta = (double)k * dnzm1;
    for (j = 0; j <= grid_points[1]-1; j++) {
      eta = (double)j * dnym1;
      for (i = 0; i <= grid_points[0]-1; i++) {
        xi = (double)i * dnxm1;

        for (ix = 0; ix < 2; ix++) {
          Pxi = (double)ix;
          exact_solution(Pxi, eta, zeta, &Pface[ix][0][0]);
        }
        for (iy = 0; iy < 2; iy++) {
          Peta = (double)iy;
          exact_solution(xi, Peta, zeta, &Pface[iy][1][0]);
        }
        for (iz = 0; iz < 2; iz++) {
          Pzeta = (double)iz;
          exact_solution(xi, eta, Pzeta, &Pface[iz][2][0]);
        }

        for (m = 0; m < 5; m++) {
          Pxi   = xi   * Pface[1][0][m] + (1.0-xi)   * Pface[0][0][m];
          Peta  = eta  * Pface[1][1][m] + (1.0-eta)  * Pface[0][1][m];
          Pzeta = zeta * Pface[1][2][m] + (1.0-zeta) * Pface[0][2][m];

          u[m][k][j][i] = Pxi + Peta + Pzeta -
                          Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
                          Pxi*Peta*Pzeta;
        }
      }
    }
  }

  //---------------------------------------------------------------------
  // now store the exact values on the boundaries
  //---------------------------------------------------------------------

  //---------------------------------------------------------------------
  // west face
  //---------------------------------------------------------------------
  xi = 0.0;
  i = 0;
  for (k = 0; k <= grid_points[2]-1; k++) {
    zeta = (double)k * dnzm1;
    for (j = 0; j <= grid_points[1]-1; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[m][k][j][i] = temp[m];
      }
    }
  }

  //---------------------------------------------------------------------
  // east face
  //---------------------------------------------------------------------
  xi = 1.0;
  i = grid_points[0]-1;
  for (k = 0; k <= grid_points[2]-1; k++) {
    zeta = (double)k * dnzm1;
    for (j = 0; j <= grid_points[1]-1; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[m][k][j][i] = temp[m];
      }
    }
  }

  //---------------------------------------------------------------------
  // south face
  //---------------------------------------------------------------------
  eta = 0.0;
  j = 0;
  for (k = 0; k <= grid_points[2]-1; k++) {
    zeta = (double)k * dnzm1;
    for (i = 0; i <= grid_points[0]-1; i++) {
      xi = (double)i * dnxm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[m][k][j][i] = temp[m];
      }
    }
  }

  //---------------------------------------------------------------------
  // north face
  //---------------------------------------------------------------------
  eta = 1.0;
  j = grid_points[1]-1;
  for (k = 0; k <= grid_points[2]-1; k++) {
    zeta = (double)k * dnzm1;
    for (i = 0; i <= grid_points[0]-1; i++) {
      xi = (double)i * dnxm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[m][k][j][i] = temp[m];
      }
    }
  }

  //---------------------------------------------------------------------
  // bottom face
  //---------------------------------------------------------------------
  zeta = 0.0;
  k = 0;
  for (j = 0; j <= grid_points[1]-1; j++) {
    eta = (double)j * dnym1;
    for (i = 0; i <= grid_points[0]-1; i++) {
      xi = (double)i * dnxm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[m][k][j][i] = temp[m];
      }
    }
  }

  //---------------------------------------------------------------------
  // top face
  //---------------------------------------------------------------------
  zeta = 1.0;
  k = grid_points[2]-1;
  for (j = 0; j <= grid_points[1]-1; j++) {
    eta = (double)j * dnym1;
    for (i = 0; i <= grid_points[0]-1; i++) {
      xi = (double)i * dnxm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[m][k][j][i] = temp[m];
      }
    }
  }

  #pragma omp target update to(u)
}

/*
void lhsinit(int ni, int nj)
{
  int j, m;

  //---------------------------------------------------------------------
  // zap the whole left hand side for starters
  // set all diagonal values to 1. This is overkill, but convenient
  //---------------------------------------------------------------------
  for (j = 1; j <= nj; j++) {
    for (m = 0; m < 5; m++) {
      lhs [j][0][m] = 0.0;
      lhsp[j][0][m] = 0.0;
      lhsm[j][0][m] = 0.0;
      lhs [j][ni][m] = 0.0;
      lhsp[j][ni][m] = 0.0;
      lhsm[j][ni][m] = 0.0;
    }
    lhs [j][0][2] = 1.0;
    lhsp[j][0][2] = 1.0;
    lhsm[j][0][2] = 1.0;
    lhs [j][ni][2] = 1.0;
    lhsp[j][ni][2] = 1.0;
    lhsm[j][ni][2] = 1.0;
  }
}

void lhsinitj(int nj, int ni)
{
  int i, m;

  //---------------------------------------------------------------------
  // zap the whole left hand side for starters
  // set all diagonal values to 1. This is overkill, but convenient
  //---------------------------------------------------------------------
  for (i = 1; i <= ni; i++) {
    for (m = 0; m < 5; m++) {
      lhs [0][i][m] = 0.0;
      lhsp[0][i][m] = 0.0;
      lhsm[0][i][m] = 0.0;
      lhs [nj][i][m] = 0.0;
      lhsp[nj][i][m] = 0.0;
      lhsm[nj][i][m] = 0.0;
    }
    lhs [0][i][2] = 1.0;
    lhsp[0][i][2] = 1.0;
    lhsm[0][i][2] = 1.0;
    lhs [nj][i][2] = 1.0;
    lhsp[nj][i][2] = 1.0;
    lhsm[nj][i][2] = 1.0;
  }
}
*/
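/* Illustrative aside (not part of the benchmark): the seven-term blend used
 * in initialize(),
 *   u = Pxi + Peta + Pzeta - Pxi*Peta - Pxi*Pzeta - Peta*Pzeta
 *       + Pxi*Peta*Pzeta,
 * is algebraically the complement product 1 - (1-Pxi)(1-Peta)(1-Pzeta), i.e.
 * a Boolean sum of the three face interpolants. A quick standalone check
 * with toy values: */
#include <cassert>
#include <cmath>
#include <cstdio>

int main(void)
{
    const double P[3] = {0.3, 0.55, 0.9};  // stand-ins for Pxi, Peta, Pzeta
    double u7 = P[0] + P[1] + P[2]
              - P[0]*P[1] - P[0]*P[2] - P[1]*P[2]
              + P[0]*P[1]*P[2];
    double uc = 1.0 - (1.0 - P[0]) * (1.0 - P[1]) * (1.0 - P[2]);
    assert(std::fabs(u7 - uc) < 1e-12);          // both forms agree: 0.9685
    std::printf("u = %.12f\n", u7);
    return 0;
}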
GB_unop__identity_uint64_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint64_fc32)
// op(A') function:  GB (_unop_tran__identity_uint64_fc32)

// C type:   uint64_t
// A type:   GxB_FC32_t
// cast:     uint64_t cij = GB_cast_to_uint64_t ((double) crealf (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint64_t z = GB_cast_to_uint64_t ((double) crealf (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                                      \
{                                                              \
    /* aij = Ax [pA] */                                        \
    GxB_FC32_t aij = Ax [pA] ;                                 \
    /* Cx [pC] = op (cast (aij)) */                            \
    uint64_t z = GB_cast_to_uint64_t ((double) crealf (aij)) ; \
    Cx [pC] = z ;                                              \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_uint64_fc32)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            uint64_t z = GB_cast_to_uint64_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            uint64_t z = GB_cast_to_uint64_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint64_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
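/* Illustrative aside: the typecast this generated kernel hard-codes -- an
 * "identity" op from GxB_FC32_t (complex float) to uint64_t -- keeps only the
 * real part and then converts. The clamp/saturation behaviour sketched below
 * is an assumption for illustration; the exact NaN and overflow rules are
 * whatever the library's GB_cast_to_uint64_t defines. */
#include <complex>
#include <cstdint>
#include <cmath>
#include <cstdio>

static uint64_t cast_fc32_to_uint64(std::complex<float> aij)
{
    double x = static_cast<double>(aij.real());         // imaginary part dropped
    if (std::isnan(x) || x <= 0.0) return 0;            // assumed clamp at zero
    if (x >= 18446744073709551616.0) return UINT64_MAX; // assumed saturation (2^64)
    return static_cast<uint64_t>(x);                    // truncation toward zero
}

int main(void)
{
    std::printf("%llu\n",
        (unsigned long long) cast_fc32_to_uint64({3.7f, -2.0f}));  // prints 3
    return 0;
}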
utils.h
#ifndef UTILS_H
#define UTILS_H

#include <array>
#include <cmath>
#include <cstddef>
#include <random>

using Real = double;

namespace particleKernels {

namespace utils {

// minimum-image convention: map t into (-lBox/2, lBox/2]
inline Real differencePBC(Real t, Real lBox, Real lBoxInverse)
{
    return (t - std::round(t * lBoxInverse) * lBox);
}

Real restrictToBox(Real x, Real left, Real lBox, Real lBoxInverse);

void restrictToBox(Real *positionsPBC, const Real *positionsOld,
                   int iStart, int iEnd, int N,
                   const std::array<Real, 3> &left,
                   const std::array<Real, 3> &lBox);

void initRandom(Real *particles, int dims, int iStart, int iEnd, size_t N,
                std::default_random_engine &generator,
                Real xmin = 0, Real xmax = 1);

/* Sum op(r_ij) over all unordered pairs. Particles are stored
 * structure-of-arrays as particles[i + d*N]; distances use the minimum image
 * per dimension. */
template <class V_t, int dims>
Real twoBodyDistancesIsotropicReduction(const Real *__restrict particles,
                                        const V_t &op, size_t N,
                                        const std::array<Real, dims> &lBox)
{
    Real sum2b = 0;
    std::array<Real, dims> lBoxInverse;
    for (int d = 0; d < dims; d++)
    {
        lBoxInverse[d] = 1. / lBox[d];
    }

    #pragma omp parallel for reduction(+:sum2b) schedule(runtime)
    for (int iParticle = 0; iParticle < (int)N; iParticle++)
    {
        for (int jParticle = 0; jParticle < iParticle; jParticle++)
        {
            Real r2 = 0;
            for (int d = 0; d < dims; d++)
            {
                Real diffd = (particles[iParticle + d * N] -
                              particles[jParticle + d * N]);
                diffd = utils::differencePBC(diffd, lBox[d], lBoxInverse[d]);
                r2 += diffd * diffd;
            }
            sum2b += op(std::sqrt(r2));
        }
    }
    return sum2b;
}

struct gaussianInteractionWithCutOff
{
    gaussianInteractionWithCutOff(Real alpha, Real cutOff)
        : _alpha(alpha), _cutOff(cutOff) {}

    inline Real operator()(Real r) const
    {
        if (r < _cutOff)
        {
            return std::exp(-_alpha * r * r);
        }
        else
        {
            return 0;
        }
    }

  private:
    Real _alpha;
    Real _cutOff;
};

struct tetaInteraction
{
    tetaInteraction(Real V0, Real cutOff) : _V0(V0), _cutOff(cutOff) {}

    inline Real operator()(Real r) const
    {
        if (r < _cutOff)
        {
            return _V0;
        }
        else
        {
            return 0;
        }
    }

  private:
    Real _V0;
    Real _cutOff;
};

} // namespace utils

} // namespace particleKernels

#endif
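/* Illustrative usage sketch (compile as its own translation unit): three
 * particles in 3D, stored structure-of-arrays as particles[i + d*N], summed
 * through the Gaussian pair kernel under minimum-image PBC. */
#include "utils.h"
#include <cstdio>

int main(void)
{
    using namespace particleKernels;
    const size_t N = 3;
    // x components of particles 0..2, then y components, then z components
    Real particles[3 * N] = {0.1, 0.9, 0.5,   // x
                             0.1, 0.9, 0.5,   // y
                             0.1, 0.9, 0.5};  // z
    std::array<Real, 3> lBox{1.0, 1.0, 1.0};
    utils::gaussianInteractionWithCutOff op(/*alpha=*/2.0, /*cutOff=*/0.6);
    // only the 0-1 pair survives the cutoff: |r| = 0.2*sqrt(3) under PBC
    Real e = utils::twoBodyDistancesIsotropicReduction<decltype(op), 3>(
        particles, op, N, lBox);
    std::printf("pair sum = %g\n", e);  // exp(-2 * 0.12) ~= 0.787
    return 0;
}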
operation.h
#ifndef MATH_LINEAR_ALGEBRA_OPERATION_H
#define MATH_LINEAR_ALGEBRA_OPERATION_H

#include "vector.h"
#include "matrix.h"

#include <math.h> // for sqrt

// NOTE(Nabil/htmlboss): Going to try to use the built-in OpenMP to speed up Matrix operations
#include <omp.h>

namespace math {

    // vector geometric operations
    // ---------------------------------------
    template <std::size_t n, typename T>
    inline T length(vector<n, T> vec)
    {
        T result = {};
        for (std::size_t i = 0; i < n; ++i)
            result += vec[i] * vec[i];
        return sqrt(result);
    }

    // Often we only care about the relative length differences between
    // vectors, not their exact length values. Seeing as a square root can be
    // costly, it's more efficient to compare these lengths without the
    // square root.
    template <std::size_t n, typename T>
    inline T lengthSquared(vector<n, T> vec)
    {
        T result = {};
        for (std::size_t i = 0; i < n; ++i)
            result += vec[i] * vec[i];
        return result;
    }

    template <std::size_t n, typename T>
    inline float distance(vector<n, T> lhs, vector<n, T> rhs)
    {
        return length(lhs - rhs);
    }

    template <std::size_t n, typename T>
    inline float distanceSquared(vector<n, T> lhs, vector<n, T> rhs)
    {
        return lengthSquared(lhs - rhs);
    }

    template <std::size_t n, typename T>
    inline vector<n, T> normalize(vector<n, T> vec)
    {
        vector<n, T> result;
        T len = length(vec);
        for (std::size_t i = 0; i < n; ++i)
            result[i] = vec[i] / len;
        return result;
    }

    template <std::size_t n, typename T>
    inline T dot(vector<n, T> lhs, vector<n, T> rhs)
    {
        T result = {};
        for (std::size_t i = 0; i < n; ++i)
            result += lhs[i] * rhs[i];
        return result;
    }

    // perpendicular is only defined as is for 2D vectors
    template <typename T>
    inline vector<2, T> perpendicular(const vector<2, T>& vec)
    {
        vector<2, T> result;
        result.x = -vec.y;
        result.y = vec.x;
        return result;
    }

    // cross product is only defined for 3D vectors
    template <typename T>
    inline vector<3, T> cross(const vector<3, T>& lhs, const vector<3, T>& rhs)
    {
        vector<3, T> result;
        result.x = lhs.y*rhs.z - lhs.z*rhs.y;
        result.y = lhs.z*rhs.x - lhs.x*rhs.z;
        result.z = lhs.x*rhs.y - lhs.y*rhs.x;
        return result;
    }

    // matrix algebraic operations
    // ---------------------------------------
    // The transpose of an m x n matrix is n x m, hence the swapped
    // dimensions in the return type. We also switch the < m and < n around
    // in the loop conditions, as the result matrix has reversed dimensions.
    // TODO: add in a test for omp parallel
    template <std::size_t m, std::size_t n, typename T>
    inline matrix<n, m, T> transpose(matrix<m, n, T>& mat)
    {
        matrix<n, m, T> result;
        #pragma omp parallel for
        for (std::size_t col = 0; col < m; ++col)
        {
            for (std::size_t row = 0; row < n; ++row)
            {
                result[col][row] = mat[row][col];
            }
        }
        return result;
    }

    template <std::size_t m, std::size_t n, typename T>
    inline matrix<m, n, T> inverse(const matrix<m, n, T>& mat)
    {
        matrix<m, n, T> result;
        // TODO: calculate the determinant algebraically and retrieve the
        // inverse; currently returns a default-initialized matrix.
        return result;
    }

} // namespace math
#endif
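/* Illustrative usage sketch (compile as its own translation unit).
 * Construction assumes only what the header itself relies on:
 * default-constructible vector<n, T> with operator[] and named .x/.y/.z
 * members (from vector.h); distance() uses vector.h's operator-. */
#include "operation.h"
#include <cstdio>

int main(void)
{
    math::vector<3, float> a, b;
    a.x = 1.0f; a.y = 0.0f; a.z = 0.0f;
    b.x = 0.0f; b.y = 1.0f; b.z = 0.0f;

    float d    = math::dot(a, b);       // 0: the axes are orthogonal
    auto  c    = math::cross(a, b);     // (0, 0, 1): right-handed basis
    float dist = math::distance(a, b);  // sqrt(2)

    std::printf("dot=%g cross.z=%g dist=%g\n", d, c.z, dist);
    return 0;
}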
map.c
#include <stdlib.h> #include <string.h> #include <assert.h> #include <errno.h> #include <cinttypes> #include <algorithm> #include <tuple> #include <omp.h> #include <iostream> #include "kthread.h" #include "kvec.h" #include "kalloc.h" #include "sdust.h" #include "mmpriv.h" #include "bseq.h" #include "khash.h" struct mm_tbuf_s { void *km; int rep_len, frag_gap; }; mm_tbuf_t *mm_tbuf_init(void) { mm_tbuf_t *b; b = (mm_tbuf_t*)calloc(1, sizeof(mm_tbuf_t)); if (!(mm_dbg_flag & 1)) b->km = km_init(); return b; } void mm_tbuf_destroy(mm_tbuf_t *b) { if (b == 0) return; km_destroy(b->km); free(b); } void *mm_tbuf_get_km(mm_tbuf_t *b) { return b->km; } static int mm_dust_minier(void *km, int n, mm128_t *a, int l_seq, const char *seq, int sdust_thres) { int n_dreg, j, k, u = 0; const uint64_t *dreg; sdust_buf_t *sdb; if (sdust_thres <= 0) return n; sdb = sdust_buf_init(km); dreg = sdust_core((const uint8_t*)seq, l_seq, sdust_thres, 64, &n_dreg, sdb); for (j = k = 0; j < n; ++j) { // squeeze out minimizers that significantly overlap with LCRs int32_t qpos = (uint32_t)a[j].y>>1, span = a[j].x&0xff; int32_t s = qpos - (span - 1), e = s + span; while (u < n_dreg && (int32_t)dreg[u] <= s) ++u; if (u < n_dreg && (int32_t)(dreg[u]>>32) < e) { int v, l = 0; for (v = u; v < n_dreg && (int32_t)(dreg[v]>>32) < e; ++v) { // iterate over LCRs overlapping this minimizer int ss = s > (int32_t)(dreg[v]>>32)? s : dreg[v]>>32; int ee = e < (int32_t)dreg[v]? e : (uint32_t)dreg[v]; l += ee - ss; } if (l <= span>>1) a[k++] = a[j]; // keep the minimizer if less than half of it falls in masked region } else a[k++] = a[j]; } sdust_buf_destroy(sdb); return k; // the new size } static void collect_minimizers(void *km, const mm_mapopt_t *opt, const mm_idx_t *mi, int n_segs, const int *qlens, const char* const* seqs, mm128_v *mv) { int i, n, sum = 0; mv->n = 0; for (i = n = 0; i < n_segs; ++i) { size_t j; mm_sketch(km, seqs[i], qlens[i], mi->w, mi->k, i, mi->flag&MM_I_HPC, mv, mi); for (j = n; j < mv->n; ++j) mv->a[j].y += sum << 1; if (opt->sdust_thres > 0) // mask low-complexity minimizers mv->n = n + mm_dust_minier(km, mv->n - n, mv->a + n, qlens[i], seqs[i], opt->sdust_thres); sum += qlens[i], n = mv->n; } } #include "ksort.h" #define heap_lt(a, b) ((a).x > (b).x) KSORT_INIT(heap, mm128_t, heap_lt) typedef struct { uint32_t n; uint32_t q_pos, q_span; uint32_t seg_id:31, is_tandem:1; const uint64_t *cr; } mm_match_t; static mm_match_t *collect_matches(void *km, int *_n_m, int max_occ, const mm_idx_t *mi, const mm128_v *mv, int64_t *n_a, int *rep_len, int *n_mini_pos, uint64_t **mini_pos) { int rep_st = 0, rep_en = 0, n_m; size_t i; mm_match_t *m; *n_mini_pos = 0; *mini_pos = (uint64_t*)kmalloc(km, mv->n * sizeof(uint64_t)); m = (mm_match_t*)kmalloc(km, mv->n * sizeof(mm_match_t)); for (i = 0, n_m = 0, *rep_len = 0, *n_a = 0; i < mv->n; ++i) { const uint64_t *cr; mm128_t *p = &mv->a[i]; uint32_t q_pos = (uint32_t)p->y, q_span = p->x & 0xff; int t; cr = mm_idx_get(mi, p->x>>8, &t); if (t >= max_occ) { int en = (q_pos >> 1) + 1, st = en - q_span; if (st > rep_en) { *rep_len += rep_en - rep_st; rep_st = st, rep_en = en; } else rep_en = en; } else { mm_match_t *q = &m[n_m++]; q->q_pos = q_pos, q->q_span = q_span, q->cr = cr, q->n = t, q->seg_id = p->y >> 32; q->is_tandem = 0; if (i > 0 && p->x>>8 == mv->a[i - 1].x>>8) q->is_tandem = 1; if (i < mv->n - 1 && p->x>>8 == mv->a[i + 1].x>>8) q->is_tandem = 1; *n_a += q->n; (*mini_pos)[(*n_mini_pos)++] = (uint64_t)q_span<<32 | q_pos>>1; } } *rep_len += rep_en - rep_st; *_n_m = n_m; 
return m; } static inline int skip_seed(int flag, uint64_t r, const mm_match_t *q, const char *qname, int qlen, const mm_idx_t *mi, int *is_self) { *is_self = 0; if (qname && (flag & (MM_F_NO_DIAG|MM_F_NO_DUAL))) { const mm_idx_seq_t *s = &mi->seq[r>>32]; int cmp; cmp = strcmp(qname, s->name); if ((flag&MM_F_NO_DIAG) && cmp == 0 && (int)s->len == qlen) { if ((uint32_t)r>>1 == (q->q_pos>>1)) return 1; // avoid the diagnonal anchors if ((r&1) == (q->q_pos&1)) *is_self = 1; // this flag is used to avoid spurious extension on self chain } if ((flag&MM_F_NO_DUAL) && cmp > 0) // all-vs-all mode: map once return 1; } if (flag & (MM_F_FOR_ONLY|MM_F_REV_ONLY)) { if ((r&1) == (q->q_pos&1)) { // forward strand if (flag & MM_F_REV_ONLY) return 1; } else { if (flag & MM_F_FOR_ONLY) return 1; } } return 0; } static mm128_t *collect_seed_hits_heap(void *km, const mm_mapopt_t *opt, int max_occ, const mm_idx_t *mi, const char *qname, const mm128_v *mv, int qlen, int64_t *n_a, int *rep_len, int *n_mini_pos, uint64_t **mini_pos) { int i, n_m, heap_size = 0; int64_t j, n_for = 0, n_rev = 0; mm_match_t *m; mm128_t *a, *heap; m = collect_matches(km, &n_m, max_occ, mi, mv, n_a, rep_len, n_mini_pos, mini_pos); heap = (mm128_t*)kmalloc(km, n_m * sizeof(mm128_t)); a = (mm128_t*)kmalloc(km, *n_a * sizeof(mm128_t)); for (i = 0, heap_size = 0; i < n_m; ++i) { if (m[i].n > 0) { heap[heap_size].x = m[i].cr[0]; heap[heap_size].y = (uint64_t)i<<32; ++heap_size; } } ks_heapmake_heap(heap_size, heap); while (heap_size > 0) { mm_match_t *q = &m[heap->y>>32]; mm128_t *p; uint64_t r = heap->x; int32_t is_self, rpos = (uint32_t)r >> 1; if (!skip_seed(opt->flag, r, q, qname, qlen, mi, &is_self)) { if ((r&1) == (q->q_pos&1)) { // forward strand p = &a[n_for++]; p->x = (r&0xffffffff00000000ULL) | rpos; p->y = (uint64_t)q->q_span << 32 | q->q_pos >> 1; } else { // reverse strand p = &a[(*n_a) - (++n_rev)]; p->x = 1ULL<<63 | (r&0xffffffff00000000ULL) | rpos; p->y = (uint64_t)q->q_span << 32 | (qlen - ((q->q_pos>>1) + 1 - q->q_span) - 1); } p->y |= (uint64_t)q->seg_id << MM_SEED_SEG_SHIFT; if (q->is_tandem) p->y |= MM_SEED_TANDEM; if (is_self) p->y |= MM_SEED_SELF; } // update the heap if ((uint32_t)heap->y < q->n - 1) { ++heap[0].y; heap[0].x = m[heap[0].y>>32].cr[(uint32_t)heap[0].y]; } else { heap[0] = heap[heap_size - 1]; --heap_size; } ks_heapdown_heap(0, heap_size, heap); } kfree(km, m); kfree(km, heap); // reverse anchors on the reverse strand, as they are in the descending order for (j = 0; j < n_rev>>1; ++j) { mm128_t t = a[(*n_a) - 1 - j]; a[(*n_a) - 1 - j] = a[(*n_a) - (n_rev - j)]; a[(*n_a) - (n_rev - j)] = t; } if (*n_a > n_for + n_rev) { memmove(a + n_for, a + (*n_a) - n_rev, n_rev * sizeof(mm128_t)); *n_a = n_for + n_rev; } return a; } static mm128_t *collect_seed_hits(void *km, const mm_mapopt_t *opt, int max_occ, const mm_idx_t *mi, const char *qname, const mm128_v *mv, int qlen, int64_t *n_a, int *rep_len, int *n_mini_pos, uint64_t **mini_pos) { int i, n_m; mm_match_t *m; mm128_t *a; m = collect_matches(km, &n_m, max_occ, mi, mv, n_a, rep_len, n_mini_pos, mini_pos); a = (mm128_t*)kmalloc(km, *n_a * sizeof(mm128_t)); for (i = 0, *n_a = 0; i < n_m; ++i) { mm_match_t *q = &m[i]; const uint64_t *r = q->cr; uint32_t k; for (k = 0; k < q->n; ++k) { int32_t is_self, rpos = (uint32_t)r[k] >> 1; mm128_t *p; if (skip_seed(opt->flag, r[k], q, qname, qlen, mi, &is_self)) continue; p = &a[(*n_a)++]; if ((r[k]&1) == (q->q_pos&1)) { // forward strand p->x = (r[k]&0xffffffff00000000ULL) | rpos; p->y = (uint64_t)q->q_span << 32 | 
q->q_pos >> 1; } else { // reverse strand p->x = 1ULL<<63 | (r[k]&0xffffffff00000000ULL) | rpos; p->y = (uint64_t)q->q_span << 32 | (qlen - ((q->q_pos>>1) + 1 - q->q_span) - 1); } p->y |= (uint64_t)q->seg_id << MM_SEED_SEG_SHIFT; if (q->is_tandem) p->y |= MM_SEED_TANDEM; if (is_self) p->y |= MM_SEED_SELF; } } kfree(km, m); radix_sort_128x(a, a + (*n_a)); return a; } static void chain_post(const mm_mapopt_t *opt, int max_chain_gap_ref, const mm_idx_t *mi, void *km, int qlen, int n_segs, const int *qlens, int *n_regs, mm_reg1_t *regs, mm128_t *a) { if (!(opt->flag & MM_F_ALL_CHAINS)) { // don't choose primary mapping(s) mm_set_parent(km, opt->mask_level, opt->mask_len, *n_regs, regs, opt->a * 2 + opt->b, opt->flag&MM_F_HARD_MLEVEL, opt->alt_drop); if (n_segs <= 1) mm_select_sub(km, opt->pri_ratio, mi->k*2, opt->best_n, n_regs, regs); else mm_select_sub_multi(km, opt->pri_ratio, 0.2f, 0.7f, max_chain_gap_ref, mi->k*2, opt->best_n, n_segs, qlens, n_regs, regs); if (!(opt->flag & (MM_F_SPLICE|MM_F_SR|MM_F_NO_LJOIN))) // long join not working well without primary chains mm_join_long(km, opt, qlen, n_regs, regs, a); } } static mm_reg1_t *align_regs(const mm_mapopt_t *opt, const mm_idx_t *mi, void *km, int qlen, const char *seq, int *n_regs, mm_reg1_t *regs, mm128_t *a) { if (!(opt->flag & MM_F_CIGAR)) return regs; regs = mm_align_skeleton(km, opt, mi, qlen, seq, n_regs, regs, a); // this calls mm_filter_regs() if (!(opt->flag & MM_F_ALL_CHAINS)) { // don't choose primary mapping(s) mm_set_parent(km, opt->mask_level, opt->mask_len, *n_regs, regs, opt->a * 2 + opt->b, opt->flag&MM_F_HARD_MLEVEL, opt->alt_drop); mm_select_sub(km, opt->pri_ratio, mi->k*2, opt->best_n, n_regs, regs); mm_set_sam_pri(*n_regs, regs); } return regs; } void mm_map_frag(const mm_idx_t *mi, int n_segs, const int *qlens, const char **seqs, int *n_regs, mm_reg1_t **regs, mm_tbuf_t *b, const mm_mapopt_t *opt, const char *qname) { int i, j, rep_len, qlen_sum, n_regs0, n_mini_pos; int max_chain_gap_qry, max_chain_gap_ref, min_chain_gap_ref, is_splice = !!(opt->flag & MM_F_SPLICE), is_sr = !!(opt->flag & MM_F_SR); uint32_t hash; int64_t n_a; uint64_t *u, *mini_pos; mm128_t *a; mm128_v mv = {0,0,0}; mm_reg1_t *regs0; km_stat_t kmst; //TODO: generalize this to n_segs > 1 assert (n_segs == 1); //deal with long reads (or asm contigs) only mm128_t **collect_a; int64_t *collect_n_a; mm_tbuf_t *b_master = b; //buffer for main thread entering this function //stage1: Pre-compute confident read alignments of substrings of input read //define new set of options for first stage //generate many candidate alignments to improve mapq estimation mm_mapopt_t opt2 = *opt; mm_mapopt_t *opt_2 = &opt2; opt_2->best_n = std::max(5, opt_2->best_n); //set minimum int countStartingPositions = 1 + std::ceil(qlens[0] * 1.0 / opt_2->suffixSampleOffset); collect_a = (mm128_t**)kmalloc(b->km, countStartingPositions * sizeof(mm128_t*)); collect_n_a = (int64_t *)kmalloc(b->km, countStartingPositions * sizeof(int64_t)); memset(collect_n_a, 0, countStartingPositions * sizeof(int64_t)); //create a boolean vector to indicate what portion of read were mapped using MCASs int8_t* seqMapped = (int8_t *)kmalloc(b->km, qlens[0] * sizeof(int8_t)); memset(seqMapped, 0, qlens[0] * sizeof(int8_t)); //check if SVaware mode enabled and query length is sufficient if (opt_2->SVaware && qlens[0] >= opt_2->SVawareMinReadLength) { //parallelize single read alignment further for better load balance #pragma omp parallel num_threads(OMP_PER_READ_THREADS) { //make all these variables 
private to openmp thread by redefining them int i, j, rep_len, qlen_sum, n_regs0, n_mini_pos; uint32_t hash; int64_t n_a; uint64_t *u, *mini_pos; mm128_t *a; mm128_v mv = {0,0,0}; mm_reg1_t *regs0; mm_tbuf_t *b = (mm_tbuf_t*)calloc(1, sizeof(mm_tbuf_t)); //omp thread local buffer b->km = km_init(); int* sub_qlens = (int *)kmalloc(b->km, 1 * sizeof(int)); char **sub_seqs = (char **) kmalloc(b->km, 1 * sizeof(char*)); sub_seqs[0] = (char *)kmalloc(b->km, qlens[0] * sizeof(char)); #pragma omp for schedule(dynamic) for (int sub_begin = 0; sub_begin < qlens[0] + opt_2->suffixSampleOffset - 1; sub_begin += opt_2->suffixSampleOffset) { int suffix_id = sub_begin / opt_2->suffixSampleOffset; //id for this string end-point bool mappingFound = false; int max_mapq_currentPos = 0; if (sub_begin >= qlens[0]) sub_begin = qlens[0]-1; //for last iter assert (sub_begin >= 0 && sub_begin < qlens[0]); for (int sub_len = opt_2->minPrefixLength; sub_len <= opt_2->maxPrefixLength; sub_len *= opt_2->prefixIncrementFactor) { //consider 'sub_len' bases to the right if (sub_begin + sub_len <= qlens[0]) //check substring end boundary limit { mv = {0,0,0}; sub_qlens[0] = sub_len; memcpy (sub_seqs[0], &(seqs[0][sub_begin]), sub_len); for (i = 0, qlen_sum = 0; i < n_segs; ++i) qlen_sum += sub_qlens[i], n_regs[i] = 0, regs[i] = 0, n_regs0 = 0; if (qlen_sum == 0 || n_segs <= 0 || n_segs > MM_MAX_SEG) break; if (opt_2->max_qlen > 0 && qlen_sum > opt_2->max_qlen) break; hash = qname? __ac_X31_hash_string(qname) : 0; hash ^= __ac_Wang_hash(qlen_sum) + __ac_Wang_hash(opt_2->seed); hash = __ac_Wang_hash(hash); collect_minimizers(b->km, opt_2, mi, n_segs, sub_qlens, sub_seqs, &mv); if (opt_2->flag & MM_F_HEAP_SORT) a = collect_seed_hits_heap(b->km, opt_2, opt_2->mid_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); else a = collect_seed_hits(b->km, opt_2, opt_2->mid_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); if (mm_dbg_flag & MM_DBG_PRINT_SEED) { fprintf(stderr, "RS\t%d\n", rep_len); for (i = 0; i < n_a; ++i) fprintf(stderr, "SD\t%s\t%d\t%c\t%d\t%d\t%d\n", mi->seq[a[i].x<<1>>33].name, (int32_t)a[i].x, "+-"[a[i].x>>63], (int32_t)a[i].y, (int32_t)(a[i].y>>32&0xff), i == 0? 0 : ((int32_t)a[i].y - (int32_t)a[i-1].y) - ((int32_t)a[i].x - (int32_t)a[i-1].x)); } // set max chaining gap on the query and the reference sequence if (is_sr) max_chain_gap_qry = qlen_sum > opt_2->max_gap? 
qlen_sum : opt_2->max_gap; else max_chain_gap_qry = opt_2->max_gap; if (opt_2->max_gap_ref > 0) { max_chain_gap_ref = opt_2->max_gap_ref; // always honor mm_mapopt_2_t::max_gap_ref if set } else if (opt_2->max_frag_len > 0) { max_chain_gap_ref = opt_2->max_frag_len - qlen_sum; if (max_chain_gap_ref < opt_2->max_gap) max_chain_gap_ref = opt_2->max_gap; } else max_chain_gap_ref = opt_2->max_gap; if (opt_2->min_gap_ref < max_chain_gap_ref) min_chain_gap_ref = opt_2->min_gap_ref; else min_chain_gap_ref = max_chain_gap_ref; a = mm_chain_dp(max_chain_gap_ref, min_chain_gap_ref, max_chain_gap_qry, opt_2->bw, opt_2->max_chain_skip, opt_2->max_chain_iter, opt_2->min_cnt, opt_2->min_chain_score, opt->chain_gap_scale, is_splice, n_segs, n_a, a, &n_regs0, &u, b->km); if (opt_2->max_occ > opt_2->mid_occ && rep_len > 0) { int rechain = 0; if (n_regs0 > 0) { // test if the best chain has all the segments int n_chained_segs = 1, max = 0, max_i = -1, max_off = -1, off = 0; for (i = 0; i < n_regs0; ++i) { // find the best chain if (max < (int)(u[i]>>32)) max = u[i]>>32, max_i = i, max_off = off; off += (uint32_t)u[i]; } for (i = 1; i < (int32_t)u[max_i]; ++i) // count the number of segments in the best chain if ((a[max_off+i].y&MM_SEED_SEG_MASK) != (a[max_off+i-1].y&MM_SEED_SEG_MASK)) ++n_chained_segs; if (n_chained_segs < n_segs) rechain = 1; } else rechain = 1; if (rechain) { // redo chaining with a higher max_occ threshold kfree(b->km, a); kfree(b->km, u); kfree(b->km, mini_pos); if (opt_2->flag & MM_F_HEAP_SORT) a = collect_seed_hits_heap(b->km, opt_2, opt_2->max_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); else a = collect_seed_hits(b->km, opt_2, opt_2->max_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); a = mm_chain_dp(max_chain_gap_ref, min_chain_gap_ref, max_chain_gap_qry, opt_2->bw, opt_2->max_chain_skip, opt_2->max_chain_iter, opt_2->min_cnt, opt_2->min_chain_score, opt->chain_gap_scale, is_splice, n_segs, n_a, a, &n_regs0, &u, b->km); } } b->frag_gap = max_chain_gap_ref; b->rep_len = rep_len; regs0 = mm_gen_regs(b->km, hash, qlen_sum, n_regs0, u, a); if (mm_dbg_flag & MM_DBG_PRINT_SEED) for (j = 0; j < n_regs0; ++j) for (i = regs0[j].as; i < regs0[j].as + regs0[j].cnt; ++i) fprintf(stderr, "CN\t%d\t%s\t%d\t%c\t%d\t%d\t%d\n", j, mi->seq[a[i].x<<1>>33].name, (int32_t)a[i].x, "+-"[a[i].x>>63], (int32_t)a[i].y, (int32_t)(a[i].y>>32&0xff), i == regs0[j].as? 
0 : ((int32_t)a[i].y - (int32_t)a[i-1].y) - ((int32_t)a[i].x - (int32_t)a[i-1].x)); chain_post(opt_2, max_chain_gap_ref, mi, b->km, qlen_sum, n_segs, qlens, &n_regs0, regs0, a); if (!is_sr) mm_est_err(mi, qlen_sum, n_regs0, regs0, a, n_mini_pos, mini_pos); if (n_segs == 1) { // uni-segment regs0 = align_regs(opt_2, mi, b->km, sub_qlens[0], sub_seqs[0], &n_regs0, regs0, a); mm_set_mapq(b->km, n_regs0, regs0, opt_2->min_chain_score, opt_2->a, rep_len, is_sr); n_regs[0] = n_regs0, regs[0] = regs0; } else { // multi-segment mm_seg_t *seg; seg = mm_seg_gen(b->km, hash, n_segs, qlens, n_regs0, regs0, n_regs, regs, a); // split fragment chain to separate segment chains free(regs0); for (i = 0; i < n_segs; ++i) { mm_set_parent(b->km, opt_2->mask_level, opt_2->mask_len, n_regs[i], regs[i], opt_2->a * 2 + opt_2->b, opt_2->flag&MM_F_HARD_MLEVEL, opt_2->alt_drop); // update mm_reg1_t::parent regs[i] = align_regs(opt_2, mi, b->km, qlens[i], seqs[i], &n_regs[i], regs[i], seg[i].a); mm_set_mapq(b->km, n_regs[i], regs[i], opt_2->min_chain_score, opt_2->a, rep_len, is_sr); } mm_seg_free(b->km, n_segs, seg); if (n_segs == 2 && opt_2->pe_ori >= 0 && (opt_2->flag&MM_F_CIGAR)) mm_pair(b->km, max_chain_gap_ref, opt_2->pe_bonus, opt_2->a * 2 + opt_2->b, opt_2->a, qlens, n_regs, regs); // pairing } int mostPromisingMapping = -1; int max_mapq_fragment = 0; //For valid mapping, save anchors for (j = 0; j < n_regs0; ++j) { max_mapq_fragment = std::max ((int32_t)regs0[j].mapq, max_mapq_fragment); max_mapq_currentPos = std::max (max_mapq_fragment, max_mapq_currentPos); //Check for high confidence (mapq), length if (regs0[j].mapq >= opt_2->min_mapq && regs0[j].blen >= opt_2->min_qcov * sub_len && regs0[j].cnt > 0) { mappingFound = true; mostPromisingMapping = j; collect_n_a[suffix_id] = regs0[j].cnt; if (mm_dbg_flag & MM_DBG_POLISH) { //print MCAS information in paf-like format, helpful for debugging & dot-plotting MCAS alignments fprintf(stderr, "PO\t%s %d %d %d %c %s %d %d %d %d %d %d %d [FOUND] \n", qname, qlens[0], sub_begin + regs0[j].qs, sub_begin + regs0[j].qe, "+-"[regs0[j].rev] , mi->seq[regs0[j].rid].name, mi->seq[regs0[j].rid].len, regs0[j].rs, regs0[j].re, regs0[j].mapq, suffix_id, sub_begin, sub_len); } break; } } if ((mm_dbg_flag & MM_DBG_POLISH) && !mappingFound) fprintf(stderr, "PO\tqname:%s, suffid:%d, begin:%d, len:%d, max_mapq:%d, n_regs0:%d [NONE FOUND] \n", qname, suffix_id, sub_begin, sub_len, max_mapq_fragment, n_regs0); if (mappingFound) { assert (collect_n_a[suffix_id] > 0); assert (mostPromisingMapping >= 0); #pragma omp critical { collect_a[suffix_id] = (mm128_t*)kmalloc(b_master->km, collect_n_a[suffix_id] * sizeof(mm128_t)); } j = mostPromisingMapping; for (i = 0; i < regs0[j].cnt; ++i) { mm128_t _a_ = a[i + regs0[j].as]; //correct coordinates of each anchor while storing if (_a_.x >> 63) //reverse strand _a_.y += (qlens[0] - sub_begin - sub_len); else _a_.y += sub_begin; collect_a[suffix_id][i] = _a_; } //mark mapped interval in boolean vector #pragma omp critical { for(i = sub_begin; i < sub_begin + sub_len; i++) seqMapped[i] = 1; } } for (j = 0; j < n_regs0; ++j) {free (regs0[j].p);} free (regs0); kfree(b->km, mv.a); kfree(b->km, a); kfree(b->km, u); kfree(b->km, mini_pos); if (mappingFound || !n_regs0) break; // mappingFound-> found shortest prefix; !n_regs0-> no candidate } //consider 'sub_len' bases to the left if (sub_begin - sub_len + 1 >= 0) //check substring start boundary limit { mv = {0,0,0}; sub_qlens[0] = sub_len; memcpy (sub_seqs[0], &(seqs[0][sub_begin - sub_len +1]), 
sub_len); for (i = 0, qlen_sum = 0; i < n_segs; ++i) qlen_sum += sub_qlens[i], n_regs[i] = 0, regs[i] = 0, n_regs0 = 0; if (qlen_sum == 0 || n_segs <= 0 || n_segs > MM_MAX_SEG) break; if (opt_2->max_qlen > 0 && qlen_sum > opt_2->max_qlen) break; hash = qname? __ac_X31_hash_string(qname) : 0; hash ^= __ac_Wang_hash(qlen_sum) + __ac_Wang_hash(opt_2->seed); hash = __ac_Wang_hash(hash); collect_minimizers(b->km, opt_2, mi, n_segs, sub_qlens, sub_seqs, &mv); if (opt_2->flag & MM_F_HEAP_SORT) a = collect_seed_hits_heap(b->km, opt_2, opt_2->mid_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); else a = collect_seed_hits(b->km, opt_2, opt_2->mid_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); if (mm_dbg_flag & MM_DBG_PRINT_SEED) { fprintf(stderr, "RS\t%d\n", rep_len); for (i = 0; i < n_a; ++i) fprintf(stderr, "SD\t%s\t%d\t%c\t%d\t%d\t%d\n", mi->seq[a[i].x<<1>>33].name, (int32_t)a[i].x, "+-"[a[i].x>>63], (int32_t)a[i].y, (int32_t)(a[i].y>>32&0xff), i == 0? 0 : ((int32_t)a[i].y - (int32_t)a[i-1].y) - ((int32_t)a[i].x - (int32_t)a[i-1].x)); } // set max chaining gap on the query and the reference sequence if (is_sr) max_chain_gap_qry = qlen_sum > opt_2->max_gap? qlen_sum : opt_2->max_gap; else max_chain_gap_qry = opt_2->max_gap; if (opt_2->max_gap_ref > 0) { max_chain_gap_ref = opt_2->max_gap_ref; // always honor mm_mapopt_2_t::max_gap_ref if set } else if (opt_2->max_frag_len > 0) { max_chain_gap_ref = opt_2->max_frag_len - qlen_sum; if (max_chain_gap_ref < opt_2->max_gap) max_chain_gap_ref = opt_2->max_gap; } else max_chain_gap_ref = opt_2->max_gap; if (opt_2->min_gap_ref < max_chain_gap_ref) min_chain_gap_ref = opt_2->min_gap_ref; else min_chain_gap_ref = max_chain_gap_ref; a = mm_chain_dp(max_chain_gap_ref, min_chain_gap_ref, max_chain_gap_qry, opt_2->bw, opt_2->max_chain_skip, opt_2->max_chain_iter, opt_2->min_cnt, opt_2->min_chain_score, opt->chain_gap_scale, is_splice, n_segs, n_a, a, &n_regs0, &u, b->km); if (opt_2->max_occ > opt_2->mid_occ && rep_len > 0) { int rechain = 0; if (n_regs0 > 0) { // test if the best chain has all the segments int n_chained_segs = 1, max = 0, max_i = -1, max_off = -1, off = 0; for (i = 0; i < n_regs0; ++i) { // find the best chain if (max < (int)(u[i]>>32)) max = u[i]>>32, max_i = i, max_off = off; off += (uint32_t)u[i]; } for (i = 1; i < (int32_t)u[max_i]; ++i) // count the number of segments in the best chain if ((a[max_off+i].y&MM_SEED_SEG_MASK) != (a[max_off+i-1].y&MM_SEED_SEG_MASK)) ++n_chained_segs; if (n_chained_segs < n_segs) rechain = 1; } else rechain = 1; if (rechain) { // redo chaining with a higher max_occ threshold kfree(b->km, a); kfree(b->km, u); kfree(b->km, mini_pos); if (opt_2->flag & MM_F_HEAP_SORT) a = collect_seed_hits_heap(b->km, opt_2, opt_2->max_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); else a = collect_seed_hits(b->km, opt_2, opt_2->max_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); a = mm_chain_dp(max_chain_gap_ref, min_chain_gap_ref, max_chain_gap_qry, opt_2->bw, opt_2->max_chain_skip, opt_2->max_chain_iter, opt_2->min_cnt, opt_2->min_chain_score, opt->chain_gap_scale, is_splice, n_segs, n_a, a, &n_regs0, &u, b->km); } } b->frag_gap = max_chain_gap_ref; b->rep_len = rep_len; regs0 = mm_gen_regs(b->km, hash, qlen_sum, n_regs0, u, a); if (mm_dbg_flag & MM_DBG_PRINT_SEED) for (j = 0; j < n_regs0; ++j) for (i = regs0[j].as; i < regs0[j].as + regs0[j].cnt; ++i) fprintf(stderr, "CN\t%d\t%s\t%d\t%c\t%d\t%d\t%d\n", j, 
mi->seq[a[i].x<<1>>33].name, (int32_t)a[i].x, "+-"[a[i].x>>63], (int32_t)a[i].y, (int32_t)(a[i].y>>32&0xff), i == regs0[j].as? 0 : ((int32_t)a[i].y - (int32_t)a[i-1].y) - ((int32_t)a[i].x - (int32_t)a[i-1].x)); chain_post(opt_2, max_chain_gap_ref, mi, b->km, qlen_sum, n_segs, qlens, &n_regs0, regs0, a); if (!is_sr) mm_est_err(mi, qlen_sum, n_regs0, regs0, a, n_mini_pos, mini_pos); if (n_segs == 1) { // uni-segment regs0 = align_regs(opt_2, mi, b->km, sub_qlens[0], sub_seqs[0], &n_regs0, regs0, a); mm_set_mapq(b->km, n_regs0, regs0, opt_2->min_chain_score, opt_2->a, rep_len, is_sr); n_regs[0] = n_regs0, regs[0] = regs0; } else { // multi-segment mm_seg_t *seg; seg = mm_seg_gen(b->km, hash, n_segs, qlens, n_regs0, regs0, n_regs, regs, a); // split fragment chain to separate segment chains free(regs0); for (i = 0; i < n_segs; ++i) { mm_set_parent(b->km, opt->mask_level, opt->mask_len, n_regs[i], regs[i], opt->a * 2 + opt->b, opt->flag&MM_F_HARD_MLEVEL, opt->alt_drop); // update mm_reg1_t::parent regs[i] = align_regs(opt_2, mi, b->km, qlens[i], seqs[i], &n_regs[i], regs[i], seg[i].a); mm_set_mapq(b->km, n_regs[i], regs[i], opt_2->min_chain_score, opt_2->a, rep_len, is_sr); } mm_seg_free(b->km, n_segs, seg); if (n_segs == 2 && opt_2->pe_ori >= 0 && (opt_2->flag&MM_F_CIGAR)) mm_pair(b->km, max_chain_gap_ref, opt_2->pe_bonus, opt_2->a * 2 + opt_2->b, opt_2->a, qlens, n_regs, regs); // pairing } int mostPromisingMapping = -1; int max_mapq_fragment = 0; //For valid mapping, save anchors for (j = 0; j < n_regs0; ++j) { max_mapq_fragment = std::max ((int32_t)regs0[j].mapq, max_mapq_fragment); max_mapq_currentPos = std::max (max_mapq_fragment, max_mapq_currentPos); //Check for high confidence (mapq), length if (regs0[j].mapq >= opt_2->min_mapq && regs0[j].blen >= opt_2->min_qcov * sub_len && regs0[j].cnt > 0) { mappingFound = true; mostPromisingMapping = j; collect_n_a[suffix_id] = regs0[j].cnt; if (mm_dbg_flag & MM_DBG_POLISH) { //print MCAS information in paf-like format, helpful for debugging & dot-plotting MCAS alignments fprintf(stderr, "PO\t%s %d %d %d %c %s %d %d %d %d %d %d %d [FOUND] \n", qname, qlens[0], sub_begin - sub_len + regs0[j].qs, sub_begin - sub_len + regs0[j].qe, "+-"[regs0[j].rev] , mi->seq[regs0[j].rid].name, mi->seq[regs0[j].rid].len, regs0[j].rs, regs0[j].re, regs0[j].mapq, suffix_id, sub_begin, -1 * sub_len); } break; } } if ((mm_dbg_flag & MM_DBG_POLISH) && !mappingFound) fprintf(stderr, "PO\tqname:%s, suffid:%d, begin:%d, len:%d, max_mapq:%d, n_regs0:%d [NONE FOUND] \n", qname, suffix_id, sub_begin, -1 * sub_len, max_mapq_fragment, n_regs0); if (mappingFound) { assert (collect_n_a[suffix_id] > 0); assert (mostPromisingMapping >= 0); #pragma omp critical { collect_a[suffix_id] = (mm128_t*)kmalloc(b_master->km, collect_n_a[suffix_id] * sizeof(mm128_t)); } j = mostPromisingMapping; for (i = 0; i < regs0[j].cnt; ++i) { mm128_t _a_ = a[i + regs0[j].as]; //correct coordinates of each anchor while storing if (_a_.x >> 63) //reverse strand _a_.y += (qlens[0]-1) - sub_begin; else _a_.y += sub_begin - sub_len + 1; //offset of first base of substring collect_a[suffix_id][i] = _a_; } //mark mapped interval in boolean vector #pragma omp critical { for(i = sub_begin - sub_len +1; i <= sub_begin; i++) seqMapped[i] = 1; } } for (j = 0; j < n_regs0; ++j) {free (regs0[j].p);} free (regs0); kfree(b->km, mv.a); kfree(b->km, a); kfree(b->km, u); kfree(b->km, mini_pos); if (mappingFound || !n_regs0) break; // mappingFound-> found shortest prefix; !n_regs0-> no candidate } } if ((mm_dbg_flag & 
MM_DBG_POLISH) && !mappingFound) fprintf(stderr, "PO\tqname:%s, begin:%d, max_mapq_currentPos:%d [NONE FOUND] \n", qname, sub_begin, max_mapq_currentPos); } //free openmp thread specific memory kfree(b->km, sub_qlens); kfree(b->km, sub_seqs[0]); kfree(b->km, sub_seqs); mm_tbuf_destroy(b); } } if (mm_dbg_flag & MM_DBG_POLISH) { int mappedcnt = 0; for (i = 0; i < qlens[0]; i++) if (seqMapped[i]) mappedcnt++; fprintf(stderr, "PO\tqname:%s, count of mapped query bases = %d among %d\n", qname, mappedcnt, qlens[0]); } //define new set of options for next stage //we can make stage 2 as sensitive as possible with very few seeds remaining mm_mapopt_t opt3 = *opt; mm_mapopt_t *opt_3 = &opt3; opt_3->zdrop_inv = std::min (opt->zdrop_inv, opt->stage2_zdrop_inv); opt_3->bw= std::max(opt->bw, opt->stage2_bw); //increased gap helps compensate for sometimes missing seeds along correct alignments opt_3->max_gap = std::max(opt->max_gap, opt->stage2_max_gap); //Re-run mapping with the above selected anchors { for (i = 0, qlen_sum = 0; i < n_segs; ++i) qlen_sum += qlens[i], n_regs[i] = 0, regs[i] = 0, n_regs0 = 0; if (qlen_sum == 0 || n_segs <= 0 || n_segs > MM_MAX_SEG) return; if (opt_3->max_qlen > 0 && qlen_sum > opt_3->max_qlen) return; hash = qname? __ac_X31_hash_string(qname) : 0; hash ^= __ac_Wang_hash(qlen_sum) + __ac_Wang_hash(opt_3->seed); hash = __ac_Wang_hash(hash); //Use anchors from our own analysis n_a = 0; for (i = 0; i < countStartingPositions; i++) n_a += collect_n_a[i]; if ((mm_dbg_flag & MM_DBG_POLISH) && opt->SVaware) fprintf(stderr, "PO\tqname:%s, n_a (before filtering and checking for duplicates) :%" PRId64 "\n", qname, n_a); if (n_a) { //allocate sufficient memory a = (mm128_t*)kmalloc(b->km, n_a * sizeof(mm128_t)); //set values of anchors int64_t n_a_counter = 0; for (i = 0; i < countStartingPositions; i++) for (j=0; j<collect_n_a[i]; j++) a[n_a_counter++] = collect_a[i][j]; //discard duplicate entries int64_t n_a_unique = 0; std::sort(a, a + n_a, [](const mm128_t &a, const mm128_t &b){return std::tie(a.x, a.y) < std::tie(b.x, b.y);}); //traverse through the array elements for (i = 0; i < n_a;) { j = i; while (j < n_a && std::tie(a[i].x, a[i].y) == std::tie(a[j].x, a[j].y)) j++; //j will increment at least by one here a[n_a_unique++] = a[i]; i = j; } n_a = n_a_unique; if (mm_dbg_flag & MM_DBG_POLISH) fprintf(stderr, "PO\tqname:%s, n_a (after filtering and checking for duplicates) :%" PRId64 ", min_cnt:%d\n", qname, n_a, opt_3->min_cnt); //sort anchors by reference position before moving on radix_sort_128x(a, a + n_a); if (n_a < opt_3->min_cnt) //insufficient no. 
of seeds { n_a = 0; //reset to 0 kfree(b->km, a); } } } //collect additional anchors from unmapped intervals { //if we have found MCAS-based anchors, but with a few unmapped read intervals int unmappedcnt = 0; for (i = 0; i < qlens[0]; i++) if (seqMapped[i]==0) unmappedcnt++; if (n_a > 0 && unmappedcnt > 0) { char **unmapped_seqs = (char **) kmalloc(b->km, 1 * sizeof(char*)); unmapped_seqs[0] = (char *)kmalloc(b->km, qlens[0] * sizeof(char)); for (i = 0; i < qlens[0]; i++) { if (seqMapped[i] > 0) unmapped_seqs[0][i] = 'N'; else unmapped_seqs[0][i] = seqs[0][i]; } if (mm_dbg_flag & MM_DBG_POLISH) fprintf(stderr, "PO\tqname:%s, n_a (before mapping the unmapped read substrings) :%" PRId64 "\n", qname, n_a); mm128_t *a_remaining; int64_t n_a_remaining; mv = {0,0,0}; collect_minimizers(b->km, opt_3, mi, n_segs, qlens, unmapped_seqs, &mv); if (opt_3->flag & MM_F_HEAP_SORT) a_remaining = collect_seed_hits_heap(b->km, opt_3, opt_3->mid_occ, mi, qname, &mv, qlen_sum, &n_a_remaining, &rep_len, &n_mini_pos, &mini_pos); else a_remaining = collect_seed_hits(b->km, opt_3, opt_3->mid_occ, mi, qname, &mv, qlen_sum, &n_a_remaining, &rep_len, &n_mini_pos, &mini_pos); kfree(b->km, mv.a); kfree(b->km, mini_pos); int64_t n_a_whole = n_a_remaining + n_a; mm128_t *a_whole = (mm128_t*)kmalloc(b->km, n_a_whole * sizeof(mm128_t)); for (i=0; i<n_a; i++) { #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" a_whole[i] = a[i]; } for (i=n_a; i<n_a_whole; i++) { a_whole[i] = a_remaining[i-n_a]; } //sort anchors by reference position before moving on radix_sort_128x(a_whole, a_whole + n_a_whole); kfree(b->km, a); kfree(b->km, a_remaining); a = a_whole; n_a = n_a_whole; kfree(b->km, unmapped_seqs[0]); kfree(b->km, unmapped_seqs); if (mm_dbg_flag & MM_DBG_POLISH) fprintf(stderr, "PO\tqname:%s, n_a (after mapping the unmapped read substrings) :%" PRId64 "\n", qname, n_a); } } { if (!n_a) //MCAS-method couldn't be used { //go with the default route if ((mm_dbg_flag & MM_DBG_POLISH) && opt->SVaware) fprintf(stderr, "PO\tfalling back to default mapping algorithm for read: %s\n", qname); //revert to original parameters *opt_3 = *opt; mv = {0,0,0}; collect_minimizers(b->km, opt_3, mi, n_segs, qlens, seqs, &mv); if (opt_3->flag & MM_F_HEAP_SORT) a = collect_seed_hits_heap(b->km, opt_3, opt_3->mid_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); else a = collect_seed_hits(b->km, opt_3, opt_3->mid_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); kfree(b->km, mv.a); kfree(b->km, mini_pos); } if (mm_dbg_flag & MM_DBG_PRINT_SEED) { fprintf(stderr, "RS\t%d\n", rep_len); for (i = 0; i < n_a; ++i) fprintf(stderr, "SD\t%s\t%d\t%c\t%d\t%d\t%d\n", mi->seq[a[i].x<<1>>33].name, (int32_t)a[i].x, "+-"[a[i].x>>63], (int32_t)a[i].y, (int32_t)(a[i].y>>32&0xff), i == 0? 0 : ((int32_t)a[i].y - (int32_t)a[i-1].y) - ((int32_t)a[i].x - (int32_t)a[i-1].x)); } // set max chaining gap on the query and the reference sequence if (is_sr) max_chain_gap_qry = qlen_sum > opt_3->max_gap? 
qlen_sum : opt_3->max_gap; else max_chain_gap_qry = opt_3->max_gap; if (opt_3->max_gap_ref > 0) { max_chain_gap_ref = opt_3->max_gap_ref; // always honor mm_mapopt_3_t::max_gap_ref if set } else if (opt_3->max_frag_len > 0) { max_chain_gap_ref = opt_3->max_frag_len - qlen_sum; if (max_chain_gap_ref < opt_3->max_gap) max_chain_gap_ref = opt_3->max_gap; } else max_chain_gap_ref = opt_3->max_gap; if (opt_3->min_gap_ref < max_chain_gap_ref) min_chain_gap_ref = opt_3->min_gap_ref; else min_chain_gap_ref = max_chain_gap_ref; a = mm_chain_dp(max_chain_gap_ref, min_chain_gap_ref, max_chain_gap_qry, opt_3->bw, opt_3->max_chain_skip, opt_3->max_chain_iter, opt_3->min_cnt, opt_3->min_chain_score, opt->chain_gap_scale, is_splice, n_segs, n_a, a, &n_regs0, &u, b->km); if (opt_3->max_occ > opt_3->mid_occ && rep_len > 0) { int rechain = 0; if (n_regs0 > 0) { // test if the best chain has all the segments int n_chained_segs = 1, max = 0, max_i = -1, max_off = -1, off = 0; for (i = 0; i < n_regs0; ++i) { // find the best chain if (max < (int)(u[i]>>32)) max = u[i]>>32, max_i = i, max_off = off; off += (uint32_t)u[i]; } for (i = 1; i < (int32_t)u[max_i]; ++i) // count the number of segments in the best chain if ((a[max_off+i].y&MM_SEED_SEG_MASK) != (a[max_off+i-1].y&MM_SEED_SEG_MASK)) ++n_chained_segs; if (n_chained_segs < n_segs) rechain = 1; } else rechain = 1; if (rechain) { // redo chaining with a higher max_occ threshold kfree(b->km, a); kfree(b->km, u); //kfree(b->km, mini_pos); //already freed above if (opt_3->flag & MM_F_HEAP_SORT) a = collect_seed_hits_heap(b->km, opt_3, opt_3->max_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); else a = collect_seed_hits(b->km, opt_3, opt_3->max_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); a = mm_chain_dp(max_chain_gap_ref, min_chain_gap_ref, max_chain_gap_qry, opt_3->bw, opt_3->max_chain_skip, opt_3->max_chain_iter, opt_3->min_cnt, opt_3->min_chain_score, opt->chain_gap_scale, is_splice, n_segs, n_a, a, &n_regs0, &u, b->km); } } b->frag_gap = max_chain_gap_ref; b->rep_len = rep_len; regs0 = mm_gen_regs(b->km, hash, qlen_sum, n_regs0, u, a); if (mm_dbg_flag & MM_DBG_PRINT_SEED) for (j = 0; j < n_regs0; ++j) for (i = regs0[j].as; i < regs0[j].as + regs0[j].cnt; ++i) fprintf(stderr, "CN\t%d\t%s\t%d\t%c\t%d\t%d\t%d\n", j, mi->seq[a[i].x<<1>>33].name, (int32_t)a[i].x, "+-"[a[i].x>>63], (int32_t)a[i].y, (int32_t)(a[i].y>>32&0xff), i == regs0[j].as? 
0 : ((int32_t)a[i].y - (int32_t)a[i-1].y) - ((int32_t)a[i].x - (int32_t)a[i-1].x)); chain_post(opt_3, max_chain_gap_ref, mi, b->km, qlen_sum, n_segs, qlens, &n_regs0, regs0, a); //This function generates lot of warnings /*if (!is_sr) mm_est_err(mi, qlen_sum, n_regs0, regs0, a, n_mini_pos, mini_pos);*/ if (n_segs == 1) { // uni-segment regs0 = align_regs(opt_3, mi, b->km, qlens[0], seqs[0], &n_regs0, regs0, a); mm_set_mapq(b->km, n_regs0, regs0, opt_3->min_chain_score, opt_3->a, rep_len, is_sr); n_regs[0] = n_regs0, regs[0] = regs0; //TODO: find a better way to compute mapping quality } else { // multi-segment mm_seg_t *seg; seg = mm_seg_gen(b->km, hash, n_segs, qlens, n_regs0, regs0, n_regs, regs, a); // split fragment chain to separate segment chains free(regs0); for (i = 0; i < n_segs; ++i) { mm_set_parent(b->km, opt_3->mask_level, opt_3->mask_len, n_regs[i], regs[i], opt_3->a * 2 + opt_3->b, opt_3->flag&MM_F_HARD_MLEVEL, opt->alt_drop); // update mm_reg1_t::parent regs[i] = align_regs(opt_3, mi, b->km, qlens[i], seqs[i], &n_regs[i], regs[i], seg[i].a); mm_set_mapq(b->km, n_regs[i], regs[i], opt_3->min_chain_score, opt_3->a, rep_len, is_sr); } mm_seg_free(b->km, n_segs, seg); if (n_segs == 2 && opt_3->pe_ori >= 0 && (opt_3->flag&MM_F_CIGAR)) mm_pair(b->km, max_chain_gap_ref, opt_3->pe_bonus, opt_3->a * 2 + opt_3->b, opt_3->a, qlens, n_regs, regs); // pairing } kfree(b->km, a); kfree(b->km, u); /*kfree(b->km, mini_pos);*/ /*kfree(b->km, mv.a);*/ } for (i = 0; i < countStartingPositions; i++) if (collect_n_a[i] > 0) kfree(b->km, collect_a[i]); kfree(b->km, collect_a); kfree(b->km, collect_n_a); kfree(b->km, seqMapped); if (b->km) { km_stat(b->km, &kmst); if (mm_dbg_flag & MM_DBG_PRINT_QNAME) fprintf(stderr, "QM\t%s\t%d\tcap=%ld,nCore=%ld,largest=%ld\n", qname, qlen_sum, kmst.capacity, kmst.n_cores, kmst.largest); assert(kmst.n_blocks == kmst.n_cores); // otherwise, there is a memory leak if (kmst.largest > 1U<<28) { km_destroy(b->km); b->km = km_init(); } } } mm_reg1_t *mm_map(const mm_idx_t *mi, int qlen, const char *seq, int *n_regs, mm_tbuf_t *b, const mm_mapopt_t *opt, const char *qname) { mm_reg1_t *regs; mm_map_frag(mi, 1, &qlen, &seq, n_regs, &regs, b, opt, qname); return regs; } /************************** * Multi-threaded mapping * **************************/ typedef struct { int mini_batch_size, n_processed, n_threads, n_fp; const mm_mapopt_t *opt; mm_bseq_file_t **fp; const mm_idx_t *mi; kstring_t str; int n_parts; uint32_t *rid_shift; FILE *fp_split, **fp_parts; } pipeline_t; typedef struct { const pipeline_t *p; int n_seq, n_frag; mm_bseq1_t *seq; int *n_reg, *seg_off, *n_seg, *rep_len, *frag_gap; mm_reg1_t **reg; mm_tbuf_t **buf; } step_t; static void worker_for(void *_data, long i, int tid) // kt_for() callback { step_t *s = (step_t*)_data; int qlens[MM_MAX_SEG], j, off = s->seg_off[i], pe_ori = s->p->opt->pe_ori; const char *qseqs[MM_MAX_SEG]; mm_tbuf_t *b = s->buf[tid]; assert(s->n_seg[i] <= MM_MAX_SEG); if (mm_dbg_flag & MM_DBG_PRINT_QNAME) fprintf(stderr, "QR\t%s\t%d\t%d\n", s->seq[off].name, tid, s->seq[off].l_seq); for (j = 0; j < s->n_seg[i]; ++j) { if (s->n_seg[i] == 2 && ((j == 0 && (pe_ori>>1&1)) || (j == 1 && (pe_ori&1)))) mm_revcomp_bseq(&s->seq[off + j]); qlens[j] = s->seq[off + j].l_seq; qseqs[j] = s->seq[off + j].seq; } if (s->p->opt->flag & MM_F_INDEPEND_SEG) { for (j = 0; j < s->n_seg[i]; ++j) { mm_map_frag(s->p->mi, 1, &qlens[j], &qseqs[j], &s->n_reg[off+j], &s->reg[off+j], b, s->p->opt, s->seq[off+j].name); s->rep_len[off + j] = b->rep_len; s->frag_gap[off 
+ j] = b->frag_gap; } } else { mm_map_frag(s->p->mi, s->n_seg[i], qlens, qseqs, &s->n_reg[off], &s->reg[off], b, s->p->opt, s->seq[off].name); for (j = 0; j < s->n_seg[i]; ++j) { s->rep_len[off + j] = b->rep_len; s->frag_gap[off + j] = b->frag_gap; } } for (j = 0; j < s->n_seg[i]; ++j) // flip the query strand and coordinate to the original read strand if (s->n_seg[i] == 2 && ((j == 0 && (pe_ori>>1&1)) || (j == 1 && (pe_ori&1)))) { int k, t; mm_revcomp_bseq(&s->seq[off + j]); for (k = 0; k < s->n_reg[off + j]; ++k) { mm_reg1_t *r = &s->reg[off + j][k]; t = r->qs; r->qs = qlens[j] - r->qe; r->qe = qlens[j] - t; r->rev = !r->rev; } } } static void merge_hits(step_t *s) { int f, i, k0, k, max_seg = 0, *n_reg_part, *rep_len_part, *frag_gap_part, *qlens; void *km; FILE **fp = s->p->fp_parts; const mm_mapopt_t *opt = s->p->opt; km = km_init(); for (f = 0; f < s->n_frag; ++f) max_seg = max_seg > s->n_seg[f]? max_seg : s->n_seg[f]; qlens = CALLOC(int, max_seg + s->p->n_parts * 3); n_reg_part = qlens + max_seg; rep_len_part = n_reg_part + s->p->n_parts; frag_gap_part = rep_len_part + s->p->n_parts; for (f = 0, k = k0 = 0; f < s->n_frag; ++f) { k0 = k; for (i = 0; i < s->n_seg[f]; ++i, ++k) { int j, l, t, rep_len = 0; qlens[i] = s->seq[k].l_seq; for (j = 0, s->n_reg[k] = 0; j < s->p->n_parts; ++j) { mm_err_fread(&n_reg_part[j], sizeof(int), 1, fp[j]); mm_err_fread(&rep_len_part[j], sizeof(int), 1, fp[j]); mm_err_fread(&frag_gap_part[j], sizeof(int), 1, fp[j]); s->n_reg[k] += n_reg_part[j]; if (rep_len < rep_len_part[j]) rep_len = rep_len_part[j]; } s->reg[k] = CALLOC(mm_reg1_t, s->n_reg[k]); for (j = 0, l = 0; j < s->p->n_parts; ++j) { for (t = 0; t < n_reg_part[j]; ++t, ++l) { mm_reg1_t *r = &s->reg[k][l]; uint32_t capacity; mm_err_fread(r, sizeof(mm_reg1_t), 1, fp[j]); r->rid += s->p->rid_shift[j]; if (opt->flag & MM_F_CIGAR) { mm_err_fread(&capacity, 4, 1, fp[j]); r->p = (mm_extra_t*)calloc(capacity, 4); r->p->capacity = capacity; mm_err_fread(r->p, r->p->capacity, 4, fp[j]); } } } mm_hit_sort(km, &s->n_reg[k], s->reg[k], opt->alt_drop); mm_set_parent(km, opt->mask_level, opt->mask_len, s->n_reg[k], s->reg[k], opt->a * 2 + opt->b, opt->flag&MM_F_HARD_MLEVEL, opt->alt_drop); if (!(opt->flag & MM_F_ALL_CHAINS)) { mm_select_sub(km, opt->pri_ratio, s->p->mi->k*2, opt->best_n, &s->n_reg[k], s->reg[k]); mm_set_sam_pri(s->n_reg[k], s->reg[k]); } mm_set_mapq(km, s->n_reg[k], s->reg[k], opt->min_chain_score, opt->a, rep_len, !!(opt->flag & MM_F_SR)); } if (s->n_seg[f] == 2 && opt->pe_ori >= 0 && (opt->flag&MM_F_CIGAR)) mm_pair(km, frag_gap_part[0], opt->pe_bonus, opt->a * 2 + opt->b, opt->a, qlens, &s->n_reg[k0], &s->reg[k0]); } free(qlens); km_destroy(km); } static void *worker_pipeline(void *shared, int step, void *in) { int i, j, k; pipeline_t *p = (pipeline_t*)shared; if (step == 0) { // step 0: read sequences int with_qual = (!!(p->opt->flag & MM_F_OUT_SAM) && !(p->opt->flag & MM_F_NO_QUAL)); int with_comment = !!(p->opt->flag & MM_F_COPY_COMMENT); int frag_mode = (p->n_fp > 1 || !!(p->opt->flag & MM_F_FRAG_MODE)); step_t *s; s = (step_t*)calloc(1, sizeof(step_t)); if (p->n_fp > 1) s->seq = mm_bseq_read_frag2(p->n_fp, p->fp, p->mini_batch_size, with_qual, with_comment, &s->n_seq); else s->seq = mm_bseq_read3(p->fp[0], p->mini_batch_size, with_qual, with_comment, frag_mode, &s->n_seq); if (s->seq) { s->p = p; for (i = 0; i < s->n_seq; ++i) s->seq[i].rid = p->n_processed++; //reshuffle based on length here, longer read first //NOTE: this would affect the ordering of reads in output { mm_bseq1_t 
*seq_copy = (mm_bseq1_t*) kmalloc(0, sizeof(mm_bseq1_t) * s->n_seq); std::vector< std::pair<int, int> > lengths; for (i = 0; i < s->n_seq; ++i) lengths.emplace_back (s->seq[i].l_seq, i); std::sort (lengths.begin(), lengths.end(), std::greater<std::pair<int,int>>()); for (i = 0; i < s->n_seq; ++i) { int prev_id = lengths[i].second; //copy all pointers seq_copy[i].l_seq = s->seq[prev_id].l_seq; seq_copy[i].rid = s->seq[prev_id].rid; seq_copy[i].name = s->seq[prev_id].name; seq_copy[i].seq = s->seq[prev_id].seq; seq_copy[i].qual = s->seq[prev_id].qual; seq_copy[i].comment = s->seq[prev_id].comment; } free(s->seq); s->seq = seq_copy; } s->buf = (mm_tbuf_t**)calloc(p->n_threads, sizeof(mm_tbuf_t*)); for (i = 0; i < p->n_threads; ++i) s->buf[i] = mm_tbuf_init(); s->n_reg = (int*)calloc(5 * s->n_seq, sizeof(int)); s->seg_off = s->n_reg + s->n_seq; // seg_off, n_seg, rep_len and frag_gap are allocated together with n_reg s->n_seg = s->seg_off + s->n_seq; s->rep_len = s->n_seg + s->n_seq; s->frag_gap = s->rep_len + s->n_seq; s->reg = (mm_reg1_t**)calloc(s->n_seq, sizeof(mm_reg1_t*)); for (i = 1, j = 0; i <= s->n_seq; ++i) if (i == s->n_seq || !frag_mode || !mm_qname_same(s->seq[i-1].name, s->seq[i].name)) { s->n_seg[s->n_frag] = i - j; s->seg_off[s->n_frag++] = j; j = i; } return s; } else free(s); } else if (step == 1) { // step 1: map if (p->n_parts > 0) merge_hits((step_t*)in); else kt_for(p->n_threads, worker_for, in, ((step_t*)in)->n_frag); return in; } else if (step == 2) { // step 2: output void *km = 0; step_t *s = (step_t*)in; const mm_idx_t *mi = p->mi; for (i = 0; i < p->n_threads; ++i) mm_tbuf_destroy(s->buf[i]); free(s->buf); if ((p->opt->flag & MM_F_OUT_CS) && !(mm_dbg_flag & MM_DBG_NO_KALLOC)) km = km_init(); for (k = 0; k < s->n_frag; ++k) { int seg_st = s->seg_off[k], seg_en = s->seg_off[k] + s->n_seg[k]; for (i = seg_st; i < seg_en; ++i) { mm_bseq1_t *t = &s->seq[i]; if (p->opt->split_prefix && p->n_parts == 0) { // then write to temporary files mm_err_fwrite(&s->n_reg[i], sizeof(int), 1, p->fp_split); mm_err_fwrite(&s->rep_len[i], sizeof(int), 1, p->fp_split); mm_err_fwrite(&s->frag_gap[i], sizeof(int), 1, p->fp_split); for (j = 0; j < s->n_reg[i]; ++j) { mm_reg1_t *r = &s->reg[i][j]; mm_err_fwrite(r, sizeof(mm_reg1_t), 1, p->fp_split); if (p->opt->flag & MM_F_CIGAR) { mm_err_fwrite(&r->p->capacity, 4, 1, p->fp_split); mm_err_fwrite(r->p, r->p->capacity, 4, p->fp_split); } } } else if (s->n_reg[i] > 0) { // the query has at least one hit for (j = 0; j < s->n_reg[i]; ++j) { mm_reg1_t *r = &s->reg[i][j]; assert(!r->sam_pri || r->id == r->parent); if ((p->opt->flag & MM_F_NO_PRINT_2ND) && r->id != r->parent) continue; if (p->opt->flag & MM_F_OUT_SAM) mm_write_sam3(&p->str, mi, t, i - seg_st, j, s->n_seg[k], &s->n_reg[seg_st], (const mm_reg1_t*const*)&s->reg[seg_st], km, p->opt->flag, s->rep_len[i]); else mm_write_paf3(&p->str, mi, t, r, km, p->opt->flag, s->rep_len[i]); mm_err_puts(p->str.s); } } else if ((p->opt->flag & MM_F_PAF_NO_HIT) || ((p->opt->flag & MM_F_OUT_SAM) && !(p->opt->flag & MM_F_SAM_HIT_ONLY))) { // output an empty hit, if requested if (p->opt->flag & MM_F_OUT_SAM) mm_write_sam3(&p->str, mi, t, i - seg_st, -1, s->n_seg[k], &s->n_reg[seg_st], (const mm_reg1_t*const*)&s->reg[seg_st], km, p->opt->flag, s->rep_len[i]); else mm_write_paf3(&p->str, mi, t, 0, 0, p->opt->flag, s->rep_len[i]); mm_err_puts(p->str.s); } } for (i = seg_st; i < seg_en; ++i) { for (j = 0; j < s->n_reg[i]; ++j) free(s->reg[i][j].p); free(s->reg[i]); free(s->seq[i].seq); free(s->seq[i].name); if 
(s->seq[i].qual) free(s->seq[i].qual); if (s->seq[i].comment) free(s->seq[i].comment); } } free(s->reg); free(s->n_reg); free(s->seq); // seg_off, n_seg, rep_len and frag_gap were allocated with reg; no memory leak here km_destroy(km); if (mm_verbose >= 3) fprintf(stderr, "[M::%s::%.3f*%.2f] mapped %d sequences\n", __func__, realtime() - mm_realtime0, cputime() / (realtime() - mm_realtime0), s->n_seq); free(s); } return 0; } static mm_bseq_file_t **open_bseqs(int n, const char **fn) { mm_bseq_file_t **fp; int i, j; fp = (mm_bseq_file_t**)calloc(n, sizeof(mm_bseq_file_t*)); for (i = 0; i < n; ++i) { if ((fp[i] = mm_bseq_open(fn[i])) == 0) { if (mm_verbose >= 1) fprintf(stderr, "ERROR: failed to open file '%s': %s\n", fn[i], strerror(errno)); for (j = 0; j < i; ++j) mm_bseq_close(fp[j]); free(fp); return 0; } } return fp; } int mm_map_file_frag(const mm_idx_t *idx, int n_segs, const char **fn, const mm_mapopt_t *opt, int n_threads) { int i, pl_threads; pipeline_t pl; if (n_segs < 1) return -1; memset(&pl, 0, sizeof(pipeline_t)); pl.n_fp = n_segs; pl.fp = open_bseqs(pl.n_fp, fn); if (pl.fp == 0) return -1; pl.opt = opt, pl.mi = idx; pl.n_threads = n_threads > 1? n_threads : 1; pl.mini_batch_size = opt->mini_batch_size; if (opt->split_prefix) pl.fp_split = mm_split_init(opt->split_prefix, idx); pl_threads = n_threads == 1? 1 : (opt->flag&MM_F_2_IO_THREADS)? 3 : 2; pl_threads = 1; //TODO: this change helped avoid seg-faults on Phoenix cluster (figure out why) //GDB was indicating seg-faults in bseq.c kt_pipeline(pl_threads, worker_pipeline, &pl, 3); free(pl.str.s); if (pl.fp_split) fclose(pl.fp_split); for (i = 0; i < pl.n_fp; ++i) mm_bseq_close(pl.fp[i]); free(pl.fp); return 0; } int mm_map_file(const mm_idx_t *idx, const char *fn, const mm_mapopt_t *opt, int n_threads) { return mm_map_file_frag(idx, 1, &fn, opt, n_threads); } int mm_split_merge(int n_segs, const char **fn, const mm_mapopt_t *opt, int n_split_idx) { int i; pipeline_t pl; mm_idx_t *mi; if (n_segs < 1 || n_split_idx < 1) return -1; memset(&pl, 0, sizeof(pipeline_t)); pl.n_fp = n_segs; pl.fp = open_bseqs(pl.n_fp, fn); if (pl.fp == 0) return -1; pl.opt = opt; pl.mini_batch_size = opt->mini_batch_size; pl.n_parts = n_split_idx; pl.fp_parts = CALLOC(FILE*, pl.n_parts); pl.rid_shift = CALLOC(uint32_t, pl.n_parts); pl.mi = mi = mm_split_merge_prep(opt->split_prefix, n_split_idx, pl.fp_parts, pl.rid_shift); if (pl.mi == 0) { free(pl.fp_parts); free(pl.rid_shift); return -1; } for (i = n_split_idx - 1; i > 0; --i) pl.rid_shift[i] = pl.rid_shift[i - 1]; for (pl.rid_shift[0] = 0, i = 1; i < n_split_idx; ++i) pl.rid_shift[i] += pl.rid_shift[i - 1]; if (opt->flag & MM_F_OUT_SAM) for (i = 0; i < (int32_t)pl.mi->n_seq; ++i) printf("@SQ\tSN:%s\tLN:%d\n", pl.mi->seq[i].name, pl.mi->seq[i].len); kt_pipeline(2, worker_pipeline, &pl, 3); free(pl.str.s); mm_idx_destroy(mi); free(pl.rid_shift); for (i = 0; i < n_split_idx; ++i) fclose(pl.fp_parts[i]); free(pl.fp_parts); for (i = 0; i < pl.n_fp; ++i) mm_bseq_close(pl.fp[i]); free(pl.fp); mm_split_rm_tmp(opt->split_prefix, n_split_idx); return 0; }
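/* --- Illustrative note (not part of the original source) ---
 * In mm_split_merge() above, pl.rid_shift is first filled by mm_split_merge_prep()
 * and then converted into cumulative reference-id offsets: the per-part values are
 * shifted right by one slot and prefix-summed, so part i offsets its local reference
 * ids by the total number of sequences in parts 0..i-1. A minimal standalone sketch
 * of that transform, assuming rid_shift[i] initially holds the sequence count of
 * part i (kept under #if 0 so it does not interfere with compilation): */
#if 0
#include <stdint.h>
#include <stdio.h>
static void rid_shift_demo(uint32_t *rid_shift, int n_split_idx)
{
	int i;
	for (i = n_split_idx - 1; i > 0; --i) rid_shift[i] = rid_shift[i - 1]; // shift counts right by one
	for (rid_shift[0] = 0, i = 1; i < n_split_idx; ++i) rid_shift[i] += rid_shift[i - 1]; // prefix sum
}
int main(void)
{
	uint32_t cnt[4] = { 10, 5, 7, 3 }; // per-part sequence counts (example values)
	int i;
	rid_shift_demo(cnt, 4); // yields offsets { 0, 10, 15, 22 }
	for (i = 0; i < 4; ++i) printf("%u\n", cnt[i]);
	return 0;
}
#endif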
mixedulm_linear_solver.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_MIXEDULM_SOLVER_H_INCLUDED ) #define KRATOS_MIXEDULM_SOLVER_H_INCLUDED // System includes #include <string> #include <iostream> #include <sstream> #include <cstddef> // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "linear_solvers/reorderer.h" #include "linear_solvers/iterative_solver.h" #include "utilities/openmp_utils.h" #include "contact_structural_mechanics_application_variables.h" #include "utilities/sparse_matrix_multiplication_utility.h" #include "custom_utilities/logging_settings.hpp" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class MixedULMLinearSolver * @ingroup ContactStructuralMechanicsApplication * @brief This solver is designed for the solution of mixed U-LM problems (this solver in particular is optimized for dual LM, to avoid the explicit resolution of the LM blocks). * @details It uses a block structure dividing the matrix in UU LMLM ULM LMU blocks * and uses "standard" linear solvers for the different blocks as well as a GMRES for the outer part * @author Vicente Mataix Ferrandiz */ template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType = Preconditioner<TSparseSpaceType, TDenseSpaceType>, class TReordererType = Reorderer<TSparseSpaceType, TDenseSpaceType> > class MixedULMLinearSolver : public IterativeSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType> { public: ///@} ///@name Enums ///@{ /// This enum is used to identify the kind of block each index belongs to enum class BlockType { OTHER, MASTER, SLAVE_INACTIVE, SLAVE_ACTIVE, LM_INACTIVE, LM_ACTIVE }; ///@name Type Definitions ///@{ /// The flag that indicates if the blocks are allocated KRATOS_DEFINE_LOCAL_FLAG( BLOCKS_ARE_ALLOCATED ); /// The flag that indicates if the solution is initialized KRATOS_DEFINE_LOCAL_FLAG( IS_INITIALIZED ); /// Pointer definition of MixedULMLinearSolver KRATOS_CLASS_POINTER_DEFINITION (MixedULMLinearSolver); /// The base class corresponds to an iterative solver typedef IterativeSolver<TSparseSpaceType, TDenseSpaceType, TPreconditionerType, TReordererType> BaseType; /// The base class for the linear solver typedef LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType> LinearSolverType; /// The pointer to a linear solver typedef typename LinearSolverType::Pointer LinearSolverPointerType; /// The sparse matrix type typedef typename TSparseSpaceType::MatrixType SparseMatrixType; /// The vector type typedef typename TSparseSpaceType::VectorType VectorType; /// The dense matrix type typedef typename TDenseSpaceType::MatrixType DenseMatrixType; /// The dense vector type typedef typename TDenseSpaceType::VectorType DenseVectorType; /// The node type typedef Node<3> NodeType; /// The definition of the dof type typedef typename ModelPart::DofType DofType; /// The array containing the dofs typedef typename ModelPart::DofsArrayType DofsArrayType; /// An array of conditions typedef ModelPart::ConditionsContainerType ConditionsArrayType; /// An array of nodes typedef ModelPart::NodesContainerType NodesArrayType; /// The size type typedef std::size_t SizeType; /// The index
type typedef std::size_t IndexType; /// A vector of indices typedef DenseVector<IndexType> IndexVectorType; /// A vector of types typedef DenseVector<BlockType> BlockTypeVectorType; static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon(); ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor * @param pSolverDispBlock The linear solver used for the displacement block * @param MaxTolerance The maximal tolerance considered * @param MaxIterationNumber The maximal number of iterations */ MixedULMLinearSolver ( LinearSolverPointerType pSolverDispBlock, const double MaxTolerance, const std::size_t MaxIterationNumber ) : BaseType (MaxTolerance, MaxIterationNumber), mpSolverDispBlock(pSolverDispBlock) { // Initializing the remaining variables mOptions.Set(BLOCKS_ARE_ALLOCATED, false); mOptions.Set(IS_INITIALIZED, false); } /** * @brief Second constructor, it uses a Kratos Parameters object as input instead of direct input * @param pSolverDispBlock The linear solver used for the displacement block * @param ThisParameters The configuration parameters considered */ MixedULMLinearSolver( LinearSolverPointerType pSolverDispBlock, Parameters ThisParameters = Parameters(R"({})") ): BaseType (), mpSolverDispBlock(pSolverDispBlock) { KRATOS_TRY // Now validate against defaults -- this also ensures no type mismatch Parameters default_parameters = GetDefaultParameters(); ThisParameters.ValidateAndAssignDefaults(default_parameters); // Initializing the remaining variables this->SetTolerance( ThisParameters["tolerance"].GetDouble() ); this->SetMaxIterationsNumber( ThisParameters["max_iteration_number"].GetInt() ); mEchoLevel = ThisParameters["echo_level"].GetInt(); mOptions.Set(BLOCKS_ARE_ALLOCATED, false); mOptions.Set(IS_INITIALIZED, false); KRATOS_CATCH("") } /// Copy constructor. MixedULMLinearSolver (const MixedULMLinearSolver& rOther) : BaseType(rOther), mpSolverDispBlock(rOther.mpSolverDispBlock), mOptions(rOther.mOptions), mMasterIndices(rOther.mMasterIndices), mSlaveInactiveIndices(rOther.mSlaveInactiveIndices), mSlaveActiveIndices(rOther.mSlaveActiveIndices), mLMInactiveIndices(rOther.mLMInactiveIndices), mLMActiveIndices(rOther.mLMActiveIndices), mOtherIndices(rOther.mOtherIndices), mGlobalToLocalIndexing(rOther.mGlobalToLocalIndexing), mWhichBlockType(rOther.mWhichBlockType), mKDispModified(rOther.mKDispModified), mKLMAModified(rOther.mKLMAModified), mKLMIModified(rOther.mKLMIModified), mKSAN(rOther.mKSAN), mKSAM(rOther.mKSAM), mKSASI(rOther.mKSASI), mKSASA(rOther.mKSASA), mPOperator(rOther.mPOperator), mCOperator(rOther.mCOperator), mResidualLMActive(rOther.mResidualLMActive), mResidualLMInactive(rOther.mResidualLMInactive), mResidualDisp(rOther.mResidualDisp), mLMActive(rOther.mLMActive), mLMInactive(rOther.mLMInactive), mDisp(rOther.mDisp), mEchoLevel(rOther.mEchoLevel), mFileCreated(rOther.mFileCreated) { } /// Destructor. ~MixedULMLinearSolver() override {} ///@} ///@name Operators ///@{ /// Assignment operator. MixedULMLinearSolver& operator= (const MixedULMLinearSolver& Other) { return *this; } ///@} ///@name Operations ///@{ /** * @brief This function is designed to be called as few times as possible. It creates the data structures * that only depend on the connectivity of the matrix (and not on its coefficients) * @details So that the memory can be allocated once and expensive operations can be done only when strictly * needed * @param rA System matrix * @param rX Solution vector. It's also the initial guess for iterative linear solvers.
* @param rB Right hand side vector. */ void Initialize ( SparseMatrixType& rA, VectorType& rX, VectorType& rB ) override { if (mOptions.Is(BLOCKS_ARE_ALLOCATED)) { mpSolverDispBlock->Initialize(mKDispModified, mDisp, mResidualDisp); mOptions.Set(IS_INITIALIZED, true); } else KRATOS_DETAIL("MixedULM Initialize") << "Linear solver initialization is deferred to the moment at which blocks are available" << std::endl; } /** * @brief This function is designed to be called every time the coefficients change in the system * that is, normally at the beginning of each solve. * @details For example if we are implementing a direct solver, this is the place to do the factorization * so that then the backward substitution can be performed effectively more than once * @param rA System matrix * @param rX Solution vector. It's also the initial guess for iterative linear solvers. * @param rB Right hand side vector. */ void InitializeSolutionStep ( SparseMatrixType& rA, VectorType& rX, VectorType& rB ) override { // Copy to local matrices if (mOptions.IsNot(BLOCKS_ARE_ALLOCATED)) { FillBlockMatrices (true, rA, rX, rB); mOptions.Set(BLOCKS_ARE_ALLOCATED, true); } else { FillBlockMatrices (false, rA, rX, rB); mOptions.Set(BLOCKS_ARE_ALLOCATED, true); } if(mOptions.IsNot(IS_INITIALIZED)) this->Initialize(rA,rX,rB); mpSolverDispBlock->InitializeSolutionStep(mKDispModified, mDisp, mResidualDisp); } /** * @brief This function actually performs the solution work, eventually taking advantage of what was done before in the * @details Initialize and InitializeSolutionStep functions. * @param rA System matrix * @param rX Solution vector. It's also the initial guess for iterative linear solvers. * @param rB Right hand side vector. */ void PerformSolutionStep ( SparseMatrixType& rA, VectorType& rX, VectorType& rB ) override { // Auxiliar size const SizeType lm_active_size = mLMActiveIndices.size(); const SizeType lm_inactive_size = mLMInactiveIndices.size(); const SizeType total_disp_size = mOtherIndices.size() + mMasterIndices.size() + mSlaveInactiveIndices.size() + mSlaveActiveIndices.size(); // Get the u and lm residuals GetUPart (rB, mResidualDisp); // Solve u block if (mDisp.size() != total_disp_size) mDisp.resize(total_disp_size, false); mpSolverDispBlock->Solve (mKDispModified, mDisp, mResidualDisp); // Write back solution SetUPart(rX, mDisp); // Solve LM if (lm_active_size > 0) { // Now we compute the residual of the LM GetLMAPart (rB, mResidualLMActive); // LM = D⁻¹*rLM if (mLMActive.size() != lm_active_size) mLMActive.resize(lm_active_size, false); TSparseSpaceType::Mult (mKLMAModified, mResidualLMActive, mLMActive); // Write back solution SetLMAPart(rX, mLMActive); } if (lm_inactive_size > 0) { // Now we compute the residual of the LM GetLMIPart (rB, mResidualLMInactive); // LM = D⁻¹*rLM if (mLMInactive.size() != lm_inactive_size) mLMInactive.resize(lm_inactive_size, false); TSparseSpaceType::Mult (mKLMIModified, mResidualLMInactive, mLMInactive); // Write back solution SetLMIPart(rX, mLMInactive); } } /** * @brief This function is designed to be called at the end of the solve step. * @details For example this is the place to remove any data that we do not want to save for later * @param rA System matrix * @param rX Solution vector. It's also the initial guess for iterative linear solvers. * @param rB Right hand side vector.
*/ void FinalizeSolutionStep ( SparseMatrixType& rA, VectorType& rX, VectorType& rB ) override { mpSolverDispBlock->FinalizeSolutionStep(mKDispModified, mDisp, mResidualDisp); } /** * @brief This function is designed to clean up all internal data in the solver. * @details Clear is designed to leave the solver object as if newly created. After a clear, a new Initialize is needed */ void Clear() override { mOptions.Set(BLOCKS_ARE_ALLOCATED, false); mpSolverDispBlock->Clear(); // We clear the matrices and vectors mKDispModified.clear(); /// The modified displacement block mKLMAModified.clear(); /// The modified active LM block (diagonal) mKLMIModified.clear(); /// The modified inactive LM block (diagonal) mKSAN.clear(); /// The slave active-displacement block mKSAM.clear(); /// The active slave-master block mKSASI.clear(); /// The active slave-inactive slave block mKSASA.clear(); /// The active slave-slave active block mPOperator.clear(); /// The operator used for the master blocks mCOperator.clear(); /// The operator used for the active slave block mResidualLMActive.clear(); /// The residual corresponding to the active LM mResidualLMInactive.clear(); /// The residual corresponding to the inactive LM mResidualDisp.clear(); /// The residual of the displacements mLMActive.clear(); /// The solution of the active LM mLMInactive.clear(); /// The solution of the inactive LM mDisp.clear(); /// The solution of the displacement mOptions.Set(IS_INITIALIZED, false); } /** * @brief Normal solve method. * @details Solves the linear system Ax=b and puts the result on SystemVector& rX. rX is also the initial guess for iterative methods. * @param rA System matrix * @param rX Solution vector. It's also the initial guess for iterative linear solvers. * @param rB Right hand side vector.
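     * @note A minimal usage sketch (hypothetical setup, shown only for illustration;
     * in practice the builder-and-solver drives these calls): the solver wraps an
     * inner solver for the displacement block, and since
     * AdditionalPhysicalDataIsNeeded() returns true, ProvideAdditionalData() must
     * have been called before Solve() so that the block indices exist. Here
     * SparseSpaceType, LocalSpaceType, r_dof_set and r_model_part are assumed
     * aliases/objects of the caller:
     * @code
     *   LinearSolverType::Pointer p_inner_solver = ...; // any solver for the displacement block
     *   MixedULMLinearSolver<SparseSpaceType, LocalSpaceType> solver(p_inner_solver);
     *   solver.ProvideAdditionalData(rA, rX, rB, r_dof_set, r_model_part);
     *   solver.Solve(rA, rX, rB);
     * @endcode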
*/ bool Solve( SparseMatrixType& rA, VectorType& rX, VectorType& rB ) override { // We print the system before condensation (if needed) if (mEchoLevel == 2) { //if it is needed to print the debug info KRATOS_INFO("RHS BEFORE CONDENSATION") << "RHS = " << rB << std::endl; } else if (mEchoLevel == 3) { //if it is needed to print the debug info KRATOS_INFO("LHS BEFORE CONDENSATION") << "SystemMatrix = " << rA << std::endl; KRATOS_INFO("RHS BEFORE CONDENSATION") << "RHS = " << rB << std::endl; } else if (mEchoLevel >= 4) { //print to matrix market file const std::string matrix_market_name = "before_condensation_A_" + std::to_string(mFileCreated) + ".mm"; TSparseSpaceType::WriteMatrixMarketMatrix(matrix_market_name.c_str(), rA, false); const std::string matrix_market_vectname = "before_condensation_b_" + std::to_string(mFileCreated) + ".mm.rhs"; TSparseSpaceType::WriteMatrixMarketVector(matrix_market_vectname.c_str(), rB); } if (mOptions.IsNot(IS_INITIALIZED)) this->Initialize (rA,rX,rB); this->InitializeSolutionStep (rA,rX,rB); this->PerformSolutionStep (rA,rX,rB); this->FinalizeSolutionStep (rA,rX,rB); // We print the resulting system (if needed) if (mEchoLevel == 2) { //if it is needed to print the debug info KRATOS_INFO("Dx") << "Solution obtained = " << mDisp << std::endl; KRATOS_INFO("RHS") << "RHS = " << mResidualDisp << std::endl; } else if (mEchoLevel == 3) { //if it is needed to print the debug info KRATOS_INFO("LHS") << "SystemMatrix = " << mKDispModified << std::endl; KRATOS_INFO("Dx") << "Solution obtained = " << mDisp << std::endl; KRATOS_INFO("RHS") << "RHS = " << mResidualDisp << std::endl; } else if (mEchoLevel >= 4) { //print to matrix market file const std::string matrix_market_name = "A_" + std::to_string(mFileCreated) + ".mm"; TSparseSpaceType::WriteMatrixMarketMatrix(matrix_market_name.c_str(), mKDispModified, false); const std::string matrix_market_vectname = "b_" + std::to_string(mFileCreated) + ".mm.rhs"; TSparseSpaceType::WriteMatrixMarketVector(matrix_market_vectname.c_str(), mResidualDisp); mFileCreated++; } return false; } /** * @brief Multi solve method for solving a set of linear systems with the same coefficient matrix. * @details Solves the linear system Ax=b and puts the result on SystemVector& rX. rX is also the initial guess for iterative methods. * @param rA System matrix * @param rX Solution vector. It's also the initial guess for iterative linear solvers. * @param rB Right hand side vector. */ bool Solve ( SparseMatrixType& rA, DenseMatrixType& rX, DenseMatrixType& rB ) override { return false; } /** * @brief Some solvers may require a minimum degree of knowledge of the structure of the matrix. For example, * when solving a mixed u-p problem, it is important to identify the row associated to v and p. * @details Another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers * which require knowledge on the spatial position of the nodes associated to a given dof. * This function tells if the solver requires such data */ bool AdditionalPhysicalDataIsNeeded() override { return true; } /** * @brief Some solvers may require a minimum degree of knowledge of the structure of the matrix. * @details For example, when solving a mixed u-p problem, it is important to identify the row associated to v and p. Another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers which require knowledge on the spatial position of the nodes associated to a given dof.
This function is the place to eventually provide such data * @param rA System matrix * @param rX Solution vector. It's also the initial guess for iterative linear solvers. * @param rB Right hand side vector. */ void ProvideAdditionalData ( SparseMatrixType& rA, VectorType& rX, VectorType& rB, DofsArrayType& rDofSet, ModelPart& rModelPart ) override { // Allocating auxiliar parameters IndexType node_id; // Count LM dofs SizeType n_lm_inactive_dofs = 0, n_lm_active_dofs = 0; SizeType n_master_dofs = 0; SizeType n_slave_inactive_dofs = 0, n_slave_active_dofs = 0; SizeType tot_active_dofs = 0; // We separate if we consider a block builder and solver or an elimination builder and solver if (rModelPart.IsNot(TO_SPLIT)) { // In case of block builder and solver for (auto& i_dof : rDofSet) { node_id = i_dof.Id(); const NodeType& node = rModelPart.GetNode(node_id); if (i_dof.EquationId() < rA.size1()) { tot_active_dofs++; if (IsLMDof(i_dof)) { if (node.Is(ACTIVE)) n_lm_active_dofs++; else n_lm_inactive_dofs++; } else if (node.Is(INTERFACE) && IsDisplacementDof(i_dof)) { if (node.Is(MASTER)) { n_master_dofs++; } else if (node.Is(SLAVE)) { if (node.Is(ACTIVE)) n_slave_active_dofs++; else n_slave_inactive_dofs++; } } } } } else { // In case of elimination builder and solver for (auto& i_dof : rDofSet) { node_id = i_dof.Id(); const NodeType& node = rModelPart.GetNode(node_id); tot_active_dofs++; if (IsLMDof(i_dof)) { if (node.Is(ACTIVE)) n_lm_active_dofs++; else n_lm_inactive_dofs++; } else if (node.Is(INTERFACE) && IsDisplacementDof(i_dof)) { if (node.Is(MASTER)) { n_master_dofs++; } else if (node.Is(SLAVE)) { if (node.Is(ACTIVE)) n_slave_active_dofs++; else n_slave_inactive_dofs++; } } } } KRATOS_ERROR_IF(tot_active_dofs != rA.size1()) << "Total system size does not coincide with the free dof map: " << tot_active_dofs << " vs " << rA.size1() << std::endl; // Resize arrays as needed if (mMasterIndices.size() != n_master_dofs) mMasterIndices.resize (n_master_dofs,false); if (mSlaveInactiveIndices.size() != n_slave_inactive_dofs) mSlaveInactiveIndices.resize (n_slave_inactive_dofs,false); if (mSlaveActiveIndices.size() != n_slave_active_dofs) mSlaveActiveIndices.resize (n_slave_active_dofs,false); if (mLMInactiveIndices.size() != n_lm_inactive_dofs) mLMInactiveIndices.resize (n_lm_inactive_dofs,false); if (mLMActiveIndices.size() != n_lm_active_dofs) mLMActiveIndices.resize (n_lm_active_dofs,false); const SizeType n_other_dofs = tot_active_dofs - n_lm_inactive_dofs - n_lm_active_dofs - n_master_dofs - n_slave_inactive_dofs - n_slave_active_dofs; if (mOtherIndices.size() != n_other_dofs) mOtherIndices.resize (n_other_dofs, false); if (mGlobalToLocalIndexing.size() != tot_active_dofs) mGlobalToLocalIndexing.resize (tot_active_dofs,false); if (mWhichBlockType.size() != tot_active_dofs) mWhichBlockType.resize(tot_active_dofs, false); // Size check KRATOS_ERROR_IF_NOT(n_lm_active_dofs == n_slave_active_dofs) << "The number of active LM dofs: " << n_lm_active_dofs << " and active slave nodes dofs: " << n_slave_active_dofs << " does not coincide" << std::endl; /** * Construct aux_lists as needed * "other_counter[i]" i will contain the position in the global system of the i-th NON-LM node * "lm_active_counter[i]" will contain the in the global system of the i-th NON-LM node * mGlobalToLocalIndexing[i] will contain the position in the local blocks of the */ SizeType lm_inactive_counter = 0, lm_active_counter = 0; SizeType master_counter = 0; SizeType slave_inactive_counter = 0, slave_active_counter = 0; SizeType 
other_counter = 0; IndexType global_pos = 0; // We separate if we consider a block builder and solver or an elimination builder and solver if (rModelPart.IsNot(TO_SPLIT)) { // In case of block builder and solver for (auto& i_dof : rDofSet) { node_id = i_dof.Id(); const NodeType& r_node = rModelPart.GetNode(node_id); if (i_dof.EquationId() < rA.size1()) { if (IsLMDof(i_dof)) { if (r_node.Is(ACTIVE)) { mLMActiveIndices[lm_active_counter] = global_pos; mGlobalToLocalIndexing[global_pos] = lm_active_counter; mWhichBlockType[global_pos] = BlockType::LM_ACTIVE; ++lm_active_counter; } else { mLMInactiveIndices[lm_inactive_counter] = global_pos; mGlobalToLocalIndexing[global_pos] = lm_inactive_counter; mWhichBlockType[global_pos] = BlockType::LM_INACTIVE; ++lm_inactive_counter; } } else if ( r_node.Is(INTERFACE) && IsDisplacementDof(i_dof)) { if (r_node.Is(MASTER)) { mMasterIndices[master_counter] = global_pos; mGlobalToLocalIndexing[global_pos] = master_counter; mWhichBlockType[global_pos] = BlockType::MASTER; ++master_counter; } else if (r_node.Is(SLAVE)) { if (r_node.Is(ACTIVE)) { mSlaveActiveIndices[slave_active_counter] = global_pos; mGlobalToLocalIndexing[global_pos] = slave_active_counter; mWhichBlockType[global_pos] = BlockType::SLAVE_ACTIVE; ++slave_active_counter; } else { mSlaveInactiveIndices[slave_inactive_counter] = global_pos; mGlobalToLocalIndexing[global_pos] = slave_inactive_counter; mWhichBlockType[global_pos] = BlockType::SLAVE_INACTIVE; ++slave_inactive_counter; } } else { // We need to consider always an else to ensure that the system size is consistent mOtherIndices[other_counter] = global_pos; mGlobalToLocalIndexing[global_pos] = other_counter; mWhichBlockType[global_pos] = BlockType::OTHER; ++other_counter; } } else { mOtherIndices[other_counter] = global_pos; mGlobalToLocalIndexing[global_pos] = other_counter; mWhichBlockType[global_pos] = BlockType::OTHER; ++other_counter; } ++global_pos; } } } else { // In case of elimination builder and solver for (auto& i_dof : rDofSet) { node_id = i_dof.Id(); const NodeType& r_node = rModelPart.GetNode(node_id); if (IsLMDof(i_dof)) { if (r_node.Is(ACTIVE)) { mLMActiveIndices[lm_active_counter] = global_pos; mGlobalToLocalIndexing[global_pos] = lm_active_counter; mWhichBlockType[global_pos] = BlockType::LM_ACTIVE; ++lm_active_counter; } else { mLMInactiveIndices[lm_inactive_counter] = global_pos; mGlobalToLocalIndexing[global_pos] = lm_inactive_counter; mWhichBlockType[global_pos] = BlockType::LM_INACTIVE; ++lm_inactive_counter; } } else if ( r_node.Is(INTERFACE) && IsDisplacementDof(i_dof)) { if (r_node.Is(MASTER)) { mMasterIndices[master_counter] = global_pos; mGlobalToLocalIndexing[global_pos] = master_counter; mWhichBlockType[global_pos] = BlockType::MASTER; ++master_counter; } else if (r_node.Is(SLAVE)) { if (r_node.Is(ACTIVE)) { mSlaveActiveIndices[slave_active_counter] = global_pos; mGlobalToLocalIndexing[global_pos] = slave_active_counter; mWhichBlockType[global_pos] = BlockType::SLAVE_ACTIVE; ++slave_active_counter; } else { mSlaveInactiveIndices[slave_inactive_counter] = global_pos; mGlobalToLocalIndexing[global_pos] = slave_inactive_counter; mWhichBlockType[global_pos] = BlockType::SLAVE_INACTIVE; ++slave_inactive_counter; } } else { // We need to consider always an else to ensure that the system size is consistent mOtherIndices[other_counter] = global_pos; mGlobalToLocalIndexing[global_pos] = other_counter; mWhichBlockType[global_pos] = BlockType::OTHER; ++other_counter; } } else { mOtherIndices[other_counter] = 
global_pos; mGlobalToLocalIndexing[global_pos] = other_counter; mWhichBlockType[global_pos] = BlockType::OTHER; ++other_counter; } ++global_pos; } } KRATOS_DEBUG_ERROR_IF(master_counter != n_master_dofs) << "The number of master dofs counter: " << master_counter << " does not coincide with the expected: " << n_master_dofs << std::endl; KRATOS_DEBUG_ERROR_IF(slave_active_counter != n_slave_active_dofs) << "The number of active slave dofs counter: " << slave_active_counter << " does not coincide with the expected: " << n_slave_active_dofs << std::endl; KRATOS_DEBUG_ERROR_IF(slave_inactive_counter != n_slave_inactive_dofs) << "The number of inactive slave dofs counter: " << slave_inactive_counter << " does not coincide with the expected: " << n_slave_inactive_dofs << std::endl; KRATOS_DEBUG_ERROR_IF(lm_active_counter != n_lm_active_dofs) << "The number of active LM dofs counter: " << lm_active_counter << " does not coincide with the expected: " << n_lm_active_dofs << std::endl; KRATOS_DEBUG_ERROR_IF(lm_inactive_counter != n_lm_inactive_dofs) << "The number of inactive LM dofs counter: " << lm_inactive_counter << " does not coincide with the expected: " << n_lm_inactive_dofs << std::endl; KRATOS_DEBUG_ERROR_IF(other_counter != n_other_dofs) << "The number of other dofs counter: " << other_counter << " does not coincide with the expected: " << n_other_dofs << std::endl; } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "Mixed displacement LM linear solver"; } /// Print information about this object. void PrintInfo (std::ostream& rOStream) const override { rOStream << "Mixed displacement LM linear solver"; } /// Print object's data. void PrintData (std::ostream& rOStream) const override { } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This function generates the subblocks of matrix A * @details as A = ( KNN KNM KNSI KNSA 0 0 ) u * ( KMN KMM KMSI KMSA -MI^T -MA^T ) u_master * ( KSIN KSIM KSISI KSISA DII^T DIA^T ) u_slave_inactive * ( KSAN KSAM KSASI KSASA DAI^T DAA^T ) u_slave_active * ( 0 0 0 0 ALMI 0 ) LMInactive * ( 0 KLMAM KLMASI KLMASA 0 KLMALMA ) LMActive * We will denote this as A = ( KNN KNM KNSI KNSA 0 0 ) u * ( KMN KMM KMSI KMSA KMLMI KMLMA ) u_master * ( KSIN KSIM KSISI KSISA KSILMI KSILMA ) u_slave_inactive * ( KSAN KSAM KSASI KSASA KSALMI KSALMA ) u_slave_active * ( 0 0 0 0 KLMILMI 0 ) LMInactive * ( 0 KLMAM KLMASI KLMASA 0 KLMALMA ) LMActive * Subblocks are allocated or not depending on the value of "NeedAllocation" * @param rA System matrix * @param rX Solution vector. It's also the initial guess for iterative linear solvers. * @param rB Right hand side vector.
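     * @note In compact form, the assembly below performs a static condensation of
     * the LM blocks, exploiting that KSALMA and KLMILMI are (essentially) diagonal
     * for dual LM:
     *     mKLMAModified = KSALMA^(-1),  mKLMIModified = KLMILMI^(-1)   (inverted lumped diagonals)
     *     mPOperator    = KMLMA * KSALMA^(-1)
     *     mCOperator    = KLMALMA * KSALMA^(-1)
     * and the modified displacement matrix then subtracts mPOperator (master rows)
     * and mCOperator (assembled active-slave rows) times the active-slave row
     * blocks (KSAN KSAM KSASI KSASA) from the directly assembled displacement blocks.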
*/ void FillBlockMatrices ( const bool NeedAllocation, SparseMatrixType& rA, VectorType& rX, VectorType& rB ) { KRATOS_TRY // Auxiliar sizes const SizeType other_dof_size = mOtherIndices.size(); const SizeType master_size = mMasterIndices.size(); const SizeType slave_inactive_size = mSlaveInactiveIndices.size(); const SizeType slave_active_size = mSlaveActiveIndices.size(); const SizeType lm_active_size = mLMActiveIndices.size(); const SizeType lm_inactive_size = mLMInactiveIndices.size(); if (NeedAllocation) AllocateBlocks(); // Get access to A data const IndexType* index1 = rA.index1_data().begin(); const IndexType* index2 = rA.index2_data().begin(); const double* values = rA.value_data().begin(); // Allocate the auxiliar blocks by push_back SparseMatrixType KMLMA(master_size, lm_active_size); /// The master-active LM block (this is the big block of M) SparseMatrixType KLMALMA(lm_active_size, lm_active_size); /// The active LM-active LM block SparseMatrixType KSALMA(slave_active_size, lm_active_size); /// The active slave-active LM block (this is the big block of D, diagonal) SparseMatrixType KLMILMI(lm_inactive_size, lm_inactive_size); /// The inactive LM- inactive LM block (diagonal) IndexType* KMLMA_ptr = new IndexType[master_size + 1]; IndexType* mKSAN_ptr = new IndexType[slave_active_size + 1]; IndexType* mKSAM_ptr = new IndexType[slave_active_size + 1]; IndexType* mKSASI_ptr = new IndexType[slave_active_size + 1]; IndexType* mKSASA_ptr = new IndexType[slave_active_size + 1]; IndexType* KSALMA_ptr = new IndexType[slave_active_size + 1]; IndexType* KLMILMI_ptr = new IndexType[lm_inactive_size + 1]; IndexType* KLMALMA_ptr = new IndexType[lm_active_size + 1]; #pragma omp parallel for for (int i = 0; i < static_cast<int>(master_size + 1); i++) KMLMA_ptr[i] = 0; #pragma omp parallel for for (int i = 0; i < static_cast<int>(slave_active_size + 1); i++) { mKSAN_ptr[i] = 0; mKSAM_ptr[i] = 0; mKSASI_ptr[i] = 0; mKSASA_ptr[i] = 0; KSALMA_ptr[i] = 0; } #pragma omp parallel for for (int i = 0; i < static_cast<int>(lm_inactive_size + 1); i++) KLMILMI_ptr[i] = 0; #pragma omp parallel for for (int i = 0; i < static_cast<int>(lm_active_size + 1); i++) KLMALMA_ptr[i] = 0; #pragma omp parallel { // We iterate over original matrix #pragma omp for for (int i=0; i<static_cast<int>(rA.size1()); i++) { const IndexType row_begin = index1[i]; const IndexType row_end = index1[i+1]; const IndexType local_row_id = mGlobalToLocalIndexing[i]; IndexType KMLMA_cols = 0; IndexType mKSAN_cols = 0; IndexType mKSAM_cols = 0; IndexType mKSASI_cols = 0; IndexType mKSASA_cols = 0; IndexType KSALMA_cols = 0; IndexType KLMILMI_cols = 0; IndexType KLMALMA_cols = 0; if ( mWhichBlockType[i] == BlockType::MASTER) { // KMLMA for (IndexType j=row_begin; j<row_end; j++) { const IndexType col_index = index2[j]; if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KMLMA block ++KMLMA_cols; } } KRATOS_DEBUG_ERROR_IF(local_row_id > master_size) << "MASTER:: Local row ID: " << local_row_id <<" is greater than the number of rows " << master_size << std::endl; KMLMA_ptr[local_row_id + 1] = KMLMA_cols; } else if ( mWhichBlockType[i] == BlockType::SLAVE_ACTIVE) { //either KSAN or KSAM or KSASA or KSASA or KSALM for (IndexType j=row_begin; j<row_end; j++) { const IndexType col_index = index2[j]; if (mWhichBlockType[col_index] == BlockType::OTHER) { // KSAN block ++mKSAN_cols; } else if (mWhichBlockType[col_index] == BlockType::MASTER) { // KSAM block ++mKSAM_cols; } else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) 
{ // KSASI block ++mKSASI_cols; } else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) { // KSASA block ++mKSASA_cols; } else if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KSALMA block (diagonal) ++KSALMA_cols; } } KRATOS_DEBUG_ERROR_IF(local_row_id > slave_active_size) << "SLAVE_ACTIVE:: Local row ID: " << local_row_id <<" is greater than the number of rows " << slave_active_size << std::endl; mKSAN_ptr[local_row_id + 1] = mKSAN_cols; mKSAM_ptr[local_row_id + 1] = mKSAM_cols; mKSASI_ptr[local_row_id + 1] = mKSASI_cols; mKSASA_ptr[local_row_id + 1] = mKSASA_cols; KSALMA_ptr[local_row_id + 1] = KSALMA_cols; } else if ( mWhichBlockType[i] == BlockType::LM_INACTIVE) { // KLMILMI for (IndexType j=row_begin; j<row_end; j++) { const IndexType col_index = index2[j]; if (mWhichBlockType[col_index] == BlockType::LM_INACTIVE) { // KLMILMI block (diagonal) ++KLMILMI_cols; } } KRATOS_DEBUG_ERROR_IF(local_row_id > lm_inactive_size) << "LM_INACTIVE:: Local row ID: " << local_row_id <<" is greater than the number of rows " << lm_inactive_size << std::endl; KLMILMI_ptr[local_row_id + 1] = KLMILMI_cols; } else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { // KLMALMA for (IndexType j=row_begin; j<row_end; j++) { const IndexType col_index = index2[j]; if (mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KLMALMA block ++KLMALMA_cols; } } KRATOS_DEBUG_ERROR_IF(local_row_id > lm_active_size) << "LM_ACTIVE:: Local row ID: " << local_row_id <<" is greater than the number of rows " << lm_active_size << std::endl; KLMALMA_ptr[local_row_id + 1] = KLMALMA_cols; } } } // We initialize the blocks sparse matrix std::partial_sum(KMLMA_ptr, KMLMA_ptr + master_size + 1, KMLMA_ptr); const std::size_t KMLMA_nonzero_values = KMLMA_ptr[master_size]; IndexType* aux_index2_KMLMA= new IndexType[KMLMA_nonzero_values]; double* aux_val_KMLMA= new double[KMLMA_nonzero_values]; std::partial_sum(mKSAN_ptr, mKSAN_ptr + slave_active_size + 1, mKSAN_ptr); const std::size_t mKSAN_nonzero_values = mKSAN_ptr[slave_active_size]; IndexType* aux_index2_mKSAN= new IndexType[mKSAN_nonzero_values]; double* aux_val_mKSAN= new double[mKSAN_nonzero_values]; std::partial_sum(mKSAM_ptr, mKSAM_ptr + slave_active_size + 1, mKSAM_ptr); const std::size_t mKSAM_nonzero_values = mKSAM_ptr[slave_active_size]; IndexType* aux_index2_mKSAM= new IndexType[mKSAM_nonzero_values]; double* aux_val_mKSAM= new double[mKSAM_nonzero_values]; std::partial_sum(mKSASI_ptr, mKSASI_ptr + slave_active_size + 1, mKSASI_ptr); const std::size_t mKSASI_nonzero_values = mKSASI_ptr[slave_active_size]; IndexType* aux_index2_mKSASI= new IndexType[mKSASI_nonzero_values]; double* aux_val_mKSASI= new double[mKSASI_nonzero_values]; std::partial_sum(mKSASA_ptr, mKSASA_ptr + slave_active_size + 1, mKSASA_ptr); const std::size_t mKSASA_nonzero_values = mKSASA_ptr[slave_active_size]; IndexType* aux_index2_mKSASA= new IndexType[mKSASA_nonzero_values]; double* aux_val_mKSASA = new double[mKSASA_nonzero_values]; std::partial_sum(KSALMA_ptr, KSALMA_ptr + slave_active_size + 1, KSALMA_ptr); const std::size_t KSALMA_nonzero_values = KSALMA_ptr[slave_active_size]; IndexType* aux_index2_KSALMA= new IndexType[KSALMA_nonzero_values]; double* aux_val_KSALMA = new double[KSALMA_nonzero_values]; std::partial_sum(KLMILMI_ptr, KLMILMI_ptr + lm_inactive_size + 1, KLMILMI_ptr); const std::size_t KLMILMI_nonzero_values = KLMILMI_ptr[lm_inactive_size]; IndexType* aux_index2_KLMILMI= new IndexType[KLMILMI_nonzero_values]; double* aux_val_KLMILMI = new 
double[KLMILMI_nonzero_values]; std::partial_sum(KLMALMA_ptr, KLMALMA_ptr + lm_active_size + 1, KLMALMA_ptr); const std::size_t KLMALMA_nonzero_values = KLMALMA_ptr[lm_active_size]; IndexType* aux_index2_KLMALMA = new IndexType[KLMALMA_nonzero_values]; double* aux_val_KLMALMA = new double[KLMALMA_nonzero_values]; #pragma omp parallel { // We iterate over original matrix #pragma omp for for (int i=0; i<static_cast<int>(rA.size1()); i++) { const IndexType row_begin = index1[i]; const IndexType row_end = index1[i+1]; const IndexType local_row_id = mGlobalToLocalIndexing[i]; if ( mWhichBlockType[i] == BlockType::MASTER) { // KMLMA IndexType KMLMA_row_beg = KMLMA_ptr[local_row_id]; IndexType KMLMA_row_end = KMLMA_row_beg; for (IndexType j=row_begin; j<row_end; j++) { const IndexType col_index = index2[j]; if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KMLMA block const double value = values[j]; const IndexType local_col_id = mGlobalToLocalIndexing[col_index]; aux_index2_KMLMA[KMLMA_row_end] = local_col_id; aux_val_KMLMA[KMLMA_row_end] = value; ++KMLMA_row_end; } } } else if ( mWhichBlockType[i] == BlockType::SLAVE_ACTIVE) { //either KSAN or KSAM or KSASA or KSASA or KSALM IndexType mKSAN_row_beg = mKSAN_ptr[local_row_id]; IndexType mKSAN_row_end = mKSAN_row_beg; IndexType mKSAM_row_beg = mKSAM_ptr[local_row_id]; IndexType mKSAM_row_end = mKSAM_row_beg; IndexType mKSASI_row_beg = mKSASI_ptr[local_row_id]; IndexType mKSASI_row_end = mKSASI_row_beg; IndexType mKSASA_row_beg = mKSASA_ptr[local_row_id]; IndexType mKSASA_row_end = mKSASA_row_beg; IndexType KSALMA_row_beg = KSALMA_ptr[local_row_id]; IndexType KSALMA_row_end = KSALMA_row_beg; for (IndexType j=row_begin; j<row_end; j++) { const IndexType col_index = index2[j]; const double value = values[j]; const IndexType local_col_id = mGlobalToLocalIndexing[col_index]; if (mWhichBlockType[col_index] == BlockType::OTHER) { // KSAN block aux_index2_mKSAN[mKSAN_row_end] = local_col_id; aux_val_mKSAN[mKSAN_row_end] = value; ++mKSAN_row_end; } else if (mWhichBlockType[col_index] == BlockType::MASTER) { // KSAM block aux_index2_mKSAM[mKSAM_row_end] = local_col_id; aux_val_mKSAM[mKSAM_row_end] = value; ++mKSAM_row_end; } else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) { // KSASI block aux_index2_mKSASI[mKSASI_row_end] = local_col_id; aux_val_mKSASI[mKSASI_row_end] = value; ++mKSASI_row_end; } else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) { // KSASA block aux_index2_mKSASA[mKSASA_row_end] = local_col_id; aux_val_mKSASA[mKSASA_row_end] = value; ++mKSASA_row_end; } else if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KSALMA block (diagonal) aux_index2_KSALMA[KSALMA_row_end] = local_col_id; aux_val_KSALMA[KSALMA_row_end] = value; ++KSALMA_row_end; } } } else if ( mWhichBlockType[i] == BlockType::LM_INACTIVE) { // KLMILMI IndexType KLMILMI_row_beg = KLMILMI_ptr[local_row_id]; IndexType KLMILMI_row_end = KLMILMI_row_beg; for (IndexType j=row_begin; j<row_end; j++) { const IndexType col_index = index2[j]; if (mWhichBlockType[col_index] == BlockType::LM_INACTIVE) { // KLMILMI block (diagonal) const double value = values[j]; const IndexType local_col_id = mGlobalToLocalIndexing[col_index]; aux_index2_KLMILMI[KLMILMI_row_end] = local_col_id; aux_val_KLMILMI[KLMILMI_row_end] = value; ++KLMILMI_row_end; } } } else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { // KLMALMA IndexType KLMALMA_row_beg = KLMALMA_ptr[local_row_id]; IndexType KLMALMA_row_end = KLMALMA_row_beg; for (IndexType j=row_begin; 
j<row_end; j++) { const IndexType col_index = index2[j]; if (mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KLMALMA block const double value = values[j]; const IndexType local_col_id = mGlobalToLocalIndexing[col_index]; aux_index2_KLMALMA[KLMALMA_row_end] = local_col_id; aux_val_KLMALMA[KLMALMA_row_end] = value; ++KLMALMA_row_end; } } } } } CreateMatrix(KMLMA, master_size, lm_active_size, KMLMA_ptr, aux_index2_KMLMA, aux_val_KMLMA); CreateMatrix(mKSAN, slave_active_size, other_dof_size, mKSAN_ptr, aux_index2_mKSAN, aux_val_mKSAN); CreateMatrix(mKSAM, slave_active_size, master_size, mKSAM_ptr, aux_index2_mKSAM, aux_val_mKSAM); CreateMatrix(mKSASI, slave_active_size, slave_inactive_size, mKSASI_ptr, aux_index2_mKSASI, aux_val_mKSASI); CreateMatrix(mKSASA, slave_active_size, slave_active_size, mKSASA_ptr, aux_index2_mKSASA, aux_val_mKSASA); CreateMatrix(KSALMA, slave_active_size, lm_active_size, KSALMA_ptr, aux_index2_KSALMA, aux_val_KSALMA); CreateMatrix(KLMILMI, lm_inactive_size, lm_inactive_size, KLMILMI_ptr, aux_index2_KLMILMI, aux_val_KLMILMI); CreateMatrix(KLMALMA, lm_active_size, lm_active_size, KLMALMA_ptr, aux_index2_KLMALMA, aux_val_KLMALMA); // We compute directly the inverse of the KSALMA matrix // KSALMA it is supposed to be a diagonal matrix (in fact it is the key point of this formulation) // (NOTE: technically it is not a stiffness matrix, we give that name) if (lm_active_size > 0) { ComputeDiagonalByLumping(KSALMA, mKLMAModified, ZeroTolerance); } // We compute directly the inverse of the KLMILMI matrix // KLMILMI it is supposed to be a diagonal matrix (in fact it is the key point of this formulation) // (NOTE: technically it is not a stiffness matrix, we give that name) if (lm_inactive_size > 0) { ComputeDiagonalByLumping(KLMILMI, mKLMIModified, ZeroTolerance); } // Compute the P and C operators if (slave_active_size > 0) { SparseMatrixMultiplicationUtility::MatrixMultiplication(KMLMA, mKLMAModified, mPOperator); SparseMatrixMultiplicationUtility::MatrixMultiplication(KLMALMA, mKLMAModified, mCOperator); } // We proceed with the auxiliar products for the master blocks SparseMatrixType master_auxKSAN(master_size, other_dof_size); SparseMatrixType master_auxKSAM(master_size, master_size); SparseMatrixType master_auxKSASI(master_size, slave_inactive_size); SparseMatrixType master_auxKSASA(master_size, slave_active_size); if (slave_active_size > 0) { SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSAN, master_auxKSAN); SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSAM, master_auxKSAM); if (slave_inactive_size > 0) SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSASI, master_auxKSASI); SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSASA, master_auxKSASA); } // We proceed with the auxiliar products for the active slave blocks SparseMatrixType aslave_auxKSAN(slave_active_size, other_dof_size); SparseMatrixType aslave_auxKSAM(slave_active_size, master_size); SparseMatrixType aslave_auxKSASI(slave_active_size, slave_inactive_size); SparseMatrixType aslave_auxKSASA(slave_active_size, slave_active_size); if (slave_active_size > 0) { SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSAN, aslave_auxKSAN); SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSAM, aslave_auxKSAM); if (slave_inactive_size > 0) SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSASI, aslave_auxKSASI); 
SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSASA, aslave_auxKSASA); } // Auxiliar indexes const SizeType other_dof_initial_index = 0; const SizeType master_dof_initial_index = other_dof_size; const SizeType slave_inactive_dof_initial_index = master_dof_initial_index + master_size; const SizeType assembling_slave_dof_initial_index = slave_inactive_dof_initial_index + slave_inactive_size; // The auxiliar index structure const SizeType nrows = mKDispModified.size1(); const SizeType ncols = mKDispModified.size2(); IndexType* K_disp_modified_ptr_aux1 = new IndexType[nrows + 1]; K_disp_modified_ptr_aux1[0] = 0; #pragma omp parallel { #pragma omp for for (int i=0; i<static_cast<int>(rA.size1()); i++) { if ( mWhichBlockType[i] == BlockType::OTHER) { //either KNN or KNM or KNSI or KNSA ComputeNonZeroColumnsDispDoFs( index1, index2, values, i, other_dof_initial_index, K_disp_modified_ptr_aux1); } else if ( mWhichBlockType[i] == BlockType::MASTER) { //either KMN or KMM or KMSI or KMLM ComputeNonZeroColumnsDispDoFs( index1, index2, values, i, master_dof_initial_index, K_disp_modified_ptr_aux1); } else if ( mWhichBlockType[i] == BlockType::SLAVE_INACTIVE) { //either KSIN or KSIM or KSISI or KSISA ComputeNonZeroColumnsDispDoFs( index1, index2, values, i, slave_inactive_dof_initial_index, K_disp_modified_ptr_aux1); } else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { //either KLMAM or KLMASI or KLMASA ComputeNonZeroColumnsPartialDispDoFs( index1, index2, values, i, assembling_slave_dof_initial_index, K_disp_modified_ptr_aux1); } } } // We initialize the final sparse matrix std::partial_sum(K_disp_modified_ptr_aux1, K_disp_modified_ptr_aux1 + nrows + 1, K_disp_modified_ptr_aux1); const SizeType nonzero_values_aux1 = K_disp_modified_ptr_aux1[nrows]; IndexType* aux_index2_K_disp_modified_aux1 = new IndexType[nonzero_values_aux1]; double* aux_val_K_disp_modified_aux1 = new double[nonzero_values_aux1]; #pragma omp parallel { #pragma omp for for (int i=0; i<static_cast<int>(rA.size1()); i++) { if ( mWhichBlockType[i] == BlockType::OTHER) { //either KNN or KNM or KNSI or KNSA ComputeAuxiliarValuesDispDoFs( index1, index2, values, i, other_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1); } else if ( mWhichBlockType[i] == BlockType::MASTER) { //either KMN or KMM or KMSI or KMLM ComputeAuxiliarValuesDispDoFs( index1, index2, values, i, master_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1); } else if ( mWhichBlockType[i] == BlockType::SLAVE_INACTIVE) { //either KSIN or KSIM or KSISI or KSISA ComputeAuxiliarValuesDispDoFs( index1, index2, values, i, slave_inactive_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1); } else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { //either KLMAM or KLMASI or KLMASA ComputeAuxiliarValuesPartialDispDoFs( index1, index2, values, i, assembling_slave_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1); } } } // Create the first auxiliar matrix CreateMatrix(mKDispModified, nrows, ncols, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1); // Now we create the second matrix block to sum IndexType* K_disp_modified_ptr_aux2 = new IndexType[nrows + 1]; #pragma omp parallel for for (int i = 0; i < static_cast<int>(nrows + 1); i++) K_disp_modified_ptr_aux2[i] = 0; #pragma omp parallel { 
            #pragma omp for
            for (int i=0; i<static_cast<int>(master_size); i++) {
                IndexType K_disp_modified_cols_aux2 = 0;
                // Get access to master_auxKSAN data
                if (master_auxKSAN.nnz() > 0 && other_dof_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(master_auxKSAN, i, K_disp_modified_cols_aux2);
                }
                // Get access to master_auxKSAM data
                if (master_auxKSAM.nnz() > 0) {
                    SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(master_auxKSAM, i, K_disp_modified_cols_aux2);
                }
                // Get access to master_auxKSASI data
                if (master_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(master_auxKSASI, i, K_disp_modified_cols_aux2);
                }
                // Get access to master_auxKSASA data
                if (master_auxKSASA.nnz() > 0 && slave_active_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(master_auxKSASA, i, K_disp_modified_cols_aux2);
                }
                K_disp_modified_ptr_aux2[master_dof_initial_index + i + 1] = K_disp_modified_cols_aux2;
            }

            #pragma omp for
            for (int i=0; i<static_cast<int>(slave_active_size); i++) {
                IndexType K_disp_modified_cols_aux2 = 0;
                // Get access to aslave_auxKSAN data
                if (aslave_auxKSAN.nnz() > 0 && other_dof_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(aslave_auxKSAN, i, K_disp_modified_cols_aux2);
                }
                // Get access to aslave_auxKSAM data
                if (aslave_auxKSAM.nnz() > 0 && master_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(aslave_auxKSAM, i, K_disp_modified_cols_aux2);
                }
                // Get access to aslave_auxKSASI data
                if (aslave_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(aslave_auxKSASI, i, K_disp_modified_cols_aux2);
                }
                // Get access to aslave_auxKSASA data
                if (aslave_auxKSASA.nnz() > 0) {
                    SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(aslave_auxKSASA, i, K_disp_modified_cols_aux2);
                }
                K_disp_modified_ptr_aux2[assembling_slave_dof_initial_index + i + 1] = K_disp_modified_cols_aux2;
            }
        }

        // We initialize the final sparse matrix
        std::partial_sum(K_disp_modified_ptr_aux2, K_disp_modified_ptr_aux2 + nrows + 1, K_disp_modified_ptr_aux2);
        const SizeType nonzero_values_aux2 = K_disp_modified_ptr_aux2[nrows];
        IndexType* aux_index2_K_disp_modified_aux2 = new IndexType[nonzero_values_aux2];
        double* aux_val_K_disp_modified_aux2 = new double[nonzero_values_aux2];

        #pragma omp parallel
        {
            #pragma omp for
            for (int i=0; i<static_cast<int>(master_size); i++) {
                const IndexType row_beg = K_disp_modified_ptr_aux2[master_dof_initial_index + i];
                IndexType row_end = row_beg;
                // Get access to master_auxKSAN data
                if (master_auxKSAN.nnz() > 0 && other_dof_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(master_auxKSAN, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, other_dof_initial_index);
                }
                // Get access to master_auxKSAM data
                if (master_auxKSAM.nnz() > 0) {
                    SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(master_auxKSAM, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, master_dof_initial_index);
                }
                // Get access to master_auxKSASI data
                if (master_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(master_auxKSASI, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, slave_inactive_dof_initial_index);
                }
                // Get access to master_auxKSASA data
                if (master_auxKSASA.nnz() > 0 && slave_active_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(master_auxKSASA,
                        aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, assembling_slave_dof_initial_index);
                }
            }

            #pragma omp for
            for (int i=0; i<static_cast<int>(slave_active_size); i++) {
                const IndexType row_beg = K_disp_modified_ptr_aux2[assembling_slave_dof_initial_index + i];
                IndexType row_end = row_beg;
                // Get access to aslave_auxKSAN data
                if (aslave_auxKSAN.nnz() > 0 && other_dof_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(aslave_auxKSAN, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, other_dof_initial_index);
                }
                // Get access to aslave_auxKSAM data
                if (aslave_auxKSAM.nnz() > 0 && master_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(aslave_auxKSAM, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, master_dof_initial_index);
                }
                // Get access to aslave_auxKSASI data
                if (aslave_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(aslave_auxKSASI, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, slave_inactive_dof_initial_index);
                }
                // Get access to aslave_auxKSASA data
                if (aslave_auxKSASA.nnz() > 0) {
                    SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(aslave_auxKSASA, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, assembling_slave_dof_initial_index);
                }
            }
        }

        // Create the second auxiliary matrix
        SparseMatrixType K_disp_modified_aux2(nrows, ncols);
        CreateMatrix(K_disp_modified_aux2, nrows, ncols, K_disp_modified_ptr_aux2, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2);

        // We sum the auxiliary matrices
        SparseMatrixMultiplicationUtility::MatrixAdd<SparseMatrixType, SparseMatrixType>(mKDispModified, K_disp_modified_aux2, -1.0);

        // Finally we ensure that the matrix is structurally symmetric
        EnsureStructuralSymmetryMatrix(mKDispModified);

    #ifdef KRATOS_DEBUG
        CheckMatrix(mKDispModified);
    #endif

        // // DEBUG
        // LOG_MATRIX_PRETTY(rA)
        // LOG_MATRIX_PRETTY(mKDispModified)

        KRATOS_CATCH("")
    }

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    LinearSolverPointerType mpSolverDispBlock; /// The pointer to the displacement linear solver

    Flags mOptions; /// This stores the flags

    IndexVectorType mMasterIndices;         /// The vector storing the indices of the master nodes in contact
    IndexVectorType mSlaveInactiveIndices;  /// The vector storing the indices of the slave nodes in contact (Inactive)
    IndexVectorType mSlaveActiveIndices;    /// The vector storing the indices of the slave nodes in contact (Active)
    IndexVectorType mLMInactiveIndices;     /// The vector storing the indices of the LM (Inactive)
    IndexVectorType mLMActiveIndices;       /// The vector storing the indices of the LM (Active)
    IndexVectorType mOtherIndices;          /// The vector containing the indices for the other DoFs
    IndexVectorType mGlobalToLocalIndexing; /// This vector stores the correspondence between the local and the global indexing
    BlockTypeVectorType mWhichBlockType;    /// This vector stores the block type each DoF belongs to

    SparseMatrixType mKDispModified; /// The modified displacement block
    SparseMatrixType mKLMAModified;  /// The modified active LM block (inverted diagonal)
    SparseMatrixType mKLMIModified;  /// The modified inactive LM block (inverted diagonal)

    SparseMatrixType mKSAN; /// The slave active-displacement block
    SparseMatrixType mKSAM; /// The active slave-master block
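    // Informal note (not part of the original documentation): the DoF set is
    // partitioned into N (other), M (master), SI (inactive slave) and SA
    // (active slave) displacement blocks plus the LM blocks, and the condensed
    // displacement system above is assembled in the local ordering
    // [N | M | SI | SA], which is what the *_initial_index variables encode.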
    SparseMatrixType mKSASI; /// The active slave-inactive slave block
    SparseMatrixType mKSASA; /// The active slave-active slave block

    SparseMatrixType mPOperator; /// The operator used for the master blocks
    SparseMatrixType mCOperator; /// The operator used for the active slave block

    VectorType mResidualLMActive;   /// The residual of the active Lagrange multipliers
    VectorType mResidualLMInactive; /// The residual of the inactive Lagrange multipliers
    VectorType mResidualDisp;       /// The residual of the rest of the displacements

    VectorType mLMActive;   /// The solution of the active Lagrange multipliers
    VectorType mLMInactive; /// The solution of the inactive Lagrange multipliers
    VectorType mDisp;       /// The solution of the rest of the displacements

    IndexType mEchoLevel = 0;   /// The echo level of the solver
    IndexType mFileCreated = 0; /// The index used to identify the file created

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    /**
     * @brief This method is meant to avoid code duplication when computing the nonzero terms of the Aux1 matrix
     * @param Index1 The indexes of the nonzero rows
     * @param Index2 The indexes of the nonzero columns
     * @param Values The array containing the values of the matrix
     * @param CurrentRow The current row computed
     * @param InitialIndex The index corresponding to the current row in the global contribution
     * @param Ptr The number of nonzero terms of each row
     */
    inline void ComputeNonZeroColumnsDispDoFs(
        const IndexType* Index1,
        const IndexType* Index2,
        const double* Values,
        const int CurrentRow,
        const IndexType InitialIndex,
        IndexType* Ptr
        )
    {
        const IndexType row_begin = Index1[CurrentRow];
        const IndexType row_end = Index1[CurrentRow + 1];

        IndexType cols = 0;

        const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex;
        for (IndexType j=row_begin; j<row_end; j++) {
            const IndexType col_index = Index2[j];
            if (mWhichBlockType[col_index] == BlockType::OTHER) {
                ++cols;
            } else if (mWhichBlockType[col_index] == BlockType::MASTER) {
                ++cols;
            } else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) {
                ++cols;
            } else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) {
                ++cols;
            }
        }
        Ptr[local_row_id + 1] = cols;
    }

    /**
     * @brief This method is meant to avoid code duplication when computing the nonzero terms of the Aux1 matrix
     * @details The same as the previous one, but without taking into account the contribution of the other DoFs
     * @param Index1 The indexes of the nonzero rows
     * @param Index2 The indexes of the nonzero columns
     * @param Values The array containing the values of the matrix
     * @param CurrentRow The current row computed
     * @param InitialIndex The index corresponding to the current row in the global contribution
     * @param Ptr The number of nonzero terms of each row
     */
    inline void ComputeNonZeroColumnsPartialDispDoFs(
        const IndexType* Index1,
        const IndexType* Index2,
        const double* Values,
        const int CurrentRow,
        const IndexType InitialIndex,
        IndexType* Ptr
        )
    {
        const IndexType row_begin = Index1[CurrentRow];
        const IndexType row_end = Index1[CurrentRow + 1];

        IndexType cols = 0;

        const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex;
        for (IndexType j=row_begin; j<row_end; j++) {
            const IndexType col_index = Index2[j];
            if (mWhichBlockType[col_index] == BlockType::MASTER) {
                ++cols;
            } else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) {
                ++cols;
            } else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) {
                ++cols;
            }
        }
        Ptr[local_row_id + 1] = cols;
    }

    /**
     * @brief This method is meant to avoid code duplication when evaluating
     * the terms of the Aux1 matrix
     * @param Index1 The indexes of the nonzero rows
     * @param Index2 The indexes of the nonzero columns
     * @param Values The array containing the values of the matrix
     * @param CurrentRow The current row computed
     * @param InitialIndex The index corresponding to the current row in the global contribution
     * @param Ptr The number of nonzero terms of each row
     * @param AuxIndex2 The indexes of the nonzero columns
     * @param AuxVals The values of the final matrix
     */
    inline void ComputeAuxiliarValuesDispDoFs(
        const IndexType* Index1,
        const IndexType* Index2,
        const double* Values,
        const int CurrentRow,
        const IndexType InitialIndex,
        IndexType* Ptr,
        IndexType* AuxIndex2,
        double* AuxVals
        )
    {
        // Auxiliary sizes
        const SizeType other_dof_size = mOtherIndices.size();
        const SizeType master_size = mMasterIndices.size();
        const SizeType slave_inactive_size = mSlaveInactiveIndices.size();

        // Auxiliary indexes
        const SizeType other_dof_initial_index = 0;
        const SizeType master_dof_initial_index = other_dof_size;
        const SizeType slave_inactive_dof_initial_index = master_dof_initial_index + master_size;
        const SizeType assembling_slave_dof_initial_index = slave_inactive_dof_initial_index + slave_inactive_size;

        // Some indexes
        const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex;
        const IndexType row_begin_A = Index1[CurrentRow];
        const IndexType row_end_A = Index1[CurrentRow + 1];
        const IndexType row_beg = Ptr[local_row_id];

        IndexType row_end = row_beg;
        for (IndexType j=row_begin_A; j<row_end_A; j++) {
            const IndexType col_index = Index2[j];
            const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
            const double value = Values[j];
            if (mWhichBlockType[col_index] == BlockType::OTHER) {
                AuxIndex2[row_end] = local_col_id + other_dof_initial_index;
                AuxVals[row_end] = value;
                ++row_end;
            } else if (mWhichBlockType[col_index] == BlockType::MASTER) {
                AuxIndex2[row_end] = local_col_id + master_dof_initial_index;
                AuxVals[row_end] = value;
                ++row_end;
            } else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) {
                AuxIndex2[row_end] = local_col_id + slave_inactive_dof_initial_index;
                AuxVals[row_end] = value;
                ++row_end;
            } else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) {
                AuxIndex2[row_end] = local_col_id + assembling_slave_dof_initial_index;
                AuxVals[row_end] = value;
                ++row_end;
            }
        }
    }

    /**
     * @brief This method is meant to avoid code duplication when evaluating the terms of the Aux1 matrix
     * @details The same as the previous one, but without taking into account the contribution of the other DoFs
     * @param Index1 The indexes of the nonzero rows
     * @param Index2 The indexes of the nonzero columns
     * @param Values The array containing the values of the matrix
     * @param CurrentRow The current row computed
     * @param InitialIndex The index corresponding to the current row in the global contribution
     * @param Ptr The number of nonzero terms of each row
     * @param AuxIndex2 The indexes of the nonzero columns
     * @param AuxVals The values of the final matrix
     */
    inline void ComputeAuxiliarValuesPartialDispDoFs(
        const IndexType* Index1,
        const IndexType* Index2,
        const double* Values,
        const int CurrentRow,
        const IndexType InitialIndex,
        IndexType* Ptr,
        IndexType* AuxIndex2,
        double* AuxVals
        )
    {
        // Auxiliary sizes
        const SizeType other_dof_size = mOtherIndices.size();
        const SizeType master_size = mMasterIndices.size();
        const SizeType slave_inactive_size = mSlaveInactiveIndices.size();

        // Auxiliary indexes
        const SizeType master_dof_initial_index = other_dof_size;
        const SizeType slave_inactive_dof_initial_index =
            master_dof_initial_index + master_size;
        const SizeType assembling_slave_dof_initial_index = slave_inactive_dof_initial_index + slave_inactive_size;

        // Some indexes
        const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex;
        const IndexType row_begin_A = Index1[CurrentRow];
        const IndexType row_end_A = Index1[CurrentRow + 1];
        const IndexType row_beg = Ptr[local_row_id];

        IndexType row_end = row_beg;
        for (IndexType j=row_begin_A; j<row_end_A; j++) {
            const IndexType col_index = Index2[j];
            const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
            const double value = Values[j];
            if (mWhichBlockType[col_index] == BlockType::MASTER) {
                AuxIndex2[row_end] = local_col_id + master_dof_initial_index;
                AuxVals[row_end] = value;
                ++row_end;
            } else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) {
                AuxIndex2[row_end] = local_col_id + slave_inactive_dof_initial_index;
                AuxVals[row_end] = value;
                ++row_end;
            } else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) {
                AuxIndex2[row_end] = local_col_id + assembling_slave_dof_initial_index;
                AuxVals[row_end] = value;
                ++row_end;
            }
        }
    }

    /**
     * @brief It allocates all the blocks and operators
     */
    inline void AllocateBlocks()
    {
        // We clear the matrices
        mKDispModified.clear();      /// The modified displacement block
        mKLMAModified.clear();       /// The modified active LM block (diagonal)
        mKLMIModified.clear();       /// The modified inactive LM block (diagonal)
        mKSAN.clear();               /// The slave active-displacement block
        mKSAM.clear();               /// The active slave-master block
        mKSASI.clear();              /// The active slave-inactive slave block
        mKSASA.clear();              /// The active slave-active slave block
        mPOperator.clear();          /// The operator used for the master blocks
        mCOperator.clear();          /// The operator used for the active slave block
        mResidualLMActive.clear();   /// The residual corresponding to the active LM
        mResidualLMInactive.clear(); /// The residual corresponding to the inactive LM
        mResidualDisp.clear();       /// The residual of the displacements
        mLMActive.clear();           /// The solution of the active LM
        mLMInactive.clear();         /// The solution of the inactive LM
        mDisp.clear();               /// The solution of the displacement

        // Auxiliary sizes
        const SizeType other_dof_size = mOtherIndices.size();
        const SizeType master_size = mMasterIndices.size();
        const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
        const SizeType slave_active_size = mSlaveActiveIndices.size();
        const SizeType lm_active_size = mLMActiveIndices.size();
        const SizeType lm_inactive_size = mLMInactiveIndices.size();
        const SizeType total_size = other_dof_size + master_size + slave_inactive_size + slave_active_size;

        // We do the allocation
        mKDispModified.resize(total_size, total_size, false);            /// The modified displacement block
        mKLMAModified.resize(lm_active_size, lm_active_size, false);     /// The modified active LM block (diagonal)
        mKLMAModified.reserve(lm_active_size);
        mKLMIModified.resize(lm_inactive_size, lm_inactive_size, false); /// The modified inactive LM block (diagonal)
        mKLMIModified.reserve(lm_inactive_size);
        mKSAN.resize(slave_active_size, other_dof_size, false);          /// The slave active-displacement block
        mKSAM.resize(slave_active_size, master_size, false);             /// The active slave-master block
        mKSASI.resize(slave_active_size, slave_inactive_size, false);    /// The active slave-inactive slave block
        mKSASA.resize(slave_active_size, slave_active_size, false);      /// The active slave-active slave block
        mPOperator.resize(master_size, slave_active_size, false);        /// The operator used for the master blocks
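        // Informal observation (my reading, not from the original docs): mPOperator
        // and mCOperator are sized with slave_active_size columns although they are
        // built from products whose last factor is the lm_active x lm_active
        // inverted diagonal; this is consistent only if every active slave node
        // carries exactly one active LM, i.e. lm_active_size == slave_active_size
        // is assumed by this formulation.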
        mCOperator.resize(lm_active_size, slave_active_size, false);     /// The operator used for the active slave block
        mResidualLMActive.resize(lm_active_size, false);                 /// The residual corresponding to the active LM
        mResidualLMInactive.resize(lm_inactive_size, false);             /// The residual corresponding to the inactive LM
        mResidualDisp.resize(total_size);                                /// The residual of the displacements
        mLMActive.resize(lm_active_size, false);                         /// The solution of the active LM
        mLMInactive.resize(lm_inactive_size, false);                     /// The solution of the inactive LM
        mDisp.resize(total_size, false);                                 /// The solution of the displacement
    }

    /**
     * @brief This function extracts from a vector which has the size of the overall r, the part that corresponds to the u-DoFs
     * @param rTotalResidual The total residual of the problem
     * @param ResidualU The vector containing the residual relative to the displacements
     */
    inline void GetUPart(
        const VectorType& rTotalResidual,
        VectorType& ResidualU
        )
    {
        // Auxiliary sizes
        const SizeType other_dof_size = mOtherIndices.size();
        const SizeType master_size = mMasterIndices.size();
        const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
        const SizeType slave_active_size = mSlaveActiveIndices.size();
        const SizeType lm_active_size = mLMActiveIndices.size();
        const SizeType total_size = other_dof_size + master_size + slave_inactive_size + slave_active_size;

        // Resize in case the size is not correct
        if (ResidualU.size() != total_size)
            ResidualU.resize(total_size, false);

        #pragma omp parallel for
        for (int i = 0; i<static_cast<int>(other_dof_size); i++)
            ResidualU[i] = rTotalResidual[mOtherIndices[i]];

        // The corresponding residual for the active slave DoFs
        VectorType aux_res_active_slave(slave_active_size);
        #pragma omp parallel for
        for (int i = 0; i<static_cast<int>(slave_active_size); i++)
            aux_res_active_slave[i] = rTotalResidual[mSlaveActiveIndices[i]];

        if (slave_active_size > 0) {
            // We compute the complementary residual for the master DoFs
            VectorType aux_complement_master_residual(master_size);
            TSparseSpaceType::Mult(mPOperator, aux_res_active_slave, aux_complement_master_residual);

            #pragma omp parallel for
            for (int i = 0; i<static_cast<int>(master_size); i++)
                ResidualU[other_dof_size + i] = rTotalResidual[mMasterIndices[i]] - aux_complement_master_residual[i];
        } else {
            #pragma omp parallel for
            for (int i = 0; i<static_cast<int>(master_size); i++)
                ResidualU[other_dof_size + i] = rTotalResidual[mMasterIndices[i]];
        }

        #pragma omp parallel for
        for (int i = 0; i<static_cast<int>(slave_inactive_size); i++)
            ResidualU[other_dof_size + master_size + i] = rTotalResidual[mSlaveInactiveIndices[i]];

        if (slave_active_size > 0) {
            // We compute the complementary residual for the active LM DoFs
            VectorType aux_complement_active_lm_residual(lm_active_size);
            TSparseSpaceType::Mult(mCOperator, aux_res_active_slave, aux_complement_active_lm_residual);

            #pragma omp parallel for
            for (int i = 0; i<static_cast<int>(lm_active_size); i++)
                ResidualU[other_dof_size + master_size + slave_inactive_size + i] = rTotalResidual[mLMActiveIndices[i]] - aux_complement_active_lm_residual[i];
        } else {
            #pragma omp parallel for
            for (int i = 0; i<static_cast<int>(lm_active_size); i++)
                ResidualU[other_dof_size + master_size + slave_inactive_size + i] = rTotalResidual[mLMActiveIndices[i]];
        }
    }

    /**
     * @brief This function extracts from a vector which has the size of the overall r, the part that corresponds to the active LM DoFs
     * @param rTotalResidual The total residual of the problem
     * @param rResidualLMA The vector containing the residual relative
     * to the active LM
     */
    inline void GetLMAPart(
        const VectorType& rTotalResidual,
        VectorType& rResidualLMA
        )
    {
        // Auxiliary sizes
        const SizeType other_dof_size = mOtherIndices.size();
        const SizeType master_size = mMasterIndices.size();
        const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
        const SizeType slave_active_size = mSlaveActiveIndices.size();

        // Only meaningful when there are active slave DoFs
        if (slave_active_size > 0) {
            // We get the displacement residual of the active slave nodes
            if (rResidualLMA.size() != slave_active_size)
                rResidualLMA.resize(slave_active_size, false);

            #pragma omp parallel for
            for (int i = 0; i<static_cast<int>(rResidualLMA.size()); i++)
                rResidualLMA[i] = rTotalResidual[mSlaveActiveIndices[i]];

            // From the computed displacements we get the components of the displacements for each block
            VectorType disp_N(other_dof_size);
            VectorType disp_M(master_size);
            VectorType disp_SI(slave_inactive_size);
            VectorType disp_SA(slave_active_size);

            #pragma omp parallel for
            for (int i = 0; i<static_cast<int>(other_dof_size); i++)
                disp_N[i] = mDisp[i];

            #pragma omp parallel for
            for (int i = 0; i<static_cast<int>(master_size); i++)
                disp_M[i] = mDisp[other_dof_size + i];

            #pragma omp parallel for
            for (int i = 0; i<static_cast<int>(slave_inactive_size); i++)
                disp_SI[i] = mDisp[other_dof_size + master_size + i];

            #pragma omp parallel for
            for (int i = 0; i<static_cast<int>(slave_active_size); i++)
                disp_SA[i] = mDisp[other_dof_size + master_size + slave_inactive_size + i];

            VectorType aux_mult(slave_active_size);
            TSparseSpaceType::Mult(mKSAN, disp_N, aux_mult);
            TSparseSpaceType::UnaliasedAdd(rResidualLMA, -1.0, aux_mult);
            TSparseSpaceType::Mult(mKSAM, disp_M, aux_mult);
            TSparseSpaceType::UnaliasedAdd(rResidualLMA, -1.0, aux_mult);
            if (slave_inactive_size > 0) {
                TSparseSpaceType::Mult(mKSASI, disp_SI, aux_mult);
                TSparseSpaceType::UnaliasedAdd(rResidualLMA, -1.0, aux_mult);
            }
            TSparseSpaceType::Mult(mKSASA, disp_SA, aux_mult);
            TSparseSpaceType::UnaliasedAdd(rResidualLMA, -1.0, aux_mult);
        }
    }

    /**
     * @brief This function extracts from a vector which has the size of the overall r, the part that corresponds to the inactive LM DoFs
     * @param rTotalResidual The total residual of the problem
     * @param rResidualLMI The vector containing the residual relative to the inactive LM
     */
    inline void GetLMIPart(
        const VectorType& rTotalResidual,
        VectorType& rResidualLMI
        )
    {
        // Auxiliary size
        const SizeType lm_inactive_size = mLMInactiveIndices.size();

        // We get the residual of the inactive LM
        if (rResidualLMI.size() != lm_inactive_size)
            rResidualLMI.resize(lm_inactive_size, false);

        #pragma omp parallel for
        for (int i = 0; i<static_cast<int>(lm_inactive_size); i++)
            rResidualLMI[i] = rTotalResidual[mLMInactiveIndices[i]];
    }

    /**
     * @brief This method writes the displacement part
     * @param rTotalResidual The total residual of the problem
     * @param ResidualU The vector containing the residual relative to the displacements
     */
    inline void SetUPart(
        VectorType& rTotalResidual,
        const VectorType& ResidualU
        )
    {
        #pragma omp parallel for
        for (int i = 0; i<static_cast<int>(mOtherIndices.size()); i++)
            rTotalResidual[mOtherIndices[i]] = ResidualU[i];
        #pragma omp parallel for
        for (int i = 0; i<static_cast<int>(mMasterIndices.size()); i++)
            rTotalResidual[mMasterIndices[i]] = ResidualU[mOtherIndices.size() + i];
        #pragma omp parallel for
        for (int i = 0; i<static_cast<int>(mSlaveInactiveIndices.size()); i++)
            rTotalResidual[mSlaveInactiveIndices[i]] = ResidualU[mOtherIndices.size() + mMasterIndices.size() + i];
        #pragma omp parallel for
        for (int i = 0; i<static_cast<int>(mSlaveActiveIndices.size()); i++)
            rTotalResidual[mSlaveActiveIndices[i]] = ResidualU[mOtherIndices.size() + mMasterIndices.size() + mSlaveInactiveIndices.size() + i];
    }

    /**
     * @brief This method writes the active Lagrange multiplier part
     * @param rTotalResidual The total residual of the problem
     * @param ResidualLMA The vector containing the residual relative to the active LM
     */
    inline void SetLMAPart(
        VectorType& rTotalResidual,
        const VectorType& ResidualLMA
        )
    {
        #pragma omp parallel for
        for (int i = 0; i< static_cast<int>(ResidualLMA.size()); i++)
            rTotalResidual[mLMActiveIndices[i]] = ResidualLMA[i];
    }

    /**
     * @brief This method writes the inactive Lagrange multiplier part
     * @param rTotalResidual The total residual of the problem
     * @param ResidualLMI The vector containing the residual relative to the inactive LM
     */
    inline void SetLMIPart(
        VectorType& rTotalResidual,
        const VectorType& ResidualLMI
        )
    {
        #pragma omp parallel for
        for (int i = 0; i< static_cast<int>(ResidualLMI.size()); i++)
            rTotalResidual[mLMInactiveIndices[i]] = ResidualLMI[i];
    }

    /**
     * @brief This method is intended to ensure that the matrix is structurally symmetric
     * @param rA The matrix to be checked
     */
    void EnsureStructuralSymmetryMatrix(SparseMatrixType& rA)
    {
        // We compute the transposed matrix
        const SizeType size_system_1 = rA.size1();
        const SizeType size_system_2 = rA.size2();
        SparseMatrixType transpose(size_system_2, size_system_1);
        SparseMatrixMultiplicationUtility::TransposeMatrix<SparseMatrixType, SparseMatrixType>(transpose, rA, 0.0);

        // Finally we sum the auxiliary matrices
        SparseMatrixMultiplicationUtility::MatrixAdd<SparseMatrixType, SparseMatrixType>(rA, transpose, 1.0);
    }

    /**
     * @brief This method is intended to check the matrix
     * @param rA The matrix to be checked
     */
    double CheckMatrix(const SparseMatrixType& rA)
    {
        // Get access to A data
        const std::size_t* index1 = rA.index1_data().begin();
        const std::size_t* index2 = rA.index2_data().begin();
        const double* values = rA.value_data().begin();
        double norm = 0.0;
        for (std::size_t i=0; i<rA.size1(); ++i) {
            std::size_t row_begin = index1[i];
            std::size_t row_end = index1[i+1];
            if (row_end - row_begin == 0)
                KRATOS_WARNING("Checking sparse matrix") << "Line " << i << " has no elements" << std::endl;

            for (std::size_t j=row_begin; j<row_end; j++) {
                KRATOS_ERROR_IF( index2[j] > rA.size2() ) << "Array above size of A" << std::endl;
                norm += values[j]*values[j];
            }
        }
        return std::sqrt(norm);
    }

    /**
     * @brief This method is designed to create the final solution sparse matrix from the auxiliary values
     * @details Before creating it, the columns are reordered.
     * It deletes the auxiliary values after computing the matrix
     * @param AuxK The matrix solution
     * @param NRows The number of rows of the matrix
     * @param NCols The number of columns of the matrix
     * @param Ptr The indexes that indicate the number of nonzero values in each row
     * @param AuxIndex2 The indexes of the nonzero columns
     * @param AuxVal The array containing the values of the sparse matrix
     */
    void CreateMatrix(
        SparseMatrixType& AuxK,
        const SizeType NRows,
        const SizeType NCols,
        IndexType* Ptr,
        IndexType* AuxIndex2,
        double* AuxVal
        )
    {
        // We reorder the rows
        SparseMatrixMultiplicationUtility::SortRows(Ptr, NRows, NCols, AuxIndex2, AuxVal);

        // Finally we build the final matrix
        SparseMatrixMultiplicationUtility::CreateSolutionMatrix(AuxK, NRows, NCols, Ptr, AuxIndex2, AuxVal);

        // Release memory
        delete[] Ptr;
        delete[] AuxIndex2;
        delete[] AuxVal;
    }

    /**
     * @brief This method is intended to lump an existing matrix
     * @param rA The matrix to be lumped
     * @param rdiagA The resulting matrix
     * @param Tolerance The tolerance considered to check if the values are almost 0
     * @todo Improve the lumping in the case of a not purely diagonal matrix
     */
    void ComputeDiagonalByLumping(
        const SparseMatrixType& rA,
        SparseMatrixType& rdiagA,
        const double Tolerance = ZeroTolerance
        )
    {
        // Aux values
        const std::size_t size_A = rA.size1();

        // VectorType diagA_vector(size_A);
        //
        // // In case of not pure lumped matrix
        // if (rA.nnz() > size_A) {
        //     // Get access to A data
        //     const std::size_t* index1 = rA.index1_data().begin();
        //     const double* values = rA.value_data().begin();
        //
        //     #pragma omp parallel for
        //     for (int i=0; i< static_cast<int>(size_A); i++) {
        //         const std::size_t row_begin = index1[i];
        //         const std::size_t row_end = index1[i+1];
        //         double temp = 0.0;
        //         for (std::size_t j=row_begin; j<row_end; j++)
        //             temp += values[j]*values[j];
        //
        //         diagA_vector[i] = std::sqrt(temp);
        //     }
        // } else { // Otherwise
        //     #pragma omp parallel for
        //     for (int i=0; i< static_cast<int>(size_A); i++) {
        //         diagA_vector[i] = rA(i, i);
        //     }
        // }

        IndexType* ptr = new IndexType[size_A + 1];
        ptr[0] = 0;
        IndexType* aux_index2 = new IndexType[size_A];
        double* aux_val = new double[size_A];

        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(size_A); i++) {
            ptr[i+1] = i+1;
            aux_index2[i] = i;
            const double value = rA(i, i);
            // const double value = diagA_vector[i];
            if (std::abs(value) > Tolerance)
                aux_val[i] = 1.0/value;
            else // Auxiliary value
                aux_val[i] = 1.0;
        }

        SparseMatrixMultiplicationUtility::CreateSolutionMatrix(rdiagA, size_A, size_A, ptr, aux_index2, aux_val);

        delete[] ptr;
        delete[] aux_index2;
        delete[] aux_val;
    }

    /**
     * @brief Checks if the degree of freedom belongs to a displacement DoF
     * @param rDoF The degree of freedom
     * @return True if the DoF corresponds with a displacement DoF
     */
    static inline bool IsDisplacementDof(const DofType& rDoF)
    {
        const auto& r_variable = rDoF.GetVariable();
        if (r_variable == DISPLACEMENT_X ||
            r_variable == DISPLACEMENT_Y ||
            r_variable == DISPLACEMENT_Z) {
                return true;
        }

        return false;
    }

    /**
     * @brief Checks if the degree of freedom belongs to a LM DoF
     * @param rDoF The degree of freedom
     * @return True if the DoF corresponds with a LM DoF
     */
    static inline bool IsLMDof(const DofType& rDoF)
    {
        const auto& r_variable = rDoF.GetVariable();
        if (r_variable == VECTOR_LAGRANGE_MULTIPLIER_X ||
            r_variable == VECTOR_LAGRANGE_MULTIPLIER_Y ||
            r_variable == VECTOR_LAGRANGE_MULTIPLIER_Z) {
                return true;
        }

        return false;
    }

    /**
     * @brief This method returns the default parameters in order to avoid code duplication
     *
     * @return Returns the default parameters
     */
    Parameters GetDefaultParameters()
    {
        Parameters default_parameters( R"(
        {
            "solver_type"          : "mixed_ulm_linear_solver",
            "tolerance"            : 1.0e-6,
            "max_iteration_number" : 200,
            "echo_level"           : 0
        } )" );

        return default_parameters;
    }

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}
}; // Class MixedULMLinearSolver

///@}
///@name Type Definitions
///@{

// Here one should use the KRATOS_CREATE_LOCAL_FLAG, but it does not play nice with template parameters
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
const Kratos::Flags MixedULMLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>::BLOCKS_ARE_ALLOCATED(Kratos::Flags::Create(0));
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
const Kratos::Flags MixedULMLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>::IS_INITIALIZED(Kratos::Flags::Create(1));

///@}
///@name Input and output
///@{

/// input stream function
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
inline std::istream& operator >> (std::istream& IStream,
                                  MixedULMLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>& rThis)
{
    return IStream;
}

/// output stream function
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
inline std::ostream& operator << (std::ostream& rOStream,
                                  const MixedULMLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);

    return rOStream;
}

///@}

}  // namespace Kratos.

#endif // KRATOS_MIXEDULM_SOLVER_H_INCLUDED defined